Option for using fp16 weights when applying LoRA

Kohaku-Blueleaf
2023-11-21 19:59:34 +08:00
parent b2e039d07b
commit 370a77f8e7
4 changed files with 25 additions and 7 deletions


@@ -178,6 +178,7 @@ def configure_opts_onchange():
shared.opts.onchange("gradio_theme", shared.reload_gradio_theme)
shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False)
shared.opts.onchange("fp8_storage", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)
shared.opts.onchange("cache_fp16_weight", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)
startup_timer.record("opts onchange")