Run a basic torch calculation at startup, in parallel, to reduce the performance impact of the first generation

This commit is contained in:
AUTOMATIC
2023-05-21 21:55:14 +03:00
parent 1f3182924b
commit 8faac8b963
2 changed files with 21 additions and 1 deletions

View File

@@ -20,7 +20,7 @@ import logging
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
from modules import paths, timer, import_hook, errors # noqa: F401
from modules import paths, timer, import_hook, errors, devices # noqa: F401
startup_timer = timer.Timer()
@@ -295,6 +295,8 @@ def initialize_rest(*, reload_script_modules=False):
# (when reloading, this does nothing)
Thread(target=lambda: shared.sd_model).start()
Thread(target=devices.first_time_calculation).start()
shared.reload_hypernetworks()
startup_timer.record("reload hypernetworks")