add more stuff to ignore when creating model from config

prevent .vae.safetensors files from being listed as stable diffusion models
AUTOMATIC
2023-01-10 16:51:04 +03:00
parent 0c3feb202c
commit ce3f639ec8
3 changed files with 56 additions and 9 deletions

modules/sd_models.py

@@ -2,6 +2,7 @@ import collections
 import os.path
 import sys
 import gc
+import time
 from collections import namedtuple
 import torch
 import re
@@ -61,7 +62,7 @@ def find_checkpoint_config(info):
 
 def list_models():
     checkpoints_list.clear()
-    model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"])
+    model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], ext_blacklist=[".vae.safetensors"])
 
     def modeltitle(path, shorthash):
         abspath = os.path.abspath(path)
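
The ext_blacklist parameter itself is handled in modules/modelloader.py, one of the other changed files not shown in this extract. Below is a minimal sketch of the filtering idea, using a hypothetical standalone helper rather than the real load_models signature: files whose names end with a blacklisted suffix such as ".vae.safetensors" are skipped even though they match the ".safetensors" extension filter.

    import os

    def filter_model_files(paths, ext_filter, ext_blacklist=None):
        # Hypothetical helper illustrating the ext_blacklist idea:
        # keep files matching an allowed extension unless the name
        # ends with a blacklisted suffix (e.g. ".vae.safetensors").
        ext_blacklist = ext_blacklist or []
        result = []
        for path in paths:
            name = os.path.basename(path).lower()
            if not any(name.endswith(ext) for ext in ext_filter):
                continue
            if any(name.endswith(bad) for bad in ext_blacklist):
                continue
            result.append(path)
        return result

    # "model.vae.safetensors" is dropped, "model.safetensors" is kept:
    print(filter_model_files(
        ["models/model.safetensors", "models/model.vae.safetensors", "models/model.ckpt"],
        ext_filter=[".ckpt", ".safetensors"],
        ext_blacklist=[".vae.safetensors"],
    ))
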
@@ -288,6 +289,17 @@ def enable_midas_autodownload():
     midas.api.load_model = load_model_wrapper
 
 
+class Timer:
+    def __init__(self):
+        self.start = time.time()
+
+    def elapsed(self):
+        end = time.time()
+        res = end - self.start
+        self.start = end
+        return res
+
+
 def load_model(checkpoint_info=None):
     from modules import lowvram, sd_hijack
     checkpoint_info = checkpoint_info or select_checkpoint()
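
The new Timer class measures successive phases: each call to elapsed() returns the time since the previous call (or since construction) and resets the reference point, so the per-phase durations can simply be summed for a total. A small self-contained usage sketch, with sleeps standing in for the real work:

    import time

    class Timer:  # same class as added in the diff above
        def __init__(self):
            self.start = time.time()

        def elapsed(self):
            end = time.time()
            res = end - self.start
            self.start = end
            return res

    timer = Timer()
    time.sleep(0.2)                       # stand-in for instantiate_from_config(...)
    elapsed_create = timer.elapsed()
    time.sleep(0.3)                       # stand-in for load_model_weights(...)
    elapsed_load_weights = timer.elapsed()
    print(f"done in {elapsed_create + elapsed_load_weights:.1f}s "
          f"({elapsed_create:.1f}s create, {elapsed_load_weights:.1f}s load weights)")
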
@@ -319,11 +331,17 @@ def load_model(checkpoint_info=None):
     if shared.cmd_opts.no_half:
         sd_config.model.params.unet_config.params.use_fp16 = False
 
+    timer = Timer()
+
     with sd_disable_initialization.DisableInitialization():
         sd_model = instantiate_from_config(sd_config.model)
 
+    elapsed_create = timer.elapsed()
+
     load_model_weights(sd_model, checkpoint_info)
 
+    elapsed_load_weights = timer.elapsed()
+
     if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
         lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
     else:
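
sd_disable_initialization.DisableInitialization lives in another of the changed files, not shown in this extract; per the commit title, it now ignores more work while the model is created from its config. The general technique is to temporarily replace weight-initialization routines with no-ops while instantiate_from_config builds the model, since the random weights are immediately overwritten by the checkpoint anyway. A simplified sketch of that pattern, not the actual class:

    import torch

    class DisableWeightInit:
        # Simplified sketch: patch a few torch.nn.init functions to no-ops
        # for the duration of a `with` block, then restore the originals.
        _targets = ["kaiming_uniform_", "uniform_", "normal_"]

        def __enter__(self):
            self.originals = {name: getattr(torch.nn.init, name) for name in self._targets}
            for name in self._targets:
                setattr(torch.nn.init, name, lambda tensor, *args, **kwargs: tensor)
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            for name, fn in self.originals.items():
                setattr(torch.nn.init, name, fn)

    with DisableWeightInit():
        layer = torch.nn.Linear(4, 4)  # built without spending time on real weight init
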
@@ -338,7 +356,9 @@ def load_model(checkpoint_info=None):
 
     script_callbacks.model_loaded_callback(sd_model)
 
-    print("Model loaded.")
+    elapsed_the_rest = timer.elapsed()
+
+    print(f"Model loaded in {elapsed_create + elapsed_load_weights + elapsed_the_rest:.1f}s ({elapsed_create:.1f}s create model, {elapsed_load_weights:.1f}s load weights).")
 
     return sd_model
 
@@ -349,7 +369,7 @@ def reload_model_weights(sd_model=None, info=None):
     if not sd_model:
         sd_model = shared.sd_model
 
-    if sd_model is None: # previous model load failed
+    if sd_model is None:  # previous model load failed
         current_checkpoint_info = None
     else:
         current_checkpoint_info = sd_model.sd_checkpoint_info
@@ -371,6 +391,8 @@ def reload_model_weights(sd_model=None, info=None):
 
     sd_hijack.model_hijack.undo_hijack(sd_model)
 
+    timer = Timer()
+
     try:
         load_model_weights(sd_model, checkpoint_info)
     except Exception as e:
@@ -384,6 +406,8 @@ def reload_model_weights(sd_model=None, info=None):
         if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
             sd_model.to(devices.device)
 
-    print("Weights loaded.")
+    elapsed = timer.elapsed()
+
+    print(f"Weights loaded in {elapsed:.1f}s.")
 
     return sd_model