mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
keep textual inversion dataset latents in CPU memory to save a bit of VRAM
@@ -212,7 +212,10 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps,
 
         with torch.autocast("cuda"):
             c = cond_model([text])
+
+            x = x.to(devices.device)
             loss = shared.sd_model(x.unsqueeze(0), c)[0]
+            del x
 
             losses[embedding.step % losses.shape[0]] = loss.item()
 
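The idea behind the change is a general one: precompute the dataset latents once, keep them in CPU RAM, and copy each latent to the GPU only for the single forward pass that needs it, dropping the device copy right after. A minimal sketch of that pattern in plain PyTorch follows; the model, latent shapes, and names are illustrative assumptions, not the webui's actual code.

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Precomputed latents kept in CPU memory so they occupy no VRAM while idle
# (stand-in for the textual inversion dataset; shapes are made up).
cpu_latents = [torch.randn(4, 64, 64) for _ in range(16)]

def training_step(model, x_cpu, cond):
    # Copy a single latent to the GPU only for the duration of this step.
    x = x_cpu.to(device)
    loss = model(x.unsqueeze(0), cond)[0]
    # Drop the GPU copy immediately; the CPU-resident latent stays in the dataset.
    del x
    return loss

The trade-off is an extra host-to-device copy per step, which is usually negligible next to the forward pass, in exchange for keeping the whole latent dataset out of VRAM.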