keep textual inversion dataset latents in CPU memory to save a bit of VRAM

commit 6785331e22
parent c7543d4940
Author: AUTOMATIC
Date:   2022-10-02 22:59:01 +03:00

3 changed files with 7 additions and 2 deletions


@@ -212,7 +212,10 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps,
         with torch.autocast("cuda"):
             c = cond_model([text])
+
+            x = x.to(devices.device)
             loss = shared.sd_model(x.unsqueeze(0), c)[0]
+            del x
             losses[embedding.step % losses.shape[0]] = loss.item()
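For reference, the pattern this commit applies is: precompute the dataset latents once, keep them in CPU RAM instead of VRAM, and copy each one to the GPU only for the duration of its forward pass, freeing the GPU copy immediately afterwards. A minimal sketch of that pattern, assuming a generic PyTorch setup (the model, latent shapes, and names here are illustrative stand-ins, not the repository's actual objects):

    import torch
    import torch.nn as nn

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Stand-in loss model (hypothetical; the real code calls shared.sd_model).
    model = nn.Sequential(nn.Flatten(), nn.Linear(4 * 64 * 64, 1)).to(device)

    # Latents are precomputed once and kept in CPU memory, so they do not
    # occupy VRAM for the entire training run (placeholder random data).
    cpu_latents = [torch.randn(4, 64, 64) for _ in range(8)]

    for x in cpu_latents:
        x = x.to(device)                     # copy to the GPU only for this step
        loss = model(x.unsqueeze(0)).mean()  # forward pass on the GPU copy
        loss.backward()
        del x                                # drop the GPU copy; the CPU original remains

The trade-off is a small host-to-device transfer per step in exchange for holding only one latent in VRAM at a time, which is what the commit message means by saving "a bit of VRAM".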