Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
keep textual inversion dataset latents in CPU memory to save a bit of VRAM
@@ -8,6 +8,7 @@ from torchvision import transforms
 
 import random
 import tqdm
+from modules import devices
 
 
 class PersonalizedBase(Dataset):
@@ -47,6 +48,7 @@ class PersonalizedBase(Dataset):
             torchdata = torch.moveaxis(torchdata, 2, 0)
 
             init_latent = model.get_first_stage_encoding(model.encode_first_stage(torchdata.unsqueeze(dim=0))).squeeze()
+            init_latent = init_latent.to(devices.cpu)
 
             self.dataset.append((init_latent, filename_tokens))
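The change follows a simple pattern: encode each training image into its latent once on the GPU, then park the result in CPU memory and only move it back to the training device when it is actually needed, so only the latents currently in use occupy VRAM. Below is a minimal sketch of that pattern, not the commit's actual code; LatentDataset, encode_fn, and the device names are hypothetical stand-ins.

import torch
from torch.utils.data import Dataset


class LatentDataset(Dataset):
    """Hypothetical dataset that pre-encodes images to latents but stores them on the CPU."""

    def __init__(self, images, encode_fn, device="cuda"):
        self.device = device
        self.samples = []
        for img in images:
            with torch.no_grad():
                # encode once on the GPU (assumes `img` is already a tensor)
                latent = encode_fn(img.to(device))
            # immediately move the latent to CPU memory so it does not sit in VRAM
            self.samples.append(latent.to("cpu"))

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        # move the latent back to the training device only when this sample is fetched
        return self.samples[idx].to(self.device)

Transferring latents back per item means at most a batch's worth of latents lives on the GPU at any time, which is where the small savings described in the commit message come from.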