Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git (synced 2025-08-04 11:12:35 +00:00)
Merge remote-tracking branch 'origin/dev' into soft-inpainting

# Conflicts:
#	modules/processing.py
modules/processing.py

@@ -149,7 +149,7 @@ class StableDiffusionProcessing:
masks_for_overlay: list = None
eta: float = None
do_not_reload_embeddings: bool = False
denoising_strength: float = 0
denoising_strength: float = None
ddim_discretize: str = None
s_min_uncond: float = None
s_churn: float = None

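These are field declarations on StableDiffusionProcessing; the default for denoising_strength changes from 0 to None, which typically lets later code tell "not set" apart from an explicit zero. A tiny, hypothetical illustration of that distinction with a plain dataclass (not the real class):

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Params:
        # None means "the caller never set a denoising strength"; 0.0 is a real value.
        denoising_strength: Optional[float] = None

    p = Params()
    print("unset" if p.denoising_strength is None else p.denoising_strength)
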
@@ -303,7 +303,7 @@ class StableDiffusionProcessing:
return conditioning

def edit_image_conditioning(self, source_image):
conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method))
conditioning_image = shared.sd_model.encode_first_stage(source_image).mode()

return conditioning_image

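The two conditioning lines above are the old and new bodies of edit_image_conditioning. Judging by the source_image*0.5+0.5 rescale, images_tensor_to_samples expects pixels in [0, 1], while encode_first_stage(...).mode() consumes the usual [-1, 1] tensor directly; both helpers are webui internals and are not reproduced here. A minimal sketch of just the range rescale in plain PyTorch:

    import torch

    def to_unit_range(image: torch.Tensor) -> torch.Tensor:
        # Map a tensor from the [-1, 1] range used by diffusion pipelines
        # to the [0, 1] range expected by image-space helpers.
        return image * 0.5 + 0.5

    x = torch.rand(1, 3, 64, 64) * 2 - 1   # fake image batch in [-1, 1]
    y = to_unit_range(x)
    assert 0.0 <= float(y.min()) and float(y.max()) <= 1.0
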
@@ -537,6 +537,7 @@ class Processed:
self.all_seeds = all_seeds or p.all_seeds or [self.seed]
self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed]
self.infotexts = infotexts or [info]
self.version = program_version()

def js(self):
obj = {

@@ -571,6 +572,7 @@ class Processed:
"job_timestamp": self.job_timestamp,
"clip_skip": self.clip_skip,
"is_using_inpainting_conditioning": self.is_using_inpainting_conditioning,
"version": self.version,
}

return json.dumps(obj)

@@ -713,7 +715,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if p.scripts is not None:
p.scripts.before_process(p)

stored_opts = {k: opts.data[k] for k in p.override_settings.keys()}
stored_opts = {k: opts.data[k] if k in opts.data else opts.get_default(k) for k in p.override_settings.keys() if k in opts.data}

try:
# if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint

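The second stored_opts comprehension above snapshots the current value of every overridden setting (with a fallback to opts.get_default(k)) and ignores override keys that are missing from opts.data, so the later restore step only touches settings that actually exist. A minimal sketch of that snapshot/override/restore pattern with a stand-in options object (Opts, opts and override_settings below are illustrative, not webui's API):

    class Opts:
        def __init__(self, data, defaults):
            self.data = data
            self._defaults = defaults

        def get_default(self, key):
            return self._defaults.get(key)

    opts = Opts(data={"CLIP_stop_at_last_layers": 1}, defaults={"CLIP_stop_at_last_layers": 1})
    override_settings = {"CLIP_stop_at_last_layers": 2, "unknown_setting": 123}

    # snapshot only keys that actually exist in opts.data
    stored_opts = {k: opts.data[k] if k in opts.data else opts.get_default(k)
                   for k in override_settings if k in opts.data}

    opts.data.update({k: v for k, v in override_settings.items() if k in opts.data})  # apply overrides
    # ... generation would happen here ...
    opts.data.update(stored_opts)                                                     # restore
    assert opts.data["CLIP_stop_at_last_layers"] == 1
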
@@ -801,7 +803,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:

infotexts = []
output_images = []

with torch.no_grad(), p.sd_model.ema_scope():
with devices.autocast():
p.init(p.all_prompts, p.all_seeds, p.all_subseeds)

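The block above wraps sampling in torch.no_grad() plus webui's autocast and EMA helpers. A generic version of the inference pattern in plain PyTorch (devices.autocast() and ema_scope() themselves are not reproduced; the model below is a stand-in):

    import torch

    model = torch.nn.Linear(16, 16)   # stand-in for the diffusion model
    x = torch.randn(4, 16)

    # inference only: no gradient tracking, and mixed precision when CUDA is available
    device_type = "cuda" if torch.cuda.is_available() else "cpu"
    with torch.no_grad(), torch.autocast(device_type=device_type, enabled=(device_type == "cuda")):
        y = model(x)

    print(tuple(y.shape))
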
@@ -876,7 +877,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
else:
if opts.sd_vae_decode_method != 'Full':
p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method

# Generate the mask(s) based on similarity between the original and denoised latent vectors
if getattr(p, "image_mask", None) is not None:
# latent_mask = p.nmask[0].float().cpu()

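The comment about generating mask(s) "based on similarity between the original and denoised latent vectors" points at the soft-inpainting logic being merged; the actual computation is not shown in this hunk. Purely as a hypothetical illustration, a per-pixel latent difference can be collapsed across channels and normalized into a [0, 1] blending mask:

    import torch

    def latent_difference_mask(original: torch.Tensor, denoised: torch.Tensor) -> torch.Tensor:
        # original, denoised: latent tensors of shape (C, H, W).
        # Average the absolute channel-wise difference, then normalize to [0, 1].
        diff = (original.float() - denoised.float()).abs().mean(dim=0)
        return (diff - diff.min()) / (diff.max() - diff.min() + 1e-8)

    mask = latent_difference_mask(torch.randn(4, 64, 64), torch.randn(4, 64, 64))
    print(tuple(mask.shape), float(mask.min()), float(mask.max()))
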
@@ -943,6 +943,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:

devices.torch_gc()

state.nextjob()

if p.scripts is not None:
p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)

@@ -1025,7 +1027,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:

devices.torch_gc()

state.nextjob()
if not infotexts:
infotexts.append(Processed(p, []).infotext(p, 0))

p.color_corrections = None

@@ -1211,6 +1214,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):

if not self.enable_hr:
return samples
devices.torch_gc()

if self.latent_scale_mode is None:
decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32)

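decode_latent_batch(...) above is a webui helper that decodes latents one at a time, and the results are stacked and cast to float32 on the CPU before the hires pass. A generic sketch of that decode-one-by-one-then-stack pattern with a stand-in decoder (not the real helper or VAE):

    import torch

    def decode_latents_one_by_one(decode_fn, latents: torch.Tensor, target_device="cpu"):
        # Decoding per sample keeps peak GPU memory low; results land on target_device.
        return [decode_fn(latent.unsqueeze(0)).squeeze(0).to(target_device) for latent in latents]

    decode_fn = lambda z: torch.nn.functional.interpolate(z, scale_factor=8)  # stand-in "VAE decode"
    samples = torch.randn(2, 4, 32, 32)
    decoded_samples = torch.stack(decode_latents_one_by_one(decode_fn, samples)).to(dtype=torch.float32)
    print(tuple(decoded_samples.shape))   # (2, 4, 256, 256)
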
@@ -1220,8 +1224,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
with sd_models.SkipWritingToConfig():
sd_models.reload_model_weights(info=self.hr_checkpoint_info)

devices.torch_gc()

return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts)

def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts):

@@ -1229,7 +1231,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
return samples

self.is_hr_pass = True

target_width = self.hr_upscale_to_x
target_height = self.hr_upscale_to_y

@@ -1318,7 +1319,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)

self.is_hr_pass = False

return decoded_samples

def close(self):