From 0a6628bad0615a640efd99937eb6d10d6d648975 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sat, 6 Jul 2024 10:31:08 +0300
Subject: [PATCH] remove mentions of specific samplers from CFG denoiser code

---
 modules/sd_samplers_cfg_denoiser.py   | 16 +++++++---------
 modules/sd_samplers_timesteps_impl.py |  3 +++
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py
index c8eeedad3..b6fbf3372 100644
--- a/modules/sd_samplers_cfg_denoiser.py
+++ b/modules/sd_samplers_cfg_denoiser.py
@@ -58,6 +58,9 @@ class CFGDenoiser(torch.nn.Module):
         self.model_wrap = None
         self.p = None

+        self.cond_scale_miltiplier = 1.0
+
+        self.need_last_noise_uncond = False
         self.last_noise_uncond = None

         # NOTE: masking before denoising can cause the original latents to be oversmoothed
@@ -162,8 +165,6 @@ class CFGDenoiser(torch.nn.Module):
         # so is_edit_model is set to False to support AND composition.
         is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0

-        is_cfg_pp = 'CFG++' in self.sampler.config.name
-
         conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
         uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)

@@ -277,18 +278,15 @@ class CFGDenoiser(torch.nn.Module):
             denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model)
             cfg_denoised_callback(denoised_params)

-        if is_cfg_pp:
-            self.last_noise_uncond = x_out[-uncond.shape[0]:]
-            self.last_noise_uncond = torch.clone(self.last_noise_uncond)
+        if self.need_last_noise_uncond:
+            self.last_noise_uncond = torch.clone(x_out[-uncond.shape[0]:])

         if is_edit_model:
-            denoised = self.combine_denoised_for_edit_model(x_out, cond_scale)
+            denoised = self.combine_denoised_for_edit_model(x_out, cond_scale * self.cond_scale_miltiplier)
         elif skip_uncond:
             denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0)
-        elif is_cfg_pp:
-            denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale/12.5) # CFG++ scale of (0, 1) maps to (1.0, 12.5)
         else:
-            denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
+            denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale * self.cond_scale_miltiplier)

         # Blend in the original latents (after)
         if not self.mask_before_denoising and self.mask is not None:
diff --git a/modules/sd_samplers_timesteps_impl.py b/modules/sd_samplers_timesteps_impl.py
index 8896cfc9a..180e43899 100644
--- a/modules/sd_samplers_timesteps_impl.py
+++ b/modules/sd_samplers_timesteps_impl.py
@@ -52,6 +52,9 @@ def ddim_cfgpp(model, x, timesteps, extra_args=None, callback=None, disable=None
     sqrt_one_minus_alphas = torch.sqrt(1 - alphas)
     sigmas = eta * np.sqrt((1 - alphas_prev.cpu().numpy()) / (1 - alphas.cpu()) * (1 - alphas.cpu() / alphas_prev.cpu().numpy()))

+    model.cond_scale_miltiplier = 1 / 12.5
+    model.need_last_noise_uncond = True
+
     extra_args = {} if extra_args is None else extra_args
     s_in = x.new_ones((x.shape[0]))
     s_x = x.new_ones((x.shape[0], 1, 1, 1))
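
For illustration, a minimal sketch of the pattern this patch establishes: the CFG
denoiser exposes generic attributes, and any sampler that needs non-standard guidance
configures them itself, instead of the denoiser checking sampler names
('CFG++' in self.sampler.config.name). ToyCFGDenoiser and configure_for_cfgpp below
are invented names, not code from the repository, and the sketch runs the cond and
uncond passes separately rather than batched as the real CFGDenoiser does;
cond_scale_miltiplier keeps the spelling used in the patch.

import torch


class ToyCFGDenoiser(torch.nn.Module):
    """Hypothetical stand-in for CFGDenoiser, reduced to the hooks added by this patch."""

    def __init__(self, inner_model):
        super().__init__()
        self.inner_model = inner_model
        # Defaults reproduce plain CFG; samplers override these as needed.
        self.cond_scale_miltiplier = 1.0  # sic -- spelled as in the patch
        self.need_last_noise_uncond = False
        self.last_noise_uncond = None

    def combine_denoised(self, denoised_cond, denoised_uncond, cond_scale):
        # Standard classifier-free guidance combination.
        return denoised_uncond + (denoised_cond - denoised_uncond) * cond_scale

    def forward(self, x, sigma, cond, uncond, cond_scale):
        denoised_cond = self.inner_model(x, sigma, cond)
        denoised_uncond = self.inner_model(x, sigma, uncond)

        if self.need_last_noise_uncond:
            # A sampler that set this flag (e.g. ddim_cfgpp in the patch) reads
            # the captured unconditional prediction back after each call.
            self.last_noise_uncond = torch.clone(denoised_uncond)

        return self.combine_denoised(denoised_cond, denoised_uncond, cond_scale * self.cond_scale_miltiplier)


def configure_for_cfgpp(model):
    # Sampler-side setup, mirroring what ddim_cfgpp does in the patch:
    # CFG++ guidance scales of (0, 1) map to UI cond_scale values of (1.0, 12.5).
    model.cond_scale_miltiplier = 1 / 12.5
    model.need_last_noise_uncond = True

Keeping sampler-specific constants such as the 1/12.5 mapping in the sampler module
means CFGDenoiser no longer needs to know which samplers exist; a new sampler opts in
by setting the two attributes before sampling starts.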