Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
add token merging options to infotext when necessary. Bump tomesd version
@@ -31,6 +31,12 @@ from einops import repeat, rearrange
from blendmodes.blend import blendLayers, BlendType
import tomesd

# add a logger for the processing module
logger = logging.getLogger(__name__)
# manually set output level here since there is no option to do so yet through launch options
# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s')


# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
opt_f = 8
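For reference, the tomesd library imported above exposes a simple patch/unpatch API. The following is a minimal sketch of that API, not code from this commit; the model argument and the option values are illustrative, and the webui's actual call is made inside sd_models.apply_token_merging, which is not shown in this excerpt.

import tomesd

def patch_model_with_tome(model):
    # Hedged sketch, not the webui's code: apply ToMe token merging to a
    # loaded Stable Diffusion model with values mirroring the options above.
    tomesd.apply_patch(
        model,
        ratio=0.5,              # "Token merging ratio": fraction of tokens merged
        sx=2, sy=2,             # "Token merging stride x/y": merging grid stride
        use_rand=True,          # "Token merging random": randomize the kept token
        merge_attn=True,        # "Token merging merge attention"
        merge_crossattn=False,  # "Token merging merge cross attention"
        merge_mlp=False,        # "Token merging merge mlp"
    )

def unpatch_model(model):
    # Removes the ToMe hooks and restores the original forward passes.
    tomesd.remove_patch(model)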
@@ -477,6 +483,14 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
        "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
        "Clip skip": None if clip_skip <= 1 else clip_skip,
        "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta,
        "Token merging ratio": None if not (opts.token_merging or cmd_opts.token_merging) or opts.token_merging_hr_only else opts.token_merging_ratio,
        "Token merging ratio hr": None if not (opts.token_merging or cmd_opts.token_merging) else opts.token_merging_ratio_hr,
        "Token merging random": None if opts.token_merging_random is False else opts.token_merging_random,
        "Token merging merge attention": None if opts.token_merging_merge_attention is True else opts.token_merging_merge_attention,
        "Token merging merge cross attention": None if opts.token_merging_merge_cross_attention is False else opts.token_merging_merge_cross_attention,
        "Token merging merge mlp": None if opts.token_merging_merge_mlp is False else opts.token_merging_merge_mlp,
        "Token merging stride x": None if opts.token_merging_stride_x == 2 else opts.token_merging_stride_x,
        "Token merging stride y": None if opts.token_merging_stride_y == 2 else opts.token_merging_stride_y
    }

    generation_params.update(p.extra_generation_params)
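The None values above follow the infotext convention used elsewhere in create_infotext: entries whose value is None are dropped when the parameters line is built, so only non-default token merging settings are recorded. A small illustrative sketch of that filtering, with hypothetical values rather than code from the webui:

generation_params = {
    "Token merging ratio": 0.5,      # non-default value -> written to infotext
    "Token merging random": None,    # default -> omitted
    "Token merging stride x": None,  # default stride of 2 -> omitted
}
infotext = ", ".join(f"{k}: {v}" for k, v in generation_params.items() if v is not None)
print(infotext)  # Token merging ratio: 0.5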
@@ -502,16 +516,16 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
            sd_vae.reload_vae_weights()

        if (opts.token_merging or cmd_opts.token_merging) and not opts.token_merging_hr_only:
            print("\nApplying token merging\n")
            sd_models.apply_token_merging(sd_model=p.sd_model, hr=False)
            logger.debug('Token merging applied')

        res = process_images_inner(p)

    finally:
        # undo model optimizations made by tomesd
        if opts.token_merging or cmd_opts.token_merging:
            print('\nRemoving token merging model optimizations\n')
            tomesd.remove_patch(p.sd_model)
            logger.debug('Token merging model optimizations removed')

        # restore opts to original state
        if p.override_settings_restore_afterwards:
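The patch is applied before process_images_inner runs and removed in the finally block, so an exception during generation cannot leave the model permanently patched. A condensed sketch of that pattern with hypothetical stand-in names (generate stands in for process_images_inner):

import tomesd

def run_with_token_merging(sd_model, generate, enabled=True, hr_only=False, ratio=0.5):
    # Hedged sketch of the apply/teardown pattern in process_images above.
    try:
        if enabled and not hr_only:
            tomesd.apply_patch(sd_model, ratio=ratio)  # patch for the first pass
        return generate(sd_model)
    finally:
        if enabled:
            tomesd.remove_patch(sd_model)  # always undone, even after an error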
@@ -954,11 +968,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
            # case where user wants to use separate merge ratios
            if not opts.token_merging_hr_only:
                # clean patch done by first pass. (clobbering the first patch might be fine? this might be excessive)
                print('Temporarily reverting token merging optimizations in preparation for next pass')
                tomesd.remove_patch(self.sd_model)
                logger.debug('Temporarily removed token merging optimizations in preparation for next pass')

            print("\nApplying token merging for high-res pass\n")
            sd_models.apply_token_merging(sd_model=self.sd_model, hr=True)
            logger.debug('Applied token merging for high-res pass')

        samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)
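When merging is restricted to the high-res pass only, the first pass runs unpatched and only this branch applies tomesd; otherwise the first-pass patch is stripped before the model is re-patched with the separate high-res ratio. A sketch of that repatch step with hypothetical names (in the webui the ratios come from opts.token_merging_ratio and opts.token_merging_ratio_hr via sd_models.apply_token_merging):

import tomesd

def repatch_for_hires_pass(sd_model, hr_only, hr_ratio):
    # Hedged sketch of the high-res branch above, not the webui's code.
    if not hr_only:
        tomesd.remove_patch(sd_model)             # drop the first-pass patch
    tomesd.apply_patch(sd_model, ratio=hr_ratio)  # re-patch for the hires pass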