Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
Add CPU fp8 support
Since norm layers need fp32, I only convert the linear-operation layers (Conv2d/Linear). The TE also uses some PyTorch functions that do not support bf16 autocast on CPU, so I added a condition indicating whether the autocast is for the unet.
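As a rough illustration of that autocast condition (a minimal sketch only, not the actual devices.py change from this commit; the helper name and the bf16 choice are assumptions), the idea is to enable CPU autocast for the unet while letting the text encoder run at full precision:

import torch

def cpu_autocast(for_unet: bool):
    # Hypothetical helper: enable CPU bf16 autocast only for the unet.
    # Some text-encoder ops lack bf16 autocast support on CPU, so the TE
    # path runs with autocast disabled (i.e. plain fp32).
    return torch.autocast("cpu", dtype=torch.bfloat16, enabled=for_unet)

# Usage sketch:
# with cpu_autocast(for_unet=True):   # unet forward pass
#     ...
# with cpu_autocast(for_unet=False):  # text-encoder forward pass stays fp32
#     ...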
modules/sd_models.py

@@ -391,12 +391,24 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer):
         devices.dtype_unet = torch.float16
         timer.record("apply half()")
 
-    if shared.cmd_opts.opt_unet_fp8_storage:
-        model.model.diffusion_model = model.model.diffusion_model.to(torch.float8_e4m3fn)
-        timer.record("apply fp8 unet")
-    elif model.is_sdxl and shared.cmd_opts.opt_unet_fp8_storage_xl:
-        model.model.diffusion_model = model.model.diffusion_model.to(torch.float8_e4m3fn)
-        timer.record("apply fp8 unet for sdxl")
+    enable_fp8 = False
+    if shared.cmd_opts.opt_unet_fp8_storage:
+        enable_fp8 = True
+    elif model.is_sdxl and shared.cmd_opts.opt_unet_fp8_storage_xl:
+        enable_fp8 = True
+
+    if enable_fp8:
+        devices.fp8 = True
+        if devices.device == devices.cpu:
+            for module in model.model.diffusion_model.modules():
+                if isinstance(module, torch.nn.Conv2d):
+                    module.to(torch.float8_e4m3fn)
+                elif isinstance(module, torch.nn.Linear):
+                    module.to(torch.float8_e4m3fn)
+            timer.record("apply fp8 unet for cpu")
+        else:
+            model.model.diffusion_model = model.model.diffusion_model.to(torch.float8_e4m3fn)
+            timer.record("apply fp8 unet")
 
     devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
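The CPU branch above casts layer by layer rather than calling .to() on the whole unet, so that norm layers keep fp32. A self-contained sketch of the same technique (hypothetical helper and toy model; assumes PyTorch 2.1+, where torch.float8_e4m3fn exists):

import torch

def cast_linear_ops_to_fp8(model: torch.nn.Module) -> torch.nn.Module:
    # Cast only the linear-operation layers (Conv2d/Linear) to fp8 storage;
    # norm layers are skipped and keep their original dtype.
    for module in model.modules():
        if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
            module.to(torch.float8_e4m3fn)  # nn.Module.to casts parameters in place
    return model

net = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, kernel_size=3),
    torch.nn.GroupNorm(num_groups=2, num_channels=8),
    torch.nn.Flatten(),
    torch.nn.Linear(8 * 30 * 30, 4),
)
cast_linear_ops_to_fp8(net)
print(net[0].weight.dtype)  # torch.float8_e4m3fn
print(net[1].weight.dtype)  # torch.float32 (norm layer untouched)

Note that float8_e4m3fn serves as a storage dtype here: most kernels cannot compute in fp8, so weights have to be upcast before the actual conv/matmul, which is presumably what the devices.fp8 flag coordinates elsewhere in the webui.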