mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
Add CPU fp8 support

Since norm layers need fp32, I only convert the linear-operation layers (Conv2d/Linear). The text encoder (TE) also has some PyTorch functions that don't support bf16 autocast on CPU, so I added a flag to autocast() to indicate whether it is being used for the unet.
commit eaa9f5162f (parent 5f9ddfa46f)
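To make the storage trick concrete, here is a minimal sketch (the `to_fp8_storage` helper is illustrative, not webui code; assumes PyTorch >= 2.1 for `torch.float8_e4m3fn`) of converting only the Conv2d/Linear weights while norm layers stay in fp32:

```python
import torch
import torch.nn as nn

def to_fp8_storage(model: nn.Module) -> nn.Module:
    # Only matmul/conv weights move to fp8 storage; norm layers are
    # skipped because they need full precision.
    for module in model.modules():
        if isinstance(module, (nn.Conv2d, nn.Linear)):
            module.to(torch.float8_e4m3fn)  # in-place dtype conversion
    return model

block = to_fp8_storage(nn.Sequential(nn.Linear(64, 64), nn.LayerNorm(64)))
print(block[0].weight.dtype)  # torch.float8_e4m3fn
print(block[1].weight.dtype)  # torch.float32 (norm layer untouched)
```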
modules/devices.py:

```diff
@@ -71,6 +71,7 @@ def enable_tf32():
 errors.run(enable_tf32, "Enabling TF32")
 
 cpu: torch.device = torch.device("cpu")
+fp8: bool = False
 device: torch.device = None
 device_interrogate: torch.device = None
 device_gfpgan: torch.device = None
```
```diff
@@ -93,10 +94,13 @@ def cond_cast_float(input):
 nv_rng = None
 
 
-def autocast(disable=False):
+def autocast(disable=False, unet=False):
     if disable:
         return contextlib.nullcontext()
 
+    if unet and fp8 and device==cpu:
+        return torch.autocast("cpu", dtype=torch.bfloat16, enabled=True)
+
     if dtype == torch.float32 or shared.cmd_opts.precision == "full":
         return contextlib.nullcontext()
```
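Why returning a bf16 autocast context is enough on CPU: under autocast, eligible ops such as `F.linear` cast their floating-point arguments, including fp8-stored weights, to the autocast dtype before computing. A minimal sketch of that behavior (assumes PyTorch >= 2.1; outside the context, the mixed fp32/fp8 call would fail):

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 64)                           # fp32 activation
w = torch.randn(64, 64).to(torch.float8_e4m3fn)  # fp8 storage weight

# Inside the bf16 autocast region, both arguments are upcast to bfloat16
# before the matmul runs, so fp8 acts purely as a storage format.
with torch.autocast("cpu", dtype=torch.bfloat16, enabled=True):
    y = F.linear(x, w)

print(y.dtype)  # torch.bfloat16
```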
modules/processing.py:

```diff
@@ -865,7 +865,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             if p.n_iter > 1:
                 shared.state.job = f"Batch {n+1} out of {p.n_iter}"
 
-            with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
+            with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast(unet=True):
                 samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
 
             if getattr(samples_ddim, 'already_decoded', False):
```
modules/sd_models.py:

```diff
@@ -391,12 +391,24 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer):
 
         devices.dtype_unet = torch.float16
         timer.record("apply half()")
 
     if shared.cmd_opts.opt_unet_fp8_storage:
-        model.model.diffusion_model = model.model.diffusion_model.to(torch.float8_e4m3fn)
-        timer.record("apply fp8 unet")
-    elif model.is_sdxl and shared.cmd_opts.opt_unet_fp8_storage_xl:
-        model.model.diffusion_model = model.model.diffusion_model.to(torch.float8_e4m3fn)
-        timer.record("apply fp8 unet for sdxl")
+        enable_fp8 = True
+    elif model.is_sdxl and shared.cmd_opts.opt_unet_fp8_storage_xl:
+        enable_fp8 = True
+
+    if enable_fp8:
+        devices.fp8 = True
+        if devices.device == devices.cpu:
+            for module in model.model.diffusion_model.modules():
+                if isinstance(module, torch.nn.Conv2d):
+                    module.to(torch.float8_e4m3fn)
+                elif isinstance(module, torch.nn.Linear):
+                    module.to(torch.float8_e4m3fn)
+            timer.record("apply fp8 unet for cpu")
+        else:
+            model.model.diffusion_model = model.model.diffusion_model.to(torch.float8_e4m3fn)
+            timer.record("apply fp8 unet")
 
     devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
```
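For reference, the same load-time branching as a standalone sketch (function and parameter names here are illustrative, not webui API). One caveat: the diff reads `enable_fp8` without initializing it, so this sketch adds an explicit `False` default to avoid a NameError when neither storage flag is set.

```python
import torch
import torch.nn as nn

def apply_fp8_storage(diffusion_model: nn.Module, *, fp8: bool,
                      fp8_xl: bool, is_sdxl: bool, on_cpu: bool) -> nn.Module:
    enable_fp8 = False
    if fp8 or (is_sdxl and fp8_xl):
        enable_fp8 = True

    if not enable_fp8:
        return diffusion_model

    if on_cpu:
        # Norm layers need fp32 on CPU, so convert only conv/linear weights.
        for module in diffusion_model.modules():
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                module.to(torch.float8_e4m3fn)
        return diffusion_model

    # Off CPU, the whole unet can be stored in fp8.
    return diffusion_model.to(torch.float8_e4m3fn)
```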