diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index 8f29057a9..89634fbf5 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -279,7 +279,9 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
         original_mean = z.mean()
         z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
         new_mean = z.mean()
-        z = z * (original_mean / new_mean)
+
+        if not getattr(opts, "disable_normalize_embeddings", False):
+            z = z * (original_mean / new_mean)
 
         if pooled is not None:
             z.pooled = pooled
diff --git a/modules/shared_options.py b/modules/shared_options.py
index bdd066c4a..417a42b28 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -155,6 +155,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion", "sd"), {
     "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"),
     "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds").needs_reload_ui(),
     "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"),
+    "disable_normalize_embeddings": OptionInfo(False, "Disable normalize embeddings").info("do not normalize embeddings after calculating emphasis; may help prevent artifacts in SDXL"),
     "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
     "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
     "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}, infotext="Clip skip").link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
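
For context, here is a minimal standalone sketch of the step this patch makes optional: emphasis multipliers scale each token's CLIP embedding, and by default the result is then rescaled so its overall mean matches the pre-emphasis mean; the new option skips that rescaling. The helper name apply_emphasis, the tensor shapes, and the plain boolean flag are illustrative assumptions, not the webui code itself; in the webui the setting is read from opts as disable_normalize_embeddings.

import torch

def apply_emphasis(z: torch.Tensor, batch_multipliers: torch.Tensor,
                   disable_normalize_embeddings: bool = False) -> torch.Tensor:
    # Sketch only. z: CLIP token embeddings, shape (batch, tokens, dim);
    # batch_multipliers: per-token emphasis weights, shape (batch, tokens).
    original_mean = z.mean()

    # Scale each token's embedding by its emphasis weight, e.g. from "(text:1.2)".
    z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)

    # By default, rescale so the overall mean matches the pre-emphasis mean;
    # with the flag set (mirroring the new option), the scaled embeddings are left as-is.
    if not disable_normalize_embeddings:
        new_mean = z.mean()
        z = z * (original_mean / new_mean)

    return z

if __name__ == "__main__":
    z = torch.randn(1, 77, 768)
    multipliers = torch.ones(1, 77)
    multipliers[0, 5:10] = 1.2  # emphasize a few tokens

    renormalized = apply_emphasis(z, multipliers)
    raw = apply_emphasis(z, multipliers, disable_normalize_embeddings=True)
    print(renormalized.mean().item(), raw.mean().item())  # the first stays near z.mean()

Renormalizing keeps the conditioning's overall mean close to the unweighted one, while disabling it preserves the raw effect of the multipliers, which is the trade-off the new option exposes and which the option description suggests can help avoid artifacts in SDXL.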