diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index 3dcef5a6e..2ed1d2739 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -14,8 +14,11 @@ import modules.images
 
 def load_model(filename):
     # this code is adapted from https://github.com/xinntao/ESRGAN
-
-    pretrained_net = torch.load(filename)
+    if torch.has_mps:
+        map_l = 'cpu'
+    else:
+        map_l = None
+    pretrained_net = torch.load(filename, map_location=map_l)
     crt_model = arch.RRDBNet(3, 3, 64, 23, gc=32)
 
     if 'conv_first.weight' in pretrained_net:
diff --git a/modules/img2img.py b/modules/img2img.py
index 3129798da..c23923050 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -1,5 +1,7 @@
 import math
-from PIL import Image
+import cv2
+import numpy as np
+from PIL import Image, ImageOps, ImageChops
 
 from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
 from modules.shared import opts, state
@@ -16,7 +18,9 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
 
     if is_inpaint:
         image = init_img_with_mask['image']
-        mask = init_img_with_mask['mask']
+        alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
+        mask = ImageChops.lighter(alpha_mask, init_img_with_mask['mask'].convert('L')).convert('RGBA')
+        image = image.convert('RGB')
     else:
         image = init_img
         mask = None
@@ -57,8 +61,19 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
 
         state.job_count = n_iter
 
+        do_color_correction = False
+        try:
+            from skimage import exposure
+            do_color_correction = True
+        except:
+            print("Install scikit-image to perform color correction on loopback")
+
+
         for i in range(n_iter):
+            if do_color_correction and i == 0:
+                correction_target = cv2.cvtColor(np.asarray(init_img.copy()), cv2.COLOR_RGB2LAB)
+
             p.n_iter = 1
             p.batch_size = 1
             p.do_not_save_grid = True
@@ -69,8 +84,20 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
             if initial_seed is None:
                 initial_seed = processed.seed
                 initial_info = processed.info
+
+            init_img = processed.images[0]
 
-            p.init_images = [processed.images[0]]
+            if do_color_correction and correction_target is not None:
+                init_img = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
+                    cv2.cvtColor(
+                        np.asarray(init_img),
+                        cv2.COLOR_RGB2LAB
+                    ),
+                    correction_target,
+                    channel_axis=2
+                ), cv2.COLOR_LAB2RGB).astype("uint8"))
+
+            p.init_images = [init_img]
             p.seed = processed.seed + 1
             p.denoising_strength = max(p.denoising_strength * 0.95, 0.1)
             history.append(processed.images[0])
diff --git a/modules/lowvram.py b/modules/lowvram.py
index 4b78deab7..bd1174915 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -2,9 +2,12 @@ import torch
 
 module_in_gpu = None
 cpu = torch.device("cpu")
-gpu = torch.device("cuda")
-device = gpu if torch.cuda.is_available() else cpu
-
+if torch.has_cuda:
+    device = gpu = torch.device("cuda")
+elif torch.has_mps:
+    device = gpu = torch.device("mps")
+else:
+    device = gpu = torch.device("cpu")
 
 def setup_for_low_vram(sd_model, use_medvram):
     parents = {}
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 2d26b5f71..1084e2484 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -232,7 +232,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
         z = outputs.last_hidden_state
 
         # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
-        batch_multipliers = torch.asarray(np.array(batch_multipliers)).to(device)
+        batch_multipliers = torch.asarray(batch_multipliers).to(device)
         original_mean = z.mean()
         z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
         new_mean = z.mean()
diff --git a/modules/shared.py b/modules/shared.py
index beb6f9bb0..e529ec27a 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -36,9 +36,12 @@ parser.add_argument("--opt-split-attention", action='store_true', help="enable o
 parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
 cmd_opts = parser.parse_args()
 
-cpu = torch.device("cpu")
-gpu = torch.device("cuda")
-device = gpu if torch.cuda.is_available() else cpu
+if torch.has_cuda:
+    device = torch.device("cuda")
+elif torch.has_mps:
+    device = torch.device("mps")
+else:
+    device = torch.device("cpu")
 
 batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
 parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
diff --git a/modules/ui.py b/modules/ui.py
index f5564d0ef..b1a8c776d 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -323,7 +323,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                 with gr.Group():
                     switch_mode = gr.Radio(label='Mode', elem_id="img2img_mode", choices=['Redraw whole image', 'Inpaint a part of image', 'Loopback', 'SD upscale'], value='Redraw whole image', type="index", show_label=False)
                     init_img = gr.Image(label="Image for img2img", source="upload", interactive=True, type="pil")
-                    init_img_with_mask = gr.Image(label="Image for inpainting with mask", elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", visible=False)
+                    init_img_with_mask = gr.Image(label="Image for inpainting with mask", elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", visible=False, image_mode="RGBA")
                     resize_mode = gr.Radio(label="Resize mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
 
                 steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
diff --git a/requirements.txt b/requirements.txt
index c9e3f2fce..ba1bc2815 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,5 +10,6 @@ omegaconf
 pytorch_lightning
 diffusers
 invisible-watermark
+scikit-image
 git+https://github.com/crowsonkb/k-diffusion.git
 git+https://github.com/TencentARC/GFPGAN.git
diff --git a/requirements_versions.txt b/requirements_versions.txt
index 177c6b58e..e8a7470cd 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -8,3 +8,4 @@ torch
 transformers==4.19.2
 omegaconf==2.1.1
 pytorch_lightning==1.7.2
+scikit-image==0.19.2
diff --git a/script.js b/script.js
index 51ace27fc..f2cd8877e 100644
--- a/script.js
+++ b/script.js
@@ -172,3 +172,19 @@ function submit(){
     }
     return res
 }
+
+window.addEventListener('paste', e => {
+    const files = e.clipboardData.files;
+    if (!files || files.length !== 1) {
+        return;
+    }
+    if (!['image/png', 'image/gif', 'image/jpeg'].includes(files[0].type)) {
+        return;
+    }
+    [...gradioApp().querySelectorAll('input[type=file][accept="image/x-png,image/gif,image/jpeg"]')]
+        .filter(input => !input.matches('.\\!hidden input[type=file]'))
+        .forEach(input => {
+            input.files = files;
+            input.dispatchEvent(new Event('change'))
+        });
+});
diff --git a/webui.bat b/webui.bat
index 055a19b04..0de2ab88d 100644
--- a/webui.bat
+++ b/webui.bat
@@ -35,7 +35,7 @@ echo Unable to create venv in directory %VENV_DIR%
 goto :show_stdout_stderr
 
 :activate_venv
-set PYTHON=%~dp0%VENV_DIR%\Scripts\Python.exe
+set PYTHON="%~dp0%VENV_DIR%\Scripts\Python.exe"
 %PYTHON% --version
 echo venv %PYTHON%
 goto :install_torch
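
Note on the loopback color correction added in modules/img2img.py: the idea is to histogram-match every newly generated frame to the original init image in LAB space, so colors do not drift toward magenta over successive loopback passes. The following is a minimal standalone sketch of that step, not part of the patch itself; the helper name apply_color_correction is illustrative, and it assumes opencv-python, numpy, Pillow, and scikit-image >= 0.19 (for the channel_axis argument) are installed, as the updated requirements imply.

# Standalone sketch of the loopback color-correction step (illustrative only).
import cv2
import numpy as np
from PIL import Image
from skimage import exposure  # scikit-image >= 0.19 provides channel_axis


def apply_color_correction(new_frame: Image.Image, original: Image.Image) -> Image.Image:
    # Convert both images to LAB so lightness and color are matched on separate channels.
    frame_lab = cv2.cvtColor(np.asarray(new_frame.convert("RGB")), cv2.COLOR_RGB2LAB)
    target_lab = cv2.cvtColor(np.asarray(original.convert("RGB")), cv2.COLOR_RGB2LAB)

    # Match the per-channel histograms of the new frame to the original init image.
    matched = exposure.match_histograms(frame_lab, target_lab, channel_axis=2)

    # Convert back to RGB and return a PIL image, which is what p.init_images expects.
    return Image.fromarray(cv2.cvtColor(matched.astype("uint8"), cv2.COLOR_LAB2RGB))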