Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git

Commit 61785cef65: Merge remote-tracking branch 'origin/master'
modules/esrgan_model.py

@@ -14,8 +14,11 @@ import modules.images

 def load_model(filename):
     # this code is adapted from https://github.com/xinntao/ESRGAN
-    pretrained_net = torch.load(filename)
+    if torch.has_mps:
+        map_l = 'cpu'
+    else:
+        map_l = None
+    pretrained_net = torch.load(filename, map_location=map_l)
     crt_model = arch.RRDBNet(3, 3, 64, 23, gc=32)

     if 'conv_first.weight' in pretrained_net:
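The change above loads ESRGAN weights through the CPU when running on Apple's MPS backend, because checkpoints saved from CUDA tensors cannot be deserialized directly onto MPS. A minimal sketch of the same pattern, generalized to a hypothetical checkpoint path (model.pth is illustrative, not from the repo):

    import torch

    # Deserialize to CPU when no CUDA device is present, then move the
    # weights to the target device afterwards; mirrors the diff above.
    map_location = None if torch.cuda.is_available() else 'cpu'
    state_dict = torch.load('model.pth', map_location=map_location)

Note that this sketch keys on the runtime check torch.cuda.is_available() rather than the build-time torch.has_mps flag used in the diff.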
modules/img2img.py

@@ -1,5 +1,7 @@
 import math
-from PIL import Image
+import cv2
+import numpy as np
+from PIL import Image, ImageOps, ImageChops

 from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
 from modules.shared import opts, state
@@ -16,7 +18,9 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index

     if is_inpaint:
         image = init_img_with_mask['image']
-        mask = init_img_with_mask['mask']
+        alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
+        mask = ImageChops.lighter(alpha_mask, init_img_with_mask['mask'].convert('L')).convert('RGBA')
+        image = image.convert('RGB')
     else:
         image = init_img
         mask = None
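The inpainting path now builds its mask from two sources: pixels the user erased to transparency in the uploaded RGBA image, and the mask drawn with the sketch tool. ImageChops.lighter takes the per-pixel maximum, which for masks is the union of the two. A standalone sketch of that compositing (the function name and arguments are illustrative, not from the repo):

    from PIL import Image, ImageChops, ImageOps

    def combine_masks(image, drawn_mask):
        # Invert the alpha channel so transparent (erased) pixels become
        # white, then threshold to a hard black-and-white mask.
        alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(
            lambda x: 255 if x > 0 else 0, mode='1')
        # lighter() is a per-pixel max: the union of both mask sources.
        return ImageChops.lighter(alpha_mask.convert('L'),
                                  drawn_mask.convert('L')).convert('RGBA')

The sketch converts alpha_mask back to 'L' before ImageChops.lighter, since lighter expects both operands in the same mode.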
@@ -57,8 +61,19 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index

         state.job_count = n_iter

+        do_color_correction = False
+        try:
+            from skimage import exposure
+            do_color_correction = True
+        except:
+            print("Install scikit-image to perform color correction on loopback")
+
+
         for i in range(n_iter):

+            if do_color_correction and i == 0:
+                correction_target = cv2.cvtColor(np.asarray(init_img.copy()), cv2.COLOR_RGB2LAB)
+
             p.n_iter = 1
             p.batch_size = 1
             p.do_not_save_grid = True
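Making scikit-image optional keeps loopback usable on installs that lack it; the feature flag simply stays False. A generic sketch of the same guard, narrowed to the specific exception instead of the bare except used in the diff, so unrelated errors are not silently swallowed:

    # Optional-dependency guard, as in the hunk above.
    try:
        from skimage import exposure
        do_color_correction = True
    except ImportError:
        do_color_correction = False
        print("Install scikit-image to perform color correction on loopback")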
@@ -70,7 +85,19 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
                 initial_seed = processed.seed
                 initial_info = processed.info

-            p.init_images = [processed.images[0]]
+            init_img = processed.images[0]
+
+            if do_color_correction and correction_target is not None:
+                init_img = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
+                    cv2.cvtColor(
+                        np.asarray(init_img),
+                        cv2.COLOR_RGB2LAB
+                    ),
+                    correction_target,
+                    channel_axis=2
+                ), cv2.COLOR_LAB2RGB).astype("uint8"))
+
+            p.init_images = [init_img]
             p.seed = processed.seed + 1
             p.denoising_strength = max(p.denoising_strength * 0.95, 0.1)
             history.append(processed.images[0])
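Each loopback pass re-encodes the previous output, so the image accumulates a color cast over iterations; matching every frame's histogram against the LAB representation of the original image counteracts that drift. A self-contained sketch of the correction (match_color is an illustrative name):

    import cv2
    import numpy as np
    from PIL import Image
    from skimage import exposure

    def match_color(img, reference):
        # Both inputs are RGB PIL images; work in LAB so luminance and
        # color shifts are corrected on separate channels.
        target = cv2.cvtColor(np.asarray(reference), cv2.COLOR_RGB2LAB)
        lab = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2LAB)
        matched = exposure.match_histograms(lab, target, channel_axis=2)
        return Image.fromarray(
            cv2.cvtColor(matched.astype('uint8'), cv2.COLOR_LAB2RGB))

channel_axis is the scikit-image 0.19 spelling of this parameter; older releases used multichannel=True, which is why requirements_versions.txt pins scikit-image==0.19.2 below.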
modules/lowvram.py

@@ -2,9 +2,12 @@ import torch

 module_in_gpu = None
 cpu = torch.device("cpu")
-gpu = torch.device("cuda")
-device = gpu if torch.cuda.is_available() else cpu
-
+if torch.has_cuda:
+    device = gpu = torch.device("cuda")
+elif torch.has_mps:
+    device = gpu = torch.device("mps")
+else:
+    device = gpu = torch.device("cpu")

 def setup_for_low_vram(sd_model, use_medvram):
     parents = {}
modules/sd_hijack.py

@@ -232,7 +232,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
         z = outputs.last_hidden_state

         # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
-        batch_multipliers = torch.asarray(np.array(batch_multipliers)).to(device)
+        batch_multipliers = torch.asarray(batch_multipliers).to(device)
         original_mean = z.mean()
         z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
         new_mean = z.mean()
modules/shared.py

@@ -36,9 +36,12 @@ parser.add_argument("--opt-split-attention", action='store_true', help="enable o
 parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
 cmd_opts = parser.parse_args()

-cpu = torch.device("cpu")
-gpu = torch.device("cuda")
-device = gpu if torch.cuda.is_available() else cpu
+if torch.has_cuda:
+    device = torch.device("cuda")
+elif torch.has_mps:
+    device = torch.device("mps")
+else:
+    device = torch.device("cpu")
 batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
 parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
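Both device blocks (lowvram.py and shared.py) now prefer CUDA, then MPS, then CPU. torch.has_cuda and torch.has_mps only report whether the PyTorch build was compiled with that backend, not whether a usable device exists at runtime; later PyTorch versions expose runtime checks instead. A hedged sketch of the runtime-checked equivalent:

    import torch

    # Runtime device selection; torch.backends.mps.is_available() exists
    # from PyTorch 1.12 onward, hence the getattr guard for older builds.
    def get_device():
        if torch.cuda.is_available():
            return torch.device('cuda')
        mps = getattr(torch.backends, 'mps', None)
        if mps is not None and mps.is_available():
            return torch.device('mps')
        return torch.device('cpu')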
modules/ui.py

@@ -323,7 +323,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
         with gr.Group():
             switch_mode = gr.Radio(label='Mode', elem_id="img2img_mode", choices=['Redraw whole image', 'Inpaint a part of image', 'Loopback', 'SD upscale'], value='Redraw whole image', type="index", show_label=False)
             init_img = gr.Image(label="Image for img2img", source="upload", interactive=True, type="pil")
-            init_img_with_mask = gr.Image(label="Image for inpainting with mask", elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", visible=False)
+            init_img_with_mask = gr.Image(label="Image for inpainting with mask", elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", visible=False, image_mode="RGBA")
             resize_mode = gr.Radio(label="Resize mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")

             steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
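image_mode="RGBA" tells the Gradio image component to keep the upload's alpha channel; without it Gradio converts images to RGB and the alpha-based mask in img2img above would always come back empty. A minimal sketch, assuming the Gradio 3.x-era API the repo used at the time (all parameters are taken from the diff line itself):

    import gradio as gr

    # Upload component that preserves transparency so erased pixels
    # survive the round trip into the inpainting code.
    img = gr.Image(source='upload', type='pil', tool='sketch',
                   image_mode='RGBA')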
requirements.txt

@@ -10,5 +10,6 @@ omegaconf
 pytorch_lightning
 diffusers
 invisible-watermark
+scikit-image
 git+https://github.com/crowsonkb/k-diffusion.git
 git+https://github.com/TencentARC/GFPGAN.git
requirements_versions.txt

@@ -8,3 +8,4 @@ torch
 transformers==4.19.2
 omegaconf==2.1.1
 pytorch_lightning==1.7.2
+scikit-image==0.19.2
script.js
@@ -172,3 +172,19 @@ function submit(){
     }
     return res
 }
+
+window.addEventListener('paste', e => {
+    const files = e.clipboardData.files;
+    if (!files || files.length !== 1) {
+        return;
+    }
+    if (!['image/png', 'image/gif', 'image/jpeg'].includes(files[0].type)) {
+        return;
+    }
+    [...gradioApp().querySelectorAll('input[type=file][accept="image/x-png,image/gif,image/jpeg"]')]
+        .filter(input => !input.matches('.\\!hidden input[type=file]'))
+        .forEach(input => {
+            input.files = files;
+            input.dispatchEvent(new Event('change'))
+        });
+});
webui.bat

@@ -35,7 +35,7 @@ echo Unable to create venv in directory %VENV_DIR%
 goto :show_stdout_stderr

 :activate_venv
-set PYTHON=%~dp0%VENV_DIR%\Scripts\Python.exe
+set PYTHON="%~dp0%VENV_DIR%\Scripts\Python.exe"
 %PYTHON% --version
 echo venv %PYTHON%
 goto :install_torch