Revert "Merge pull request #7931 from space-nuko/img2img-enhance"

This reverts commit 4268759370, reversing
changes made to 1b63afbedc.
AUTOMATIC 2023-03-28 20:36:57 +03:00
parent 4268759370
commit 433b3ab701
7 changed files with 13 additions and 131 deletions
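For context: the reverted PR had added an "Upscale by" slider and an "Upscaler" dropdown to the img2img tab, deriving the target resolution from the init image instead of the Width/Height sliders; reverting it also restores the "Just resize (latent upscale)" resize mode. A minimal sketch of the removed sizing rule, reconstructed from the deleted hunks below (illustration only, not actual webui code):

    def img2img_target_size(init_w: int, init_h: int, scale: float, width: int, height: int):
        # With "Upscale by" > 1 the removed code scaled the init image dimensions;
        # otherwise it fell back to the regular Width/Height sliders.
        if scale > 1:
            return int(init_w * scale), int(init_h * scale)
        return width, height

    # e.g. a 512x768 init image with scale 2.0 targeted 1024x1536; at scale 1.0 the sliders win
    assert img2img_target_size(512, 768, 2.0, 640, 640) == (1024, 1536)
    assert img2img_target_size(512, 768, 1.0, 640, 640) == (640, 640)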

javascript/ui.js View File

@@ -132,14 +132,7 @@ function create_tab_index_args(tabId, args){
 function get_img2img_tab_index() {
     let res = args_to_array(arguments)
     res.splice(-2) // gradio also sends outputs to the arguments, pop them off
     res[0] = get_tab_index('mode_img2img')
     return res
 }
-function get_img2img_tab_index_for_res_preview() {
-    let res = args_to_array(arguments)
-    res.splice(-1) // gradio also sends outputs to the arguments, pop them off
-    res.splice(-2)
-    res[0] = get_tab_index('mode_img2img')
-    return res
-}
@@ -368,16 +361,3 @@ function selectCheckpoint(name){
     desiredCheckpointName = name;
     gradioApp().getElementById('change_checkpoint').click()
 }
-function onCalcResolutionImg2Img(mode, scale, width, height, resize_mode, init_img, sketch, init_img_with_mask, inpaint_color_sketch, init_img_inpaint){
-    i2iScale = gradioApp().getElementById('img2img_scale')
-    i2iWidth = gradioApp().getElementById('img2img_width')
-    i2iHeight = gradioApp().getElementById('img2img_height')
-    setInactive(i2iScale, scale == 1)
-    setInactive(i2iWidth, scale > 1)
-    setInactive(i2iHeight, scale > 1)
-    return [];
-}

modules/generation_parameters_copypaste.py View File

@@ -282,9 +282,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
         res["Hires resize-1"] = 0
         res["Hires resize-2"] = 0
-    if "Img2Img upscale" not in res:
-        res["Img2Img upscale"] = 1
     restore_old_hires_fix_params(res)
     return res

modules/img2img.py View File

@@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
                 processed_image.save(os.path.join(output_dir, filename))
-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, scale: float, upscaler: str, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
     override_settings = create_override_settings_dict(override_settings_texts)
     is_batch = mode == 5
@@ -149,8 +149,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
         inpaint_full_res_padding=inpaint_full_res_padding,
         inpainting_mask_invert=inpainting_mask_invert,
         override_settings=override_settings,
-        scale=scale,
-        upscaler=upscaler,
     )
     p.scripts = modules.scripts.scripts_txt2img

modules/processing.py View File

@@ -946,7 +946,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
 class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
     sampler = None
-    def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, upscaler: Optional[str] = None, **kwargs):
+    def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs):
         super().__init__(**kwargs)
         self.init_images = init_images
@@ -966,37 +966,11 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         self.mask = None
         self.nmask = None
         self.image_conditioning = None
-        self.scale = scale
-        self.upscaler = upscaler
-    def get_final_size(self):
-        if self.scale > 1:
-            img = self.init_images[0]
-            width = int(img.width * self.scale)
-            height = int(img.height * self.scale)
-            return width, height
-        else:
-            return self.width, self.height
     def init(self, all_prompts, all_seeds, all_subseeds):
         self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
         crop_region = None
-        if self.scale > 1:
-            self.extra_generation_params["Img2Img upscale"] = self.scale
-        # Non-latent upscalers are run before sampling
-        # Latent upscalers are run during sampling
-        init_upscaler = None
-        if self.upscaler is not None:
-            self.extra_generation_params["Img2Img upscaler"] = self.upscaler
-            if self.upscaler not in shared.latent_upscale_modes:
-                assert len([x for x in shared.sd_upscalers if x.name == self.upscaler]) > 0, f"could not find upscaler named {self.upscaler}"
-                init_upscaler = self.upscaler
-        self.width, self.height = self.get_final_size()
         image_mask = self.image_mask
         if image_mask is not None:
@@ -1019,7 +993,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
                 image_mask = images.resize_image(2, mask, self.width, self.height)
                 self.paste_to = (x1, y1, x2-x1, y2-y1)
             else:
-                image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height, init_upscaler)
+                image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
                 np_mask = np.array(image_mask)
                 np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
                 self.mask_for_overlay = Image.fromarray(np_mask)
@@ -1036,7 +1010,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
             image = images.flatten(img, opts.img2img_background_color)
             if crop_region is None and self.resize_mode != 3:
-                image = images.resize_image(self.resize_mode, image, self.width, self.height, init_upscaler)
+                image = images.resize_image(self.resize_mode, image, self.width, self.height)
             if image_mask is not None:
                 image_masked = Image.new('RGBa', (image.width, image.height))
@@ -1081,9 +1055,8 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
-        latent_scale_mode = shared.latent_upscale_modes.get(self.upscaler, None) if self.upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
-        if latent_scale_mode is not None:
-            self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])
+        if self.resize_mode == 3:
+            self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
         if image_mask is not None:
             init_mask = latent_mask

modules/ui.py View File

@@ -15,7 +15,6 @@ import warnings
 import gradio as gr
 import gradio.routes
 import gradio.utils
-from gradio.events import Releaseable
 import numpy as np
 from PIL import Image, PngImagePlugin
 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
@@ -128,26 +127,6 @@ def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resiz
     return f"resize: from <span class='resolution'>{p.width}x{p.height}</span> to <span class='resolution'>{p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}</span>"
-def calc_resolution_img2img(mode, scale, resize_x, resize_y, resize_mode, *i2i_images):
-    init_img = None
-    if mode in {0, 1, 3, 4}:
-        init_img = i2i_images[mode]
-    elif mode == 2:
-        init_img = i2i_images[mode]["image"]
-    if not init_img:
-        return ""
-    if scale > 1:
-        width = int(init_img.width * scale)
-        height = int(init_img.height * scale)
-    else:
-        width = resize_x
-        height = resize_y
-    return f"resize: from <span class='resolution'>{init_img.width}x{init_img.height}</span> to <span class='resolution'>{width}x{height}</span>"
 def apply_styles(prompt, prompt_neg, styles):
     prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles)
     prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles)
@@ -756,7 +735,7 @@ def create_ui():
                     )
                 with FormRow():
-                    resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
+                    resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
                 for category in ordered_ui_categories():
                     if category == "sampler":
@@ -765,11 +744,6 @@ def create_ui():
                     elif category == "dimensions":
                         with FormRow():
                             with gr.Column(elem_id="img2img_column_size", scale=4):
-                                with FormRow(variant="compact"):
-                                    final_resolution = FormHTML(value="", elem_id="img2img_finalres", label="Upscaled resolution", interactive=False)
-                                with FormRow(variant="compact"):
-                                    scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=1.0, elem_id="img2img_scale")
-                                with FormRow(variant="compact"):
                                 width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
                                 height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
@@ -786,8 +760,6 @@ def create_ui():
                             with FormRow():
                                 cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
                                 image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit")
-                            with FormRow():
-                                upscaler = gr.Dropdown(label="Upscaler", elem_id="img2img_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
                             denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
                     elif category == "seed":
@@ -841,39 +813,6 @@ def create_ui():
                         outputs=[inpaint_controls, mask_alpha],
                     )
-                img2img_resolution_preview_inputs = [dummy_component, # filled in by selected img2img tab index in _js
-                                                     scale, width, height, resize_mode,
-                                                     init_img, sketch, init_img_with_mask, inpaint_color_sketch, init_img_inpaint]
-                for input in img2img_resolution_preview_inputs[1:]:
-                    if isinstance(input, Releaseable):
-                        input.release(
-                            fn=calc_resolution_img2img,
-                            _js="get_img2img_tab_index_for_res_preview",
-                            inputs=img2img_resolution_preview_inputs,
-                            outputs=[final_resolution],
-                            show_progress=False,
-                        ).success(
-                            None,
-                            _js="onCalcResolutionImg2Img",
-                            inputs=img2img_resolution_preview_inputs,
-                            outputs=[],
-                            show_progress=False,
-                        )
-                    else:
-                        input.change(
-                            fn=calc_resolution_img2img,
-                            _js="get_img2img_tab_index_for_res_preview",
-                            inputs=img2img_resolution_preview_inputs,
-                            outputs=[final_resolution],
-                            show_progress=False,
-                        ).success(
-                            None,
-                            _js="onCalcResolutionImg2Img",
-                            inputs=img2img_resolution_preview_inputs,
-                            outputs=[],
-                            show_progress=False,
-                        )
             img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
         connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
@@ -922,8 +861,6 @@ def create_ui():
                 subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
                 height,
                 width,
-                scale,
-                upscaler,
                 resize_mode,
                 inpaint_full_res,
                 inpaint_full_res_padding,
@@ -1009,8 +946,6 @@ def create_ui():
             (seed, "Seed"),
             (width, "Size-1"),
             (height, "Size-2"),
-            (scale, "Img2Img upscale"),
-            (upscaler, "Img2Img upscaler"),
             (batch_size, "Batch size"),
             (subseed, "Variation seed"),
             (subseed_strength, "Variation seed strength"),

scripts/xyz_grid.py View File

@@ -220,7 +220,6 @@ axis_options = [
     AxisOption("Clip skip", int, apply_clip_skip),
     AxisOption("Denoising", float, apply_field("denoising_strength")),
     AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
-    AxisOptionImg2Img("Upscaler", str, apply_field("upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
     AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")),
     AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: list(sd_vae.vae_dict)),
     AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)),

style.css View File

@@ -287,13 +287,13 @@ button.custom-button{
     border-radius: 0 0.5rem 0.5rem 0;
 }
-#txtimg_hr_finalres, #img2img_finalres {
+#txtimg_hr_finalres{
     min-height: 0 !important;
     padding: .625rem .75rem;
     margin-left: -0.75em
 }
-#txtimg_hr_finalres .resolution, #img2img_finalres .resolution{
+#txtimg_hr_finalres .resolution{
     font-weight: bold;
 }