diff --git a/.gitignore b/.gitignore
index 78cf719e2..fa3329f49 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,3 +11,4 @@ __pycache__
 /log
 /webui.settings.bat
 /embeddings
+/styles.csv
diff --git a/modules/img2img.py b/modules/img2img.py
index 008e86882..8da2d80e3 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -11,7 +11,7 @@ from modules.ui import plaintext_to_html
 import modules.images as images
 import modules.scripts
 
-def img2img(prompt: str, negative_prompt: str, init_img, init_img_with_mask, init_mask, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, mode: int, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, denoising_strength_change_factor: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, height: int, width: int, resize_mode: int, upscaler_index: str, upscale_overlap: int, inpaint_full_res: bool, inpainting_mask_invert: int, *args):
+def img2img(prompt: str, negative_prompt: str, prompt_style: int, init_img, init_img_with_mask, init_mask, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, mode: int, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, denoising_strength_change_factor: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, height: int, width: int, resize_mode: int, upscaler_index: str, upscale_overlap: int, inpaint_full_res: bool, inpainting_mask_invert: int, *args):
     is_inpaint = mode == 1
     is_loopback = mode == 2
     is_upscale = mode == 3
@@ -38,6 +38,7 @@ def img2img(prompt: str, negative_prompt: str, init_img, init_img_with_mask, ini
         outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,
         prompt=prompt,
         negative_prompt=negative_prompt,
+        prompt_style=prompt_style,
         seed=seed,
         subseed=subseed,
         subseed_strength=subseed_strength,
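Note on the new third parameter: Gradio passes the values from the `inputs=[...]` lists positionally, so `prompt_style` must occupy the same slot in these signatures as the Style dropdown does in modules/ui.py further down (right after the negative prompt). Despite the `int` annotation, the value that arrives appears to be the selected style's name, since that is what the `shared.prompt_styles` dict is keyed by. A toy sketch of the positional contract (values are invented; the real functions take many more parameters):

```python
# Stand-in for txt2img/img2img: only the first three positional slots matter here.
def fake_img2img(prompt: str, negative_prompt: str, prompt_style, *rest):
    return {"prompt": prompt, "negative": negative_prompt, "style": prompt_style}

# What the UI effectively sends after this change: the dropdown selection rides in slot 3.
print(fake_img2img("a castle at dusk", "blurry", "None", "init_img placeholder", 20))
```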
diff --git a/modules/processing.py b/modules/processing.py
index 09680fbf7..7e6cd8ee6 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -16,6 +16,7 @@ from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
 import modules.face_restoration
 import modules.images as images
+import modules.styles
 
 # some of those options should not be changed at all because they would break the model, so I removed them from options.
 opt_C = 4
@@ -29,13 +30,14 @@ def torch_gc():
 
 
 class StableDiffusionProcessing:
-    def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None):
+    def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", prompt_style=0, seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None):
         self.sd_model = sd_model
         self.outpath_samples: str = outpath_samples
         self.outpath_grids: str = outpath_grids
         self.prompt: str = prompt
         self.prompt_for_display: str = None
         self.negative_prompt: str = (negative_prompt or "")
+        self.prompt_style: int = prompt_style
         self.seed: int = seed
         self.subseed: int = subseed
         self.subseed_strength: float = subseed_strength
@@ -154,8 +156,6 @@ def fix_seed(p):
 
 
 def process_images(p: StableDiffusionProcessing) -> Processed:
     """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
-    prompt = p.prompt
-    assert p.prompt is not None
 
     torch_gc()
@@ -168,10 +168,12 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
 
     comments = []
 
-    if type(prompt) == list:
-        all_prompts = prompt
+    modules.styles.apply_style(p, shared.prompt_styles[p.prompt_style])
+
+    if type(p.prompt) == list:
+        all_prompts = p.prompt
     else:
-        all_prompts = p.batch_size * p.n_iter * [prompt]
+        all_prompts = p.batch_size * p.n_iter * [p.prompt]
 
     if type(p.seed) == list:
         all_seeds = int(p.seed)
@@ -207,7 +209,7 @@
 
         negative_prompt_text = "\nNegative prompt: " + p.negative_prompt if p.negative_prompt else ""
 
-        return f"{p.prompt_for_display or prompt}{negative_prompt_text}\n{generation_params_text}".strip() + "".join(["\n\n" + x for x in comments])
+        return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip() + "".join(["\n\n" + x for x in comments])
 
     if os.path.exists(cmd_opts.embeddings_dir):
         model_hijack.load_textual_inversion_embeddings(cmd_opts.embeddings_dir, p.sd_model)
diff --git a/modules/shared.py b/modules/shared.py
index e577332d1..5985d09e3 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -9,7 +9,7 @@ import tqdm
 
 import modules.artists
 from modules.paths import script_path, sd_path
-import modules.codeformer_model
+import modules.styles
 
 
 config_filename = "config.json"
@@ -75,8 +75,10 @@ state = State()
 
 artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
 
-face_restorers = []
+styles_filename = os.path.join(script_path, 'styles.csv')
+prompt_styles = modules.styles.load_styles(styles_filename)
 
+face_restorers = []
 
 def find_any_font():
     fonts = ['/usr/share/fonts/truetype/liberation/LiberationSans-Regular.ttf']
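Worth spelling out what the new `modules.styles.apply_style(p, shared.prompt_styles[p.prompt_style])` call does to `p.prompt` before the batch expansion: the style text is appended after a comma, an empty prompt becomes just the style text, and list prompts (as set by scripts like prompt_matrix) are styled element-wise. A standalone illustration with the helper copied from the new modules/styles.py below (style text and prompts are made up):

```python
def apply_style_text(style_text, prompt):
    # copied from modules/styles.py: the style text goes after the prompt, comma-separated
    return prompt + ", " + style_text if prompt else style_text

style_text = "oil painting, trending on artstation"

print(apply_style_text(style_text, "a red fox"))
# -> a red fox, oil painting, trending on artstation

print(apply_style_text(style_text, ""))
# -> oil painting, trending on artstation (empty prompt keeps only the style)

print([apply_style_text(style_text, x) for x in ["a fox", "a wolf"]])
# list prompts are styled element-wise, matching apply_style's list branch
```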
diff --git a/modules/styles.py b/modules/styles.py
new file mode 100644
index 000000000..58fb7d751
--- /dev/null
+++ b/modules/styles.py
@@ -0,0 +1,41 @@
+import csv
+import os.path
+from collections import namedtuple
+
+PromptStyle = namedtuple("PromptStyle", ["name", "text"])
+
+
+def load_styles(filename):
+    res = {"None": PromptStyle("None", "")}
+
+    if os.path.exists(filename):
+        with open(filename, "r", encoding="utf8", newline='') as file:
+            reader = csv.DictReader(file)
+
+            for row in reader:
+                res[row["name"]] = PromptStyle(row["name"], row["text"])
+
+    return res
+
+
+def apply_style_text(style_text, prompt):
+    return prompt + ", " + style_text if prompt else style_text
+
+
+def apply_style(p, style):
+    if type(p.prompt) == list:
+        p.prompt = [apply_style_text(style.text, x) for x in p.prompt]
+    else:
+        p.prompt = apply_style_text(style.text, p.prompt)
+
+
+def save_style(filename, style):
+    with open(filename, "a", encoding="utf8", newline='') as file:
+        atstart = file.tell() == 0
+
+        writer = csv.DictWriter(file, fieldnames=["name", "text"])
+
+        if atstart:
+            writer.writeheader()
+
+        writer.writerow({"name": style.name, "text": style.text})
diff --git a/modules/txt2img.py b/modules/txt2img.py
index 606421eab..070bd0943 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -6,12 +6,13 @@ import modules.processing as processing
 from modules.ui import plaintext_to_html
 
 
-def txt2img(prompt: str, negative_prompt: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, height: int, width: int, *args):
+def txt2img(prompt: str, negative_prompt: str, prompt_style: int, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, height: int, width: int, *args):
     p = StableDiffusionProcessingTxt2Img(
         sd_model=shared.sd_model,
         outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
         outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids,
         prompt=prompt,
+        prompt_style=prompt_style,
         negative_prompt=negative_prompt,
         seed=seed,
         subseed=subseed,
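A minimal round-trip sketch for the new module (the temp path and style values are invented): `load_styles` always seeds the result with the empty "None" style, `save_style` appends a row and writes the CSV header only when the file is empty (the `file.tell() == 0` check), and comma-heavy style texts survive because the csv module quotes them:

```python
import os
import tempfile

from modules.styles import PromptStyle, load_styles, save_style

path = os.path.join(tempfile.mkdtemp(), "styles.csv")  # throwaway location, not shared.styles_filename

save_style(path, PromptStyle("cinematic", "cinematic lighting, 35mm, film grain"))
save_style(path, PromptStyle("sketch", "pencil sketch, rough linework"))  # header is not written a second time

styles = load_styles(path)
assert "None" in styles                                                   # built-in empty style is always present
assert styles["cinematic"].text == "cinematic lighting, 35mm, film grain" # commas round-trip intact
assert list(styles) == ["None", "cinematic", "sketch"]                    # "None" first, then rows in file order
```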
diff --git a/modules/ui.py b/modules/ui.py
index 65076edbd..63ae62ab5 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -25,6 +25,7 @@ import modules.realesrgan_model as realesrgan
 import modules.scripts
 import modules.gfpgan_model
 import modules.codeformer_model
+import modules.styles
 
 # this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the bowser will not show any UI
 mimetypes.init()
@@ -226,11 +227,26 @@ def create_seed_inputs():
     return seed, subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w
 
 
+def add_style(style_name, text):
+    if style_name is None:
+        return [gr_show(), gr_show()]
+
+    style = modules.styles.PromptStyle(style_name, text)
+
+    modules.styles.save_style(shared.styles_filename, style)
+
+    shared.prompt_styles[style.name] = style
+
+    update = {"visible": True, "choices": [k for k, v in shared.prompt_styles.items()], "__type__": "update"}
+    return [update, update]
+
+
 def create_ui(txt2img, img2img, run_extras, run_pnginfo):
     with gr.Blocks(analytics_enabled=False) as txt2img_interface:
         with gr.Row():
-            prompt = gr.Textbox(label="Prompt", elem_id="txt2img_prompt", show_label=False, placeholder="Prompt", lines=1)
+            txt2img_prompt = gr.Textbox(label="Prompt", elem_id="txt2img_prompt", show_label=False, placeholder="Prompt", lines=1)
             negative_prompt = gr.Textbox(label="Negative prompt", elem_id="txt2img_negative_prompt", show_label=False, placeholder="Negative prompt", lines=1, visible=cmd_opts.show_negative_prompt)
+            txt2img_prompt_style = gr.Dropdown(label="Style", show_label=False, elem_id="style_index", choices=[k for k, v in shared.prompt_styles.items()], value=next(iter(shared.prompt_styles.keys())), visible=len(shared.prompt_styles) > 1)
             roll = gr.Button('Roll', elem_id="txt2img_roll", visible=len(shared.artist_db.artists) > 0)
             submit = gr.Button('Generate', elem_id="txt2img_generate", variant='primary')
             check_progress = gr.Button('Check progress', elem_id="check_progress", visible=False)
@@ -272,6 +288,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                         send_to_inpaint = gr.Button('Send to inpaint')
                         send_to_extras = gr.Button('Send to extras')
                         interrupt = gr.Button('Interrupt')
+                        txt2img_save_style = gr.Button('Save prompt as style')
 
                 progressbar = gr.HTML(elem_id="progressbar")
 
@@ -284,8 +301,9 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
             fn=txt2img,
             _js="submit",
             inputs=[
-                prompt,
+                txt2img_prompt,
                 negative_prompt,
+                txt2img_prompt_style,
                 steps,
                 sampler_index,
                 restore_faces,
@@ -305,7 +323,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
             ]
         )
 
-        prompt.submit(**txt2img_args)
+        txt2img_prompt.submit(**txt2img_args)
         submit.click(**txt2img_args)
 
         check_progress.click(
@@ -338,18 +356,19 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
         roll.click(
             fn=roll_artist,
             inputs=[
-                prompt,
+                txt2img_prompt,
            ],
            outputs=[
-                prompt
+                txt2img_prompt,
            ]
        )
 
 
    with gr.Blocks(analytics_enabled=False) as img2img_interface:
        with gr.Row():
-            prompt = gr.Textbox(label="Prompt", elem_id="img2img_prompt", show_label=False, placeholder="Prompt", lines=1)
+            img2img_prompt = gr.Textbox(label="Prompt", elem_id="img2img_prompt", show_label=False, placeholder="Prompt", lines=1)
             negative_prompt = gr.Textbox(label="Negative prompt", elem_id="img2img_negative_prompt", show_label=False, placeholder="Negative prompt", lines=1, visible=cmd_opts.show_negative_prompt)
+            img2img_prompt_style = gr.Dropdown(label="Style", show_label=False, elem_id="style_index", choices=[k for k, v in shared.prompt_styles.items()], value=next(iter(shared.prompt_styles.keys())), visible=len(shared.prompt_styles) > 1)
             submit = gr.Button('Generate', elem_id="img2img_generate", variant='primary')
             check_progress = gr.Button('Check progress', elem_id="check_progress", visible=False)
@@ -413,8 +432,10 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                         img2img_send_to_inpaint = gr.Button('Send to inpaint')
                         img2img_send_to_extras = gr.Button('Send to extras')
                         interrupt = gr.Button('Interrupt')
+                        img2img_save_style = gr.Button('Save prompt as style')
 
                 progressbar = gr.HTML(elem_id="progressbar")
+                style_dummpy = gr.Textbox(visible=False)
 
                 with gr.Group():
                     html_info = gr.HTML()
@@ -480,8 +501,9 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
             fn=img2img,
             _js="submit",
             inputs=[
-                prompt,
+                img2img_prompt,
                 negative_prompt,
+                img2img_prompt_style,
                 init_img,
                 init_img_with_mask,
                 init_mask,
@@ -515,7 +537,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
             ]
         )
 
-        prompt.submit(**img2img_args)
+        img2img_prompt.submit(**img2img_args)
         submit.click(**img2img_args)
 
         check_progress.click(
@@ -572,6 +594,14 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
             outputs=[init_img_with_mask],
         )
 
+        for button, propmt in zip([txt2img_save_style, img2img_save_style], [txt2img_prompt, img2img_prompt]):
+            button.click(
+                fn=add_style,
+                _js="ask_for_style_name",
+                inputs=[style_dummpy, propmt],
+                outputs=[txt2img_prompt_style, img2img_prompt_style],
+            )
+
    with gr.Blocks(analytics_enabled=False) as extras_interface:
        with gr.Row().style(equal_height=False):
            with gr.Column(variant='panel'):
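The save-style wiring above works in two hops: `_js="ask_for_style_name"` runs in the browser first and replaces the two inputs with `[typed_name, prompt_text]` (the hidden `style_dummpy` textbox only exists to receive the name), then `add_style` persists the style and returns one update dict per Style dropdown; the `{"__type__": "update", ...}` dict appears to be the hand-rolled form of a Gradio component update. A standalone sketch of the Python half, with a local dict and temp file standing in for `shared.prompt_styles` / `shared.styles_filename`:

```python
import os
import tempfile

from modules.styles import PromptStyle, save_style

styles_filename = os.path.join(tempfile.mkdtemp(), "styles.csv")  # stand-in for shared.styles_filename
prompt_styles = {"None": PromptStyle("None", "")}                  # stand-in for shared.prompt_styles

def add_style_sketch(style_name, text):
    if style_name is None:                      # user hit Cancel in the browser prompt()
        return [{}, {}]                         # the real handler returns gr_show() placeholders

    style = PromptStyle(style_name, text)
    save_style(styles_filename, style)
    prompt_styles[style.name] = style

    update = {"visible": True, "choices": list(prompt_styles), "__type__": "update"}
    return [update, update]                     # one refresh per Style dropdown (txt2img and img2img)

print(add_style_sketch("cinematic", "a red fox, cinematic lighting"))
```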
diff --git a/script.js b/script.js
index ed3765085..7637089e8 100644
--- a/script.js
+++ b/script.js
@@ -194,3 +194,12 @@ window.addEventListener('paste', e => {
         input.dispatchEvent(new Event('change'))
     });
 });
+
+function ask_for_style_name(style_name, text){
+    input = prompt('Style name:');
+    if (input === null) {
+        return [null, null]
+    }
+
+    return [input, text]
+}
diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py
index 8d4a4e7e4..f8da0fdd3 100644
--- a/scripts/prompt_matrix.py
+++ b/scripts/prompt_matrix.py
@@ -74,7 +74,6 @@ class Script(scripts.Script):
 
         p.prompt = all_prompts
         p.prompt_for_display = original_prompt
-        p.seed = len(all_prompts) * [seed]
 
         processed = process_images(p)
 
         grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))
@@ -82,6 +81,6 @@ class Script(scripts.Script):
         processed.images.insert(0, grid)
 
         if opts.grid_save:
-            images.save_image(processed.images[0], p.outpath_grids, "prompt_matrix", prompt=original_prompt, seed=seed)
+            images.save_image(processed.images[0], p.outpath_grids, "prompt_matrix", prompt=original_prompt, seed=processed.seed)
 
         return processed
diff --git a/style.css b/style.css
index 57b3665de..bebd0a539 100644
--- a/style.css
+++ b/style.css
@@ -19,6 +19,18 @@
     max-width: 4em;
 }
 
+#style_index{
+    min-width: 9em;
+    max-width: 9em;
+    padding-left: 0;
+    padding-right: 0;
+}
+
+#component-1 div{
+    border: none;
+    gap: 0;
+}
+
 #resize_mode{
     flex: 1.5;
 }
@@ -28,9 +40,12 @@ button{
 }
 
 #img2img_prompt, #txt2img_prompt, #img2img_negative_prompt, #txt2img_negative_prompt{
-    padding: 0;
     border: none !important;
 }
+
+#img2img_prompt textarea, #txt2img_prompt textarea, #img2img_negative_prompt textarea, #txt2img_negative_prompt textarea{
+    border: none !important;
+}
 
 #img2maskimg .h-60{
     height: 30rem;
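Closing the loop on the processing.py change above: the text recorded next to each image now comes from `all_prompts[index]`, i.e. the prompt after the style text has been appended, rather than the raw textbox contents, and prompt_matrix stamps its saved grid with `processed.seed` (the seed reported by the finished run) instead of the local `seed` captured beforehand. A standalone rendering of that format string (parameter values are invented):

```python
# Mirrors the return expression changed in processing.py above.
all_prompts = 2 * 2 * ["a red fox, oil painting, trending on artstation"]  # batch_size * n_iter copies of the styled prompt
index = 3
negative_prompt = "blurry, low quality"
generation_params_text = "Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 1234"
comments = []

negative_prompt_text = "\nNegative prompt: " + negative_prompt if negative_prompt else ""
print(f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip() + "".join(["\n\n" + x for x in comments]))
```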