Merge remote-tracking branch 'origin/master'

AUTOMATIC 2022-09-11 23:25:35 +03:00
commit 81d91cea29
7 changed files with 92 additions and 52 deletions

.gitignore

@@ -12,5 +12,6 @@ __pycache__
/webui.settings.bat
/embeddings
/styles.csv
/styles.csv.bak
/webui-user.bat
/interrogate


@@ -243,16 +243,32 @@ def sanitize_filename_part(text):
return text.replace(' ', '_').translate({ord(x): '' for x in invalid_filename_chars})[:128]
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, pnginfo_section_name='parameters'):
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, pnginfo_section_name='parameters', process_info=None):
# would be better to add this as an argument in future, but will do for now
is_a_grid = basename != ""
if short_filename or prompt is None or seed is None:
file_decoration = ""
elif opts.save_to_dirs:
file_decoration = f"-{seed}"
file_decoration = opts.samples_filename_format or "[SEED]"
else:
file_decoration = f"-{seed}-{sanitize_filename_part(prompt)[:128]}"
file_decoration = opts.samples_filename_format or "[SEED]-[PROMPT]"
#file_decoration = f"-{seed}-{sanitize_filename_part(prompt)[:128]}"
#Add new filenames tags here
file_decoration = "-" + file_decoration
if seed is not None:
file_decoration = file_decoration.replace("[SEED]", str(seed))
if prompt is not None:
file_decoration = file_decoration.replace("[PROMPT]", sanitize_filename_part(prompt)[:128])
file_decoration = file_decoration.replace("[PROMPT_SPACES]", prompt.translate({ord(x): '' for x in invalid_filename_chars})[:128])
if process_info is not None:
file_decoration = file_decoration.replace("[STEPS]", str(process_info.steps))
file_decoration = file_decoration.replace("[CFG]", str(process_info.cfg_scale))
file_decoration = file_decoration.replace("[WIDTH]", str(process_info.width))
file_decoration = file_decoration.replace("[HEIGHT]", str(process_info.height))
file_decoration = file_decoration.replace("[SAMPLER]", str(process_info.sampler))
if extension == 'png' and opts.enable_pnginfo and info is not None:
pnginfo = PngImagePlugin.PngInfo()
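The new decoration logic is plain string replacement over the configured pattern. Below is a minimal, hedged sketch of it that runs on its own: ProcessInfo is a hypothetical stand-in for the object passed as process_info (the real call passes a Processed instance), and sanitize_filename_part is reduced to an underscore swap.

```python
# Minimal sketch of the [TAG] substitution added to save_image above.
# ProcessInfo is a hypothetical stand-in; the real code passes a Processed object.
from dataclasses import dataclass


@dataclass
class ProcessInfo:
    steps: int = 20
    cfg_scale: float = 7.0
    width: int = 512
    height: int = 512
    sampler: str = "Euler a"


def decorate(fmt: str, seed: int, prompt: str, info: ProcessInfo) -> str:
    decoration = "-" + fmt
    decoration = decoration.replace("[SEED]", str(seed))
    # sanitize_filename_part is simplified here to replacing spaces with underscores
    decoration = decoration.replace("[PROMPT]", prompt.replace(" ", "_")[:128])
    decoration = decoration.replace("[STEPS]", str(info.steps))
    decoration = decoration.replace("[CFG]", str(info.cfg_scale))
    decoration = decoration.replace("[WIDTH]", str(info.width))
    decoration = decoration.replace("[HEIGHT]", str(info.height))
    decoration = decoration.replace("[SAMPLER]", str(info.sampler))
    return decoration


print(decorate("[SEED]-[STEPS]-[SAMPLER]", 12345, "a photo of a cat", ProcessInfo()))
# -12345-20-Euler a
```

When samples_filename_format is left blank, the code above falls back to "[SEED]" when saving into per-prompt directories and "[SEED]-[PROMPT]" otherwise.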


@@ -275,7 +275,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
image = image.convert('RGB')
if opts.samples_save and not p.do_not_save_samples:
images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i))
images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), process_info = Processed(p, output_images, all_seeds[0], infotext()))
output_images.append(image)


@@ -92,6 +92,7 @@ class Options:
data = None
data_labels = {
"samples_filename_format": OptionInfo("", "Samples filename format using following tags: [STEPS],[CFG],[PROMPT],[PROMPT_SPACES],[WIDTH],[HEIGHT],[SAMPLER],[SEED]. Leave blank for default."),
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to two directories below"),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images'),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images'),


@@ -1,44 +1,68 @@
# We need this so Python doesn't complain about the unknown StableDiffusionProcessing-typehint at runtime
from __future__ import annotations
import csv
import os
import os.path
from collections import namedtuple
import typing
import collections.abc as abc
import tempfile
import shutil
PromptStyle = namedtuple("PromptStyle", ["name", "text"])
if typing.TYPE_CHECKING:
# Only import this when code is being type-checked, it doesn't have any effect at runtime
from .processing import StableDiffusionProcessing
def load_styles(filename):
res = {"None": PromptStyle("None", "")}
class PromptStyle(typing.NamedTuple):
name: str
prompt: str
negative_prompt: str
if os.path.exists(filename):
with open(filename, "r", encoding="utf8", newline='') as file:
def load_styles(path: str) -> dict[str, PromptStyle]:
styles = {"None": PromptStyle("None", "", "")}
if os.path.exists(path):
with open(path, "r", encoding="utf8", newline='') as file:
reader = csv.DictReader(file)
for row in reader:
res[row["name"]] = PromptStyle(row["name"], row["text"])
# Support loading old CSV format with "name, text"-columns
prompt = row["prompt"] if "prompt" in row else row["text"]
negative_prompt = row.get("negative_prompt", "")
styles[row["name"]] = PromptStyle(row["name"], prompt, negative_prompt)
return res
return styles
def apply_style_text(style_text, prompt):
if style_text == "":
return prompt
return prompt + ", " + style_text if prompt else style_text
def merge_prompts(style_prompt: str, prompt: str) -> str:
parts = filter(None, (prompt.strip(), style_prompt.strip()))
return ", ".join(parts)
def apply_style(p, style):
if type(p.prompt) == list:
p.prompt = [apply_style_text(style.text, x) for x in p.prompt]
def apply_style(processing: StableDiffusionProcessing, style: PromptStyle) -> None:
if isinstance(processing.prompt, list):
processing.prompt = [merge_prompts(style.prompt, p) for p in processing.prompt]
else:
p.prompt = apply_style_text(style.text, p.prompt)
processing.prompt = merge_prompts(style.prompt, processing.prompt)
if isinstance(processing.negative_prompt, list):
processing.negative_prompt = [merge_prompts(style.negative_prompt, p) for p in processing.negative_prompt]
else:
processing.negative_prompt = merge_prompts(style.negative_prompt, processing.negative_prompt)
def save_style(filename, style):
with open(filename, "a", encoding="utf8", newline='') as file:
atstart = file.tell() == 0
def save_styles(path: str, styles: abc.Iterable[PromptStyle]) -> None:
# Write to temporary file first, so we don't nuke the file if something goes wrong
fd, temp_path = tempfile.mkstemp(".csv")
with os.fdopen(fd, "w", encoding="utf8", newline='') as file:
# _fields is actually part of the public API: typing.NamedTuple is a replacement for collections.NamedTuple,
# and collections.NamedTuple has explicit documentation for accessing _fields. Same goes for _asdict()
writer = csv.DictWriter(file, fieldnames=PromptStyle._fields)
writer.writeheader()
writer.writerows(style._asdict() for style in styles)
writer = csv.DictWriter(file, fieldnames=["name", "text"])
if atstart:
writer.writeheader()
writer.writerow({"name": style.name, "text": style.text})
# Always keep a backup file around
if os.path.exists(path):
shutil.move(path, path + ".bak")
shutil.move(temp_path, path)
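Of the rewritten helpers, merge_prompts is the one doing the user-visible work, and it can be exercised standalone (the values below are illustrative):

```python
# Standalone copy of the merge_prompts helper defined above.
# Empty parts are filtered out, so an empty prompt or style adds no stray commas.
def merge_prompts(style_prompt: str, prompt: str) -> str:
    parts = filter(None, (prompt.strip(), style_prompt.strip()))
    return ", ".join(parts)


print(merge_prompts("cinematic lighting, 35mm", "a portrait of an astronaut"))
# a portrait of an astronaut, cinematic lighting, 35mm
print(merge_prompts("cinematic lighting, 35mm", ""))
# cinematic lighting, 35mm
```

On the save path, save_styles writes to a temporary file and then moves the old styles.csv aside as styles.csv.bak, which is why /styles.csv.bak is added to .gitignore in this same commit.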


@@ -228,17 +228,17 @@ def create_seed_inputs():
return seed, subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w
def add_style(style_name, text):
if style_name is None:
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(style_name, text)
modules.styles.save_style(shared.styles_filename, style)
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
modules.styles.save_styles(shared.styles_filename, shared.prompt_styles.values())
update = {"visible": True, "choices": [k for k, v in shared.prompt_styles.items()], "__type__": "update"}
update = {"visible": True, "choices": list(shared.prompt_styles), "__type__": "update"}
return [update, update]
@@ -251,7 +251,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
with gr.Row(elem_id="toprow"):
txt2img_prompt = gr.Textbox(label="Prompt", elem_id="txt2img_prompt", show_label=False, placeholder="Prompt", lines=1)
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="txt2img_negative_prompt", show_label=False, placeholder="Negative prompt", lines=1)
txt2img_negative_prompt = gr.Textbox(label="Negative prompt", elem_id="txt2img_negative_prompt", show_label=False, placeholder="Negative prompt", lines=1)
txt2img_prompt_style = gr.Dropdown(label="Style", show_label=False, elem_id="style_index", choices=[k for k, v in shared.prompt_styles.items()], value=next(iter(shared.prompt_styles.keys())), visible=len(shared.prompt_styles) > 1)
roll = gr.Button('Roll', elem_id="txt2img_roll", visible=len(shared.artist_db.artists) > 0)
submit = gr.Button('Generate', elem_id="txt2img_generate", variant='primary')
@@ -308,7 +308,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
_js="submit",
inputs=[
txt2img_prompt,
negative_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
steps,
sampler_index,
@@ -372,7 +372,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
with gr.Blocks(analytics_enabled=False) as img2img_interface:
with gr.Row(elem_id="toprow"):
img2img_prompt = gr.Textbox(label="Prompt", elem_id="img2img_prompt", show_label=False, placeholder="Prompt", lines=1)
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="img2img_negative_prompt", show_label=False, placeholder="Negative prompt", lines=1)
img2img_negative_prompt = gr.Textbox(label="Negative prompt", elem_id="img2img_negative_prompt", show_label=False, placeholder="Negative prompt", lines=1)
img2img_prompt_style = gr.Dropdown(label="Style", show_label=False, elem_id="style_index", choices=[k for k, v in shared.prompt_styles.items()], value=next(iter(shared.prompt_styles.keys())), visible=len(shared.prompt_styles) > 1)
img2img_interrogate = gr.Button('Interrogate', elem_id="img2img_interrogate", variant='primary')
submit = gr.Button('Generate', elem_id="img2img_generate", variant='primary')
@@ -441,7 +441,6 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
img2img_save_style = gr.Button('Save prompt as style')
progressbar = gr.HTML(elem_id="progressbar")
style_dummpy = gr.Textbox(visible=False)
with gr.Group():
html_info = gr.HTML()
@@ -510,7 +509,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
_js="submit",
inputs=[
img2img_prompt,
negative_prompt,
img2img_negative_prompt,
img2img_prompt_style,
init_img,
init_img_with_mask,
@@ -580,11 +579,14 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
]
)
for button, propmt in zip([txt2img_save_style, img2img_save_style], [txt2img_prompt, img2img_prompt]):
dummy_component = gr.Label(visible=False)
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]):
button.click(
fn=add_style,
_js="ask_for_style_name",
inputs=[style_dummpy, propmt],
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style],
)
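The dummy-component trick wired up above is easiest to see in isolation. The sketch below assumes Gradio 3.x semantics for the _js hook and gr.update(); the component names and the inline JavaScript are illustrative, not the real UI code.

```python
# Hedged sketch of the _js / dummy-component pattern: the JS hook runs first,
# asks the user for a style name, and its return values become the inputs passed
# to the Python callback. The hidden dummy only keeps the argument counts equal.
import gradio as gr


def add_style(name, prompt, negative_prompt):
    if name is None:  # the user cancelled the JS prompt()
        return gr.update(), gr.update()
    print(f"saving style {name!r}: {prompt!r} / {negative_prompt!r}")
    # the real code refreshes the choices of both style dropdowns here
    return gr.update(), gr.update()


with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    negative = gr.Textbox(label="Negative prompt")
    dummy = gr.Label(visible=False)  # placeholder for the style name filled in by JS
    save = gr.Button("Save prompt as style")
    style_a = gr.Dropdown(choices=["None"], label="Style A")
    style_b = gr.Dropdown(choices=["None"], label="Style B")
    save.click(
        fn=add_style,
        _js="(_, p, n) => { const name = prompt('Style name:'); "
            "return name === null ? [null, null, null] : [name, p, n]; }",
        inputs=[dummy, prompt, negative],
        outputs=[style_a, style_b],
    )

demo.launch()
```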


@@ -186,11 +186,7 @@ window.addEventListener('paste', e => {
});
});
function ask_for_style_name(style_name, text){
input = prompt('Style name:');
if (input === null) {
return [null, null]
}
return [input, text]
function ask_for_style_name(_, prompt_text, negative_prompt_text) {
name_ = prompt('Style name:')
return name_ === null ? [null, null, null]: [name_, prompt_text, negative_prompt_text]
}