Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git (synced 2024-06-07 21:20:49 +00:00)
Merge branch 'AUTOMATIC1111:master' into master
commit 272d979d1c

launch.py
@@ -94,6 +94,15 @@ def prepare_enviroment():
 
     gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
     clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
+    deepdanbooru_package = os.environ.get('DEEPDANBOORU_PACKAGE', "git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26")
+
+    xformers_windows_package = os.environ.get('XFORMERS_WINDOWS_PACKAGE', 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl')
+
+    stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/CompVis/stable-diffusion.git")
+    taming_transformers_repo = os.environ.get('TAMING_REANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git")
+    k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
+    codeformer_repo = os.environ.get('CODEFORMET_REPO', 'https://github.com/sczhou/CodeFormer.git')
+    blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
 
     stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
     taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
@@ -131,23 +140,23 @@ def prepare_enviroment():
 
     if (not is_installed("xformers") or reinstall_xformers) and xformers and platform.python_version().startswith("3.10"):
         if platform.system() == "Windows":
-            run_pip("install -U -I --no-deps https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
+            run_pip(f"install -U -I --no-deps {xformers_windows_package}", "xformers")
         elif platform.system() == "Linux":
             run_pip("install xformers", "xformers")
 
     if not is_installed("deepdanbooru") and deepdanbooru:
-        run_pip("install git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru")
+        run_pip(f"install {deepdanbooru_package}#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru")
 
     if not is_installed("pyngrok") and ngrok:
         run_pip("install pyngrok", "ngrok")
 
     os.makedirs(dir_repos, exist_ok=True)
 
-    git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
+    git_clone(stable_diffusion_repo, repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
-    git_clone("https://github.com/CompVis/taming-transformers.git", repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
+    git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
-    git_clone("https://github.com/crowsonkb/k-diffusion.git", repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
+    git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
-    git_clone("https://github.com/sczhou/CodeFormer.git", repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
+    git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
-    git_clone("https://github.com/salesforce/BLIP.git", repo_dir('BLIP'), "BLIP", blip_commit_hash)
+    git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
 
     if not is_installed("lpips"):
         run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
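The launch.py hunks above replace hard-coded package and repository URLs with environment-variable lookups that keep the old values as defaults, so the installer can be pointed at a fork or an alternative wheel without editing the script. A minimal sketch of that override pattern, reusing K_DIFFUSION_REPO from the diff; the fork URL is a made-up placeholder:

import os

# Simulate a user exporting the variable before launch; the fork URL is fictional.
os.environ.setdefault('K_DIFFUSION_REPO', 'https://github.com/example/k-diffusion-fork.git')

# Same lookup pattern as launch.py: use the variable if set, else the pinned default.
k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
print(k_diffusion_repo)  # prints the fork URL because the variable is set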
modules/extras.py

@@ -20,12 +20,13 @@ import gradio as gr
 cached_images = {}
 
 
-def run_extras(extras_mode, resize_mode, image, image_folder, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility):
+def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility):
     devices.torch_gc()
 
     imageArr = []
     # Also keep track of original file names
     imageNameArr = []
+    outputs = []
 
     if extras_mode == 1:
         #convert file to pillow image
@@ -33,13 +34,26 @@ def run_extras(extras_mode, resize_mode, image, image_folder, gfpgan_visibility,
             image = Image.open(img)
             imageArr.append(image)
             imageNameArr.append(os.path.splitext(img.orig_name)[0])
+    elif extras_mode == 2:
+        assert not shared.cmd_opts.hide_ui_dir_config, '--hide-ui-dir-config option must be disabled'
+
+        if input_dir == '':
+            return outputs, "Please select an input directory.", ''
+        image_list = [file for file in [os.path.join(input_dir, x) for x in os.listdir(input_dir)] if os.path.isfile(file)]
+        for img in image_list:
+            image = Image.open(img)
+            imageArr.append(image)
+            imageNameArr.append(img)
     else:
         imageArr.append(image)
         imageNameArr.append(None)
 
+    if extras_mode == 2 and output_dir != '':
+        outpath = output_dir
+    else:
         outpath = opts.outdir_samples or opts.outdir_extras_samples
 
-    outputs = []
     for image, image_name in zip(imageArr, imageNameArr):
         if image is None:
             return outputs, "Please select an input image.", ''
@@ -112,6 +126,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, gfpgan_visibility,
         image.info = existing_pnginfo
         image.info["extras"] = info
 
+        if extras_mode != 2 or show_extras_results :
             outputs.append(image)
 
     devices.torch_gc()
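The extras.py hunks add a second batch mode (extras_mode == 2) that reads every regular file from a user-supplied input directory instead of uploaded files, and only shows result images in the UI when show_extras_results is checked. A small sketch of the directory scan, mirroring the list comprehension above; '.' stands in for a real input directory:

import os

input_dir = '.'  # placeholder for the "Input directory" textbox value
image_list = [file for file in [os.path.join(input_dir, x) for x in os.listdir(input_dir)] if os.path.isfile(file)]
print(f'{len(image_list)} files found')
# run_extras() would then Image.open() each path and keep the path as the image name.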
modules/ngrok.py

@@ -1,12 +1,14 @@
 from pyngrok import ngrok, conf, exception
 
 
-def connect(token, port):
+def connect(token, port, region):
     if token == None:
         token = 'None'
-    conf.get_default().auth_token = token
+    config = conf.PyngrokConfig(
+        auth_token=token, region=region
+    )
     try:
-        public_url = ngrok.connect(port).public_url
+        public_url = ngrok.connect(port, pyngrok_config=config).public_url
     except exception.PyngrokNgrokError:
         print(f'Invalid ngrok authtoken, ngrok connection aborted.\n'
               f'Your token: {token}, get the right one on https://dashboard.ngrok.com/get-started/your-authtoken')
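In modules/ngrok.py, connect() now takes the region and passes it, together with the token, through a per-call PyngrokConfig instead of mutating pyngrok's global default configuration. A hedged sketch of building that config (requires the pyngrok package; 'eu' is an arbitrary example region):

from pyngrok import conf

config = conf.PyngrokConfig(auth_token='None', region='eu')
print(config.region)  # 'eu'
# ngrok.connect(port, pyngrok_config=config) then opens the tunnel with these settings.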
modules/processing.py

@@ -53,7 +53,7 @@ def get_correct_sampler(p):
         return sd_samplers.samplers_for_img2img
 
 class StableDiffusionProcessing:
-    def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", styles=None, seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, seed_enable_extras=True, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None, eta=None):
+    def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", styles=None, seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, seed_enable_extras=True, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None, eta=None, do_not_reload_embeddings=False):
         self.sd_model = sd_model
         self.outpath_samples: str = outpath_samples
         self.outpath_grids: str = outpath_grids
@@ -80,6 +80,7 @@ class StableDiffusionProcessing:
         self.extra_generation_params: dict = extra_generation_params or {}
         self.overlay_images = overlay_images
         self.eta = eta
+        self.do_not_reload_embeddings = do_not_reload_embeddings
         self.paste_to = None
         self.color_corrections = None
         self.denoising_strength: float = 0
@@ -364,7 +365,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
     def infotext(iteration=0, position_in_batch=0):
         return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch)
 
-    if os.path.exists(cmd_opts.embeddings_dir):
+    if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
         model_hijack.embedding_db.load_textual_inversion_embeddings()
 
     infotexts = []
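The processing.py hunks thread a new do_not_reload_embeddings flag from the StableDiffusionProcessing constructor into process_images(), so callers that render many images in a row (the textual-inversion trainer below passes True) can skip re-reading the embeddings directory every time. A toy sketch of the guard; the class and directory name are stand-ins, not the real webui objects:

import os

class FakeProcessing:
    do_not_reload_embeddings = True  # what train_embedding() now passes

embeddings_dir = 'embeddings'  # placeholder for cmd_opts.embeddings_dir
p = FakeProcessing()

if os.path.exists(embeddings_dir) and not p.do_not_reload_embeddings:
    print('reloading textual inversion embeddings')
else:
    print('reload skipped')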
modules/shared.py

@@ -40,6 +40,7 @@ parser.add_argument("--unload-gfpgan", action='store_true', help="does not do an
 parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
 parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)")
 parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
+parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
 parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
 parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
 parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
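The new --ngrok-region flag in modules/shared.py ends up as cmd_opts.ngrok_region (argparse turns the dash into an underscore), which ui.py forwards to ngrok.connect() further below. A minimal argparse sketch of just these two options; the token string is a placeholder:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")

args = parser.parse_args(["--ngrok", "example-token", "--ngrok-region", "eu"])
print(args.ngrok_region)  # 'eu'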
modules/textual_inversion/textual_inversion.py

@@ -296,6 +296,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
                 sd_model=shared.sd_model,
                 do_not_save_grid=True,
                 do_not_save_samples=True,
+                do_not_reload_embeddings=True,
             )
 
             if preview_from_txt2img:
modules/ui.py

@@ -56,7 +56,7 @@ if not cmd_opts.share and not cmd_opts.listen:
 if cmd_opts.ngrok != None:
     import modules.ngrok as ngrok
     print('ngrok authtoken detected, trying to connect...')
-    ngrok.connect(cmd_opts.ngrok, cmd_opts.port if cmd_opts.port != None else 7860)
+    ngrok.connect(cmd_opts.ngrok, cmd_opts.port if cmd_opts.port != None else 7860, cmd_opts.ngrok_region)
 
 
 def gr_show(visible=True):
@@ -508,9 +508,11 @@ def create_toprow(is_img2img):
         with gr.Row():
             with gr.Column(scale=1, elem_id="style_pos_col"):
                 prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())))
+                prompt_style.save_to_config = True
 
             with gr.Column(scale=1, elem_id="style_neg_col"):
                 prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())))
+                prompt_style2.save_to_config = True
 
     return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button
 
@@ -566,6 +568,24 @@ def create_ui(wrap_gradio_gpu_call):
     import modules.img2img
     import modules.txt2img
 
+    def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id):
+        def refresh():
+            refresh_method()
+            args = refreshed_args() if callable(refreshed_args) else refreshed_args
+
+            for k, v in args.items():
+                setattr(refresh_component, k, v)
+
+            return gr.update(**(args or {}))
+
+        refresh_button = gr.Button(value=refresh_symbol, elem_id=elem_id)
+        refresh_button.click(
+            fn = refresh,
+            inputs = [],
+            outputs = [refresh_component]
+        )
+        return refresh_button
+
     with gr.Blocks(analytics_enabled=False) as txt2img_interface:
         txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, token_counter, token_button = create_toprow(is_img2img=False)
         dummy_component = gr.Label(visible=False)
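create_refresh_button() pairs any component with a small button that re-runs a refresh callback, copies the recomputed constructor arguments onto the component, and pushes them to the browser via gr.update(). A hypothetical standalone sketch of the same helper wired to a dropdown (requires gradio; the choices list, refresh logic and elem_id are invented for the example):

import gradio as gr

refresh_symbol = '\U0001f504'  # stand-in for the symbol defined elsewhere in ui.py

def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id):
    def refresh():
        refresh_method()
        args = refreshed_args() if callable(refreshed_args) else refreshed_args
        for k, v in args.items():
            setattr(refresh_component, k, v)
        return gr.update(**(args or {}))

    refresh_button = gr.Button(value=refresh_symbol, elem_id=elem_id)
    refresh_button.click(fn=refresh, inputs=[], outputs=[refresh_component])
    return refresh_button

choices = ['embedding-a', 'embedding-b']

with gr.Blocks() as demo:
    dropdown = gr.Dropdown(label='Embedding', choices=list(choices))
    create_refresh_button(dropdown, lambda: choices.append('embedding-c'),
                          lambda: {"choices": sorted(choices)}, "refresh_demo_dropdown")
# demo.launch() would serve the page; clicking the button re-populates the dropdown.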
@@ -1016,6 +1036,15 @@ def create_ui(wrap_gradio_gpu_call):
                     with gr.TabItem('Batch Process'):
                         image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
 
+                    with gr.TabItem('Batch from Directory'):
+                        extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs,
+                            placeholder="A directory on the same machine where the server is running."
+                        )
+                        extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs,
+                            placeholder="Leave blank to save images to the default path."
+                        )
+                        show_extras_results = gr.Checkbox(label='Show result images', value=True)
+
                 with gr.Tabs(elem_id="extras_resize_mode"):
                     with gr.TabItem('Scale by'):
                         upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
@@ -1060,6 +1089,9 @@ def create_ui(wrap_gradio_gpu_call):
                 dummy_component,
                 extras_image,
                 image_batch,
+                extras_batch_input_dir,
+                extras_batch_output_dir,
+                show_extras_results,
                 gfpgan_visibility,
                 codeformer_visibility,
                 codeformer_weight,
@@ -1191,8 +1223,12 @@ def create_ui(wrap_gradio_gpu_call):
 
         with gr.Tab(label="Train"):
             gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding; must specify a directory with a set of 1:1 ratio images</p>")
+            with gr.Row():
                 train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
+                create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")
+            with gr.Row():
                 train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', choices=[x for x in shared.hypernetworks.keys()])
+                create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, "refresh_train_hypernetwork_name")
             learn_rate = gr.Textbox(label='Learning rate', placeholder="Learning rate", value="0.005")
             batch_size = gr.Number(label='Batch size', value=1, precision=0)
             dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images")
@@ -1343,26 +1379,11 @@ def create_ui(wrap_gradio_gpu_call):
         if info.refresh is not None:
             if is_quicksettings:
                 res = comp(label=info.label, value=fun, **(args or {}))
-                refresh_button = gr.Button(value=refresh_symbol, elem_id="refresh_"+key)
+                refresh_button = create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key)
             else:
                 with gr.Row(variant="compact"):
                     res = comp(label=info.label, value=fun, **(args or {}))
-                    refresh_button = gr.Button(value=refresh_symbol, elem_id="refresh_" + key)
+                    refresh_button = create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key)
-
-            def refresh():
-                info.refresh()
-                refreshed_args = info.component_args() if callable(info.component_args) else info.component_args
-
-                for k, v in refreshed_args.items():
-                    setattr(res, k, v)
-
-                return gr.update(**(refreshed_args or {}))
-
-            refresh_button.click(
-                fn=refresh,
-                inputs=[],
-                outputs=[res],
-            )
         else:
             res = comp(label=info.label, value=fun, **(args or {}))
 
@@ -1703,7 +1724,9 @@ Requested path was: {f}
         saved_value = ui_settings.get(key, None)
         if saved_value is None:
             ui_settings[key] = getattr(obj, field)
-        elif condition is None or condition(saved_value):
+        elif condition and not condition(saved_value):
+            print(f'Warning: Bad ui setting value: {key}: {saved_value}; Default value "{getattr(obj, field)}" will be used instead.')
+        else:
             setattr(obj, field, saved_value)
 
     if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
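The apply_field() change above flips the condition check: previously a saved value was applied when there was no condition or the condition passed; now a value that fails an explicit condition is rejected with a warning and the default is kept, while everything else is applied as before. A toy sketch of the new control flow with stand-in names:

def pick(saved_value, default, condition=None):
    if saved_value is None:
        return default
    elif condition and not condition(saved_value):
        print(f'Warning: Bad ui setting value: {saved_value}; default "{default}" will be used instead.')
        return default
    else:
        return saved_value

valid = lambda v: v in ['sd-v1-4.ckpt']             # e.g. the dropdown's choices
print(pick('missing.ckpt', 'sd-v1-4.ckpt', valid))  # warns, keeps the default
print(pick('sd-v1-4.ckpt', 'sd-v1-4.ckpt', valid))  # applies the saved value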
@@ -1727,6 +1750,11 @@ Requested path was: {f}
         if type(x) == gr.Number:
             apply_field(x, 'value')
 
+        # Since there are many dropdowns that shouldn't be saved,
+        # we only mark dropdowns that should be saved.
+        if type(x) == gr.Dropdown and getattr(x, 'save_to_config', False):
+            apply_field(x, 'value', lambda val: val in x.choices)
+
     visit(txt2img_interface, loadsave, "txt2img")
     visit(img2img_interface, loadsave, "img2img")
     visit(extras_interface, loadsave, "extras")
style.css

@@ -478,7 +478,7 @@ input[type="range"]{
     padding: 0;
 }
 
-#refresh_sd_model_checkpoint, #refresh_sd_hypernetwork{
+#refresh_sd_model_checkpoint, #refresh_sd_hypernetwork, #refresh_train_hypernetwork_name, #refresh_train_embedding_name{
     max-width: 2.5em;
     min-width: 2.5em;
     height: 2.4em;