From 1742c04bab5ff88b53dee60cfb9b90076dd98512 Mon Sep 17 00:00:00 2001 From: rucadi Date: Fri, 16 Dec 2022 17:10:13 +0100 Subject: [PATCH 001/160] Add polling callback --- modules/script_callbacks.py | 12 ++++++++++++ webui.py | 1 + 2 files changed, 13 insertions(+) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 8e22f8755..715e1830d 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -63,6 +63,7 @@ callback_map = dict( callbacks_cfg_denoiser=[], callbacks_before_component=[], callbacks_after_component=[], + callbacks_on_polling=[], ) @@ -78,6 +79,12 @@ def app_started_callback(demo: Optional[Blocks], app: FastAPI): except Exception: report_exception(c, 'app_started_callback') +def app_polling_callback(demo: Optional[Blocks], app: FastAPI): + for c in callback_map['callbacks_on_polling']: + try: + c.callback() + except Exception: + report_exception(c, 'callbacks_on_polling') def model_loaded_callback(sd_model): for c in callback_map['callbacks_model_loaded']: @@ -184,6 +191,11 @@ def on_app_started(callback): add_callback(callback_map['callbacks_app_started'], callback) +def on_polling(callback): + """register a function to be called on each polling of the server.""" + add_callback(callback_map['callbacks_on_polling'], callback) + + def on_model_loaded(callback): """register a function to be called when the stable diffusion model is created; the model is passed as an argument""" diff --git a/webui.py b/webui.py index c2d0c6be8..5f7a53f9c 100644 --- a/webui.py +++ b/webui.py @@ -106,6 +106,7 @@ def create_api(app): def wait_on_server(demo=None): while 1: time.sleep(0.5) + modules.script_callbacks.app_polling_callback(None, demo) if shared.state.need_restart: shared.state.need_restart = False time.sleep(0.5) From 0c8825b2bec3a68836eacf37718306c9c78554a0 Mon Sep 17 00:00:00 2001 From: rucadi Date: Fri, 16 Dec 2022 18:31:20 +0100 Subject: [PATCH 002/160] Add a callback called before reloading the server --- modules/script_callbacks.py | 13 ++++++++++++- webui.py | 1 + 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 715e1830d..b646b0f97 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -64,6 +64,7 @@ callback_map = dict( callbacks_before_component=[], callbacks_after_component=[], callbacks_on_polling=[], + callbacks_on_reload=[], ) @@ -71,7 +72,6 @@ def clear_callbacks(): for callback_list in callback_map.values(): callback_list.clear() - def app_started_callback(demo: Optional[Blocks], app: FastAPI): for c in callback_map['callbacks_app_started']: try: @@ -86,6 +86,14 @@ def app_polling_callback(demo: Optional[Blocks], app: FastAPI): except Exception: report_exception(c, 'callbacks_on_polling') +def app_reload_callback(demo: Optional[Blocks], app: FastAPI): + for c in callback_map['callbacks_on_reload']: + try: + c.callback() + except Exception: + report_exception(c, 'callbacks_on_reload') + + def model_loaded_callback(sd_model): for c in callback_map['callbacks_model_loaded']: try: @@ -195,6 +203,9 @@ def on_polling(callback): """register a function to be called on each polling of the server.""" add_callback(callback_map['callbacks_on_polling'], callback) +def on_before_reload(callback): + """register a function to be called just before the server reloads.""" + add_callback(callback_map['callbacks_on_reload'], callback) def on_model_loaded(callback): """register a function to be called when the stable diffusion model is created; the 
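Patches 001 and 002 above add `on_polling` and `on_before_reload` registration hooks to modules/script_callbacks.py. A minimal sketch of how an extension script might use them, assuming both callbacks are invoked with no arguments (matching the bare `c.callback()` calls in the diffs); the function names below are illustrative only:

```python
# Hedged sketch of an extension registering the new hooks; only the
# script_callbacks API itself comes from the patches above.
from modules import script_callbacks

def on_poll():
    # runs roughly every 0.5 s, once per iteration of wait_on_server()
    pass

def before_reload():
    # flush caches or close handles before the server restarts the UI
    pass

script_callbacks.on_polling(on_poll)
script_callbacks.on_before_reload(before_reload)
```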
model is diff --git a/webui.py b/webui.py index 5f7a53f9c..dda34249b 100644 --- a/webui.py +++ b/webui.py @@ -108,6 +108,7 @@ def wait_on_server(demo=None): time.sleep(0.5) modules.script_callbacks.app_polling_callback(None, demo) if shared.state.need_restart: + modules.script_callbacks.app_reload_callback(None, demo) shared.state.need_restart = False time.sleep(0.5) demo.close() From b921a52071cf2a5e551c31a6073af6eaebbf7847 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 28 Jan 2023 03:19:10 -0600 Subject: [PATCH 003/160] basic image next and prev control with joystick --- javascript/imageviewer.js | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index 67916536e..0488bfb43 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -210,6 +210,19 @@ document.addEventListener("DOMContentLoaded", function() { modal.id = "lightboxModal"; modal.tabIndex = 0 modal.addEventListener('keydown', modalKeyHandler, true) + window.addEventListener('gamepadconnected', (e) => { + console.log("Gamepad connected!") + const gamepad = e.gamepad; + setInterval(() => { + const xValue = gamepad.axes[0].toFixed(2); + if (xValue < -0.3) { + modalPrevImage(); + } else if (xValue > 0.3) { + modalNextImage(); + } + + }, 350); + }); const modalControls = document.createElement('div') modalControls.className = 'modalControls gradio-container'; From 3662a274e2b6482c4ad831cc2d7976d919b40212 Mon Sep 17 00:00:00 2001 From: rucadi Date: Fri, 16 Dec 2022 17:10:13 +0100 Subject: [PATCH 004/160] Add polling callback --- modules/script_callbacks.py | 12 ++++++++++++ webui.py | 1 + 2 files changed, 13 insertions(+) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 4bb45ec74..7763936f7 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -74,6 +74,7 @@ callback_map = dict( callbacks_infotext_pasted=[], callbacks_script_unloaded=[], callbacks_before_ui=[], + callbacks_on_polling=[], ) @@ -89,6 +90,12 @@ def app_started_callback(demo: Optional[Blocks], app: FastAPI): except Exception: report_exception(c, 'app_started_callback') +def app_polling_callback(demo: Optional[Blocks], app: FastAPI): + for c in callback_map['callbacks_on_polling']: + try: + c.callback() + except Exception: + report_exception(c, 'callbacks_on_polling') def model_loaded_callback(sd_model): for c in callback_map['callbacks_model_loaded']: @@ -227,6 +234,11 @@ def on_app_started(callback): add_callback(callback_map['callbacks_app_started'], callback) +def on_polling(callback): + """register a function to be called on each polling of the server.""" + add_callback(callback_map['callbacks_on_polling'], callback) + + def on_model_loaded(callback): """register a function to be called when the stable diffusion model is created; the model is passed as an argument; this function is also called when the script is reloaded. 
""" diff --git a/webui.py b/webui.py index 5b5c21399..6c2b511cf 100644 --- a/webui.py +++ b/webui.py @@ -171,6 +171,7 @@ def create_api(app): def wait_on_server(demo=None): while 1: time.sleep(0.5) + modules.script_callbacks.app_polling_callback(None, demo) if shared.state.need_restart: shared.state.need_restart = False time.sleep(0.5) From eb5eb8aa117c3d9ff9c55c59bc589ad8e983919e Mon Sep 17 00:00:00 2001 From: rucadi Date: Fri, 16 Dec 2022 18:31:20 +0100 Subject: [PATCH 005/160] Add a callback called before reloading the server --- modules/script_callbacks.py | 13 ++++++++++++- webui.py | 1 + 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 7763936f7..91fd21f43 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -75,6 +75,7 @@ callback_map = dict( callbacks_script_unloaded=[], callbacks_before_ui=[], callbacks_on_polling=[], + callbacks_on_reload=[], ) @@ -82,7 +83,6 @@ def clear_callbacks(): for callback_list in callback_map.values(): callback_list.clear() - def app_started_callback(demo: Optional[Blocks], app: FastAPI): for c in callback_map['callbacks_app_started']: try: @@ -97,6 +97,14 @@ def app_polling_callback(demo: Optional[Blocks], app: FastAPI): except Exception: report_exception(c, 'callbacks_on_polling') +def app_reload_callback(demo: Optional[Blocks], app: FastAPI): + for c in callback_map['callbacks_on_reload']: + try: + c.callback() + except Exception: + report_exception(c, 'callbacks_on_reload') + + def model_loaded_callback(sd_model): for c in callback_map['callbacks_model_loaded']: try: @@ -238,6 +246,9 @@ def on_polling(callback): """register a function to be called on each polling of the server.""" add_callback(callback_map['callbacks_on_polling'], callback) +def on_before_reload(callback): + """register a function to be called just before the server reloads.""" + add_callback(callback_map['callbacks_on_reload'], callback) def on_model_loaded(callback): """register a function to be called when the stable diffusion model is created; the model is diff --git a/webui.py b/webui.py index 6c2b511cf..ddccf870e 100644 --- a/webui.py +++ b/webui.py @@ -173,6 +173,7 @@ def wait_on_server(demo=None): time.sleep(0.5) modules.script_callbacks.app_polling_callback(None, demo) if shared.state.need_restart: + modules.script_callbacks.app_reload_callback(None, demo) shared.state.need_restart = False time.sleep(0.5) demo.close() From bfa14db2cb0e266121317e3624b93708b29f2e88 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 7 Feb 2023 16:54:12 -0600 Subject: [PATCH 006/160] enable gallery scrolling functionality for horizontal scroll and gamepads --- javascript/imageviewer.js | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index 3f7b12894..e5355d0be 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -219,18 +219,37 @@ document.addEventListener("DOMContentLoaded", function() { modal.id = "lightboxModal"; modal.tabIndex = 0 modal.addEventListener('keydown', modalKeyHandler, true) + + let delay = 350//ms window.addEventListener('gamepadconnected', (e) => { console.log("Gamepad connected!") const gamepad = e.gamepad; setInterval(() => { const xValue = gamepad.axes[0].toFixed(2); if (xValue < -0.3) { - modalPrevImage(); + modalPrevImage(e); } else if (xValue > 0.3) { - modalNextImage(); + modalNextImage(e); } - }, 350); + }, delay); + }); + + + let isScrolling = false; + 
window.addEventListener('wheel', (e) => { + if (isScrolling) return; + isScrolling = true; + + if (e.deltaX <= -0.6) { + modalPrevImage(e); + } else if (e.deltaX >= 0.6) { + modalNextImage(e); + } + + setTimeout(() => { + isScrolling = false; + }, delay); }); const modalControls = document.createElement('div') From d78c4375833f063370fd40eab7b5322455c93683 Mon Sep 17 00:00:00 2001 From: ParityError Date: Sun, 12 Mar 2023 12:41:27 -0700 Subject: [PATCH 007/160] Update webui-user.sh Installation should not be assumed to be located within ~/home directory. User should expected to install project anywhere and run the startup scripts while in stable-diffusion-webui directory. --- webui-user.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui-user.sh b/webui-user.sh index bfa53cb7c..74e8800c4 100644 --- a/webui-user.sh +++ b/webui-user.sh @@ -4,7 +4,7 @@ ######################################################### # Install directory without trailing slash -#install_dir="/home/$(whoami)" +#install_dir="$(pwd)" # Name of the subdirectory #clone_dir="stable-diffusion-webui" From 6439e72df2cd2126734ce4a69c277502c4c9982d Mon Sep 17 00:00:00 2001 From: ParityError <36368048+ParityError@users.noreply.github.com> Date: Sun, 12 Mar 2023 15:08:26 -0700 Subject: [PATCH 008/160] Update webui.sh Installation should not be assumed to be located within ~/home directory. User should be expected to install project anywhere and run the startup scripts while in stable-diffusion-webui directory. See issue #8534 --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index 8cdad22d3..b01e4fa95 100755 --- a/webui.sh +++ b/webui.sh @@ -23,7 +23,7 @@ fi # Install directory without trailing slash if [[ -z "${install_dir}" ]] then - install_dir="/home/$(whoami)" + install_dir="($pwd)" fi # Name of the subdirectory (defaults to stable-diffusion-webui) From 5c051c0618bec1417827910b601ba915d0ca6c4e Mon Sep 17 00:00:00 2001 From: ParityError <36368048+ParityError@users.noreply.github.com> Date: Sun, 12 Mar 2023 15:10:44 -0700 Subject: [PATCH 009/160] Update webui.sh Installation should not be assumed to be located within ~/home directory. User should be expected to install project anywhere and run the startup scripts while in stable-diffusion-webui directory. 
See issue #8534 --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index b01e4fa95..cf6496646 100755 --- a/webui.sh +++ b/webui.sh @@ -23,7 +23,7 @@ fi # Install directory without trailing slash if [[ -z "${install_dir}" ]] then - install_dir="($pwd)" + install_dir="$(pwd)" fi # Name of the subdirectory (defaults to stable-diffusion-webui) From 40dc0132df0aae62078b29384600570688e3eb79 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 13 Mar 2023 03:39:02 -0500 Subject: [PATCH 010/160] modularize --- javascript/imageviewer.js | 35 ++++--------------------------- javascript/imageviewerGamepad.js | 36 ++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 31 deletions(-) create mode 100644 javascript/imageviewerGamepad.js diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index e5355d0be..fa7a67d54 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -220,37 +220,10 @@ document.addEventListener("DOMContentLoaded", function() { modal.tabIndex = 0 modal.addEventListener('keydown', modalKeyHandler, true) - let delay = 350//ms - window.addEventListener('gamepadconnected', (e) => { - console.log("Gamepad connected!") - const gamepad = e.gamepad; - setInterval(() => { - const xValue = gamepad.axes[0].toFixed(2); - if (xValue < -0.3) { - modalPrevImage(e); - } else if (xValue > 0.3) { - modalNextImage(e); - } - - }, delay); - }); - - - let isScrolling = false; - window.addEventListener('wheel', (e) => { - if (isScrolling) return; - isScrolling = true; - - if (e.deltaX <= -0.6) { - modalPrevImage(e); - } else if (e.deltaX >= 0.6) { - modalNextImage(e); - } - - setTimeout(() => { - isScrolling = false; - }, delay); - }); + // detect gamepads and enable related functionality + let gamepadScript = document.createElement('script'); + gamepadScript.src = 'imageviewerGamepad.js'; + document.body.appendChild(gamepadScript); const modalControls = document.createElement('div') modalControls.className = 'modalControls gradio-container'; diff --git a/javascript/imageviewerGamepad.js b/javascript/imageviewerGamepad.js new file mode 100644 index 000000000..29bd71403 --- /dev/null +++ b/javascript/imageviewerGamepad.js @@ -0,0 +1,36 @@ + let delay = 350//ms + window.addEventListener('gamepadconnected', (e) => { + console.log("Gamepad connected!") + const gamepad = e.gamepad; + setInterval(() => { + const xValue = gamepad.axes[0].toFixed(2); + if (xValue < -0.3) { + modalPrevImage(e); + } else if (xValue > 0.3) { + modalNextImage(e); + } + + }, delay); + }); + + + /* + Primarily for vr controller type pointer devices. + I use the wheel event because there's currently no way to do it properly with web xr. 
+ */ + + let isScrolling = false; + window.addEventListener('wheel', (e) => { + if (isScrolling) return; + isScrolling = true; + + if (e.deltaX <= -0.6) { + modalPrevImage(e); + } else if (e.deltaX >= 0.6) { + modalNextImage(e); + } + + setTimeout(() => { + isScrolling = false; + }, delay); + }); \ No newline at end of file From 54291f9d63f0c7dc445b90f3afb37fc330739557 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 15 Mar 2023 04:33:38 -0500 Subject: [PATCH 011/160] remove redundant load --- javascript/imageviewer.js | 5 ----- 1 file changed, 5 deletions(-) diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index fa7a67d54..aac2ee823 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -220,11 +220,6 @@ document.addEventListener("DOMContentLoaded", function() { modal.tabIndex = 0 modal.addEventListener('keydown', modalKeyHandler, true) - // detect gamepads and enable related functionality - let gamepadScript = document.createElement('script'); - gamepadScript.src = 'imageviewerGamepad.js'; - document.body.appendChild(gamepadScript); - const modalControls = document.createElement('div') modalControls.className = 'modalControls gradio-container'; modal.append(modalControls); From a80d7d090ce19d9b275f6b0d6b8dbbf61a1992e0 Mon Sep 17 00:00:00 2001 From: Rucadi Date: Tue, 21 Mar 2023 18:47:05 +0100 Subject: [PATCH 012/160] Update script_callbacks.py --- modules/script_callbacks.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index f758a1ee9..8e80dd8dc 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -75,7 +75,6 @@ callback_map = dict( callbacks_script_unloaded=[], callbacks_before_ui=[], callbacks_on_reload=[], - callbacks_on_reload=[], callbacks_on_polling=[], ) From d86beb822832c9162714cf0a3567ad087839a2ac Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Thu, 23 Mar 2023 17:09:59 -0400 Subject: [PATCH 013/160] Remove "do not add watermark to images" option --- javascript/hints.js | 1 - modules/shared.py | 1 - requirements.txt | 1 - 3 files changed, 3 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index 7f4101b23..d12e86a93 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -85,7 +85,6 @@ titles = { "vram": "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).", "Eta noise seed delta": "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.", - "Do not add watermark to images": "If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.", "Filename word regex": "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. 
Leave empty to keep filename text as it is.", "Filename join string": "This string will be used to join split words into a single line if the option above is enabled.", diff --git a/modules/shared.py b/modules/shared.py index f28a12ccc..030cd7e0b 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -342,7 +342,6 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), - "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"), "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), diff --git a/requirements.txt b/requirements.txt index 6d53f0893..c6eb2449d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,6 @@ fonts font-roboto gfpgan gradio==3.16.2 -invisible-watermark numpy omegaconf opencv-contrib-python From b2fc7dba2edead2b2e880ea90bd6b5494115b330 Mon Sep 17 00:00:00 2001 From: kurilee Date: Sat, 25 Mar 2023 22:45:41 +0800 Subject: [PATCH 014/160] Add option "keep original size" to textual inversion images preprocess --- modules/textual_inversion/preprocess.py | 10 +++++++--- modules/ui.py | 2 ++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 2239cb842..9ad1d3f46 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -11,7 +11,7 @@ from modules.shared import opts, cmd_opts from modules.textual_inversion import autocrop -def preprocess(id_task, process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None): +def preprocess(id_task, process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None): try: if process_caption: shared.interrogator.load() @@ -19,7 +19,7 @@ def preprocess(id_task, process_src, process_dst, process_width, process_height, if process_caption_deepbooru: deepbooru.model.start() - preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, 
process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug, process_multicrop, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold) + preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug, process_multicrop, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold) finally: @@ -131,7 +131,7 @@ def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, thr return wh and center_crop(image, *wh) -def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None): +def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None): width = process_width height = process_height src = os.path.abspath(process_src) @@ -223,6 +223,10 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre print(f"skipped {img.width}x{img.height} image {filename} (can't find suitable size within error threshold)") process_default_resize = False + if process_keep_original_size: + save_pic(img, index, params, existing_caption=existing_caption) + process_default_resize = False + if process_default_resize: img = images.resize_image(1, img, width, height) save_pic(img, index, params, existing_caption=existing_caption) diff --git a/modules/ui.py b/modules/ui.py index af8546c29..974f2a307 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1098,6 +1098,7 @@ def create_ui(): preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") with gr.Row(): + process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") process_split = gr.Checkbox(label='Split oversized images', 
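[PATCH 014/160] threads a new `process_keep_original_size` positional parameter through `preprocess()` and `preprocess_work()`, immediately after `preprocess_txt_action`. A hedged sketch of calling the updated function, using keyword arguments to avoid depending on the new ordering; the directory paths and sizes are placeholders, not values from the patch:

```python
# Illustrative call only; parameter names come from the updated preprocess()
# signature above, everything else is made up.
from modules.textual_inversion.preprocess import preprocess

preprocess(
    "task-id", "/path/to/source", "/path/to/destination", 512, 512,
    preprocess_txt_action="ignore",
    process_keep_original_size=True,   # new flag: save images without resizing
    process_flip=False,
    process_split=False,
    process_caption=False,
)
```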
elem_id="train_process_split") process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") @@ -1264,6 +1265,7 @@ def create_ui(): process_width, process_height, preprocess_txt_action, + process_keep_original_size, process_flip, process_split, process_caption, From c9647c8d23efa8c939c6af39878784e246082122 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sat, 25 Mar 2023 16:11:41 -0400 Subject: [PATCH 015/160] Support Gradio's theme API --- modules/shared.py | 35 +++++++++++++++++++++++++++++++++++ modules/ui.py | 2 +- webui.py | 1 + 3 files changed, 37 insertions(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 11be3985d..2f7892cd6 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -4,6 +4,7 @@ import json import os import sys import time +import requests from PIL import Image import gradio as gr @@ -54,6 +55,21 @@ ui_reorder_categories = [ "scripts", ] +# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json +gradio_hf_hub_themes = [ + "gradio/glass", + "gradio/monochrome", + "gradio/seafoam", + "gradio/soft", + "freddyaboulton/dracula_revamped", + "gradio/dracula_test", + "abidlabs/dracula_test", + "abidlabs/pakistan", + "dawood/microsoft_windows", + "ysharma/steampunk" +] + + cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ @@ -387,6 +403,7 @@ options_templates.update(options_section(('ui', "User interface"), { "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), + "gradio_theme": OptionInfo("Default", "Gradio theme (requires restart)", gr.Dropdown, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}) })) options_templates.update(options_section(('ui', "Live previews"), { @@ -599,6 +616,24 @@ clip_model = None progress_print_out = sys.stdout +gradio_theme = gr.themes.Base() + + +def reload_gradio_theme(theme_name=None): + global gradio_theme + if not theme_name: + theme_name = opts.gradio_theme + + if theme_name == "Default": + gradio_theme = gr.themes.Default() + else: + try: + gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) + except requests.exceptions.ConnectionError: + print("Can't access HuggingFace Hub, falling back to default Gradio theme") + gradio_theme = gr.themes.Default() + + class TotalTQDM: def __init__(self): diff --git a/modules/ui.py b/modules/ui.py index af8546c29..6e0498811 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1592,7 +1592,7 @@ def create_ui(): for _interface, label, _ifid in interfaces: shared.tab_names.append(label) - with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo: + with gr.Blocks(css=css, theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: with gr.Row(elem_id="quicksettings", variant="compact"): for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])): component = create_setting_component(k, 
is_quicksettings=True) diff --git a/webui.py b/webui.py index 30f3e4a1f..6986e576a 100644 --- a/webui.py +++ b/webui.py @@ -150,6 +150,7 @@ def initialize(): shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) shared.opts.onchange("sd_vae_as_default", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed) + shared.opts.onchange("gradio_theme", shared.reload_gradio_theme) startup_timer.record("opts onchange") shared.reload_hypernetworks() From 88515267b94c7518d48d34e037163fa8c3ceca33 Mon Sep 17 00:00:00 2001 From: Reimoo Date: Sun, 26 Mar 2023 10:29:19 -0700 Subject: [PATCH 016/160] Changed: extra network height css Changed it so cards take up a set amount of vertical space but added the ability to scroll and resize. --- style.css | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/style.css b/style.css index 0dcc3e25d..aca32fcf2 100644 --- a/style.css +++ b/style.css @@ -596,6 +596,12 @@ footer { /* extra networks UI */ +.extra-network-cards{ + height: 400px; + overflow: scroll; + resize: vertical; +} + .extra-networks > div > [id *= '_extra_']{ margin: 0.3em; } From 527680cd70267f3fb76439de42dd21757ec442cc Mon Sep 17 00:00:00 2001 From: Reimoo Date: Mon, 27 Mar 2023 10:00:01 -0700 Subject: [PATCH 017/160] Update style.css Co-authored-by: missionfloyd --- style.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/style.css b/style.css index aca32fcf2..1a58c4a72 100644 --- a/style.css +++ b/style.css @@ -596,7 +596,7 @@ footer { /* extra networks UI */ -.extra-network-cards{ +#txt2img_extra_tabs > .tabitem, #img2img_extra_tabs > .tabitem{ height: 400px; overflow: scroll; resize: vertical; From 6f77567e13227b49626ebaa827cc0f39350bcb2e Mon Sep 17 00:00:00 2001 From: Reimoo Date: Mon, 27 Mar 2023 10:08:42 -0700 Subject: [PATCH 018/160] Update style.css --- style.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/style.css b/style.css index 1a58c4a72..aca32fcf2 100644 --- a/style.css +++ b/style.css @@ -596,7 +596,7 @@ footer { /* extra networks UI */ -#txt2img_extra_tabs > .tabitem, #img2img_extra_tabs > .tabitem{ +.extra-network-cards{ height: 400px; overflow: scroll; resize: vertical; From 9ecf3471339d011983f2e3c878e920e49718ff90 Mon Sep 17 00:00:00 2001 From: AlUlkesh <99896447+AlUlkesh@users.noreply.github.com> Date: Mon, 27 Mar 2023 20:01:19 +0200 Subject: [PATCH 019/160] fix: lightboxModal, selectedTab --- javascript/generationParams.js | 2 +- javascript/imageviewer.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/generationParams.js b/javascript/generationParams.js index 95f050939..06a771bc7 100644 --- a/javascript/generationParams.js +++ b/javascript/generationParams.js @@ -16,7 +16,7 @@ onUiUpdate(function(){ let modalObserver = new MutationObserver(function(mutations) { mutations.forEach(function(mutationRecord) { - let selectedTab = gradioApp().querySelector('#tabs div button.bg-white')?.innerText + let selectedTab = gradioApp().querySelector('#tabs div button')?.innerText if (mutationRecord.target.style.display === 'none' && selectedTab === 'txt2img' || selectedTab === 'img2img') gradioApp().getElementById(selectedTab+"_generation_info_button").click() }); diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index d64835622..bb61ee244 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -251,7 +251,7 @@ 
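[PATCH 015/160] above adds Gradio theme support: a `gradio_theme` option listing Hugging Face hub themes and a `reload_gradio_theme()` helper that falls back to the default theme when the hub is unreachable. A small sketch of switching the theme from code, under the assumption that the option can be assigned like any other entry in `shared.opts`:

```python
# Sketch only; "gradio/soft" is one of the entries in gradio_hf_hub_themes above.
from modules import shared

shared.opts.gradio_theme = "gradio/soft"
shared.reload_gradio_theme()   # rebuilds shared.gradio_theme; takes effect on the next UI restart
```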
document.addEventListener("DOMContentLoaded", function() { modal.appendChild(modalNext) - gradioApp().appendChild(modal) + gradioApp().body.appendChild(modal) document.body.appendChild(modal); From d667fc435f6210575ba50a6f3a05d3853b233caa Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 28 Mar 2023 22:23:40 +0300 Subject: [PATCH 020/160] add "resize by" and "resize to" tabs to img2img --- javascript/ui.js | 5 +++++ modules/img2img.py | 8 +++++++- modules/ui.py | 48 +++++++++++++++++++++++++++++++++++++++++++--- style.css | 19 +++++++++++++++++- 4 files changed, 75 insertions(+), 5 deletions(-) diff --git a/javascript/ui.js b/javascript/ui.js index 4a440193b..dc5382311 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -361,3 +361,8 @@ function selectCheckpoint(name){ desiredCheckpointName = name; gradioApp().getElementById('change_checkpoint').click() } + +function currentImg2imgSourceResolution(_, _, scaleBy){ + var img = gradioApp().querySelector('#mode_img2img > div[style="display: block;"] img') + return img ? [img.naturalWidth, img.naturalHeight, scaleBy] : [0, 0, scaleBy] +} diff --git a/modules/img2img.py b/modules/img2img.py index 953ac5d2d..d54728b7e 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): processed_image.save(os.path.join(output_dir, filename)) -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -114,6 +114,12 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s if image is not None: image = ImageOps.exif_transpose(image) + if selected_scale_tab == 1: + assert image, "Can't scale by because no image is selected" + + width = int(image.width * scale_by) + height = int(image.height * scale_by) + assert 0. 
<= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]' p = StableDiffusionProcessingImg2Img( diff --git a/modules/ui.py b/modules/ui.py index eb5fcd3fb..653eb6654 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -127,6 +127,16 @@ def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resiz return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" +def resize_from_to_html(width, height, scale_by): + target_width = int(width * scale_by) + target_height = int(height * scale_by) + + if not target_width or not target_height: + return "no image selected" + + return f"resize: from {width}x{height} to {target_width}x{target_height}" + + def apply_styles(prompt, prompt_neg, styles): prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) @@ -673,6 +683,8 @@ def create_ui(): copy_image_buttons.append((button, name, elem)) with gr.Tabs(elem_id="mode_img2img"): + img2img_selected_tab = gr.State(0) + with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=480) add_copy_image_controls('img2img', init_img) @@ -715,6 +727,12 @@ def create_ui(): img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") + img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] + img2img_image_inputs = [init_img, sketch, init_img_with_mask, inpaint_color_sketch] + + for i, tab in enumerate(img2img_tabs): + tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) + def copy_image(img): if isinstance(img, dict) and 'image' in img: return img['image'] @@ -744,8 +762,30 @@ def create_ui(): elif category == "dimensions": with FormRow(): with gr.Column(elem_id="img2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + selected_scale_tab = gr.State(value=0) + + with gr.Tabs(): + with gr.Tab(label="Resize to") as tab_scale_to: + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + + with gr.Tab(label="Resize by") as tab_scale_by: + scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") + + with FormRow(): + scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") + gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") + + scale_by.change( + fn=resize_from_to_html, + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, scale_by], + outputs=scale_by_html, + show_progress=False, + ) + + tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) + tab_scale_by.select(fn=lambda: 1, inputs=[], 
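The "Resize by" tab added in [PATCH 020/160] computes the target resolution by multiplying the source image size by `scale_by` and truncating to integers, both in `img2img()` and in `resize_from_to_html()`. A tiny worked example of that arithmetic; the helper name below is illustrative, not part of the patch:

```python
# Mirrors the width/height computation in the patch above; not the actual helper.
def resize_by(width: int, height: int, scale_by: float) -> tuple[int, int]:
    return int(width * scale_by), int(height * scale_by)

print(resize_by(512, 768, 1.5))    # (768, 1152)
print(resize_by(640, 448, 0.75))   # (480, 336)
```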
outputs=[selected_scale_tab]) with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") @@ -806,7 +846,7 @@ def create_ui(): def select_img2img_tab(tab): return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), - for i, elem in enumerate([tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch]): + for i, elem in enumerate(img2img_tabs): elem.select( fn=lambda tab=i: select_img2img_tab(tab), inputs=[], @@ -859,8 +899,10 @@ def create_ui(): denoising_strength, seed, subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, + selected_scale_tab, height, width, + scale_by, resize_mode, inpaint_full_res, inpaint_full_res_padding, diff --git a/style.css b/style.css index de16a7f2f..c1c9015b5 100644 --- a/style.css +++ b/style.css @@ -293,7 +293,12 @@ button.custom-button{ margin-left: -0.75em } -#txtimg_hr_finalres .resolution{ +#img2img_scale_resolution_preview.block{ + display: flex; + align-items: end; +} + +#txtimg_hr_finalres .resolution, #img2img_scale_resolution_preview .resolution{ font-weight: bold; } @@ -333,6 +338,18 @@ div.dimensions-tools{ overflow-wrap: break-word; } +#img2img_column_batch{ + align-self: end; + margin-bottom: 0.9em; +} + +#img2img_unused_scale_by_slider{ + visibility: hidden; + width: 0.5em; + max-width: 0.5em; + min-width: 0.5em; +} + /* settings */ #quicksettings { width: fit-content; From 5a25826d841a13574ab6afbeb9c50c81a491fa21 Mon Sep 17 00:00:00 2001 From: AlUlkesh <99896447+AlUlkesh@users.noreply.github.com> Date: Tue, 28 Mar 2023 23:28:46 +0200 Subject: [PATCH 021/160] try both versions of appendChild --- javascript/imageviewer.js | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index bb61ee244..3deffa9be 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -251,8 +251,11 @@ document.addEventListener("DOMContentLoaded", function() { modal.appendChild(modalNext) - gradioApp().body.appendChild(modal) - + try { + gradioApp().appendChild(modal); + } catch (e) { + gradioApp().body.appendChild(modal); + } document.body.appendChild(modal); From 42082e8a3239c1c32cd9e2a03a20b610af857b51 Mon Sep 17 00:00:00 2001 From: devdn Date: Tue, 28 Mar 2023 18:18:28 -0400 Subject: [PATCH 022/160] performance increase --- modules/processing.py | 4 +++- modules/sd_samplers_kdiffusion.py | 22 +++++++++++++++++----- modules/shared.py | 1 + scripts/xyz_grid.py | 1 + 4 files changed, 22 insertions(+), 6 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8de..9f00ce3cc 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -105,7 +105,7 @@ class StableDiffusionProcessing: """ The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing """ - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, 
extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) @@ -140,6 +140,7 @@ class StableDiffusionProcessing: self.denoising_strength: float = denoising_strength self.sampler_noise_scheduler_override = None self.ddim_discretize = ddim_discretize or opts.ddim_discretize + self.s_min_uncond = s_min_uncond or opts.s_min_uncond self.s_churn = s_churn or opts.s_churn self.s_tmin = s_tmin or opts.s_tmin self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option @@ -162,6 +163,7 @@ class StableDiffusionProcessing: self.all_seeds = None self.all_subseeds = None self.iteration = 0 + @property def sd_model(self): diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index e9f08518f..6a54ce32b 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -76,7 +76,7 @@ class CFGDenoiser(torch.nn.Module): return denoised - def forward(self, x, sigma, uncond, cond, cond_scale, image_cond): + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): if state.interrupted or state.skipped: raise sd_samplers_common.InterruptedException @@ -116,6 +116,12 @@ class CFGDenoiser(torch.nn.Module): tensor = denoiser_params.text_cond uncond = denoiser_params.text_uncond + sigma_thresh = s_min_uncond + if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_thresh*sigma_thresh) and not is_edit_model): + uncond = torch.zeros([0,0,uncond.shape[2]]) + x_in=x_in[:x_in.shape[0]//2] + sigma_in=sigma_in[:sigma_in.shape[0]//2] + if tensor.shape[1] == uncond.shape[1]: if not is_edit_model: cond_in = torch.cat([tensor, uncond]) @@ -144,7 +150,8 @@ class CFGDenoiser(torch.nn.Module): x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) - x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:])) + if uncond.shape[0]: + 
x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:])) denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps) cfg_denoised_callback(denoised_params) @@ -157,7 +164,10 @@ class CFGDenoiser(torch.nn.Module): sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) if not is_edit_model: - denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) + if uncond.shape[0]: + denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) + else: + denoised = x_out else: denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) @@ -165,7 +175,6 @@ class CFGDenoiser(torch.nn.Module): denoised = self.init_latent * self.mask + self.nmask * denoised self.step += 1 - return denoised @@ -244,6 +253,7 @@ class KDiffusionSampler: self.model_wrap_cfg.step = 0 self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) self.eta = p.eta if p.eta is not None else opts.eta_ancestral + self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) @@ -326,6 +336,7 @@ class KDiffusionSampler: 'image_cond': image_conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond } samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) @@ -359,7 +370,8 @@ class KDiffusionSampler: 'cond': conditioning, 'image_cond': image_conditioning, 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond }, disable=False, callback=self.callback_state, **extra_params_kwargs)) return samples diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecbd..0bdd30b8c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -405,6 +405,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + 's_min_uncond': OptionInfo(0, "minimum sigma to use unconditioned guidance", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}), diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 3895a795c..d6a44b1c8 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -212,6 +212,7 @@ axis_options = [ AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), AxisOptionImg2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]), AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, 
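[PATCH 022/160] above skips the unconditional (negative-prompt) pass once sampling noise is low enough: the uncond batch is dropped when `torch.dot(sigma, sigma) < sigma.shape[0] * s_min_uncond**2`, i.e. when the root-mean-square sigma falls below the new `s_min_uncond` setting. A standalone sketch of just that condition; the numeric values are examples, not taken from the patch:

```python
# Reproduces the skip test from CFGDenoiser.forward() above in isolation.
import torch

def skip_uncond(sigma: torch.Tensor, s_min_uncond: float, is_edit_model: bool = False) -> bool:
    return (not is_edit_model) and bool(torch.dot(sigma, sigma) < sigma.shape[0] * s_min_uncond ** 2)

print(skip_uncond(torch.tensor([0.4, 0.4]), s_min_uncond=1.0))   # True:  only the cond half is denoised
print(skip_uncond(torch.tensor([5.0, 5.0]), s_min_uncond=1.0))   # False: full CFG, as before the patch
```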
choices=lambda: list(sd_models.checkpoints_list)), + AxisOption("Negative Guidance minimum sigma", float, apply_field("s_min_uncond")), AxisOption("Sigma Churn", float, apply_field("s_churn")), AxisOption("Sigma min", float, apply_field("s_tmin")), AxisOption("Sigma max", float, apply_field("s_tmax")), From bc90592031d26d3a6ed5c1b65ee9801452b5ece5 Mon Sep 17 00:00:00 2001 From: devdn Date: Tue, 28 Mar 2023 20:59:31 -0400 Subject: [PATCH 023/160] increase range of negative guidance minimum sigma option --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 0bdd30b8c..0e9f2d549 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -405,7 +405,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - 's_min_uncond': OptionInfo(0, "minimum sigma to use unconditioned guidance", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), + 's_min_uncond': OptionInfo(0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}), 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}), From fb68d93b6a579a424919b22682cf067ce9a8e13f Mon Sep 17 00:00:00 2001 From: ParityError <36368048+ParityError@users.noreply.github.com> Date: Tue, 28 Mar 2023 18:27:44 -0700 Subject: [PATCH 024/160] Update webui-user.sh --- webui-user.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui-user.sh b/webui-user.sh index 74e8800c4..bfa53cb7c 100644 --- a/webui-user.sh +++ b/webui-user.sh @@ -4,7 +4,7 @@ ######################################################### # Install directory without trailing slash -#install_dir="$(pwd)" +#install_dir="/home/$(whoami)" # Name of the subdirectory #clone_dir="stable-diffusion-webui" From f867d7b429d23f8fb82a979c22ba6c90955e0842 Mon Sep 17 00:00:00 2001 From: ParityError <36368048+ParityError@users.noreply.github.com> Date: Tue, 28 Mar 2023 18:34:02 -0700 Subject: [PATCH 025/160] Update README.md Updated to reflect change in webui.sh, so that the installation directory is not absolute (/home/user). --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b67e2296a..c2a3578fd 100644 --- a/README.md +++ b/README.md @@ -115,7 +115,7 @@ sudo dnf install wget git python3 # Arch-based: sudo pacman -S wget git python3 ``` -2. To install in `/home/$(whoami)/stable-diffusion-webui/`, run: +2. Navigate to the directory you would like the webui to be installed and execute the following command: ```bash bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh) ``` @@ -158,4 +158,4 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al - Security advice - RyotaK - UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user. 
-- (You) \ No newline at end of file +- (You) From 70a0a11783747d2e77a08a63d9feeceb7d2d4f63 Mon Sep 17 00:00:00 2001 From: Vespinian Date: Tue, 28 Mar 2023 23:52:51 -0400 Subject: [PATCH 026/160] Changed behavior that puts the args from alwayson_script request in the script_args, so don't accidently resize the arg list if we get less arg then or default list has --- modules/api/api.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/api/api.py b/modules/api/api.py index 518b2a61f..8c5cd1853 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -272,7 +272,9 @@ class Api: raise HTTPException(status_code=422, detail=f"Cannot have a selectable script in the always on scripts params") # always on script with no arg should always run so you don't really need to add them to the requests if "args" in request.alwayson_scripts[alwayson_script_name]: - script_args[alwayson_script.args_from:alwayson_script.args_to] = request.alwayson_scripts[alwayson_script_name]["args"] + # min between arg length in scriptrunner and arg length in the request + for idx in range(0, min((alwayson_script.args_to - alwayson_script.args_from), len(request.alwayson_scripts[alwayson_script_name]["args"]))): + script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx] return script_args def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): From 79d57d02f15aa406e38997d65ec8ed99374691b7 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 01:52:34 -0500 Subject: [PATCH 027/160] Improve custom code extension - Uses `gr.Code` component - Includes example - Can return out of body --- scripts/custom_code.py | 63 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 56 insertions(+), 7 deletions(-) diff --git a/scripts/custom_code.py b/scripts/custom_code.py index d29113e67..4071d86d8 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -1,9 +1,40 @@ import modules.scripts as scripts import gradio as gr +import ast +import copy from modules.processing import Processed from modules.shared import opts, cmd_opts, state + +def convertExpr2Expression(expr): + expr.lineno = 0 + expr.col_offset = 0 + result = ast.Expression(expr.value, lineno=0, col_offset = 0) + + return result + + +def exec_with_return(code, module): + """ + like exec() but can return values + https://stackoverflow.com/a/52361938/5862977 + """ + code_ast = ast.parse(code) + + init_ast = copy.deepcopy(code_ast) + init_ast.body = code_ast.body[:-1] + + last_ast = copy.deepcopy(code_ast) + last_ast.body = code_ast.body[-1:] + + exec(compile(init_ast, "", "exec"), module.__dict__) + if type(last_ast.body[0]) == ast.Expr: + return eval(compile(convertExpr2Expression(last_ast.body[0]), "", "eval"), module.__dict__) + else: + exec(compile(last_ast, "", "exec"), module.__dict__) + + class Script(scripts.Script): def title(self): @@ -13,12 +44,23 @@ class Script(scripts.Script): return cmd_opts.allow_code def ui(self, is_img2img): - code = gr.Textbox(label="Python code", lines=1, elem_id=self.elem_id("code")) + example = """from modules.processing import process_images - return [code] +p.width = 768 +p.height = 768 +p.batch_size = 2 +p.steps = 10 + +return process_images(p) +""" - def run(self, p, code): + code = gr.Code(value=example, language="python", label="Python code", elem_id=self.elem_id("code")) + indent_level = gr.Number(label='Indent level', value=2, precision=0, 
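[PATCH 026/160] above clamps `alwayson_scripts` arguments from API requests to the argument slots the script actually declares, so an over- or under-sized `args` list no longer shifts other scripts' parameters. A hedged example of the request shape this code path consumes; the server URL, the extension name, and its argument values are placeholders:

```python
# Illustrative payload only: "Some Extension" and its args are hypothetical,
# while the "alwayson_scripts" structure matches what the parsing code above reads.
import requests

payload = {
    "prompt": "a photo of a cat",
    "steps": 20,
    "alwayson_scripts": {
        "Some Extension": {"args": [True, 0.5]},   # fewer args than declared is now tolerated
    },
}
requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload)
```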
elem_id=self.elem_id("indent_level")) + + return [code, indent_level] + + def run(self, p, code, indent_level): assert cmd_opts.allow_code, '--allow-code option must be enabled' display_result_data = [[], -1, ""] @@ -29,13 +71,20 @@ class Script(scripts.Script): display_result_data[2] = i from types import ModuleType - compiled = compile(code, '', 'exec') module = ModuleType("testmodule") module.__dict__.update(globals()) module.p = p module.display = display - exec(compiled, module.__dict__) + + indent = " " * indent_level + indented = code.replace('\n', '\n' + indent) + body = f"""def __webuitemp__(): +{indent}{indented} +__webuitemp__()""" + + result = exec_with_return(body, module) + + if isinstance(result, Processed): + return result return Processed(p, *display_result_data) - - \ No newline at end of file From 67955ca9e5cb6b3cc539333d0a7d9591009bc800 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 13:04:02 -0500 Subject: [PATCH 028/160] Make selected tab configurable with UI config --- modules/ui.py | 30 ++++++++++++++++++++++++------ modules/ui_extensions.py | 6 +++--- modules/ui_extra_networks.py | 8 ++++---- modules/ui_postprocessing.py | 6 +++--- 4 files changed, 34 insertions(+), 16 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 627fbe0b5..3595b20d9 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -94,6 +94,9 @@ def send_gradio_gallery_to_image(x): def visit(x, func, path=""): if hasattr(x, 'children'): + if isinstance(x, gr.Tabs) and x.elem_id is not None: + # Tabs element can't have a label, have to use elem_id instead + func(f"{path}/Tabs@{x.elem_id}", x) for c in x.children: visit(c, func, path) elif x.label is not None: @@ -1048,7 +1051,7 @@ def create_ui(): with gr.Row(variant="compact").style(equal_height=False): with gr.Tabs(elem_id="train_tabs"): - with gr.Tab(label="Create embedding"): + with gr.Tab(label="Create embedding", id="create_embedding"): new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") @@ -1061,7 +1064,7 @@ def create_ui(): with gr.Column(): create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") - with gr.Tab(label="Create hypernetwork"): + with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"): new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. 
ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") @@ -1079,7 +1082,7 @@ def create_ui(): with gr.Column(): create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") - with gr.Tab(label="Preprocess images"): + with gr.Tab(label="Preprocess images", id="preprocess_images"): process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") @@ -1146,7 +1149,7 @@ def create_ui(): def get_textual_inversion_template_names(): return sorted([x for x in textual_inversion.textual_inversion_templates]) - with gr.Tab(label="Train"): + with gr.Tab(label="Train", id="train"): gr.HTML(value="

Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]

") with FormRow(): train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) @@ -1479,7 +1482,7 @@ def create_ui(): current_row.__exit__() current_tab.__exit__() - with gr.TabItem("Actions"): + with gr.TabItem("Actions", id="actions"): request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications") download_localization = gr.Button(value='Download localization template', elem_id="download_localization") reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies") @@ -1487,7 +1490,7 @@ def create_ui(): unload_sd_model = gr.Button(value='Unload SD checkpoint to free VRAM', elem_id="sett_unload_sd_model") reload_sd_model = gr.Button(value='Reload the last SD checkpoint back into VRAM', elem_id="sett_reload_sd_model") - with gr.TabItem("Licenses"): + with gr.TabItem("Licenses", id="licenses"): gr.HTML(shared.html("licenses.html"), elem_id="licenses") gr.Button(value="Show all pages", elem_id="settings_show_all_pages") @@ -1735,12 +1738,27 @@ def create_ui(): apply_field(x, 'value', check_dropdown, getattr(x, 'init_field', None)) + def check_tab_id(tab_id): + tab_items = list(filter(lambda e: isinstance(e, gr.TabItem), x.children)) + if type(tab_id) == str: + tab_ids = [t.id for t in tab_items] + return tab_id in tab_ids + elif type(tab_id) == int: + return tab_id >= 0 and tab_id < len(tab_items) + else: + return False + + if type(x) == gr.Tabs: + apply_field(x, 'selected', check_tab_id) + visit(txt2img_interface, loadsave, "txt2img") visit(img2img_interface, loadsave, "img2img") visit(extras_interface, loadsave, "extras") visit(modelmerger_interface, loadsave, "modelmerger") visit(train_interface, loadsave, "train") + loadsave(f"webui/Tabs@{tabs.elem_id}", tabs) + if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)): with open(ui_config_file, "w", encoding="utf8") as file: json.dump(ui_settings, file, indent=4) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index efd6cda28..5ec11f03c 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -294,7 +294,7 @@ def create_ui(): with gr.Blocks(analytics_enabled=False) as ui: with gr.Tabs(elem_id="tabs_extensions") as tabs: - with gr.TabItem("Installed"): + with gr.TabItem("Installed", id="installed"): with gr.Row(elem_id="extensions_installed_top"): apply = gr.Button(value="Apply and restart UI", variant="primary") @@ -327,7 +327,7 @@ def create_ui(): outputs=[extensions_table, info], ) - with gr.TabItem("Available"): + with gr.TabItem("Available", id="available"): with gr.Row(): refresh_available_extensions_button = gr.Button(value="Load from:", variant="primary") available_extensions_index = gr.Text(value="https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui-extensions/master/index.json", label="Extension index URL").style(container=False) @@ -374,7 +374,7 @@ def create_ui(): outputs=[available_extensions_table, install_result] ) - with gr.TabItem("Install from URL"): + with gr.TabItem("Install from URL", id="install_from_url"): install_url = gr.Text(label="URL for extension's git repository") install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto") install_button = gr.Button(value="Install", variant="primary") diff --git a/modules/ui_extra_networks.py 
b/modules/ui_extra_networks.py index 25eb464b9..ad98f0831 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -241,9 +241,9 @@ def create_ui(container, button, tabname): with gr.Tabs(elem_id=tabname+"_extra_tabs") as tabs: for page in ui.stored_extra_pages: - with gr.Tab(page.title): + with gr.Tab(page.title, id=page.title.lower().replace(" ", "_")): - page_elem = gr.HTML(page.create_html(ui.tabname)) + page_elem = gr.HTML("") ui.pages.append(page_elem) filter = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", placeholder="Search...", visible=False) @@ -284,7 +284,7 @@ def setup_ui(ui, gallery): def save_preview(index, images, filename): if len(images) == 0: print("There is no image in gallery to save as a preview.") - return [page.create_html(ui.tabname) for page in ui.stored_extra_pages] + return ["" for page in ui.stored_extra_pages] index = int(index) index = 0 if index < 0 else index @@ -309,7 +309,7 @@ def setup_ui(ui, gallery): else: image.save(filename) - return [page.create_html(ui.tabname) for page in ui.stored_extra_pages] + return ["" for page in ui.stored_extra_pages] ui.button_save_preview.click( fn=save_preview, diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index b418d9553..81decfc42 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -9,13 +9,13 @@ def create_ui(): with gr.Row().style(equal_height=False, variant='compact'): with gr.Column(variant='compact'): with gr.Tabs(elem_id="mode_extras"): - with gr.TabItem('Single Image', elem_id="extras_single_tab") as tab_single: + with gr.TabItem('Single Image', id="single_image", elem_id="extras_single_tab") as tab_single: extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image") - with gr.TabItem('Batch Process', elem_id="extras_batch_process_tab") as tab_batch: + with gr.TabItem('Batch Process', id="batch_process", elem_id="extras_batch_process_tab") as tab_batch: image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file", elem_id="extras_image_batch") - with gr.TabItem('Batch from Directory', elem_id="extras_batch_directory_tab") as tab_batch_dir: + with gr.TabItem('Batch from Directory', id="batch_from_directory", elem_id="extras_batch_directory_tab") as tab_batch_dir: extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.", elem_id="extras_batch_input_dir") extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.", elem_id="extras_batch_output_dir") show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results") From 3c7b928914eb2fe1b2ab456faa390b846de12b64 Mon Sep 17 00:00:00 2001 From: Thierry Date: Wed, 29 Mar 2023 16:52:45 -0400 Subject: [PATCH 029/160] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b67e2296a..abf40a32f 100644 --- a/README.md +++ b/README.md @@ -100,7 +100,7 @@ Alternatively, use online services (like Google Colab): - [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services) ### Automatic Installation on Windows -1. Install [Python 3.10.6](https://www.python.org/downloads/windows/), checking "Add Python to PATH". +1. 
Install [Python 3.10.9](https://www.python.org/downloads/release/python-3109/) (Newer version of Python does not support torch), checking "Add Python to PATH". 2. Install [git](https://git-scm.com/download/win). 3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`. 4. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user. From baef594e4a2c49103d472b48544b6848dfcaf2a6 Mon Sep 17 00:00:00 2001 From: Thierry Date: Wed, 29 Mar 2023 16:58:56 -0400 Subject: [PATCH 030/160] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index abf40a32f..671ee2aa8 100644 --- a/README.md +++ b/README.md @@ -100,7 +100,7 @@ Alternatively, use online services (like Google Colab): - [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services) ### Automatic Installation on Windows -1. Install [Python 3.10.9](https://www.python.org/downloads/release/python-3109/) (Newer version of Python does not support torch), checking "Add Python to PATH". +1. Install [Python 3.10.6](https://www.python.org/downloads/release/python-3106/) (Newer version of Python does not support torch), checking "Add Python to PATH". 2. Install [git](https://git-scm.com/download/win). 3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`. 4. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user. From 384bfe22cd86377aca87f82d2746125326a2fe1f Mon Sep 17 00:00:00 2001 From: Thierry Date: Wed, 29 Mar 2023 17:00:20 -0400 Subject: [PATCH 031/160] Update launch.py --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index 68e08114d..b76f17983 100644 --- a/launch.py +++ b/launch.py @@ -49,7 +49,7 @@ or any other error regarding unsuccessful package (library) installation, please downgrade (or upgrade) to the latest version of 3.10 Python and delete current Python and "venv" folder in WebUI's directory. 
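PATCH 029-031 pin the recommended interpreter to the Python 3.10 series because newer Python releases were not supported by the torch build the webui targets at this point. As a hedged illustration only (not part of these patches), a launcher could surface that requirement up front:

    import sys

    # Illustrative check, not from the patches: warn when the interpreter is outside
    # the 3.10.x series recommended by README.md and the launch.py message above.
    if sys.version_info[:2] != (3, 10):
        print(f"Warning: running Python {sys.version.split()[0]}; "
              "the webui is tested against Python 3.10.x (e.g. 3.10.6).")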
-You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3109/ +You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3106/ {"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""} From ad5afcaae0b47e9e68b49aacf04cc3ad59d41a8e Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 16:46:03 -0500 Subject: [PATCH 032/160] Save/restore working webui/extension configs --- javascript/extensions.js | 16 ++++ modules/extensions.py | 23 +++-- modules/paths_internal.py | 1 + modules/shared.py | 1 + modules/ui_extensions.py | 186 +++++++++++++++++++++++++++++++++++++- webui.py | 25 ++++- 6 files changed, 243 insertions(+), 9 deletions(-) diff --git a/javascript/extensions.js b/javascript/extensions.js index 72924a28c..c27864998 100644 --- a/javascript/extensions.js +++ b/javascript/extensions.js @@ -47,3 +47,19 @@ function install_extension_from_index(button, url){ gradioApp().querySelector('#install_extension_button').click() } + +function config_state_confirm_restore(_, config_state_name, config_restore_type) { + if (config_state_name == "Current") { + return [false, config_state_name]; + } + let restored = ""; + if (config_restore_type == "extensions") { + restored = "all saved extension versions"; + } else if (config_restore_type == "webui") { + restored = "the webui version"; + } else { + restored = "the webui version and all saved extension versions"; + } + let confirmed = confirm("Are you sure you want to restore from this state?\nThis will reset " + restored + ".\n(A backup of the current state will be made.)"); + return [confirmed, config_state_name, config_restore_type]; +} diff --git a/modules/extensions.py b/modules/extensions.py index 3a7a03727..a87beaa37 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -3,10 +3,11 @@ import sys import traceback import time +from datetime import datetime import git from modules import shared -from modules.paths_internal import extensions_dir, extensions_builtin_dir +from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path extensions = [] @@ -31,12 +32,15 @@ class Extension: self.status = '' self.can_update = False self.is_builtin = is_builtin + self.commit_hash = '' + self.commit_date = None self.version = '' + self.branch = None self.remote = None self.have_info_from_repo = False def read_info_from_repo(self): - if self.have_info_from_repo: + if self.is_builtin or self.have_info_from_repo: return self.have_info_from_repo = True @@ -56,10 +60,15 @@ class Extension: self.status = 'unknown' self.remote = next(repo.remote().urls, None) head = repo.head.commit - ts = time.asctime(time.gmtime(repo.head.commit.committed_date)) - self.version = f'{head.hexsha[:8]} ({ts})' + self.commit_date = repo.head.commit.committed_date + ts = time.asctime(time.gmtime(self.commit_date)) + if repo.active_branch: + self.branch = repo.active_branch.name + self.commit_hash = head.hexsha + self.version = f'{self.commit_hash[:8]} ({ts})' - except Exception: + except Exception as ex: + print(f"Failed reading extension data from Git repository ({self.name}): {ex}", file=sys.stderr) self.remote = None def list_files(self, subdir, extension): @@ -88,12 +97,12 @@ class Extension: self.can_update = False self.status = "latest" - def fetch_and_reset_hard(self): + def fetch_and_reset_hard(self, commit='origin'): repo = git.Repo(self.path) # 
Fix: `error: Your local changes to the following files would be overwritten by merge`, # because WSL2 Docker set 755 file permissions instead of 644, this results to the error. repo.git.fetch(all=True) - repo.git.reset('origin', hard=True) + repo.git.reset(commit, hard=True) def list_extensions(): diff --git a/modules/paths_internal.py b/modules/paths_internal.py index 926ec3bbf..6765bafe0 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -20,3 +20,4 @@ data_path = cmd_opts_pre.data_dir models_path = os.path.join(data_path, "models") extensions_dir = os.path.join(data_path, "extensions") extensions_builtin_dir = os.path.join(script_path, "extensions-builtin") +config_states_dir = os.path.join(script_path, "config_states") diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecbd..ffc5e4fe4 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -424,6 +424,7 @@ options_templates.update(options_section(('postprocessing', "Postprocessing"), { options_templates.update(options_section((None, "Hidden options"), { "disabled_extensions": OptionInfo([], "Disable these extensions"), "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}), + "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"), "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), })) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index efd6cda28..ed677b3ec 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -2,6 +2,7 @@ import json import os.path import sys import time +from datetime import datetime import traceback import git @@ -11,7 +12,8 @@ import html import shutil import errno -from modules import extensions, shared, paths +from modules import extensions, shared, paths, config_states +from modules.paths_internal import config_states_dir from modules.call_queue import wrap_gradio_gpu_call available_extensions = {"extensions": []} @@ -30,6 +32,9 @@ def apply_and_restart(disable_list, update_list, disable_all): update = json.loads(update_list) assert type(update) == list, f"wrong update_list data for apply_and_restart: {update_list}" + if update: + save_config_state("Backup (pre-update)") + update = set(update) for ext in extensions.extensions: @@ -50,6 +55,48 @@ def apply_and_restart(disable_list, update_list, disable_all): shared.state.need_restart = True +def save_config_state(name): + current_config_state = config_states.get_config() + if not name: + name = "Config" + current_config_state["name"] = name + filename = os.path.join(config_states_dir, datetime.now().strftime("%Y_%m_%d-%H_%M_%S") + ".json") + print(f"Saving backup of webui/extension state to {filename}.") + with open(filename, "w", encoding="utf-8") as f: + json.dump(current_config_state, f) + config_states.list_config_states() + new_value = next(iter(config_states.all_config_states.keys()), "Current") + new_choices = ["Current"] + list(config_states.all_config_states.keys()) + return gr.Dropdown.update(value=new_value, choices=new_choices), f"Saved current webui/extension state to '{filename}'" + + +def restore_config_state(confirmed, config_state_name, restore_type): + if config_state_name == "Current": + return "Select a config to restore from." + if not confirmed: + return "Cancelled." 
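A brief note on the backup flow introduced above: `save_config_state` snapshots the current webui/extension state via `config_states.get_config()` and writes it as JSON under the `config_states/` folder using a timestamp-based filename. As a rough illustration of the filename scheme alone (not patch code; the directory value comes from `modules/paths_internal.py` earlier in this patch):

    import os
    from datetime import datetime

    # Illustrative only: how save_config_state above derives the backup filename.
    config_states_dir = "config_states"
    filename = os.path.join(config_states_dir, datetime.now().strftime("%Y_%m_%d-%H_%M_%S") + ".json")
    print(filename)  # e.g. config_states/2023_03_30-17_45_12.json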
+ + check_access() + + save_config_state("Backup (pre-restore)") + + config_state = config_states.all_config_states[config_state_name] + + print(f"Restoring webui state from backup: {restore_type}") + + if restore_type == "extensions" or restore_type == "both": + shared.opts.restore_config_state_file = config_state["filename"] + shared.opts.save(shared.config_filename) + + if restore_type == "webui" or restore_type == "both": + config_states.restore_webui_config(config_state) + + shared.state.interrupt() + shared.state.need_restart = True + + return "" + + def check_updates(id_task, disable_list): check_access() @@ -121,6 +168,117 @@ def extension_table(): return code +def update_config_states_table(state_name): + if state_name == "Current": + config_state = config_states.get_config() + else: + config_state = config_states.all_config_states[state_name] + + config_name = config_state.get("name", "Config") + created_date = time.asctime(time.gmtime(config_state["created_at"])) + + code = f"""""" + + webui_remote = config_state["webui"]["remote"] or "" + webui_branch = config_state["webui"]["branch"] + webui_commit_hash = config_state["webui"]["commit_hash"] + if webui_commit_hash: + webui_commit_hash = webui_commit_hash[:8] + else: + webui_commit_hash = "" + webui_commit_date = config_state["webui"]["commit_date"] + if webui_commit_date: + webui_commit_date = time.asctime(time.gmtime(webui_commit_date)) + else: + webui_commit_date = "" + + code += f"""

Config Backup: {config_name}

+ Created at: {created_date}""" + + code += f"""

WebUI State

+ + + + + + + + + + + + + + + + + +
URLBranchCommitDate
{webui_remote}{webui_branch}{webui_commit_hash}{webui_commit_date}
+ """ + + code += """

Extension State

+ + + + + + + + + + + + """ + + ext_map = {ext.name: ext for ext in extensions.extensions} + + for ext_name, ext_conf in config_state["extensions"].items(): + ext_remote = ext_conf["remote"] or "" + ext_branch = ext_conf["branch"] or "" + ext_enabled = ext_conf["enabled"] + ext_commit_hash = ext_conf["commit_hash"] or "" + ext_commit_date = ext_conf["commit_date"] + if ext_commit_date: + ext_commit_date = time.asctime(time.gmtime(ext_commit_date)) + else: + ext_commit_date = "" + + remote = f"""{html.escape(ext_remote or '')}""" + + style_enabled = "" + style_remote = "" + style_branch = "" + style_commit = "" + if ext_name in ext_map: + current_ext = ext_map[ext_name] + current_ext.read_info_from_repo() + if current_ext.enabled != ext_enabled: + style_enabled = ' style="color: var(--primary-400)"' + if current_ext.remote != ext_remote: + style_remote = ' style="color: var(--primary-400)"' + if current_ext.branch != ext_branch: + style_branch = ' style="color: var(--primary-400)"' + if current_ext.commit_hash != ext_commit_hash: + style_commit = ' style="color: var(--primary-400)"' + + code += f""" + + + + + + + + """ + + code += """ + +
ExtensionURLBranchCommitDate
{html.escape(ext_name)}{remote}{ext_branch}{ext_commit_hash[:8]}{ext_commit_date}
+ """ + + return code + + def normalize_git_url(url): if url is None: return "" @@ -292,6 +450,8 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=" def create_ui(): import modules.ui + config_states.list_config_states() + with gr.Blocks(analytics_enabled=False) as ui: with gr.Tabs(elem_id="tabs_extensions") as tabs: with gr.TabItem("Installed"): @@ -386,4 +546,28 @@ def create_ui(): outputs=[extensions_table, install_result], ) + with gr.TabItem("Backup/Restore"): + with gr.Row(elem_id="extensions_backup_top_row"): + config_states_list = gr.Dropdown(label="Saved Configs", elem_id="extension_backup_saved_configs", value="Current", choices=["Current"] + list(config_states.all_config_states.keys())) + modules.ui.create_refresh_button(config_states_list, config_states.list_config_states, lambda: {"choices": ["Current"] + list(config_states.all_config_states.keys())}, "refresh_config_states") + config_restore_type = gr.Radio(label="State to restore", choices=["extensions", "webui", "both"], value="extensions", elem_id="extension_backup_restore_type") + config_restore_button = gr.Button(value="Restore Selected Config", variant="primary", elem_id="extension_backup_restore") + with gr.Row(elem_id="extensions_backup_top_row2"): + config_save_name = gr.Textbox("", placeholder="Config Name", show_label=False) + config_save_button = gr.Button(value="Save Current Config") + + config_states_info = gr.HTML("") + config_states_table = gr.HTML(lambda: update_config_states_table("Current")) + + config_save_button.click(fn=save_config_state, inputs=[config_save_name], outputs=[config_states_list, config_states_info]) + + dummy_component = gr.Label(visible=False) + config_restore_button.click(fn=restore_config_state, _js="config_state_confirm_restore", inputs=[dummy_component, config_states_list, config_restore_type], outputs=[config_states_info]) + + config_states_list.change( + fn=update_config_states_table, + inputs=[config_states_list], + outputs=[config_states_table], + ) + return ui diff --git a/webui.py b/webui.py index b570895fb..b8f9a2c1c 100644 --- a/webui.py +++ b/webui.py @@ -5,6 +5,7 @@ import importlib import signal import re import warnings +import json from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.gzip import GZipMiddleware @@ -37,7 +38,7 @@ if ".dev" in torch.__version__ or "+git" in torch.__version__: torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) -from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks +from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states import modules.codeformer_model as codeformer import modules.face_restoration import modules.gfpgan_model as gfpgan @@ -105,6 +106,17 @@ def initialize(): localization.list_localizations(cmd_opts.localizations_dir) startup_timer.record("list extensions") + config_state_file = shared.opts.restore_config_state_file + shared.opts.restore_config_state_file = "" + shared.opts.save(shared.config_filename) + + if os.path.isfile(config_state_file): + print(f"*** About to restore extension state from file: {config_state_file}") + with open(config_state_file, "r", encoding="utf-8") as f: + config_state = json.load(f) + config_states.restore_extension_state(config_state) + startup_timer.record("restore extension config") + if cmd_opts.ui_debug_mode: 
shared.sd_upscalers = upscaler.UpscalerLanczos().scalers modules.scripts.load_scripts() @@ -301,6 +313,17 @@ def webui(): extensions.list_extensions() startup_timer.record("list extensions") + config_state_file = shared.opts.restore_config_state_file + shared.opts.restore_config_state_file = "" + shared.opts.save(shared.config_filename) + + if os.path.isfile(config_state_file): + print(f"*** About to restore extension state from file: {config_state_file}") + with open(config_state_file, "r", encoding="utf-8") as f: + config_state = json.load(f) + config_states.restore_extension_state(config_state) + startup_timer.record("restore extension config") + localization.list_localizations(cmd_opts.localizations_dir) modelloader.forbid_loaded_nonbuiltin_upscalers() From f22d0dde4e57444b2d4fe997338550bb82bb249e Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 18:32:29 -0500 Subject: [PATCH 033/160] Better checking of extension state from Git info --- modules/extensions.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/modules/extensions.py b/modules/extensions.py index a87beaa37..34d9d6544 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -91,9 +91,20 @@ class Extension: for fetch in repo.remote().fetch(dry_run=True): if fetch.flags != fetch.HEAD_UPTODATE: self.can_update = True - self.status = "behind" + self.status = "new commits" return + try: + origin = repo.rev_parse('origin') + if repo.head.commit != origin: + self.can_update = True + self.status = "behind HEAD" + return + except Exception: + self.can_update = False + self.status = "unknown (remote error)" + return + self.can_update = False self.status = "latest" @@ -103,6 +114,7 @@ class Extension: # because WSL2 Docker set 755 file permissions instead of 644, this results to the error. 
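PATCH 033 tightens how an extension's update status is read from Git: a dry-run fetch detects commits that exist on the remote but have not yet been fetched, while comparing the checked-out HEAD against `origin` catches a local checkout that is behind even when there is nothing new to fetch. A minimal sketch of the same checks with GitPython (the extension path is hypothetical):

    import git

    repo = git.Repo("extensions/some-extension")  # hypothetical path

    # Does the remote have commits we have not fetched yet?
    for fetch in repo.remote().fetch(dry_run=True):
        if fetch.flags != fetch.HEAD_UPTODATE:
            print("new commits")

    # Even with nothing to fetch, the working checkout may be behind origin.
    if repo.head.commit != repo.rev_parse("origin"):
        print("behind HEAD")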
repo.git.fetch(all=True) repo.git.reset(commit, hard=True) + self.have_info_from_repo = False def list_extensions(): From f3320b802c12f29e5a3201fcc0abfe72be294293 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 18:32:54 -0500 Subject: [PATCH 034/160] Various UI fixes in config state tab --- .gitignore | 1 + javascript/extensions.js | 10 +- modules/config_states.py | 200 +++++++++++++++++++++++++++++++++++++++ modules/ui_extensions.py | 45 +++++---- webui.py | 8 +- 5 files changed, 241 insertions(+), 23 deletions(-) create mode 100644 modules/config_states.py diff --git a/.gitignore b/.gitignore index 0b1d17ca3..7c89b6730 100644 --- a/.gitignore +++ b/.gitignore @@ -33,3 +33,4 @@ notification.mp3 /test/stdout.txt /test/stderr.txt /cache.json +/config_states/ diff --git a/javascript/extensions.js b/javascript/extensions.js index c27864998..3c2f995aa 100644 --- a/javascript/extensions.js +++ b/javascript/extensions.js @@ -50,7 +50,7 @@ function install_extension_from_index(button, url){ function config_state_confirm_restore(_, config_state_name, config_restore_type) { if (config_state_name == "Current") { - return [false, config_state_name]; + return [false, config_state_name, config_restore_type]; } let restored = ""; if (config_restore_type == "extensions") { @@ -60,6 +60,12 @@ function config_state_confirm_restore(_, config_state_name, config_restore_type) } else { restored = "the webui version and all saved extension versions"; } - let confirmed = confirm("Are you sure you want to restore from this state?\nThis will reset " + restored + ".\n(A backup of the current state will be made.)"); + let confirmed = confirm("Are you sure you want to restore from this state?\nThis will reset " + restored + "."); + if (confirmed) { + restart_reload(); + gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x){ + x.innerHTML = "Loading..." 
+ }) + } return [confirmed, config_state_name, config_restore_type]; } diff --git a/modules/config_states.py b/modules/config_states.py new file mode 100644 index 000000000..2ea00929c --- /dev/null +++ b/modules/config_states.py @@ -0,0 +1,200 @@ +""" +Supports saving and restoring webui and extensions from a known working set of commits +""" + +import os +import sys +import traceback +import json +import time +import tqdm + +from datetime import datetime +from collections import OrderedDict +import git + +from modules import shared, extensions +from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path, config_states_dir + + +all_config_states = OrderedDict() + + +def list_config_states(): + global all_config_states + + all_config_states.clear() + os.makedirs(config_states_dir, exist_ok=True) + + config_states = [] + for filename in os.listdir(config_states_dir): + if filename.endswith(".json"): + path = os.path.join(config_states_dir, filename) + with open(path, "r", encoding="utf-8") as f: + j = json.load(f) + j["filepath"] = path + config_states.append(j) + + config_states = list(sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)) + + for cs in config_states: + timestamp = time.asctime(time.gmtime(cs["created_at"])) + name = cs.get("name", "Config") + full_name = f"{name}: {timestamp}" + all_config_states[full_name] = cs + + return all_config_states + + +def get_webui_config(): + webui_repo = None + + try: + if os.path.exists(os.path.join(script_path, ".git")): + webui_repo = git.Repo(script_path) + except Exception: + print(f"Error reading webui git info from {script_path}:", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + + webui_remote = None + webui_commit_hash = None + webui_commit_date = None + webui_branch = None + if webui_repo and not webui_repo.bare: + try: + webui_remote = next(webui_repo.remote().urls, None) + head = webui_repo.head.commit + webui_commit_date = webui_repo.head.commit.committed_date + webui_commit_hash = head.hexsha + webui_branch = webui_repo.active_branch.name + + except Exception: + webui_remote = None + + return { + "remote": webui_remote, + "commit_hash": webui_commit_hash, + "commit_date": webui_commit_date, + "branch": webui_branch, + } + + +def get_extension_config(): + ext_config = {} + + for ext in extensions.extensions: + entry = { + "name": ext.name, + "path": ext.path, + "enabled": ext.enabled, + "is_builtin": ext.is_builtin, + "remote": ext.remote, + "commit_hash": ext.commit_hash, + "commit_date": ext.commit_date, + "branch": ext.branch, + "have_info_from_repo": ext.have_info_from_repo + } + + ext_config[ext.name] = entry + + return ext_config + + +def get_config(): + creation_time = datetime.now().timestamp() + webui_config = get_webui_config() + ext_config = get_extension_config() + + return { + "created_at": creation_time, + "webui": webui_config, + "extensions": ext_config + } + + +def restore_webui_config(config): + print("* Restoring webui state...") + + if "webui" not in config: + print("Error: No webui data saved to config") + return + + webui_config = config["webui"] + + if "commit_hash" not in webui_config: + print("Error: No commit saved to webui config") + return + + webui_commit_hash = webui_config.get("commit_hash", None) + webui_repo = None + + try: + if os.path.exists(os.path.join(script_path, ".git")): + webui_repo = git.Repo(script_path) + except Exception: + print(f"Error reading webui git info from {script_path}:", file=sys.stderr) + 
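The new `modules/config_states.py` above serializes both the webui checkout and every extension into a single JSON document. As a rough, invented example of what such a saved state contains (field names follow `get_config`, `get_webui_config` and `get_extension_config` above; all values and the extension name are hypothetical):

    # Hypothetical saved config state (values invented for illustration):
    example_state = {
        "created_at": 1680120000.0,  # datetime.now().timestamp()
        "webui": {
            "remote": "https://github.com/AUTOMATIC1111/stable-diffusion-webui",
            "commit_hash": "0123456789abcdef0123456789abcdef01234567",
            "commit_date": 1680110000,
            "branch": "master",
        },
        "extensions": {
            "some-extension": {
                "name": "some-extension",
                "path": "/path/to/extensions/some-extension",
                "enabled": True,
                "is_builtin": False,
                "remote": "https://github.com/user/some-extension",
                "commit_hash": "fedcba9876543210fedcba9876543210fedcba98",
                "commit_date": 1680100000,
                "branch": "main",
                "have_info_from_repo": True,
            },
        },
    }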
print(traceback.format_exc(), file=sys.stderr) + return + + try: + webui_repo.git.fetch(all=True) + webui_repo.git.reset(webui_commit_hash, hard=True) + print(f"* Restored webui to commit {webui_commit_hash}.") + except Exception: + print(f"Error restoring webui to commit {webui_commit_hash}:", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + + +def restore_extension_config(config): + print("* Restoring extension state...") + + if "extensions" not in config: + print("Error: No extension data saved to config") + return + + ext_config = config["extensions"] + + results = [] + disabled = [] + + for ext in tqdm.tqdm(extensions.extensions): + if ext.is_builtin: + continue + + ext.read_info_from_repo() + current_commit = ext.commit_hash + + if ext.name not in ext_config: + ext.disabled = True + disabled.append(ext.name) + results.append((ext, current_commit[:8], False, "Saved extension state not found in config, marking as disabled")) + continue + + entry = ext_config[ext.name] + + if "commit_hash" in entry and entry["commit_hash"]: + try: + ext.fetch_and_reset_hard(entry["commit_hash"]) + ext.read_info_from_repo() + if current_commit != entry["commit_hash"]: + results.append((ext, current_commit[:8], True, entry["commit_hash"][:8])) + except Exception as ex: + results.append((ext, current_commit[:8], False, ex)) + else: + results.append((ext, current_commit[:8], False, "No commit hash found in config")) + + if not entry.get("enabled", False): + ext.disabled = True + disabled.append(ext.name) + else: + ext.disabled = False + + shared.opts.disabled_extensions = disabled + shared.opts.save(shared.config_filename) + + print("* Finished restoring extensions. Results:") + for ext, prev_commit, success, result in results: + if success: + print(f" + {ext.name}: {prev_commit} -> {result}") + else: + print(f" ! 
{ext.name}: FAILURE ({result})") diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index ed677b3ec..b94b3a3ac 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -17,6 +17,7 @@ from modules.paths_internal import config_states_dir from modules.call_queue import wrap_gradio_gpu_call available_extensions = {"extensions": []} +STYLE_PRIMARY = ' style="color: var(--primary-400)"' def check_access(): @@ -67,7 +68,7 @@ def save_config_state(name): config_states.list_config_states() new_value = next(iter(config_states.all_config_states.keys()), "Current") new_choices = ["Current"] + list(config_states.all_config_states.keys()) - return gr.Dropdown.update(value=new_value, choices=new_choices), f"Saved current webui/extension state to '{filename}'" + return gr.Dropdown.update(value=new_value, choices=new_choices), f"Saved current webui/extension state to \"{filename}\"" def restore_config_state(confirmed, config_state_name, restore_type): @@ -78,14 +79,12 @@ def restore_config_state(confirmed, config_state_name, restore_type): check_access() - save_config_state("Backup (pre-restore)") - config_state = config_states.all_config_states[config_state_name] - print(f"Restoring webui state from backup: {restore_type}") + print(f"*** Restoring webui state from backup: {restore_type} ***") if restore_type == "extensions" or restore_type == "both": - shared.opts.restore_config_state_file = config_state["filename"] + shared.opts.restore_config_state_file = config_state["filepath"] shared.opts.save(shared.config_filename) if restore_type == "webui" or restore_type == "both": @@ -149,7 +148,7 @@ def extension_table(): style = "" if shared.opts.disable_all_extensions == "extra" and not ext.is_builtin or shared.opts.disable_all_extensions == "all": - style = ' style="color: var(--primary-400)"' + style = STYLE_PRIMARY code += f""" @@ -181,17 +180,25 @@ def update_config_states_table(state_name): webui_remote = config_state["webui"]["remote"] or "" webui_branch = config_state["webui"]["branch"] - webui_commit_hash = config_state["webui"]["commit_hash"] - if webui_commit_hash: - webui_commit_hash = webui_commit_hash[:8] - else: - webui_commit_hash = "" + webui_commit_hash = config_state["webui"]["commit_hash"] or "" webui_commit_date = config_state["webui"]["commit_date"] if webui_commit_date: webui_commit_date = time.asctime(time.gmtime(webui_commit_date)) else: webui_commit_date = "" + current_webui = config_states.get_webui_config() + + style_remote = "" + style_branch = "" + style_commit = "" + if current_webui["remote"] != webui_remote: + style_remote = STYLE_PRIMARY + if current_webui["branch"] != webui_branch: + style_branch = STYLE_PRIMARY + if current_webui["commit_hash"] != webui_commit_hash: + style_commit = STYLE_PRIMARY + code += f"""

Config Backup: {config_name}

Created at: {created_date}""" @@ -207,10 +214,10 @@ def update_config_states_table(state_name): - {webui_remote} - {webui_branch} - {webui_commit_hash} - {webui_commit_date} + {webui_remote} + {webui_branch} + {webui_commit_hash[:8]} + {webui_commit_date} @@ -253,13 +260,13 @@ def update_config_states_table(state_name): current_ext = ext_map[ext_name] current_ext.read_info_from_repo() if current_ext.enabled != ext_enabled: - style_enabled = ' style="color: var(--primary-400)"' + style_enabled = STYLE_PRIMARY if current_ext.remote != ext_remote: - style_remote = ' style="color: var(--primary-400)"' + style_remote = STYLE_PRIMARY if current_ext.branch != ext_branch: - style_branch = ' style="color: var(--primary-400)"' + style_branch = STYLE_PRIMARY if current_ext.commit_hash != ext_commit_hash: - style_commit = ' style="color: var(--primary-400)"' + style_commit = STYLE_PRIMARY code += f""" diff --git a/webui.py b/webui.py index b8f9a2c1c..5ce450569 100644 --- a/webui.py +++ b/webui.py @@ -114,8 +114,10 @@ def initialize(): print(f"*** About to restore extension state from file: {config_state_file}") with open(config_state_file, "r", encoding="utf-8") as f: config_state = json.load(f) - config_states.restore_extension_state(config_state) + config_states.restore_extension_config(config_state) startup_timer.record("restore extension config") + else: + print(f"!!! Config state backup not found: {config_state_file}") if cmd_opts.ui_debug_mode: shared.sd_upscalers = upscaler.UpscalerLanczos().scalers @@ -321,8 +323,10 @@ def webui(): print(f"*** About to restore extension state from file: {config_state_file}") with open(config_state_file, "r", encoding="utf-8") as f: config_state = json.load(f) - config_states.restore_extension_state(config_state) + config_states.restore_extension_config(config_state) startup_timer.record("restore extension config") + else: + print(f"!!! Config state backup not found: {config_state_file}") localization.list_localizations(cmd_opts.localizations_dir) From 9b1fa8298127b05c71c4de04bd6f64b72540ef5a Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 18:55:57 -0500 Subject: [PATCH 035/160] Add filename to UI and config name to filename --- modules/ui_extensions.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index b94b3a3ac..b1ef38e22 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -61,7 +61,7 @@ def save_config_state(name): if not name: name = "Config" current_config_state["name"] = name - filename = os.path.join(config_states_dir, datetime.now().strftime("%Y_%m_%d-%H_%M_%S") + ".json") + filename = os.path.join(config_states_dir, datetime.now().strftime("%Y_%m_%d-%H_%M_%S") + "_" + name + ".json") print(f"Saving backup of webui/extension state to {filename}.") with open(filename, "w", encoding="utf-8") as f: json.dump(current_config_state, f) @@ -175,6 +175,7 @@ def update_config_states_table(state_name): config_name = config_state.get("name", "Config") created_date = time.asctime(time.gmtime(config_state["created_at"])) + filepath = config_state.get("filepath", "") code = f"""""" @@ -200,6 +201,7 @@ def update_config_states_table(state_name): style_commit = STYLE_PRIMARY code += f"""

Config Backup: {config_name}

+ Filepath: {filepath} Created at: {created_date}""" code += f"""

WebUI State

From 64bbd3bf030aac9068df6bbe79c852e97cf6dbde Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 19:00:51 -0500 Subject: [PATCH 036/160] Make into divs --- modules/ui_extensions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index b1ef38e22..3735965c6 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -201,8 +201,8 @@ def update_config_states_table(state_name): style_commit = STYLE_PRIMARY code += f"""

Config Backup: {config_name}

- Filepath: {filepath} - Created at: {created_date}""" +
Filepath: {filepath}
+
Created at: {created_date}
""" code += f"""

WebUI State

From 1c0544abdbffb910837d285857515b932a073f89 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 19:21:57 -0500 Subject: [PATCH 037/160] Add links for commits in table, if remote is from GitHub --- modules/ui_extensions.py | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 3735965c6..0843a7c03 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -122,6 +122,16 @@ def check_updates(id_task, disable_list): return extension_table(), "" +def make_commit_link(commit_hash, remote, text=None): + if text is None: + text = commit_hash[:8] + if remote.startswith("https://github.com/"): + href = os.path.join(remote, "commit", commit_hash) + return f'{text}' + else: + return text + + def extension_table(): code = f"""
@@ -150,11 +160,15 @@ def extension_table(): if shared.opts.disable_all_extensions == "extra" and not ext.is_builtin or shared.opts.disable_all_extensions == "all": style = STYLE_PRIMARY + version_link = ext.version + if ext.commit_hash and ext.remote: + version_link = make_commit_link(ext.commit_hash, ext.remote, ext.version) + code += f""" - + {ext_status} """ @@ -200,6 +214,9 @@ def update_config_states_table(state_name): if current_webui["commit_hash"] != webui_commit_hash: style_commit = STYLE_PRIMARY + commit_link = make_commit_link(webui_commit_hash, webui_remote) + date_link = make_commit_link(webui_commit_hash, webui_remote, webui_commit_date) + code += f"""

Config Backup: {config_name}

Filepath: {filepath}
Created at: {created_date}
""" @@ -218,8 +235,8 @@ def update_config_states_table(state_name): - - + +
{html.escape(ext.name)} {remote}{ext.version}{version_link}
{webui_remote} {webui_branch}{webui_commit_hash[:8]}{webui_commit_date}{commit_link}{date_link}
@@ -270,13 +287,16 @@ def update_config_states_table(state_name): if current_ext.commit_hash != ext_commit_hash: style_commit = STYLE_PRIMARY + commit_link = make_commit_link(ext_commit_hash, ext_remote) + date_link = make_commit_link(ext_commit_hash, ext_remote, ext_commit_date) + code += f""" {html.escape(ext_name)} {remote} {ext_branch} - {ext_commit_hash[:8]} - {ext_commit_date} + {commit_link} + {date_link} """ From 563d048780790fb9e7e9a5fc54a86bdf1ee65d58 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 19:22:45 -0500 Subject: [PATCH 038/160] Squelch warning if no config restore --- webui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/webui.py b/webui.py index 5ce450569..ee5e4f117 100644 --- a/webui.py +++ b/webui.py @@ -116,7 +116,7 @@ def initialize(): config_state = json.load(f) config_states.restore_extension_config(config_state) startup_timer.record("restore extension config") - else: + elif config_state_file: print(f"!!! Config state backup not found: {config_state_file}") if cmd_opts.ui_debug_mode: @@ -325,7 +325,7 @@ def webui(): config_state = json.load(f) config_states.restore_extension_config(config_state) startup_timer.record("restore extension config") - else: + elif config_state_file: print(f"!!! Config state backup not found: {config_state_file}") localization.list_localizations(cmd_opts.localizations_dir) From 3ccf6f5ae8e57233200f9cac52da8d5d88ecee93 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 19:26:52 -0500 Subject: [PATCH 039/160] Add webui link --- modules/ui_extensions.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 0843a7c03..af9e92cc8 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -202,6 +202,10 @@ def update_config_states_table(state_name): else: webui_commit_date = "" + remote = f"""{html.escape(webui_remote or '')}""" + commit_link = make_commit_link(webui_commit_hash, webui_remote) + date_link = make_commit_link(webui_commit_hash, webui_remote, webui_commit_date) + current_webui = config_states.get_webui_config() style_remote = "" @@ -214,9 +218,6 @@ def update_config_states_table(state_name): if current_webui["commit_hash"] != webui_commit_hash: style_commit = STYLE_PRIMARY - commit_link = make_commit_link(webui_commit_hash, webui_remote) - date_link = make_commit_link(webui_commit_hash, webui_remote, webui_commit_date) - code += f"""

Config Backup: {config_name}

Filepath: {filepath}
Created at: {created_date}
""" @@ -233,7 +234,7 @@ def update_config_states_table(state_name): - {webui_remote} + {remote} {webui_branch} {commit_link} {date_link} @@ -270,6 +271,8 @@ def update_config_states_table(state_name): ext_commit_date = "" remote = f"""{html.escape(ext_remote or '')}""" + commit_link = make_commit_link(ext_commit_hash, ext_remote) + date_link = make_commit_link(ext_commit_hash, ext_remote, ext_commit_date) style_enabled = "" style_remote = "" @@ -287,9 +290,6 @@ def update_config_states_table(state_name): if current_ext.commit_hash != ext_commit_hash: style_commit = STYLE_PRIMARY - commit_link = make_commit_link(ext_commit_hash, ext_remote) - date_link = make_commit_link(ext_commit_hash, ext_remote, ext_commit_date) - code += f""" {html.escape(ext_name)} From 44e8e9c36807d4a71c2fc84129ebcf5ba4f77f21 Mon Sep 17 00:00:00 2001 From: devdn Date: Thu, 30 Mar 2023 00:54:28 -0400 Subject: [PATCH 040/160] fix live preview & alternate uncond guidance for better quality --- modules/sd_samplers_kdiffusion.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 6a54ce32b..17d24df49 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -116,11 +116,13 @@ class CFGDenoiser(torch.nn.Module): tensor = denoiser_params.text_cond uncond = denoiser_params.text_uncond - sigma_thresh = s_min_uncond - if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_thresh*sigma_thresh) and not is_edit_model): - uncond = torch.zeros([0,0,uncond.shape[2]]) - x_in=x_in[:x_in.shape[0]//2] - sigma_in=sigma_in[:sigma_in.shape[0]//2] + if self.step % 2 and s_min_uncond > 0 and not is_edit_model: + # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it + sigma_threshold = s_min_uncond + if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_threshold*sigma_threshold) ): + uncond = torch.zeros([0,0,uncond.shape[2]]) + x_in=x_in[:x_in.shape[0]//2] + sigma_in=sigma_in[:sigma_in.shape[0]//2] if tensor.shape[1] == uncond.shape[1]: if not is_edit_model: @@ -159,7 +161,7 @@ class CFGDenoiser(torch.nn.Module): devices.test_for_nans(x_out, "unet") if opts.live_preview_content == "Prompt": - sd_samplers_common.store_latent(x_out[0:uncond.shape[0]]) + sd_samplers_common.store_latent(x_out[0:x_out.shape[0]-uncond.shape[0]]) elif opts.live_preview_content == "Negative prompt": sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) From dbca512154341bb13e1b15d207176f2d403aff30 Mon Sep 17 00:00:00 2001 From: siutin Date: Fri, 3 Feb 2023 03:13:03 +0800 Subject: [PATCH 041/160] add an internal API for obtaining current task id --- modules/progress.py | 8 ++++++++ webui.py | 1 + 2 files changed, 9 insertions(+) diff --git a/modules/progress.py b/modules/progress.py index c69ecf3d1..05032ac54 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -4,6 +4,7 @@ import time import gradio as gr from pydantic import BaseModel, Field +from typing import List from modules.shared import opts @@ -37,6 +38,9 @@ def add_task_to_queue(id_job): pending_tasks[id_job] = time.time() +class CurrentTaskResponse(BaseModel): + current_task: str = Field(default=None, title="Task ID", description="id of the current progress task") + class ProgressRequest(BaseModel): id_task: str = Field(default=None, title="Task ID", description="id of the task to get progress for") id_live_preview: int = Field(default=-1, title="Live preview image ID", description="id of last received last 
preview image") @@ -56,6 +60,8 @@ class ProgressResponse(BaseModel): def setup_progress_api(app): return app.add_api_route("/internal/progress", progressapi, methods=["POST"], response_model=ProgressResponse) +def setup_current_task_api(app): + return app.add_api_route("/internal/current_task", current_task_api, methods=["GET"], response_model=CurrentTaskResponse) def progressapi(req: ProgressRequest): active = req.id_task == current_task @@ -97,3 +103,5 @@ def progressapi(req: ProgressRequest): return ProgressResponse(active=active, queued=queued, completed=completed, progress=progress, eta=eta, live_preview=live_preview, id_live_preview=id_live_preview, textinfo=shared.state.textinfo) +def current_task_api(): + return CurrentTaskResponse(current_task=current_task) \ No newline at end of file diff --git a/webui.py b/webui.py index b570895fb..cca9c77fa 100644 --- a/webui.py +++ b/webui.py @@ -279,6 +279,7 @@ def webui(): setup_middleware(app) modules.progress.setup_progress_api(app) + modules.progress.setup_current_task_api(app) if launch_api: create_api(app) From 9407f1731aa8c112ffc0efaa611a76f7fead3d0c Mon Sep 17 00:00:00 2001 From: siutin Date: Mon, 6 Feb 2023 03:53:05 +0800 Subject: [PATCH 042/160] store the last generated result --- modules/call_queue.py | 1 + modules/progress.py | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/modules/call_queue.py b/modules/call_queue.py index 92097c15e..30ac26bc6 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -37,6 +37,7 @@ def wrap_gradio_gpu_call(func, extra_outputs=None): res = func(*args, **kwargs) finally: progress.finish_task(id_task) + progress.set_last_task_result(id_task, res) shared.state.end() diff --git a/modules/progress.py b/modules/progress.py index 05032ac54..27a336ad7 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -37,6 +37,16 @@ def finish_task(id_task): def add_task_to_queue(id_job): pending_tasks[id_job] = time.time() +last_task_id = None +last_task_result = None + +def set_last_task_result(id_job, result): + global last_task_id + global last_task_result + + last_task_id = id_job + last_task_result = result + class CurrentTaskResponse(BaseModel): current_task: str = Field(default=None, title="Task ID", description="id of the current progress task") From 4242e194e417ec5008d09ec6d756594ac65f77bd Mon Sep 17 00:00:00 2001 From: siutin Date: Mon, 6 Feb 2023 03:55:31 +0800 Subject: [PATCH 043/160] add a button to restore the current progress --- javascript/progressbar.js | 4 ++-- javascript/ui.js | 36 ++++++++++++++++++++++++++++++++++-- modules/progress.py | 14 ++++++++++++++ modules/ui.py | 34 ++++++++++++++++++++++++++++++---- 4 files changed, 80 insertions(+), 8 deletions(-) diff --git a/javascript/progressbar.js b/javascript/progressbar.js index 4ac9b8db1..7ba14192f 100644 --- a/javascript/progressbar.js +++ b/javascript/progressbar.js @@ -59,8 +59,8 @@ function setTitle(progress){ } -function randomId(){ - return "task(" + Math.random().toString(36).slice(2, 7) + Math.random().toString(36).slice(2, 7) + Math.random().toString(36).slice(2, 7)+")" +function randomId(prefix=null){ + return "task(" + (prefix == null ? 
"" : prefix + "_") + Math.random().toString(36).slice(2, 7) + Math.random().toString(36).slice(2, 7) + Math.random().toString(36).slice(2, 7)+")" } // starts sending progress requests to "/internal/progress" uri, creating progressbar above progressbarContainer element and diff --git a/javascript/ui.js b/javascript/ui.js index 4a440193b..9fe884c0e 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -163,7 +163,7 @@ function submit(){ rememberGallerySelection('txt2img_gallery') showSubmitButtons('txt2img', false) - var id = randomId() + var id = randomId("txt2img") requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function(){ showSubmitButtons('txt2img', true) @@ -180,7 +180,7 @@ function submit_img2img(){ rememberGallerySelection('img2img_gallery') showSubmitButtons('img2img', false) - var id = randomId() + var id = randomId("img2img") requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function(){ showSubmitButtons('img2img', true) }) @@ -361,3 +361,35 @@ function selectCheckpoint(name){ desiredCheckpointName = name; gradioApp().getElementById('change_checkpoint').click() } + +function restoreProgress (task_tag) { + + if (task_tag) { + let successHandler = ({ current_task }) => { + if (current_task) { + let _task_tag = ["txt2img", "img2img"].find(t => current_task.startsWith(`task(${t}_`) && current_task.endsWith(")")) + if (!_task_tag) { + console.warn(`task tag ${current_task} not implemented yet`) + return + } + if (task_tag != _task_tag) return + showSubmitButtons(task_tag, false) + requestProgress(current_task, gradioApp().getElementById(`${task_tag}_gallery_container`), gradioApp().getElementById(`${task_tag}_gallery`), function(){ + showSubmitButtons(task_tag, true) + }) + } + } + + let errorHandler = e => window.alert(`invalid internal api respsonse. 
message: ${e}`) + + fetch("./internal/current_task") + .then(res => res.json()) + .then(successHandler) + .catch(errorHandler) + } + + var res = create_submit_args(arguments) + res[0] = 0 + return res + +} \ No newline at end of file diff --git a/modules/progress.py b/modules/progress.py index 27a336ad7..36963c92e 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -48,6 +48,20 @@ def set_last_task_result(id_job, result): last_task_result = result +def restore_progress_call(task_tag): + if current_task is None or not current_task[5:-1].startswith(task_tag): + + # image, generation_info, html_info, html_log + return tuple(list([None, None, None, None])) + + else: + + t_task = current_task + while t_task != last_task_id: + time.sleep(2.5) + return last_task_result + + class CurrentTaskResponse(BaseModel): current_task: str = Field(default=None, title="Task ID", description="id of the current progress task") diff --git a/modules/ui.py b/modules/ui.py index 627fbe0b5..0133ee12b 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -41,6 +41,7 @@ from modules.textual_inversion import textual_inversion import modules.hypernetworks.ui from modules.generation_parameters_copypaste import image_from_url_text import modules.extras +from modules.progress import restore_progress_call warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) @@ -293,6 +294,7 @@ def create_toprow(is_img2img): interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') + restore_progress = gr.Button('Restore Progress', elem_id=f"{id_part}_restore_progress") skip.click( fn=lambda: shared.state.skip(), @@ -329,7 +331,7 @@ def create_toprow(is_img2img): prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button + return prompt, prompt_styles, negative_prompt, submit, restore_progress, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button def setup_progressbar(*args, **kwargs): @@ -446,7 +448,7 @@ def create_ui(): modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=False) + txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, restore_progress, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = 
gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) @@ -578,6 +580,18 @@ def create_ui(): res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False) + restore_progress.click( + fn=lambda: restore_progress_call('txt2img'), + _js="() => restoreProgress('txt2img')", + inputs=[], + outputs=[ + txt2img_gallery, + generation_info, + html_info, + html_log, + ] + ) + txt_prompt_img.change( fn=modules.images.image_data, inputs=[ @@ -646,7 +660,7 @@ def create_ui(): modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=True) + img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, restore_progress, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=True) img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) @@ -898,6 +912,18 @@ def create_ui(): submit.click(**img2img_args) res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False) + restore_progress.click( + fn=lambda: restore_progress_call('img2img'), + _js="() => restoreProgress('img2img')", + inputs=[], + outputs=[ + img2img_gallery, + generation_info, + html_info, + html_log, + ] + ) + img2img_interrogate.click( fn=lambda *args: process_interrogate(interrogate, *args), **interrogate_args, @@ -1491,7 +1517,7 @@ def create_ui(): gr.HTML(shared.html("licenses.html"), elem_id="licenses") gr.Button(value="Show all pages", elem_id="settings_show_all_pages") - + def unload_sd_weights(): modules.sd_models.unload_model_weights() From e0b58527ff040f9c547ea45b5fcf1bfb7ab23cdd Mon Sep 17 00:00:00 2001 From: siutin Date: Mon, 6 Feb 2023 15:57:26 +0800 Subject: [PATCH 044/160] use condition to wait for result --- modules/call_queue.py | 2 +- modules/progress.py | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/modules/call_queue.py b/modules/call_queue.py index 30ac26bc6..9888109ec 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -7,7 +7,7 @@ import time from modules import shared, progress queue_lock = threading.Lock() - +queue_lock_condition = threading.Condition(lock=queue_lock) def wrap_queued_call(func): def f(*args, **kwargs): diff --git a/modules/progress.py b/modules/progress.py index 36963c92e..1947c0fdd 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -6,6 +6,7 @@ import gradio as gr from pydantic import BaseModel, Field from typing import List +from modules import call_queue from modules.shared import opts import modules.shared as shared @@ -57,8 +58,9 @@ def restore_progress_call(task_tag): else: t_task = current_task - while t_task != last_task_id: - time.sleep(2.5) + with call_queue.queue_lock_condition: + call_queue.queue_lock_condition.wait_for(lambda: t_task == last_task_id) + return last_task_result From 90366b8d8564c6fcbf5899fb31e426b68b04eb7b Mon Sep 17 00:00:00 2001 From: siutin Date: Wed, 29 Mar 2023 
00:13:15 +0800 Subject: [PATCH 045/160] tool button --- javascript/hints.js | 2 +- modules/ui.py | 17 +++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index f48a0eb69..7c6083115 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -22,7 +22,7 @@ titles = { "\u{1f4cb}": "Apply selected styles to current prompt", "\u{1f4d2}": "Paste available values into the field", "\u{1f3b4}": "Show/hide extra networks", - + "\u{1F300}": "Restore progress", "Inpaint a part of image": "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt", "SD upscale": "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back", diff --git a/modules/ui.py b/modules/ui.py index 0133ee12b..8fc17ce70 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -82,6 +82,7 @@ apply_style_symbol = '\U0001f4cb' # 📋 clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ extra_networks_symbol = '\U0001F3B4' # 🎴 switch_values_symbol = '\U000021C5' # ⇅ +restore_progress_symbol = '\U0001F300' # 🌀 def plaintext_to_html(text): @@ -294,7 +295,6 @@ def create_toprow(is_img2img): interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') - restore_progress = gr.Button('Restore Progress', elem_id=f"{id_part}_restore_progress") skip.click( fn=lambda: shared.state.skip(), @@ -314,6 +314,7 @@ def create_toprow(is_img2img): extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") + restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress") token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") @@ -331,7 +332,7 @@ def create_toprow(is_img2img): prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - return prompt, prompt_styles, negative_prompt, submit, restore_progress, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button + return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button def setup_progressbar(*args, **kwargs): @@ -448,7 +449,7 @@ def create_ui(): modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, restore_progress, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, 
negative_token_counter, negative_token_button = create_toprow(is_img2img=False) + txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) @@ -580,8 +581,8 @@ def create_ui(): res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False) - restore_progress.click( - fn=lambda: restore_progress_call('txt2img'), + restore_progress_button.click( + fn=lambda: restore_progress_call(), _js="() => restoreProgress('txt2img')", inputs=[], outputs=[ @@ -660,7 +661,7 @@ def create_ui(): modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, restore_progress, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=True) + img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) @@ -912,8 +913,8 @@ def create_ui(): submit.click(**img2img_args) res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False) - restore_progress.click( - fn=lambda: restore_progress_call('img2img'), + restore_progress_button.click( + fn=lambda: restore_progress_call(), _js="() => restoreProgress('img2img')", inputs=[], outputs=[ From 70ab21e67d128b953fbf4a360e02ac783f40dd55 Mon Sep 17 00:00:00 2001 From: siutin Date: Wed, 29 Mar 2023 00:17:19 +0800 Subject: [PATCH 046/160] keep randomId simpler --- javascript/progressbar.js | 4 ++-- javascript/ui.js | 10 ++-------- modules/progress.py | 4 ++-- 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/javascript/progressbar.js b/javascript/progressbar.js index 7ba14192f..4ac9b8db1 100644 --- a/javascript/progressbar.js +++ b/javascript/progressbar.js @@ -59,8 +59,8 @@ function setTitle(progress){ } -function randomId(prefix=null){ - return "task(" + (prefix == null ? 
"" : prefix + "_") + Math.random().toString(36).slice(2, 7) + Math.random().toString(36).slice(2, 7) + Math.random().toString(36).slice(2, 7)+")" +function randomId(){ + return "task(" + Math.random().toString(36).slice(2, 7) + Math.random().toString(36).slice(2, 7) + Math.random().toString(36).slice(2, 7)+")" } // starts sending progress requests to "/internal/progress" uri, creating progressbar above progressbarContainer element and diff --git a/javascript/ui.js b/javascript/ui.js index 9fe884c0e..c9df066de 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -163,7 +163,7 @@ function submit(){ rememberGallerySelection('txt2img_gallery') showSubmitButtons('txt2img', false) - var id = randomId("txt2img") + var id = randomId() requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function(){ showSubmitButtons('txt2img', true) @@ -180,7 +180,7 @@ function submit_img2img(){ rememberGallerySelection('img2img_gallery') showSubmitButtons('img2img', false) - var id = randomId("img2img") + var id = randomId() requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function(){ showSubmitButtons('img2img', true) }) @@ -367,12 +367,6 @@ function restoreProgress (task_tag) { if (task_tag) { let successHandler = ({ current_task }) => { if (current_task) { - let _task_tag = ["txt2img", "img2img"].find(t => current_task.startsWith(`task(${t}_`) && current_task.endsWith(")")) - if (!_task_tag) { - console.warn(`task tag ${current_task} not implemented yet`) - return - } - if (task_tag != _task_tag) return showSubmitButtons(task_tag, false) requestProgress(current_task, gradioApp().getElementById(`${task_tag}_gallery_container`), gradioApp().getElementById(`${task_tag}_gallery`), function(){ showSubmitButtons(task_tag, true) diff --git a/modules/progress.py b/modules/progress.py index 1947c0fdd..e99267f56 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -49,8 +49,8 @@ def set_last_task_result(id_job, result): last_task_result = result -def restore_progress_call(task_tag): - if current_task is None or not current_task[5:-1].startswith(task_tag): +def restore_progress_call(): + if current_task is None: # image, generation_info, html_info, html_log return tuple(list([None, None, None, None])) From d5063e07e8b4737621978feffd37b18077b9ea64 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Thu, 30 Mar 2023 10:57:54 -0400 Subject: [PATCH 047/160] update torch --- .gitignore | 2 +- environment-wsl2.yaml | 11 ++++++----- launch.py | 6 +++--- requirements.txt | 1 + requirements_versions.txt | 4 ++-- webui-macos-env.sh | 2 +- webui.py | 2 ++ 7 files changed, 16 insertions(+), 12 deletions(-) diff --git a/.gitignore b/.gitignore index 0b1d17ca3..3b48ba9a7 100644 --- a/.gitignore +++ b/.gitignore @@ -32,4 +32,4 @@ notification.mp3 /extensions /test/stdout.txt /test/stderr.txt -/cache.json +/cache.json* diff --git a/environment-wsl2.yaml b/environment-wsl2.yaml index f88727507..061345650 100644 --- a/environment-wsl2.yaml +++ b/environment-wsl2.yaml @@ -4,8 +4,9 @@ channels: - defaults dependencies: - python=3.10 - - pip=22.2.2 - - cudatoolkit=11.3 - - pytorch=1.12.1 - - torchvision=0.13.1 - - numpy=1.23.1 \ No newline at end of file + - pip=23.0 + - cudatoolkit=11.8 + - pytorch=2.0 + - torchvision=0.15 + - numpy=1.23 + \ No newline at end of file diff --git a/launch.py b/launch.py index 68e08114d..37c8b5164 100644 --- a/launch.py +++ b/launch.py @@ -225,10 +225,10 
@@ def run_extensions_installers(settings_file): def prepare_environment(): global skip_install - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117") + torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") - xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.16rc425') + xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b") @@ -296,7 +296,7 @@ def prepare_environment(): if not os.path.isfile(requirements_file): requirements_file = os.path.join(script_path, requirements_file) - run_pip(f"install -r \"{requirements_file}\"", "requirements for Web UI") + run_pip(f"install -r \"{requirements_file}\"", "requirements") run_extensions_installers(settings_file=args.ui_settings_file) diff --git a/requirements.txt b/requirements.txt index c72b2927e..36cdae6c3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ +astunparse blendmodes accelerate basicsr diff --git a/requirements_versions.txt b/requirements_versions.txt index df65431a3..6487f1c30 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -1,10 +1,10 @@ blendmodes==2022 transformers==4.25.1 -accelerate==0.12.0 +accelerate==0.18.0 basicsr==1.4.2 gfpgan==1.3.8 gradio==3.23 -numpy==1.23.3 +numpy==1.23.5 Pillow==9.4.0 realesrgan==0.3.0 torch diff --git a/webui-macos-env.sh b/webui-macos-env.sh index 37cac4fb0..65d804134 100644 --- a/webui-macos-env.sh +++ b/webui-macos-env.sh @@ -11,7 +11,7 @@ fi export install_dir="$HOME" export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate" -export TORCH_COMMAND="pip install torch==1.12.1 torchvision==0.13.1" +export TORCH_COMMAND="pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118" export K_DIFFUSION_REPO="https://github.com/brkirch/k-diffusion.git" export K_DIFFUSION_COMMIT_HASH="51c9778f269cedb55a4d88c79c0246d35bdadb71" export PYTORCH_ENABLE_MPS_FALLBACK=1 diff --git a/webui.py b/webui.py index b570895fb..54552cdd6 100644 --- a/webui.py +++ b/webui.py @@ -20,6 +20,8 @@ startup_timer = timer.Timer() import torch import pytorch_lightning # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning") +warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision") + startup_timer.record("import torch") import gradio From a73f3bf0cfc89cde294b42f5c566017daf4b2ccd Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 30 Mar 2023 23:19:40 -0600 Subject: [PATCH 048/160] Change extras "scale to" to sliders --- scripts/postprocessing_upscale.py | 14 ++++++++++---- style.css | 4 ++++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/scripts/postprocessing_upscale.py 
b/scripts/postprocessing_upscale.py index 11eab31a5..bc43719bd 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -4,8 +4,9 @@ import numpy as np from modules import scripts_postprocessing, shared import gradio as gr -from modules.ui_components import FormRow +from modules.ui_components import FormRow, ToolButton +switch_values_symbol = '\U000021C5' # ⇅ upscale_cache = {} @@ -25,9 +26,12 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to: with FormRow(): - upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w") - upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h") - upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") + with gr.Column(elem_id="upscaling_column_size", scale=4): + upscaling_resize_w = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="extras_upscaling_resize_w") + upscaling_resize_h = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="extras_upscaling_resize_w") + with gr.Column(elem_id="upscaling_dimensions_row", scale=1, elem_classes="dimensions-tools"): + upscaling_res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="upscaling_res_switch_btn") + upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") with FormRow(): extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) @@ -36,6 +40,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility") + upscaling_res_switch_btn.click(lambda w, h: (h, w), inputs=[upscaling_resize_w, upscaling_resize_h], outputs=[upscaling_resize_w, upscaling_resize_h], show_progress=False) tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab]) tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab]) @@ -45,6 +50,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): "upscale_to_width": upscaling_resize_w, "upscale_to_height": upscaling_resize_h, "upscale_crop": upscaling_crop, + "upscaling_res_switch_btn": upscaling_res_switch_btn, "upscaler_1_name": extras_upscaler_1, "upscaler_2_name": extras_upscaler_2, "upscaler_2_visibility": extras_upscaler_2_visibility, diff --git a/style.css b/style.css index de16a7f2f..aafc23627 100644 --- a/style.css +++ b/style.css @@ -312,6 +312,10 @@ div.dimensions-tools{ align-content: center; } +div#extras_scale_to_tab div.form{ + flex-direction: row; +} + #mode_img2img .gradio-image > div.fixed-height, #mode_img2img .gradio-image > div.fixed-height img{ height: 480px !important; max-height: 480px !important; From 69ad46b047678a7a97a152a20e702bac61e37b8b Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 30 Mar 2023 23:25:39 -0600 Subject: [PATCH 049/160] Import switch_values_symbol --- scripts/postprocessing_upscale.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git 
a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index bc43719bd..bf27b64d0 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -5,8 +5,7 @@ from modules import scripts_postprocessing, shared import gradio as gr from modules.ui_components import FormRow, ToolButton - -switch_values_symbol = '\U000021C5' # ⇅ +from modules.ui import switch_values_symbol upscale_cache = {} From 3ebdd2afd3769046289880d44bbe1322a832073f Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 31 Mar 2023 00:56:38 -0600 Subject: [PATCH 050/160] Don't return upscaling_res_switch_btn --- scripts/postprocessing_upscale.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index bf27b64d0..e60208ac3 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -49,7 +49,6 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): "upscale_to_width": upscaling_resize_w, "upscale_to_height": upscaling_resize_h, "upscale_crop": upscaling_crop, - "upscaling_res_switch_btn": upscaling_res_switch_btn, "upscaler_1_name": extras_upscaler_1, "upscaler_2_name": extras_upscaler_2, "upscaler_2_visibility": extras_upscaler_2_visibility, From 18e4ca46944201ccd0b506201d5da441eba93080 Mon Sep 17 00:00:00 2001 From: Z_nonymous <0x29a@free.fr> Date: Fri, 31 Mar 2023 10:54:42 +0200 Subject: [PATCH 051/160] Fix #9185 --- modules/img2img.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index 953ac5d2d..5ce408f48 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -4,7 +4,7 @@ import sys import traceback import numpy as np -from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops +from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError from modules import devices, sd_samplers from modules.generation_parameters_copypaste import create_override_settings_dict @@ -46,7 +46,10 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): if state.interrupted: break - img = Image.open(image) + try: + img = Image.open(image) + except UnidentifiedImageError: + continue # Use the EXIF orientation of photos taken by smartphones. img = ImageOps.exif_transpose(img) p.init_images = [img] * p.batch_size From c938b172a49433291e246b04f9835f3383bad0c8 Mon Sep 17 00:00:00 2001 From: bbonvi <6573230@gmail.com> Date: Fri, 31 Mar 2023 19:29:34 +0600 Subject: [PATCH 052/160] fix missing live preview and progress during certain tasks Sometimes tasks take longer than 5 seconds to start, resulting in missing progress bar and livepreviews, so we have to keep pulling for progress a bit longer (5s -> 20s). 
--- javascript/progressbar.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/progressbar.js b/javascript/progressbar.js index 4ac9b8db1..eb44aab93 100644 --- a/javascript/progressbar.js +++ b/javascript/progressbar.js @@ -138,7 +138,7 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre return } - if(elapsedFromStart > 5 && !res.queued && !res.active){ + if(elapsedFromStart > 20 && !res.queued && !res.active){ removeProgressBar() return } From 9dc722bcf27d6d2cd9f275df4f2aae23f58c1122 Mon Sep 17 00:00:00 2001 From: wywywywy Date: Sat, 1 Apr 2023 10:39:50 +0100 Subject: [PATCH 053/160] bug: outpaint-mk2 use sample file format not grid --- scripts/outpainting_mk_2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index 0906da6ae..670bb8ace 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -275,7 +275,7 @@ class Script(scripts.Script): if opts.samples_save: for img in all_processed_images: - images.save_image(img, p.outpath_samples, "", res.seed, p.prompt, opts.grid_format, info=res.info, p=p) + images.save_image(img, p.outpath_samples, "", res.seed, p.prompt, opts.samples_format, info=res.info, p=p) if opts.grid_save and not unwanted_grid_because_of_img_count: images.save_image(combined_grid_image, p.outpath_grids, "grid", res.seed, p.prompt, opts.grid_format, info=res.info, short_filename=not opts.grid_extended_filename, grid=True, p=p) From 80b847e72dacabbf334d37bb4e6cc65a9a223ce3 Mon Sep 17 00:00:00 2001 From: wywywywy Date: Sat, 1 Apr 2023 10:47:49 +0100 Subject: [PATCH 054/160] bug: poorman use sample file format not grid --- scripts/poor_mans_outpainting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index d8feda00a..ddcbd2d3a 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -138,7 +138,7 @@ class Script(scripts.Script): combined_image = images.combine_grid(grid) if opts.samples_save: - images.save_image(combined_image, p.outpath_samples, "", initial_seed, p.prompt, opts.grid_format, info=initial_info, p=p) + images.save_image(combined_image, p.outpath_samples, "", initial_seed, p.prompt, opts.samples_format, info=initial_info, p=p) processed = Processed(p, [combined_image], initial_seed, initial_info) From d132481058f8a827cd407f2121f128a2bb862f7a Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sun, 2 Apr 2023 17:41:55 -0500 Subject: [PATCH 055/160] Embed model merge metadata in .safetensors file --- modules/extras.py | 44 ++++++++++++++++++++++++++++++++++++++++++-- modules/sd_models.py | 11 ++++++++++- modules/ui.py | 4 +++- 3 files changed, 55 insertions(+), 4 deletions(-) diff --git a/modules/extras.py b/modules/extras.py index d8ece9557..77d885923 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -1,6 +1,7 @@ import os import re import shutil +import json import torch @@ -71,7 +72,7 @@ def to_half(tensor, enable): return tensor -def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights): +def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, 
discard_weights, save_metadata): shared.state.begin() shared.state.job = 'model-merge' @@ -241,13 +242,52 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ shared.state.textinfo = "Saving" print(f"Saving to {output_modelname}...") + metadata = {"format": "pt", "models": {}, "merge_recipe": None} + + if save_metadata: + merge_recipe = { + "primary_model_hash": primary_model_info.sha256, + "secondary_model_hash": secondary_model_info.sha256 if secondary_model_info else None, + "tertiary_model_hash": tertiary_model_info.sha256 if tertiary_model_info else None, + "interp_method": interp_method, + "multiplier": multiplier, + "save_as_half": save_as_half, + "custom_name": custom_name, + "config_source": config_source, + "bake_in_vae": bake_in_vae, + "discard_weights": discard_weights, + "is_inpainting": result_is_inpainting_model, + "is_instruct_pix2pix": result_is_instruct_pix2pix_model + } + metadata["merge_recipe"] = json.dumps(merge_recipe) + + def add_model_metadata(checkpoint_info): + metadata["models"][checkpoint_info.sha256] = { + "name": checkpoint_info.name, + "legacy_hash": checkpoint_info.hash, + "merge_recipe": checkpoint_info.metadata.get("merge_recipe", None) + } + + metadata["models"].update(checkpoint_info.metadata.get("models", {})) + + add_model_metadata(primary_model_info) + if secondary_model_info: + add_model_metadata(secondary_model_info) + if tertiary_model_info: + add_model_metadata(tertiary_model_info) + + metadata["models"] = json.dumps(metadata["models"]) + _, extension = os.path.splitext(output_modelname) if extension.lower() == ".safetensors": - safetensors.torch.save_file(theta_0, output_modelname, metadata={"format": "pt"}) + safetensors.torch.save_file(theta_0, output_modelname, metadata=metadata) else: torch.save(theta_0, output_modelname) sd_models.list_models() + created_model = next((ckpt for ckpt in sd_models.checkpoints_list.values() if ckpt.name == filename), None) + if created_model: + created_model.calculate_shorthash() create_config(output_modelname, config_source, primary_model_info, secondary_model_info, tertiary_model_info) diff --git a/modules/sd_models.py b/modules/sd_models.py index 6ea874dfc..4f7613a14 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -52,6 +52,15 @@ class CheckpointInfo: self.ids = [self.hash, self.model_name, self.title, name, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else []) + self.metadata = {} + + _, ext = os.path.splitext(self.filename) + if ext.lower() == ".safetensors": + try: + self.metadata = read_metadata_from_safetensors(filename) + except Exception as e: + errors.display(e, f"reading checkpoint metadata: {filename}") + def register(self): checkpoints_list[self.title] = self for id in self.ids: @@ -544,4 +553,4 @@ def unload_model_weights(sd_model=None, info=None): print(f"Unloaded weights {timer.summary()}.") - return sd_model \ No newline at end of file + return sd_model diff --git a/modules/ui.py b/modules/ui.py index 627fbe0b5..64fb93c33 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1019,8 +1019,9 @@ def create_ui(): interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) with FormRow(): - checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="ckpt", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") + checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", 
elem_id="modelmerger_checkpoint_format") save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") + save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") with FormRow(): with gr.Column(): @@ -1658,6 +1659,7 @@ def create_ui(): config_source, bake_in_vae, discard_weights, + save_metadata, ], outputs=[ primary_model_name, From afc349c2c0d7c7543e8cc085cde2beef8549fffc Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sun, 2 Apr 2023 18:40:33 -0500 Subject: [PATCH 056/160] Add field for model merge type Incase this is supported by other merge extensions --- modules/extras.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/extras.py b/modules/extras.py index 77d885923..9a00c9a3e 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -246,6 +246,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ if save_metadata: merge_recipe = { + "type": "webui", # indicate this model was merged with webui's built-in merger "primary_model_hash": primary_model_info.sha256, "secondary_model_hash": secondary_model_info.sha256 if secondary_model_info else None, "tertiary_model_hash": tertiary_model_info.sha256 if tertiary_model_info else None, From 7c016dd642cc29e064715ac04ab3d83c5451b45e Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sun, 2 Apr 2023 19:06:39 -0500 Subject: [PATCH 057/160] Calculate shorthash on merge if not exist --- modules/extras.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/extras.py b/modules/extras.py index 9a00c9a3e..97d14e5aa 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -263,6 +263,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ metadata["merge_recipe"] = json.dumps(merge_recipe) def add_model_metadata(checkpoint_info): + checkpoint_info.calculate_shorthash() metadata["models"][checkpoint_info.sha256] = { "name": checkpoint_info.name, "legacy_hash": checkpoint_info.hash, From 9a4e650800adc444c07a48572a958a71c2144c15 Mon Sep 17 00:00:00 2001 From: Pluventi Date: Mon, 3 Apr 2023 03:32:48 +0200 Subject: [PATCH 058/160] Update postprocessing.py Solution for anyone getting an error when batching on extras, even with a clean install of "stable diffusion webui" --- modules/postprocessing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 09d8e6056..63b9caf82 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -18,7 +18,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if extras_mode == 1: for img in image_folder: - image = Image.open(img) + image = Image.open(img.name) image_data.append(image) image_names.append(os.path.splitext(img.orig_name)[0]) elif extras_mode == 2: From fbaf6e4fd897fa1f3e3f747f1d699c240cad76a0 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sun, 2 Apr 2023 21:41:23 -0500 Subject: [PATCH 059/160] Namespace metadata fields --- modules/extras.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/extras.py b/modules/extras.py index 97d14e5aa..ff4e9c4e5 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -242,7 +242,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ shared.state.textinfo = "Saving" print(f"Saving 
to {output_modelname}...") - metadata = {"format": "pt", "models": {}, "merge_recipe": None} + metadata = {"format": "pt", "sd_merge_models": {}, "sd_merge_recipe": None} if save_metadata: merge_recipe = { @@ -260,17 +260,17 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ "is_inpainting": result_is_inpainting_model, "is_instruct_pix2pix": result_is_instruct_pix2pix_model } - metadata["merge_recipe"] = json.dumps(merge_recipe) + metadata["sd_merge_recipe"] = json.dumps(merge_recipe) def add_model_metadata(checkpoint_info): checkpoint_info.calculate_shorthash() - metadata["models"][checkpoint_info.sha256] = { + metadata["sd_merge_models"][checkpoint_info.sha256] = { "name": checkpoint_info.name, "legacy_hash": checkpoint_info.hash, - "merge_recipe": checkpoint_info.metadata.get("merge_recipe", None) + "sd_merge_recipe": checkpoint_info.metadata.get("sd_merge_recipe", None) } - metadata["models"].update(checkpoint_info.metadata.get("models", {})) + metadata["sd_merge_models"].update(checkpoint_info.metadata.get("sd_merge_models", {})) add_model_metadata(primary_model_info) if secondary_model_info: @@ -278,7 +278,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ if tertiary_model_info: add_model_metadata(tertiary_model_info) - metadata["models"] = json.dumps(metadata["models"]) + metadata["sd_merge_models"] = json.dumps(metadata["sd_merge_models"]) _, extension = os.path.splitext(output_modelname) if extension.lower() == ".safetensors": From d9fdb5214922b10a16c66207376953460972c6e8 Mon Sep 17 00:00:00 2001 From: GeorgLegato Date: Mon, 3 Apr 2023 04:53:29 +0200 Subject: [PATCH 060/160] Update script.js updated how tabs are presented in DOM with Gradio 3.23. --- script.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script.js b/script.js index 1b9a443f9..03afe8445 100644 --- a/script.js +++ b/script.js @@ -7,7 +7,7 @@ function gradioApp() { } function get_uiCurrentTab() { - return gradioApp().querySelector('#tabs button:not(.border-transparent)') + return gradioApp().querySelector('#tabs button.selected') } function get_uiCurrentTabContent() { From aef42bfec09a9ca93d1222b7b47256f37e192a32 Mon Sep 17 00:00:00 2001 From: keith <1868690+wk5ovc@users.noreply.github.com> Date: Mon, 3 Apr 2023 17:05:49 +0800 Subject: [PATCH 061/160] Fix #9046 /sdapi/v1/txt2img endpoint not working **Describe what this pull request is trying to achieve.** Fix https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/9046 **Environment this was tested in** * OS: Linux * Browser: chrome * Graphics card: RTX 3090 --- webui.py | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/webui.py b/webui.py index b570895fb..8927aa330 100644 --- a/webui.py +++ b/webui.py @@ -5,6 +5,7 @@ import importlib import signal import re import warnings +import asyncio from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.gzip import GZipMiddleware @@ -66,6 +67,46 @@ if cmd_opts.server_name: else: server_name = "0.0.0.0" if cmd_opts.listen else None +if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"): + # "Any thread" and "selector" should be orthogonal, but there's not a clean + # interface for composing policies so pick the right base. 
+ _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore +else: + _BasePolicy = asyncio.DefaultEventLoopPolicy + + +class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore + """Event loop policy that allows loop creation on any thread. + + The default `asyncio` event loop policy only automatically creates + event loops in the main threads. Other threads must create event + loops explicitly or `asyncio.get_event_loop` (and therefore + `.IOLoop.current`) will fail. Installing this policy allows event + loops to be created automatically on any thread, matching the + behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2). + + Usage:: + + asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) + + .. versionadded:: 5.0 + + """ + + def get_event_loop(self) -> asyncio.AbstractEventLoop: + try: + return super().get_event_loop() + except (RuntimeError, AssertionError): + # This was an AssertionError in python 3.4.2 (which ships with debian jessie) + # and changed to a RuntimeError in 3.4.3. + # "There is no current event loop in thread %r" + loop = self.new_event_loop() + self.set_event_loop(loop) + return loop + + +asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) + def check_versions(): if shared.cmd_opts.skip_version_check: From d537a1f1b62de7795a77feb71b4c990389c30087 Mon Sep 17 00:00:00 2001 From: Micky Brunetti Date: Tue, 4 Apr 2023 00:14:20 +0900 Subject: [PATCH 062/160] Fix skip-install bug (see #8935) --- launch.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/launch.py b/launch.py index 68e08114d..b8f650fc8 100644 --- a/launch.py +++ b/launch.py @@ -19,7 +19,6 @@ python = sys.executable git = os.environ.get('GIT', "git") index_url = os.environ.get('INDEX_URL', "") stored_commit_hash = None -skip_install = False dir_repos = "repositories" if 'GRADIO_ANALYTICS_ENABLED' not in os.environ: @@ -122,7 +121,7 @@ def run_python(code, desc=None, errdesc=None): def run_pip(args, desc=None): - if skip_install: + if "--skip-install" in sys.argv: return index_url_line = f' --index-url {index_url}' if index_url != '' else '' @@ -223,8 +222,6 @@ def run_extensions_installers(settings_file): def prepare_environment(): - global skip_install - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") From f7215906af573f6e60b006cd430e82691ba4f8d6 Mon Sep 17 00:00:00 2001 From: gmasil <54176035+gmasil@users.noreply.github.com> Date: Mon, 3 Apr 2023 18:19:57 +0200 Subject: [PATCH 063/160] allow styles.csv to be symlinked or mounted in docker without moving the file around --- modules/styles.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/modules/styles.py b/modules/styles.py index 990d56236..9ed859911 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -72,16 +72,14 @@ class StyleDatabase: return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles]) def save_styles(self, path: str) -> None: - # Write to temporary file first, so we don't nuke the file if something goes wrong - fd, temp_path = tempfile.mkstemp(".csv") + # Always keep a backup file around + if os.path.exists(path): + shutil.copy(path, path + ".bak") + + fd = os.open(path, os.O_RDWR|os.O_CREAT) with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file: # _fields is actually part of the public API: typing.NamedTuple 
is a replacement for collections.NamedTuple, # and collections.NamedTuple has explicit documentation for accessing _fields. Same goes for _asdict() writer = csv.DictWriter(file, fieldnames=PromptStyle._fields) writer.writeheader() writer.writerows(style._asdict() for k, style in self.styles.items()) - - # Always keep a backup file around - if os.path.exists(path): - shutil.move(path, path + ".bak") - shutil.move(temp_path, path) From 54fd00ff8f6fc1396ce0396772962b45609e7a9c Mon Sep 17 00:00:00 2001 From: Liam Date: Mon, 3 Apr 2023 13:28:20 -0400 Subject: [PATCH 064/160] fixed logic for updating the displayed generation params when the image modal is closed --- javascript/generationParams.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/javascript/generationParams.js b/javascript/generationParams.js index 95f050939..1266a266c 100644 --- a/javascript/generationParams.js +++ b/javascript/generationParams.js @@ -16,9 +16,9 @@ onUiUpdate(function(){ let modalObserver = new MutationObserver(function(mutations) { mutations.forEach(function(mutationRecord) { - let selectedTab = gradioApp().querySelector('#tabs div button.bg-white')?.innerText - if (mutationRecord.target.style.display === 'none' && selectedTab === 'txt2img' || selectedTab === 'img2img') - gradioApp().getElementById(selectedTab+"_generation_info_button").click() + let selectedTab = gradioApp().querySelector('#tabs div button.selected')?.innerText + if (mutationRecord.target.style.display === 'none' && (selectedTab === 'txt2img' || selectedTab === 'img2img')) + gradioApp().getElementById(selectedTab+"_generation_info_button")?.click() }); }); From 4fa59b045add1d23350e884e201dc77bc34864e6 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Mon, 3 Apr 2023 15:23:35 -0400 Subject: [PATCH 065/160] update xformers --- environment-wsl2.yaml | 1 - launch.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/environment-wsl2.yaml b/environment-wsl2.yaml index 061345650..0c4ae6809 100644 --- a/environment-wsl2.yaml +++ b/environment-wsl2.yaml @@ -9,4 +9,3 @@ dependencies: - pytorch=2.0 - torchvision=0.15 - numpy=1.23 - \ No newline at end of file diff --git a/launch.py b/launch.py index 37c8b5164..c5ae90926 100644 --- a/launch.py +++ b/launch.py @@ -228,7 +228,7 @@ def prepare_environment(): torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") - xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') + xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.18') gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b") From 5ebe3b25044efac959c4d8f208dd17d6f1e5ce8a Mon Sep 17 00:00:00 2001 From: Nathanael Santoso <73165142+nart4hire@users.noreply.github.com> Date: Tue, 4 Apr 2023 06:50:29 +0000 Subject: [PATCH 066/160] Added guard clause to prevent multiple tunnel creations --- modules/ngrok.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/modules/ngrok.py b/modules/ngrok.py index 3df2c06bf..90268bf1c 100644 --- a/modules/ngrok.py +++ 
b/modules/ngrok.py @@ -1,6 +1,14 @@ from pyngrok import ngrok, conf, exception def connect(token, port, region): + # Guard for existing tunnels + existing = ngrok.get_tunnels() + if existing: + public_url = existing[0].public_url + print(f'ngrok connected to localhost:{port}! URL: {public_url}\n' + 'You can use this link after the launch is complete.') + return + account = None if token is None: token = 'None' From 2edf73b38fbea4ce67c38edafea8a8983b9435b5 Mon Sep 17 00:00:00 2001 From: Nathanael Santoso <73165142+nart4hire@users.noreply.github.com> Date: Tue, 4 Apr 2023 06:57:39 +0000 Subject: [PATCH 067/160] Improved message clarity --- modules/ngrok.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ngrok.py b/modules/ngrok.py index 90268bf1c..5302b6a84 100644 --- a/modules/ngrok.py +++ b/modules/ngrok.py @@ -5,7 +5,7 @@ def connect(token, port, region): existing = ngrok.get_tunnels() if existing: public_url = existing[0].public_url - print(f'ngrok connected to localhost:{port}! URL: {public_url}\n' + print(f'ngrok has already been connected to localhost:{port}! URL: {public_url}\n' 'You can use this link after the launch is complete.') return From 3158d17ccf6adfe974691f46526773df2f4028f5 Mon Sep 17 00:00:00 2001 From: Nathanael Santoso <73165142+nart4hire@users.noreply.github.com> Date: Tue, 4 Apr 2023 07:41:55 +0000 Subject: [PATCH 068/160] fixed an issue with using ngrok for other connections and also ngrok not using auth_token --- modules/ngrok.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/modules/ngrok.py b/modules/ngrok.py index 5302b6a84..1ad7989bb 100644 --- a/modules/ngrok.py +++ b/modules/ngrok.py @@ -1,14 +1,6 @@ from pyngrok import ngrok, conf, exception def connect(token, port, region): - # Guard for existing tunnels - existing = ngrok.get_tunnels() - if existing: - public_url = existing[0].public_url - print(f'ngrok has already been connected to localhost:{port}! URL: {public_url}\n' - 'You can use this link after the launch is complete.') - return - account = None if token is None: token = 'None' @@ -21,6 +13,18 @@ def connect(token, port, region): config = conf.PyngrokConfig( auth_token=token, region=region ) + + # Guard for existing tunnels + existing = ngrok.get_tunnels(pyngrok_config=config) + if existing: + for established in existing: + # Extra configuration in the case that the user is also using ngrok for other tunnels + if established.config['addr'][-4:] == str(port): + public_url = existing[0].public_url + print(f'ngrok has already been connected to localhost:{port}! 
URL: {public_url}\n' + 'You can use this link after the launch is complete.') + return + try: if account is None: public_url = ngrok.connect(port, pyngrok_config=config, bind_tls=True).public_url From 539a69860bafe1c922029722a50462f3ef0db5e4 Mon Sep 17 00:00:00 2001 From: hitomi Date: Sat, 25 Mar 2023 13:58:53 -0700 Subject: [PATCH 069/160] fix `--realesrgan-models-path` not working --- modules/realesrgan_model.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index aad4a6298..d60794330 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -9,7 +9,7 @@ from realesrgan import RealESRGANer from modules.upscaler import Upscaler, UpscalerData from modules.shared import cmd_opts, opts - +from modules import modelloader class UpscalerRealESRGAN(Upscaler): def __init__(self, path): @@ -23,7 +23,15 @@ class UpscalerRealESRGAN(Upscaler): self.enable = True self.scalers = [] scalers = self.load_models(path) + + local_model_paths = self.find_models(ext_filter=[".pth"]) for scaler in scalers: + if scaler.local_data_path.startswith("http"): + filename = modelloader.friendly_name(scaler.local_data_path) + local = next(iter([local_model for local_model in local_model_paths if local_model.endswith(filename + '.pth')]), None) + if local: + scaler.local_data_path = local + if scaler.name in opts.realesrgan_enabled_models: self.scalers.append(scaler) @@ -64,7 +72,9 @@ class UpscalerRealESRGAN(Upscaler): print(f"Unable to find model info: {path}") return None - info.local_data_path = load_file_from_url(url=info.data_path, model_dir=self.model_path, progress=True) + if info.local_data_path.startswith("http"): + info.local_data_path = load_file_from_url(url=info.data_path, model_dir=self.model_path, progress=True) + return info except Exception as e: print(f"Error making Real-ESRGAN models list: {e}", file=sys.stderr) From 2ba42bfbd2682c647df07e3baf0d013fbf516329 Mon Sep 17 00:00:00 2001 From: hitomi Date: Sat, 25 Mar 2023 14:02:29 -0700 Subject: [PATCH 070/160] fix `--ldsr-models-path` not working --- extensions-builtin/LDSR/scripts/ldsr_model.py | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py index b8cff29b9..da19cff12 100644 --- a/extensions-builtin/LDSR/scripts/ldsr_model.py +++ b/extensions-builtin/LDSR/scripts/ldsr_model.py @@ -25,22 +25,28 @@ class UpscalerLDSR(Upscaler): yaml_path = os.path.join(self.model_path, "project.yaml") old_model_path = os.path.join(self.model_path, "model.pth") new_model_path = os.path.join(self.model_path, "model.ckpt") - safetensors_model_path = os.path.join(self.model_path, "model.safetensors") + + local_model_paths = self.find_models(ext_filter=[".ckpt", ".safetensors"]) + local_ckpt_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.ckpt")]), None) + local_safetensors_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.safetensors")]), None) + local_yaml_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("project.yaml")]), None) + if os.path.exists(yaml_path): statinfo = os.stat(yaml_path) if statinfo.st_size >= 10485760: print("Removing invalid LDSR YAML file.") os.remove(yaml_path) + if os.path.exists(old_model_path): print("Renaming model from model.pth to model.ckpt") os.rename(old_model_path, 
new_model_path) - if os.path.exists(safetensors_model_path): - model = safetensors_model_path + + if local_safetensors_path is not None and os.path.exists(local_safetensors_path): + model = local_safetensors_path else: - model = load_file_from_url(url=self.model_url, model_dir=self.model_path, - file_name="model.ckpt", progress=True) - yaml = load_file_from_url(url=self.yaml_url, model_dir=self.model_path, - file_name="project.yaml", progress=True) + model = local_ckpt_path if local_ckpt_path is not None else load_file_from_url(url=self.model_url, model_dir=self.model_path, file_name="model.ckpt", progress=True) + + yaml = local_yaml_path if local_yaml_path is not None else load_file_from_url(url=self.yaml_url, model_dir=self.model_path, file_name="project.yaml", progress=True) try: return LDSR(model, yaml) From 80752f43b22acd85bf6ab54b2e4788f144a0c813 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Tue, 4 Apr 2023 17:27:27 -0400 Subject: [PATCH 071/160] revert xformers --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index c5ae90926..37c8b5164 100644 --- a/launch.py +++ b/launch.py @@ -228,7 +228,7 @@ def prepare_environment(): torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") - xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.18') + xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b") From c01dc1cb30f7cd87e1df6458580da99c702ee513 Mon Sep 17 00:00:00 2001 From: pangbo13 <373108669@qq.com> Date: Wed, 5 Apr 2023 19:22:51 +0800 Subject: [PATCH 072/160] add dropdown for X/Y/Z plot --- scripts/xyz_grid.py | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 3895a795c..774fa2c76 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -374,16 +374,19 @@ class Script(scripts.Script): with gr.Row(): x_type = gr.Dropdown(label="X type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[1].label, type="index", elem_id=self.elem_id("x_type")) x_values = gr.Textbox(label="X values", lines=1, elem_id=self.elem_id("x_values")) + x_values_dropdown = gr.Dropdown(label="X values",visible=False,multiselect=True,interactive=True) fill_x_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_x_tool_button", visible=False) with gr.Row(): y_type = gr.Dropdown(label="Y type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("y_type")) y_values = gr.Textbox(label="Y values", lines=1, elem_id=self.elem_id("y_values")) + y_values_dropdown = gr.Dropdown(label="Y values",visible=False,multiselect=True,interactive=True) fill_y_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_y_tool_button", visible=False) with gr.Row(): z_type = gr.Dropdown(label="Z type", choices=[x.label for x in 
self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("z_type")) z_values = gr.Textbox(label="Z values", lines=1, elem_id=self.elem_id("z_values")) + z_values_dropdown = gr.Dropdown(label="Z values",visible=False,multiselect=True,interactive=True) fill_z_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_z_tool_button", visible=False) with gr.Row(variant="compact", elem_id="axis_options"): @@ -413,18 +416,20 @@ class Script(scripts.Script): def fill(x_type): axis = self.current_axis_options[x_type] - return ", ".join(axis.choices()) if axis.choices else gr.update() + return axis.choices() if axis.choices else gr.update() - fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values]) - fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values]) - fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values]) + fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values_dropdown]) + fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values_dropdown]) + fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values_dropdown]) def select_axis(x_type): - return gr.Button.update(visible=self.current_axis_options[x_type].choices is not None) + choices = self.current_axis_options[x_type].choices + has_choices = choices is not None + return gr.Button.update(visible=has_choices),gr.Textbox.update(visible=not has_choices),gr.update(choices=choices() if has_choices else None,visible=has_choices,value=[]) - x_type.change(fn=select_axis, inputs=[x_type], outputs=[fill_x_button]) - y_type.change(fn=select_axis, inputs=[y_type], outputs=[fill_y_button]) - z_type.change(fn=select_axis, inputs=[z_type], outputs=[fill_z_button]) + x_type.change(fn=select_axis, inputs=[x_type], outputs=[fill_x_button,x_values,x_values_dropdown]) + y_type.change(fn=select_axis, inputs=[y_type], outputs=[fill_y_button,y_values,y_values_dropdown]) + z_type.change(fn=select_axis, inputs=[z_type], outputs=[fill_z_button,z_values,z_values_dropdown]) self.infotext_fields = ( (x_type, "X Type"), @@ -435,20 +440,23 @@ class Script(scripts.Script): (z_values, "Z Values"), ) - return [x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size] + return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size] - def run(self, p, x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size): + def run(self, p, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size): if not no_fixed_seeds: modules.processing.fix_seed(p) if not opts.return_grid: p.batch_size = 1 - def process_axis(opt, vals): + def process_axis(opt, vals, vals_dropdown): if opt.label == 'Nothing': return [0] - valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x] + if opt.choices is not None: + valslist = vals_dropdown + else: + valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x] if opt.type == int: valslist_ext = [] @@ -506,13 +514,13 @@ class Script(scripts.Script): return valslist x_opt = self.current_axis_options[x_type] - xs = process_axis(x_opt, x_values) + xs = process_axis(x_opt, x_values, 
x_values_dropdown) y_opt = self.current_axis_options[y_type] - ys = process_axis(y_opt, y_values) + ys = process_axis(y_opt, y_values, y_values_dropdown) z_opt = self.current_axis_options[z_type] - zs = process_axis(z_opt, z_values) + zs = process_axis(z_opt, z_values, z_values_dropdown) # this could be moved to common code, but unlikely to be ever triggered anywhere else Image.MAX_IMAGE_PIXELS = None # disable check in Pillow and rely on check below to allow large custom image sizes From 3ac5f9c471e4cfb5b664f9f0a7f7e7b171b1cee1 Mon Sep 17 00:00:00 2001 From: pangbo13 <373108669@qq.com> Date: Wed, 5 Apr 2023 21:43:27 +0800 Subject: [PATCH 073/160] fix axis swap and infotxt --- scripts/xyz_grid.py | 43 ++++++++++++++++++++++++++++++++----------- 1 file changed, 32 insertions(+), 11 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 774fa2c76..52ae1c6e1 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -404,14 +404,14 @@ class Script(scripts.Script): swap_yz_axes_button = gr.Button(value="Swap Y/Z axes", elem_id="yz_grid_swap_axes_button") swap_xz_axes_button = gr.Button(value="Swap X/Z axes", elem_id="xz_grid_swap_axes_button") - def swap_axes(axis1_type, axis1_values, axis2_type, axis2_values): - return self.current_axis_options[axis2_type].label, axis2_values, self.current_axis_options[axis1_type].label, axis1_values + def swap_axes(axis1_type, axis1_values, axis1_values_dropdown, axis2_type, axis2_values, axis2_values_dropdown): + return self.current_axis_options[axis2_type].label, axis2_values, axis2_values_dropdown, self.current_axis_options[axis1_type].label, axis1_values, axis1_values_dropdown - xy_swap_args = [x_type, x_values, y_type, y_values] + xy_swap_args = [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown] swap_xy_axes_button.click(swap_axes, inputs=xy_swap_args, outputs=xy_swap_args) - yz_swap_args = [y_type, y_values, z_type, z_values] + yz_swap_args = [y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown] swap_yz_axes_button.click(swap_axes, inputs=yz_swap_args, outputs=yz_swap_args) - xz_swap_args = [x_type, x_values, z_type, z_values] + xz_swap_args = [x_type, x_values, x_values_dropdown, z_type, z_values, z_values_dropdown] swap_xz_axes_button.click(swap_axes, inputs=xz_swap_args, outputs=xz_swap_args) def fill(x_type): @@ -422,22 +422,37 @@ class Script(scripts.Script): fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values_dropdown]) fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values_dropdown]) - def select_axis(x_type): - choices = self.current_axis_options[x_type].choices + def select_axis(axis_type,axis_values_dropdown): + choices = self.current_axis_options[axis_type].choices has_choices = choices is not None - return gr.Button.update(visible=has_choices),gr.Textbox.update(visible=not has_choices),gr.update(choices=choices() if has_choices else None,visible=has_choices,value=[]) + current_values = axis_values_dropdown + if has_choices: + choices = choices() + if isinstance(current_values,str): + current_values = current_values.split(",") + current_values = list(filter(lambda x: x in choices, current_values)) + return gr.Button.update(visible=has_choices),gr.Textbox.update(visible=not has_choices),gr.update(choices=choices if has_choices else None,visible=has_choices,value=current_values) - x_type.change(fn=select_axis, inputs=[x_type], outputs=[fill_x_button,x_values,x_values_dropdown]) - y_type.change(fn=select_axis, inputs=[y_type], 
outputs=[fill_y_button,y_values,y_values_dropdown]) - z_type.change(fn=select_axis, inputs=[z_type], outputs=[fill_z_button,z_values,z_values_dropdown]) + x_type.change(fn=select_axis, inputs=[x_type,x_values_dropdown], outputs=[fill_x_button,x_values,x_values_dropdown]) + y_type.change(fn=select_axis, inputs=[y_type,y_values_dropdown], outputs=[fill_y_button,y_values,y_values_dropdown]) + z_type.change(fn=select_axis, inputs=[z_type,z_values_dropdown], outputs=[fill_z_button,z_values,z_values_dropdown]) + + def get_dropdown_update_from_params(axis,params): + val_key = axis + " Values" + vals = params.get(val_key,"") + valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x] + return gr.update(value = valslist) self.infotext_fields = ( (x_type, "X Type"), (x_values, "X Values"), + (x_values_dropdown, lambda params:get_dropdown_update_from_params("X",params)), (y_type, "Y Type"), (y_values, "Y Values"), + (y_values_dropdown, lambda params:get_dropdown_update_from_params("Y",params)), (z_type, "Z Type"), (z_values, "Z Values"), + (z_values_dropdown, lambda params:get_dropdown_update_from_params("Z",params)), ) return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size] @@ -514,12 +529,18 @@ class Script(scripts.Script): return valslist x_opt = self.current_axis_options[x_type] + if x_opt.choices is not None: + x_values = ",".join(x_values_dropdown) xs = process_axis(x_opt, x_values, x_values_dropdown) y_opt = self.current_axis_options[y_type] + if y_opt.choices is not None: + y_values = ",".join(y_values_dropdown) ys = process_axis(y_opt, y_values, y_values_dropdown) z_opt = self.current_axis_options[z_type] + if z_opt.choices is not None: + z_values = ",".join(z_values_dropdown) zs = process_axis(z_opt, z_values, z_values_dropdown) # this could be moved to common code, but unlikely to be ever triggered anywhere else From 52a8f286ef99bb5004bea2b099a7bcbb073b638f Mon Sep 17 00:00:00 2001 From: Andre Ubuntu Date: Wed, 5 Apr 2023 20:28:00 -0300 Subject: [PATCH 074/160] fix preprocess orientation --- modules/textual_inversion/preprocess.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 2239cb842..9cb98694e 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -161,7 +161,25 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre params.subindex = 0 filename = os.path.join(src, imagefile) try: - img = Image.open(filename).convert("RGB") + img = Image.open(filename) + # make sure to rotate the image according to EXIF data of the original image + # ImageOps.exif_transpose(img) # doesn't work for some reason + EXIF = img._getexif() + # rotate the image according to the EXIF data + try: + if EXIF[274] == 3: + # print("Rotating image by 180 degrees") + img = img.rotate(180, expand=True) + elif EXIF[274] == 6: + # print("Rotating image by 270 degrees") + img = img.rotate(270, expand=True) + elif EXIF[274] == 8: + # print("Rotating image by 90 degrees") + img = img.rotate(90, expand=True) + except: + pass + # print("No EXIF data found for image: " + filename) + img = img.convert("RGB") except Exception: continue From 3a5b47e26e8cb1cd640fedfe7cb5263a588d7088 Mon Sep 17 00:00:00 2001 From: DGdev91 Date: Thu, 6 Apr 2023 01:36:27 +0200 
Subject: [PATCH 075/160] Forcing PyTorch version for AMD GPUs automatic install The old code tries to install the newest version of PyTorch, which is currently 2.0. Forcing it to 1.13.1. --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index 8cdad22d3..7d9a8538a 100755 --- a/webui.sh +++ b/webui.sh @@ -118,7 +118,7 @@ case "$gpu_info" in esac if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]] then - export TORCH_COMMAND="pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2" + export TORCH_COMMAND="pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 --extra-index-url https://download.pytorch.org/whl/rocm5.2" fi for preq in "${GIT}" "${python_cmd}" From 48c06af8dc718abf0bf9355ea5548c6a66e0b1e6 Mon Sep 17 00:00:00 2001 From: Andre Ubuntu Date: Wed, 5 Apr 2023 20:51:29 -0300 Subject: [PATCH 076/160] Pythonic way to achieve it --- modules/textual_inversion/preprocess.py | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 9cb98694e..de1ddb59b 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -162,23 +162,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre filename = os.path.join(src, imagefile) try: img = Image.open(filename) - # make sure to rotate the image according to EXIF data of the original image - # ImageOps.exif_transpose(img) # doesn't work for some reason - EXIF = img._getexif() - # rotate the image according to the EXIF data - try: - if EXIF[274] == 3: - # print("Rotating image by 180 degrees") - img = img.rotate(180, expand=True) - elif EXIF[274] == 6: - # print("Rotating image by 270 degrees") - img = img.rotate(270, expand=True) - elif EXIF[274] == 8: - # print("Rotating image by 90 degrees") - img = img.rotate(90, expand=True) - except: - pass - # print("No EXIF data found for image: " + filename) + img = ImageOps.exif_transpose(img) img = img.convert("RGB") except Exception: continue From b3593d0997bfdcca7f8aa01663e81720db50e494 Mon Sep 17 00:00:00 2001 From: For Sure Date: Thu, 6 Apr 2023 19:42:26 +0300 Subject: [PATCH 077/160] Add support for saving init images in img2img --- modules/processing.py | 8 ++++++++ modules/shared.py | 3 +++ 2 files changed, 11 insertions(+) diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8de..5556afc5e 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -3,6 +3,7 @@ import math import os import sys import warnings +import hashlib import torch import numpy as np @@ -476,6 +477,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, + "Init image hash": getattr(p, 'init_img_hash', None) } generation_params.update(p.extra_generation_params) @@ -1007,6 +1009,12 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.color_corrections = [] imgs = [] for img in self.init_images: + + # Save init image + if opts.save_init_img: + self.init_img_hash = hashlib.md5(img.tobytes()).hexdigest() + images.save_image(img, path=opts.outdir_init_images, basename=None,
forced_filename=self.init_img_hash, save_to_dirs=False) + image = images.flatten(img, opts.img2img_background_color) if crop_region is None and self.resize_mode != 3: diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecbd..69c2b21e6 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -39,6 +39,7 @@ restricted_opts = { "outdir_grids", "outdir_txt2img_grids", "outdir_save", + "outdir_init_images" } ui_reorder_categories = [ @@ -253,6 +254,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"), + "save_init_img": OptionInfo(True, "Save init images when using img2img"), "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), @@ -268,6 +270,7 @@ options_templates.update(options_section(('saving-paths', "Paths for saving"), { "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs), + "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), })) options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { From 63a6f9b4d98a192bb359910cb284cf00582baabf Mon Sep 17 00:00:00 2001 From: forsurefr <67145502+forsurefr@users.noreply.github.com> Date: Fri, 7 Apr 2023 12:13:51 +0300 Subject: [PATCH 078/160] Do not save init image by default --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 69c2b21e6..c5a1b5ad2 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -254,7 +254,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"), - "save_init_img": OptionInfo(True, "Save init images when using img2img"), + "save_init_img": OptionInfo(False, "Save init images when using img2img"), "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), From d609f6030ec464b371a899ced366c62bbd9a4a91 Mon Sep 17 00:00:00 2001 From: gk Date: Fri, 7 Apr 2023 21:04:46 +0900 Subject: [PATCH 079/160] Add [batch_number] and [generation_number] filename patterns --- modules/images.py | 6 ++++++ modules/processing.py | 4 +++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index b3535070b..5c0cf1d8d 100644 --- a/modules/images.py +++ b/modules/images.py @@ -352,6 +352,8 @@ class FilenameGenerator: 'prompt_no_styles': lambda self: self.prompt_no_style(), 'prompt_spaces': 
lambda self: sanitize_filename_part(self.prompt, replace_spaces=False), 'prompt_words': lambda self: self.prompt_words(), + 'batch_number': lambda self: self.p.batch_index + 1, + 'generation_number': lambda self: self.p.iteration * self.p.batch_size + self.p.batch_index + 1, } default_time_format = '%Y%m%d%H%M%S' @@ -403,6 +405,10 @@ class FilenameGenerator: for m in re_pattern.finditer(x): text, pattern = m.groups() + + if pattern is not None and (pattern.lower() == 'batch_number' and self.p.batch_size == 1 or pattern.lower() == 'generation_number' and self.p.n_iter == 1 and self.p.batch_size == 1): + continue + res += text if pattern is None: diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8de..0e6a60ba6 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -670,6 +670,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n) for i, x_sample in enumerate(x_samples_ddim): + p.batch_index = i + x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = x_sample.astype(np.uint8) @@ -718,7 +720,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if opts.return_mask: output_images.append(image_mask) - + if opts.return_mask_composite: output_images.append(image_mask_composite) From 27b9ec60e4ede748ec23615fecddb70e48daa623 Mon Sep 17 00:00:00 2001 From: Brad Smith Date: Sat, 8 Apr 2023 15:58:00 -0400 Subject: [PATCH 080/160] sort embeddings by name (case insensitive) --- modules/textual_inversion/textual_inversion.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index d2e62e589..7c50839f2 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -2,7 +2,7 @@ import os import sys import traceback import inspect -from collections import namedtuple +from collections import namedtuple, OrderedDict import torch import tqdm @@ -108,7 +108,7 @@ class DirWithTextualInversionEmbeddings: class EmbeddingDatabase: def __init__(self): self.ids_lookup = {} - self.word_embeddings = {} + self.word_embeddings = OrderedDict() self.skipped_embeddings = {} self.expected_shape = -1 self.embedding_dirs = {} @@ -233,6 +233,9 @@ class EmbeddingDatabase: self.load_from_dir(embdir) embdir.update() + # re-sort word_embeddings because load_from_dir may not load in alphabetic order. 
+ self.word_embeddings = {e.name: e for e in sorted(self.word_embeddings.values(), key=lambda e: e.name.lower())} + displayed_embeddings = (tuple(self.word_embeddings.keys()), tuple(self.skipped_embeddings.keys())) if self.previously_displayed_embeddings != displayed_embeddings: self.previously_displayed_embeddings = displayed_embeddings From 1aba8d82cbb816a755d012c5c729d8bafeb1b8ed Mon Sep 17 00:00:00 2001 From: yike5460 Date: Sun, 9 Apr 2023 22:22:43 +0800 Subject: [PATCH 081/160] feat: add branch support for extension installation --- modules/ui_extensions.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index efd6cda28..d9487f834 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -129,7 +129,7 @@ def normalize_git_url(url): return url -def install_extension_from_url(dirname, url): +def install_extension_from_url(dirname, branch_name, url): check_access() assert url, 'No URL specified' @@ -150,7 +150,7 @@ def install_extension_from_url(dirname, url): try: shutil.rmtree(tmpdir, True) - with git.Repo.clone_from(url, tmpdir) as repo: + with git.Repo.clone_from(url, tmpdir, branch=branch_name if branch_name else '') as repo: repo.remote().fetch() for submodule in repo.submodules: submodule.update() @@ -376,13 +376,14 @@ def create_ui(): with gr.TabItem("Install from URL"): install_url = gr.Text(label="URL for extension's git repository") + install_branch = gr.Text(label="Branch name for extension's git repository", placeholder="Leave empty for default branch") install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto") install_button = gr.Button(value="Install", variant="primary") install_result = gr.HTML(elem_id="extension_install_result") install_button.click( fn=modules.ui.wrap_gradio_call(install_extension_from_url, extra_outputs=[gr.update()]), - inputs=[install_dirname, install_url], + inputs=[install_dirname, install_branch, install_url], outputs=[extensions_table, install_result], ) From c19618f37059b425b1e53429ad8def2caa78cdec Mon Sep 17 00:00:00 2001 From: Ilya Khadykin Date: Sun, 9 Apr 2023 21:33:09 +0200 Subject: [PATCH 082/160] fix(extras): fix batch image processing on 'Extras\Batch Process' tab This change fixes an issue where an incorrect type was passed to the PIL.Image.open() function that caused the whole process to fail. Scope of this change is limited to only batch image processing, and it shouldn't affect other functionality. 
--- modules/postprocessing.py | 6 ++++-- modules/ui_postprocessing.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 09d8e6056..c27ad8db3 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -1,4 +1,6 @@ import os +import tempfile +from typing import List from PIL import Image @@ -6,7 +8,7 @@ from modules import shared, images, devices, scripts, scripts_postprocessing, ui from modules.shared import opts -def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True): +def run_postprocessing(extras_mode, image, image_folder: List[tempfile.NamedTemporaryFile], input_dir, output_dir, show_extras_results, *args, save_output: bool = True): devices.torch_gc() shared.state.begin() @@ -18,7 +20,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if extras_mode == 1: for img in image_folder: - image = Image.open(img) + image = Image.open(os.path.abspath(img.name)) image_data.append(image) image_names.append(os.path.splitext(img.orig_name)[0]) elif extras_mode == 2: diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index b418d9553..d278e1b60 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -13,7 +13,7 @@ def create_ui(): extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image") with gr.TabItem('Batch Process', elem_id="extras_batch_process_tab") as tab_batch: - image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file", elem_id="extras_image_batch") + image_batch = gr.Files(label="Batch Process", interactive=True, elem_id="extras_image_batch") with gr.TabItem('Batch from Directory', elem_id="extras_batch_directory_tab") as tab_batch_dir: extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.", elem_id="extras_batch_input_dir") From c84118d70d7c3dd2f741f9e89b9a8a4643f27f98 Mon Sep 17 00:00:00 2001 From: bluelovers Date: Tue, 4 Apr 2023 23:42:39 +0800 Subject: [PATCH 083/160] feat(xyz): try sort Checkpoint name values --- scripts/xyz_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 3895a795c..8aaee2441 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -211,7 +211,7 @@ axis_options = [ AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list), AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), AxisOptionImg2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]), - AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, choices=lambda: list(sd_models.checkpoints_list)), + AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)), AxisOption("Sigma Churn", float, apply_field("s_churn")), AxisOption("Sigma min", float, apply_field("s_tmin")), AxisOption("Sigma max", float, apply_field("s_tmax")), From 7c62bb2788d9cec10bab9d0154bd24f3658f7a83 Mon 
Sep 17 00:00:00 2001 From: yike5460 Date: Mon, 10 Apr 2023 09:38:26 +0800 Subject: [PATCH 084/160] fix: support for default branch --- modules/ui_extensions.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index d9487f834..b402bc8bf 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -150,10 +150,17 @@ def install_extension_from_url(dirname, branch_name, url): try: shutil.rmtree(tmpdir, True) - with git.Repo.clone_from(url, tmpdir, branch=branch_name if branch_name else '') as repo: - repo.remote().fetch() - for submodule in repo.submodules: - submodule.update() + if branch_name == '': + # if no branch is specified, use the default branch + with git.Repo.clone_from(url, tmpdir) as repo: + repo.remote().fetch() + for submodule in repo.submodules: + submodule.update() + else: + with git.Repo.clone_from(url, tmpdir, branch=branch_name) as repo: + repo.remote().fetch() + for submodule in repo.submodules: + submodule.update() try: os.rename(tmpdir, target_dir) except OSError as err: @@ -376,7 +383,7 @@ def create_ui(): with gr.TabItem("Install from URL"): install_url = gr.Text(label="URL for extension's git repository") - install_branch = gr.Text(label="Branch name for extension's git repository", placeholder="Leave empty for default branch") + install_branch = gr.Text(label="Specific branch name", placeholder="Leave empty for default main branch") install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto") install_button = gr.Button(value="Install", variant="primary") install_result = gr.HTML(elem_id="extension_install_result") From 9edd4b6e516ec327e15cc00a3933c681fc4b2f75 Mon Sep 17 00:00:00 2001 From: DGdev91 Date: Tue, 11 Apr 2023 11:22:28 +0200 Subject: [PATCH 085/160] Using --index-url instead of --extra-index-url following new PyTorch install command --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index 7d9a8538a..3b92e184f 100755 --- a/webui.sh +++ b/webui.sh @@ -118,7 +118,7 @@ case "$gpu_info" in esac if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]] then - export TORCH_COMMAND="pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 --extra-index-url https://download.pytorch.org/whl/rocm5.2" + export TORCH_COMMAND="pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 --index-url https://download.pytorch.org/whl/rocm5.2" fi for preq in "${GIT}" "${python_cmd}" From 8af4b3bbe46dcb7f701d3650b6e4d31d6dd268a7 Mon Sep 17 00:00:00 2001 From: gk Date: Thu, 13 Apr 2023 08:46:59 +0900 Subject: [PATCH 086/160] Try using TCMalloc on Linux by default --- README.md | 1 + webui-user.sh | 3 +++ webui.sh | 21 ++++++++++++++++++--- 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index b67e2296a..20f74531c 100644 --- a/README.md +++ b/README.md @@ -120,6 +120,7 @@ sudo pacman -S wget git python3 bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh) ``` 3. Run `webui.sh`. +4. Check `webui-user.sh` for options. ### Installation on Apple Silicon Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon). 
diff --git a/webui-user.sh b/webui-user.sh index bfa53cb7c..49a426ff9 100644 --- a/webui-user.sh +++ b/webui-user.sh @@ -43,4 +43,7 @@ # Uncomment to enable accelerated launch #export ACCELERATE="True" +# Uncomment to disable TCMalloc +#export NO_TCMALLOC="True" + ########################################### diff --git a/webui.sh b/webui.sh index 8cdad22d3..1122b9650 100755 --- a/webui.sh +++ b/webui.sh @@ -113,13 +113,13 @@ case "$gpu_info" in printf "Experimental support for Renoir: make sure to have at least 4GB of VRAM and 10GB of RAM or enable cpu mode: --use-cpu all --no-half" printf "\n%s\n" "${delimiter}" ;; - *) + *) ;; esac if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]] then export TORCH_COMMAND="pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2" -fi +fi for preq in "${GIT}" "${python_cmd}" do @@ -172,15 +172,30 @@ else exit 1 fi +# Try using TCMalloc on Linux +prepare_tcmalloc() { + if [[ "${OSTYPE}" == "linux"* ]] && [[ -z "${NO_TCMALLOC}" ]] && [[ -z "${LD_PRELOAD}" ]]; then + TCMALLOC="$(ldconfig -p | grep -Po "libtcmalloc.so.\d" | head -n 1)" + if [[ ! -z "${TCMALLOC}" ]]; then + echo "Using TCMalloc: ${TCMALLOC}" + export LD_PRELOAD="${TCMALLOC}" + else + printf "\e[1m\e[31mCannot locate TCMalloc (improves CPU memory usage)\e[0m\n" + fi + fi +} + if [[ ! -z "${ACCELERATE}" ]] && [ ${ACCELERATE}="True" ] && [ -x "$(command -v accelerate)" ] then printf "\n%s\n" "${delimiter}" printf "Accelerating launch.py..." printf "\n%s\n" "${delimiter}" + prepare_tcmalloc exec accelerate launch --num_cpu_threads_per_process=6 "${LAUNCH_SCRIPT}" "$@" else printf "\n%s\n" "${delimiter}" printf "Launching launch.py..." - printf "\n%s\n" "${delimiter}" + printf "\n%s\n" "${delimiter}" + prepare_tcmalloc exec "${python_cmd}" "${LAUNCH_SCRIPT}" "$@" fi From 7fb72edaffd3d4f2336d2478a424fc455f2376a6 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Thu, 13 Apr 2023 06:47:48 -0400 Subject: [PATCH 087/160] change index url --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index 37c8b5164..4256ef256 100644 --- a/launch.py +++ b/launch.py @@ -225,7 +225,7 @@ def run_extensions_installers(settings_file): def prepare_environment(): global skip_install - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118") + torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --index-url https://download.pytorch.org/whl/cu118") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') From fcc194afad8acd68c3fe3fd43e0bd3bac0371199 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 13 Apr 2023 22:42:20 +0300 Subject: [PATCH 088/160] prompt-bracket-checker: Simplify + improve error reporting --- .../javascript/prompt-bracket-checker.js | 119 +++++------------- 1 file changed, 29 insertions(+), 90 deletions(-) diff --git a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js index f0918e260..5c7a836a2 100644 --- a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js +++ b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js @@ -1,103 +1,42 @@ // Stable Diffusion WebUI - Bracket checker -// Version 1.0 -// By Hingashi no Florin/Bwin4L +// By Hingashi 
no Florin/Bwin4L & @akx // Counts open and closed brackets (round, square, curly) in the prompt and negative prompt text boxes in the txt2img and img2img tabs. // If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong. -function checkBrackets(evt, textArea, counterElt) { - errorStringParen = '(...) - Different number of opening and closing parentheses detected.\n'; - errorStringSquare = '[...] - Different number of opening and closing square brackets detected.\n'; - errorStringCurly = '{...} - Different number of opening and closing curly brackets detected.\n'; +function checkBrackets(textArea, counterElt) { + var counts = {}; + (textArea.value.match(/[(){}\[\]]/g) || []).forEach(bracket => { + counts[bracket] = (counts[bracket] || 0) + 1; + }); + var errors = []; - openBracketRegExp = /\(/g; - closeBracketRegExp = /\)/g; - - openSquareBracketRegExp = /\[/g; - closeSquareBracketRegExp = /\]/g; - - openCurlyBracketRegExp = /\{/g; - closeCurlyBracketRegExp = /\}/g; - - totalOpenBracketMatches = 0; - totalCloseBracketMatches = 0; - totalOpenSquareBracketMatches = 0; - totalCloseSquareBracketMatches = 0; - totalOpenCurlyBracketMatches = 0; - totalCloseCurlyBracketMatches = 0; - - openBracketMatches = textArea.value.match(openBracketRegExp); - if(openBracketMatches) { - totalOpenBracketMatches = openBracketMatches.length; - } - - closeBracketMatches = textArea.value.match(closeBracketRegExp); - if(closeBracketMatches) { - totalCloseBracketMatches = closeBracketMatches.length; - } - - openSquareBracketMatches = textArea.value.match(openSquareBracketRegExp); - if(openSquareBracketMatches) { - totalOpenSquareBracketMatches = openSquareBracketMatches.length; - } - - closeSquareBracketMatches = textArea.value.match(closeSquareBracketRegExp); - if(closeSquareBracketMatches) { - totalCloseSquareBracketMatches = closeSquareBracketMatches.length; - } - - openCurlyBracketMatches = textArea.value.match(openCurlyBracketRegExp); - if(openCurlyBracketMatches) { - totalOpenCurlyBracketMatches = openCurlyBracketMatches.length; - } - - closeCurlyBracketMatches = textArea.value.match(closeCurlyBracketRegExp); - if(closeCurlyBracketMatches) { - totalCloseCurlyBracketMatches = closeCurlyBracketMatches.length; - } - - if(totalOpenBracketMatches != totalCloseBracketMatches) { - if(!counterElt.title.includes(errorStringParen)) { - counterElt.title += errorStringParen; + function checkPair(open, close, kind) { + if (counts[open] !== counts[close]) { + errors.push( + `${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.` + ); } - } else { - counterElt.title = counterElt.title.replace(errorStringParen, ''); } - if(totalOpenSquareBracketMatches != totalCloseSquareBracketMatches) { - if(!counterElt.title.includes(errorStringSquare)) { - counterElt.title += errorStringSquare; - } - } else { - counterElt.title = counterElt.title.replace(errorStringSquare, ''); - } + checkPair('(', ')', 'round brackets'); + checkPair('[', ']', 'square brackets'); + checkPair('{', '}', 'curly brackets'); + counterElt.title = errors.join('\n'); + counterElt.classList.toggle('error', errors.length !== 0); +} - if(totalOpenCurlyBracketMatches != totalCloseCurlyBracketMatches) { - if(!counterElt.title.includes(errorStringCurly)) { - counterElt.title += errorStringCurly; - } - } else { - counterElt.title = counterElt.title.replace(errorStringCurly, ''); - } +function setupBracketChecking(id_prompt, id_counter) { + var textarea = 
gradioApp().querySelector("#" + id_prompt + " > label > textarea"); + var counter = gradioApp().getElementById(id_counter) - if(counterElt.title != '') { - counterElt.classList.add('error'); - } else { - counterElt.classList.remove('error'); + if (textarea && counter) { + textarea.addEventListener("input", () => checkBrackets(textarea, counter)); } } -function setupBracketChecking(id_prompt, id_counter){ - var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea"); - var counter = gradioApp().getElementById(id_counter) - - textarea.addEventListener("input", function(evt){ - checkBrackets(evt, textarea, counter) - }); -} - -onUiLoaded(function(){ - setupBracketChecking('txt2img_prompt', 'txt2img_token_counter') - setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter') - setupBracketChecking('img2img_prompt', 'img2img_token_counter') - setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter') -}) \ No newline at end of file +onUiLoaded(function () { + setupBracketChecking('txt2img_prompt', 'txt2img_token_counter'); + setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter'); + setupBracketChecking('img2img_prompt', 'img2img_token_counter'); + setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter'); +}); From dab5002c59ce1f68deae5e6e0c03e5e2c27155db Mon Sep 17 00:00:00 2001 From: Brad Smith Date: Thu, 13 Apr 2023 23:12:33 -0400 Subject: [PATCH 089/160] sort self.word_embeddings without instantiating it a new dict --- modules/textual_inversion/textual_inversion.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 7c50839f2..379df2430 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -2,7 +2,7 @@ import os import sys import traceback import inspect -from collections import namedtuple, OrderedDict +from collections import namedtuple import torch import tqdm @@ -108,7 +108,7 @@ class DirWithTextualInversionEmbeddings: class EmbeddingDatabase: def __init__(self): self.ids_lookup = {} - self.word_embeddings = OrderedDict() + self.word_embeddings = {} self.skipped_embeddings = {} self.expected_shape = -1 self.embedding_dirs = {} @@ -234,7 +234,10 @@ class EmbeddingDatabase: embdir.update() # re-sort word_embeddings because load_from_dir may not load in alphabetic order. - self.word_embeddings = {e.name: e for e in sorted(self.word_embeddings.values(), key=lambda e: e.name.lower())} + # using a temporary copy so we don't reinitialize self.word_embeddings in case other objects have a reference to it. 
+ sorted_word_embeddings = {e.name: e for e in sorted(self.word_embeddings.values(), key=lambda e: e.name.lower())} + self.word_embeddings.clear() + self.word_embeddings.update(sorted_word_embeddings) displayed_embeddings = (tuple(self.word_embeddings.keys()), tuple(self.skipped_embeddings.keys())) if self.previously_displayed_embeddings != displayed_embeddings: From 3af152d488db0c521f6058676e1a65c7157ccc14 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 14 Apr 2023 17:17:14 -0400 Subject: [PATCH 090/160] Fix image mask composite for weird resolutions --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8de..f49992d90 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -708,7 +708,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay: image_mask = p.mask_for_overlay.convert('RGB') - image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), p.mask_for_overlay.convert('L')).convert('RGBA') + image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') if opts.save_mask: images.save_image(image_mask, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask") From fbab3fc6d122fb4e6648dd82291a70fc348c0b4a Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 14 Apr 2023 17:24:55 -0400 Subject: [PATCH 091/160] Only handle image mask if any option enabled --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index f49992d90..5c6edc60b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -706,7 +706,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: image.info["parameters"] = text output_images.append(image) - if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay: + if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]): image_mask = p.mask_for_overlay.convert('RGB') image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') From 02e351880796422eac3bbaf7aa86332b588651ce Mon Sep 17 00:00:00 2001 From: tqwuliao Date: Sat, 15 Apr 2023 23:20:08 +0800 Subject: [PATCH 092/160] Add new FilenameGenerator [hasprompt..] --- javascript/hints.js | 4 ++-- modules/images.py | 17 +++++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index 7f4101b23..730ce7bd4 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -67,8 +67,8 @@ titles = { "Interrogate": "Reconstruct prompt from existing image and put it into the prompt field.", - "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime], [datetime