Merge branch 'AUTOMATIC1111:master' into master

commit 34c0f499c5
Author: ParityError, 2023-03-17 00:36:17 -07:00 (committed by GitHub)
11 changed files with 165 additions and 8 deletions

View File

@@ -3,7 +3,9 @@ import os
 import re
 import torch
-from modules import shared, devices, sd_models
+from modules import shared, devices, sd_models, errors

+metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}

 re_digits = re.compile(r"\d+")
 re_unet_down_blocks = re.compile(r"lora_unet_down_blocks_(\d+)_attentions_(\d+)_(.+)")

@@ -43,6 +45,23 @@ class LoraOnDisk:
     def __init__(self, name, filename):
         self.name = name
         self.filename = filename
+        self.metadata = {}

+        _, ext = os.path.splitext(filename)
+        if ext.lower() == ".safetensors":
+            try:
+                self.metadata = sd_models.read_metadata_from_safetensors(filename)
+            except Exception as e:
+                errors.display(e, f"reading lora {filename}")

+        if self.metadata:
+            m = {}
+            for k, v in sorted(self.metadata.items(), key=lambda x: metadata_tags_order.get(x[0], 999)):
+                m[k] = v
+            self.metadata = m

+        self.ssmd_cover_images = self.metadata.pop('ssmd_cover_images', None)  # those are cover images and they are too big to display in UI as text

 class LoraModule:

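A minimal standalone sketch of how the metadata_tags_order table above orders safetensors metadata: keys listed in the table come first, in ascending rank, and any key not listed falls back to rank 999 and sorts last. The tag values below are made up for illustration only.

metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}

# Hypothetical metadata, roughly the shape returned by read_metadata_from_safetensors().
metadata = {
    "ss_tag_frequency": {"1girl": 120, "landscape": 15},
    "some_unknown_tag": "kept, but sorted last",
    "ss_resolution": "512x512",
    "ss_sd_model_name": "v1-5-pruned",
}

ordered = dict(sorted(metadata.items(), key=lambda x: metadata_tags_order.get(x[0], 999)))
print(list(ordered))  # ['ss_sd_model_name', 'ss_resolution', 'ss_tag_frequency', 'some_unknown_tag']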
View File

@@ -23,6 +23,7 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
             "search_term": self.search_terms_from_path(lora_on_disk.filename),
             "prompt": json.dumps(f"<lora:{name}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
             "local_preview": f"{path}.{shared.opts.samples_format}",
+            "metadata": json.dumps(lora_on_disk.metadata, indent=4) if lora_on_disk.metadata else None,
         }

     def allowed_directories_for_previews(self):

View File

@@ -1,4 +1,6 @@
 <div class='card' {preview_html} onclick={card_clicked}>
+    {metadata_button}
     <div class='actions'>
         <div class='additional'>
             <ul>

View File

@@ -103,3 +103,39 @@ function extraNetworksSearchButton(tabs_id, event){
     searchTextarea.value = text
     updateInput(searchTextarea)
 }

+var globalPopup = null;
+var globalPopupInner = null;
+function popup(contents){
+    if(! globalPopup){
+        globalPopup = document.createElement('div')
+        globalPopup.onclick = function(){ globalPopup.style.display = "none"; };
+        globalPopup.classList.add('global-popup');

+        var close = document.createElement('div')
+        close.classList.add('global-popup-close');
+        close.onclick = function(){ globalPopup.style.display = "none"; };
+        close.title = "Close";
+        globalPopup.appendChild(close)

+        globalPopupInner = document.createElement('div')
+        globalPopupInner.onclick = function(event){ event.stopPropagation(); return false; };
+        globalPopupInner.classList.add('global-popup-inner');
+        globalPopup.appendChild(globalPopupInner)

+        gradioApp().appendChild(globalPopup);
+    }

+    globalPopupInner.innerHTML = '';
+    globalPopupInner.appendChild(contents);

+    globalPopup.style.display = "flex";
+}

+function extraNetworksShowMetadata(text){
+    elem = document.createElement('pre')
+    elem.classList.add('popup-metadata');
+    elem.textContent = text;

+    popup(elem);
+}

View File

@@ -573,6 +573,11 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
             os.replace(temp_file_path, filename_without_extension + extension)

     fullfn_without_extension, extension = os.path.splitext(params.filename)
+    if hasattr(os, 'statvfs'):
+        max_name_len = os.statvfs(path).f_namemax
+        fullfn_without_extension = fullfn_without_extension[:max_name_len - max(4, len(extension))]
+        params.filename = fullfn_without_extension + extension
+        fullfn = params.filename
     _atomically_save_image(image, fullfn_without_extension, extension)

     image.already_saved_as = fullfn

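The five added lines above trim the output filename so it stays within the filesystem's maximum name length before the image is written. A rough standalone sketch of the same arithmetic, with a hypothetical overlong base name and a hypothetical helper; os.statvfs only exists on POSIX systems, which is what the hasattr guard is for, and f_namemax is typically 255 on ext4:

import os

def fit_to_namemax(fullfn_without_extension, extension, directory="."):
    # Same idea as the diff: keep the name within f_namemax, reserving room for the
    # extension (at least 4 characters, e.g. ".png").
    if hasattr(os, 'statvfs'):
        max_name_len = os.statvfs(directory).f_namemax
        fullfn_without_extension = fullfn_without_extension[:max_name_len - max(4, len(extension))]
    return fullfn_without_extension + extension

print(len(fit_to_namemax("x" * 400, ".png")))  # 255 on a filesystem whose f_namemax is 255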
View File

@@ -71,7 +71,7 @@ class UniPCSampler(object):
         # sampling
         C, H, W = shape
         size = (batch_size, C, H, W)
-        print(f'Data shape for UniPC sampling is {size}')
+        # print(f'Data shape for UniPC sampling is {size}')

         device = self.model.betas.device
         if x_T is None:

View File

@@ -1,6 +1,7 @@
 import torch
 import torch.nn.functional as F
 import math
+from tqdm.auto import trange

 class NoiseScheduleVP:

@@ -750,7 +751,7 @@ class UniPC:
         if method == 'multistep':
             assert steps >= order, "UniPC order must be < sampling steps"
             timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
-            print(f"Running UniPC Sampling with {timesteps.shape[0]} timesteps, order {order}")
+            #print(f"Running UniPC Sampling with {timesteps.shape[0]} timesteps, order {order}")
             assert timesteps.shape[0] - 1 == steps
             with torch.no_grad():
                 vec_t = timesteps[0].expand((x.shape[0]))

@@ -766,7 +767,7 @@ class UniPC:
                     self.after_update(x, model_x)
                     model_prev_list.append(model_x)
                     t_prev_list.append(vec_t)
-                for step in range(order, steps + 1):
+                for step in trange(order, steps + 1):
                     vec_t = timesteps[step].expand(x.shape[0])
                     if lower_order_final:
                         step_order = min(order, steps + 1 - step)

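trange from tqdm.auto is a drop-in replacement for range that draws a progress bar, so the change above surfaces per-step UniPC progress in the console instead of a one-off print. A trivial example of the substitution:

import time
from tqdm.auto import trange

# Identical iteration values to range(3, 10), plus a live progress bar.
for step in trange(3, 10):
    time.sleep(0.05)  # stand-in for one sampling step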
View File

@@ -583,6 +583,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
         if state.job_count == -1:
             state.job_count = p.n_iter

+        extra_network_data = None
         for n in range(p.n_iter):
             p.iteration = n

@@ -712,7 +713,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             if opts.grid_save:
                 images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)

-    if not p.disable_extra_networks:
+    if not p.disable_extra_networks and extra_network_data:
         extra_networks.deactivate(p, extra_network_data)

     devices.torch_gc()

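The two hunks above work together: extra_network_data is now bound to None before the loop, and deactivation only runs when it was actually populated, so a zero-iteration run or an early interruption no longer risks an unbound name or a deactivate call with nothing to deactivate. Reduced to a hypothetical minimum:

def run(n_iter):
    extra_network_data = None                       # bound up front, as in the diff
    for n in range(n_iter):
        extra_network_data = {"lora": ["example"]}  # made-up per-iteration parse result
    if extra_network_data:                          # falsy when the loop never assigned it
        print("deactivating", extra_network_data)

run(0)  # nothing printed, and no unbound-name error
run(2)  # deactivating {'lora': ['example']}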
View File

@@ -210,6 +210,30 @@ def get_state_dict_from_checkpoint(pl_sd):
     return pl_sd

+def read_metadata_from_safetensors(filename):
+    import json

+    with open(filename, mode="rb") as file:
+        metadata_len = file.read(8)
+        metadata_len = int.from_bytes(metadata_len, "little")
+        json_start = file.read(2)

+        assert metadata_len > 2 and json_start in (b'{"', b"{'"), f"{filename} is not a safetensors file"
+        json_data = json_start + file.read(metadata_len-2)
+        json_obj = json.loads(json_data)

+        res = {}
+        for k, v in json_obj.get("__metadata__", {}).items():
+            res[k] = v
+            if isinstance(v, str) and v[0:1] == '{':
+                try:
+                    res[k] = json.loads(v)
+                except Exception as e:
+                    pass

+        return res

 def read_state_dict(checkpoint_file, print_global_state=False, map_location=None):
     _, extension = os.path.splitext(checkpoint_file)
     if extension.lower() == ".safetensors":

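read_metadata_from_safetensors above relies on the safetensors container layout: the file starts with an 8-byte little-endian integer giving the length of a JSON header, and training metadata, if present, lives under the header's "__metadata__" key as string values, some of which are themselves JSON. A hedged round-trip sketch, using a made-up temp file and made-up tags, written in that layout and read back the same way:

import json
import os
import struct
import tempfile

# Build a minimal header in the safetensors layout (no tensor data, metadata only).
header = {"__metadata__": {"ss_resolution": "512x512", "ss_tag_frequency": json.dumps({"1girl": 42})}}
header_bytes = json.dumps(header).encode("utf-8")

path = os.path.join(tempfile.gettempdir(), "metadata_demo.safetensors")  # hypothetical file
with open(path, "wb") as file:
    file.write(struct.pack("<Q", len(header_bytes)))  # 8-byte little-endian header length
    file.write(header_bytes)                          # a real file would append tensor bytes here

# Read it back with the same logic as the function above.
with open(path, "rb") as file:
    metadata_len = int.from_bytes(file.read(8), "little")
    metadata = json.loads(file.read(metadata_len)).get("__metadata__", {})

print(metadata["ss_resolution"])                # 512x512
print(json.loads(metadata["ss_tag_frequency"])) # {'1girl': 42}, nested JSON as unpacked by the loop above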
View File

@@ -30,8 +30,8 @@ def add_pages_to_demo(app):
         raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.")

     ext = os.path.splitext(filename)[1].lower()
-    if ext not in (".png", ".jpg"):
-        raise ValueError(f"File cannot be fetched: {filename}. Only png and jpg.")
+    if ext not in (".png", ".jpg", ".webp"):
+        raise ValueError(f"File cannot be fetched: {filename}. Only png and jpg and webp.")

     # would profit from returning 304
     return FileResponse(filename, headers={"Accept-Ranges": "bytes"})

@@ -124,6 +124,12 @@ class ExtraNetworksPage:
         if onclick is None:
             onclick = '"' + html.escape(f"""return cardClicked({json.dumps(tabname)}, {item["prompt"]}, {"true" if self.allow_negative_prompt else "false"})""") + '"'

+        metadata_button = ""
+        metadata = item.get("metadata")
+        if metadata:
+            metadata_onclick = '"' + html.escape(f"""extraNetworksShowMetadata({json.dumps(metadata)}); return false;""") + '"'
+            metadata_button = f"<div class='metadata-button' title='Show metadata' onclick={metadata_onclick}></div>"

         args = {
             "preview_html": "style='background-image: url(\"" + html.escape(preview) + "\")'" if preview else '',
             "prompt": item.get("prompt", None),

@@ -134,6 +140,7 @@ class ExtraNetworksPage:
             "card_clicked": onclick,
             "save_card_preview": '"' + html.escape(f"""return saveCardPreview(event, {json.dumps(tabname)}, {json.dumps(item["local_preview"])})""") + '"',
             "search_term": item.get("search_term", ""),
+            "metadata_button": metadata_button,
         }

         return self.card_page.format(**args)

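metadata_button ends up as just another placeholder fed into the card template (the {metadata_button} slot added to the card HTML earlier in this commit), so cards without metadata render no button at all: the empty string simply vanishes in str.format. A toy sketch with a simplified stand-in template and made-up card names:

# Simplified stand-in for the card template shown earlier in this commit.
card_page = "<div class='card'>{metadata_button}<span class='name'>{name}</span></div>"

with_metadata = card_page.format(name="my_lora", metadata_button="<div class='metadata-button' title='Show metadata'></div>")
without_metadata = card_page.format(name="plain_lora", metadata_button="")

print(with_metadata)
print(without_metadata)  # no metadata-button markup at all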
View File

@@ -362,6 +362,46 @@ input[type="range"]{
     height: 100%;
 }

+.popup-metadata{
+    color: black;
+    background: white;
+    display: inline-block;
+    padding: 1em;
+    white-space: pre-wrap;
+}

+.global-popup{
+    display: flex;
+    position: fixed;
+    z-index: 1001;
+    left: 0;
+    top: 0;
+    width: 100%;
+    height: 100%;
+    overflow: auto;
+    background-color: rgba(20, 20, 20, 0.95);
+}

+.global-popup-close:before {
+    content: "×";
+}

+.global-popup-close{
+    position: fixed;
+    right: 0.25em;
+    top: 0;
+    cursor: pointer;
+    color: white;
+    font-size: 32pt;
+}

+.global-popup-inner{
+    display: inline-block;
+    margin: auto;
+    padding: 2em;
+}

 #lightboxModal{
     display: none;
     position: fixed;

@@ -837,6 +877,27 @@ footer {
     margin-left: 0.5em;
 }

+.extra-network-cards .card .metadata-button:before, .extra-network-thumbs .card .metadata-button:before{
+    content: "🛈";
+}

+.extra-network-cards .card .metadata-button, .extra-network-thumbs .card .metadata-button{
+    display: none;
+    position: absolute;
+    right: 0;
+    color: white;
+    text-shadow: 2px 2px 3px black;
+    padding: 0.25em;
+    font-size: 22pt;
+}

+.extra-network-cards .card:hover .metadata-button, .extra-network-thumbs .card:hover .metadata-button{
+    display: inline-block;
+}

+.extra-network-cards .card .metadata-button:hover, .extra-network-thumbs .card .metadata-button:hover{
+    color: red;
+}

 .extra-network-thumbs {
     display: flex;
     flex-flow: row wrap;