Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git (synced 2024-06-07 21:20:49 +00:00)
Commit d66bc86159
.github/PULL_REQUEST_TEMPLATE/pull_request_template.md (vendored, new file, 28 lines)
@@ -0,0 +1,28 @@
+# Please read the [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing) before submitting a pull request!
+
+If you have a large change, pay special attention to this paragraph:
+
+> Before making changes, if you think that your feature will result in more than 100 lines changing, find me and talk to me about the feature you are proposing. It pains me to reject the hard work someone else did, but I won't add everything to the repo, and it's better if the rejection happens before you have to waste time working on the feature.
+
+Otherwise, after making sure you're following the rules described in the wiki page, remove this section and continue on.
+
+**Describe what this pull request is trying to achieve.**
+
+A clear and concise description of what you're trying to accomplish with this, so your intent doesn't have to be extracted from your code.
+
+**Additional notes and description of your changes**
+
+More technical discussion about your changes goes here, plus anything that a maintainer might have to specifically take a look at, or be wary of.
+
+**Environment this was tested in**
+
+List the environment you have developed / tested this on. As per the contributing page, changes should be able to work on Windows out of the box.
+ - OS: [e.g. Windows, Linux]
+ - Browser: [e.g. Chrome, Safari]
+ - Graphics card: [e.g. NVIDIA RTX 2080 8GB, AMD RX 6600 8GB]
+
+**Screenshots or videos of your changes**
+
+If applicable, add screenshots or a video showing off your changes. If they edit an existing UI, they should ideally contain a comparison of what used to be there, before your changes were made.
+
+This is **required** for anything that touches the user interface.
@@ -147,10 +147,6 @@ generateOnRepeatId = appendContextMenuOption('#txt2img_generate','Generate forev
 
 cancelGenerateForever = function(){
     clearInterval(window.generateOnRepeatInterval)
-    let interruptbutton = gradioApp().querySelector('#txt2img_interrupt');
-    if(interruptbutton.offsetParent){
-        interruptbutton.click();
-    }
 }
 
 appendContextMenuOption('#txt2img_interrupt','Cancel generate forever',cancelGenerateForever)
@@ -79,6 +79,8 @@ titles = {
     "Highres. fix": "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition",
     "Scale latent": "Upscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.",
 
+    "Eta noise seed delta": "If this value is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.",
+    "Do not add watermark to images": "If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.",
 }
 
 
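Orientation note (not part of this commit): the "Eta noise seed delta" tooltip above corresponds to the processing.py change further down, where the sampler's extra noise is seeded with `seed + eta_noise_seed_delta` instead of `seed`. A minimal, self-contained sketch of that idea; the tensor shape and delta value are illustrative, not webui defaults:

    import torch

    def eta_noise(seed, delta, shape=(1, 4, 64, 64)):
        # seed the RNG with seed + delta before drawing the sampler's extra noise,
        # so the same seed with a different delta yields a different variation
        torch.manual_seed(seed + delta)
        return torch.randn(shape)

    same = torch.equal(eta_noise(100, 0), eta_noise(100, 0))        # True: delta 0 reproduces the image
    diff = torch.equal(eta_noise(100, 0), eta_noise(100, 31337))    # False: a non-zero delta varies it
    print(same, diff)
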
@@ -127,7 +127,7 @@ def prepare_enviroment():
 
     if not is_installed("xformers") and xformers and platform.python_version().startswith("3.10"):
         if platform.system() == "Windows":
-            run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
+            run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/c/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
         elif platform.system() == "Linux":
             run_pip("install xformers", "xformers")
 
@@ -36,6 +36,7 @@ errors.run(enable_tf32, "Enabling TF32")
 
 device = device_gfpgan = device_bsrgan = device_esrgan = device_scunet = device_codeformer = get_optimal_device()
 dtype = torch.float16
+dtype_vae = torch.float16
 
 def randn(seed, shape):
     # Pytorch currently doesn't handle setting randomness correctly when the metal backend is used.
@@ -59,9 +60,12 @@ def randn_without_seed(shape):
     return torch.randn(shape, device=device)
 
 
-def autocast():
+def autocast(disable=False):
     from modules import shared
 
+    if disable:
+        return contextlib.nullcontext()
+
     if dtype == torch.float32 or shared.cmd_opts.precision == "full":
         return contextlib.nullcontext()
 
@@ -207,7 +207,7 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
     # enables the generation of additional tensors with noise that the sampler will use during its processing.
     # Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to
     # produce the same images as with two batches [100], [101].
-    if p is not None and p.sampler is not None and len(seeds) > 1 and opts.enable_batch_seeds:
+    if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or opts.eta_noise_seed_delta > 0):
         sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))]
     else:
         sampler_noises = None
@@ -247,6 +247,9 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
         if sampler_noises is not None:
             cnt = p.sampler.number_of_needed_noises(p)
 
+            if opts.eta_noise_seed_delta > 0:
+                torch.manual_seed(seed + opts.eta_noise_seed_delta)
+
             for j in range(cnt):
                 sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape)))
 
@@ -259,6 +262,13 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
     return x
 
 
+def decode_first_stage(model, x):
+    with devices.autocast(disable=x.dtype == devices.dtype_vae):
+        x = model.decode_first_stage(x)
+
+    return x
+
+
 def get_fixed_seed(seed):
     if seed is None or seed == '' or seed == -1:
         return int(random.randrange(4294967294))
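For readers skimming the diff, a stripped-down, self-contained sketch of the pattern the new decode_first_stage helper relies on: the `disable` flag added to devices.autocast above lets the VAE decode run outside autocast when the latent is already in the VAE dtype (e.g. float32 with --no-half-vae). The dtype values and the stand-in decode below are assumptions for illustration, not the webui code:

    import contextlib
    import torch

    dtype = torch.float16        # UNet dtype, as set in modules/devices.py
    dtype_vae = torch.float32    # what --no-half-vae selects in modules/sd_models.py

    def autocast(disable=False):
        # simplified stand-in for the modified modules.devices.autocast
        if disable or dtype == torch.float32:
            return contextlib.nullcontext()
        return torch.autocast("cuda")

    latent = torch.randn(1, 4, 64, 64).to(dtype_vae)
    # autocast is skipped because the latent is already in the VAE dtype,
    # so the (stand-in) decode below runs in full precision
    with autocast(disable=latent.dtype == dtype_vae):
        decoded = torch.tanh(latent)
    print(decoded.dtype)  # torch.float32
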
@@ -294,6 +304,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
         "Denoising strength": getattr(p, 'denoising_strength', None),
         "Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
         "Clip skip": None if clip_skip <= 1 else clip_skip,
+        "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta,
     }
 
     generation_params.update(p.extra_generation_params)
@@ -398,9 +409,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
             # use the image collected previously in sampler loop
             samples_ddim = shared.state.current_latent
 
-        samples_ddim = samples_ddim.to(devices.dtype)
-
-        x_samples_ddim = p.sd_model.decode_first_stage(samples_ddim)
+        samples_ddim = samples_ddim.to(devices.dtype_vae)
+        x_samples_ddim = decode_first_stage(p.sd_model, samples_ddim)
         x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
 
         del samples_ddim
@@ -533,7 +543,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         if self.scale_latent:
             samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
         else:
-            decoded_samples = self.sd_model.decode_first_stage(samples)
+            decoded_samples = decode_first_stage(self.sd_model, samples)
 
             if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None":
                 decoded_samples = torch.nn.functional.interpolate(decoded_samples, size=(self.height, self.width), mode="bilinear")
@@ -12,6 +12,10 @@ import _codecs
 import zipfile
 
 
+# PyTorch 1.13 and later have _TypedStorage renamed to TypedStorage
+TypedStorage = torch.storage.TypedStorage if hasattr(torch.storage, 'TypedStorage') else torch.storage._TypedStorage
+
+
 def encode(*args):
     out = _codecs.encode(*args)
     return out
@@ -20,7 +24,7 @@ def encode(*args):
 class RestrictedUnpickler(pickle.Unpickler):
     def persistent_load(self, saved_id):
         assert saved_id[0] == 'storage'
-        return torch.storage._TypedStorage()
+        return TypedStorage()
 
     def find_class(self, module, name):
         if module == 'collections' and name == 'OrderedDict':
@@ -23,7 +23,7 @@ def apply_optimizations():
 
     ldm.modules.diffusionmodules.model.nonlinearity = silu
 
-    if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and torch.cuda.get_device_capability(shared.device) == (8, 6)):
+    if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (8, 6)):
         print("Applying xformers cross attention optimization.")
         ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
         ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
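A small, self-contained illustration of the widened check above (not part of the commit): torch.cuda.get_device_capability returns a (major, minor) tuple, and tuple comparison is lexicographic, so the new condition accepts CUDA GPUs from compute capability 6.0 through 8.6 rather than only exactly 8.6. The fallback value is an assumption so the snippet also runs without a GPU:

    import torch

    if torch.cuda.is_available():
        cap = torch.cuda.get_device_capability(torch.device("cuda"))
    else:
        cap = (7, 5)  # assumed example value (e.g. a Turing card), not from the webui code

    # lexicographic tuple comparison: (6, 0) <= (7, 5) <= (8, 6) is True
    print(cap, (6, 0) <= cap <= (8, 6))
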
@@ -43,10 +43,7 @@ def undo_optimizations():
 
 
 def get_target_prompt_token_count(token_count):
-    if token_count < 75:
-        return 75
-
-    return math.ceil(token_count / 10) * 10
+    return math.ceil(max(token_count, 1) / 75) * 75
 
 
 class StableDiffusionModelHijack:
@@ -127,7 +124,6 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
             self.token_mults[ident] = mult
 
     def tokenize_line(self, line, used_custom_terms, hijack_comments):
-        id_start = self.wrapped.tokenizer.bos_token_id
         id_end = self.wrapped.tokenizer.eos_token_id
 
         if opts.enable_emphasis:
@@ -154,7 +150,13 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
                     i += 1
                 else:
                     emb_len = int(embedding.vec.shape[0])
-                    fixes.append((len(remade_tokens), embedding))
+                    iteration = len(remade_tokens) // 75
+                    if (len(remade_tokens) + emb_len) // 75 != iteration:
+                        rem = (75 * (iteration + 1) - len(remade_tokens))
+                        remade_tokens += [id_end] * rem
+                        multipliers += [1.0] * rem
+                        iteration += 1
+                    fixes.append((iteration, (len(remade_tokens) % 75, embedding)))
                     remade_tokens += [0] * emb_len
                     multipliers += [weight] * emb_len
                     used_custom_terms.append((embedding.name, embedding.checksum()))
@@ -162,10 +164,10 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
 
         token_count = len(remade_tokens)
         prompt_target_length = get_target_prompt_token_count(token_count)
-        tokens_to_add = prompt_target_length - len(remade_tokens) + 1
+        tokens_to_add = prompt_target_length - len(remade_tokens)
 
-        remade_tokens = [id_start] + remade_tokens + [id_end] * tokens_to_add
-        multipliers = [1.0] + multipliers + [1.0] * tokens_to_add
+        remade_tokens = remade_tokens + [id_end] * tokens_to_add
+        multipliers = multipliers + [1.0] * tokens_to_add
 
         return remade_tokens, fixes, multipliers, token_count
 
@@ -260,29 +262,55 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
             hijack_fixes.append(fixes)
             batch_multipliers.append(multipliers)
         return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
 
     def forward(self, text):
-        if opts.use_old_emphasis_implementation:
+        use_old = opts.use_old_emphasis_implementation
+        if use_old:
             batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text)
         else:
             batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text)
 
-        self.hijack.fixes = hijack_fixes
         self.hijack.comments += hijack_comments
 
         if len(used_custom_terms) > 0:
             self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
 
+        if use_old:
+            self.hijack.fixes = hijack_fixes
+            return self.process_tokens(remade_batch_tokens, batch_multipliers)
+
+        z = None
+        i = 0
+        while max(map(len, remade_batch_tokens)) != 0:
+            rem_tokens = [x[75:] for x in remade_batch_tokens]
+            rem_multipliers = [x[75:] for x in batch_multipliers]
+
+            self.hijack.fixes = []
+            for unfiltered in hijack_fixes:
+                fixes = []
+                for fix in unfiltered:
+                    if fix[0] == i:
+                        fixes.append(fix[1])
+                self.hijack.fixes.append(fixes)
+
+            z1 = self.process_tokens([x[:75] for x in remade_batch_tokens], [x[:75] for x in batch_multipliers])
+            z = z1 if z is None else torch.cat((z, z1), axis=-2)
+
+            remade_batch_tokens = rem_tokens
+            batch_multipliers = rem_multipliers
+            i += 1
+
+        return z
+
+    def process_tokens(self, remade_batch_tokens, batch_multipliers):
+        if not opts.use_old_emphasis_implementation:
+            remade_batch_tokens = [[self.wrapped.tokenizer.bos_token_id] + x[:75] + [self.wrapped.tokenizer.eos_token_id] for x in remade_batch_tokens]
+            batch_multipliers = [[1.0] + x[:75] + [1.0] for x in batch_multipliers]
+
-        target_token_count = get_target_prompt_token_count(token_count) + 2
-
-        position_ids_array = [min(x, 75) for x in range(target_token_count-1)] + [76]
-        position_ids = torch.asarray(position_ids_array, device=devices.device).expand((1, -1))
-
-        remade_batch_tokens_of_same_length = [x + [self.wrapped.tokenizer.eos_token_id] * (target_token_count - len(x)) for x in remade_batch_tokens]
-        tokens = torch.asarray(remade_batch_tokens_of_same_length).to(device)
-
-        outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids, output_hidden_states=-opts.CLIP_stop_at_last_layers)
+        tokens = torch.asarray(remade_batch_tokens).to(device)
+        outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)
 
         if opts.CLIP_stop_at_last_layers > 1:
             z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
             z = self.wrapped.transformer.text_model.final_layer_norm(z)
@@ -290,7 +318,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
             z = outputs.last_hidden_state
 
         # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
-        batch_multipliers_of_same_length = [x + [1.0] * (target_token_count - len(x)) for x in batch_multipliers]
+        batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers]
         batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(device)
         original_mean = z.mean()
         z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
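The reworked forward above is what lets prompts run past 75 tokens: the remade token lists are consumed in 75-token windows, and the embeddings from each window are concatenated along the token axis. A stripped-down sketch of just that loop (not part of the commit); the zero tensor stands in for process_tokens, and only the 75-token chunk size is taken from the diff:

    import torch

    def encode_in_chunks(tokens, chunk=75, dim=8):
        z = None
        while len(tokens) != 0:
            current, tokens = tokens[:chunk], tokens[chunk:]
            # stand-in for self.process_tokens(): one embedding vector per token in this window
            z1 = torch.zeros(1, len(current), dim)
            z = z1 if z is None else torch.cat((z, z1), axis=-2)
        return z

    print(encode_in_chunks(list(range(180))).shape)  # torch.Size([1, 180, 8]) -> three windows: 75 + 75 + 30
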
@@ -13,8 +13,6 @@ from modules import shared
 if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
     try:
         import xformers.ops
-        import functorch
-        xformers._is_functorch_available = True
         shared.xformers_available = True
     except Exception:
         print("Cannot import xformers", file=sys.stderr)
@@ -149,8 +149,13 @@ def load_model_weights(model, checkpoint_info):
         model.half()
 
     devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
+    devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
 
     vae_file = os.path.splitext(checkpoint_file)[0] + ".vae.pt"
+
+    if not os.path.exists(vae_file) and shared.cmd_opts.vae_path is not None:
+        vae_file = shared.cmd_opts.vae_path
+
     if os.path.exists(vae_file):
         print(f"Loading VAE weights from: {vae_file}")
         vae_ckpt = torch.load(vae_file, map_location="cpu")
@@ -158,6 +163,8 @@ def load_model_weights(model, checkpoint_info):
 
         model.first_stage_model.load_state_dict(vae_dict)
 
+    model.first_stage_model.to(devices.dtype_vae)
+
     model.sd_model_hash = sd_model_hash
     model.sd_model_checkpoint = checkpoint_file
     model.sd_checkpoint_info = checkpoint_info
@@ -7,7 +7,7 @@ import inspect
 import k_diffusion.sampling
 import ldm.models.diffusion.ddim
 import ldm.models.diffusion.plms
-from modules import prompt_parser
+from modules import prompt_parser, devices, processing
 
 from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
@@ -83,7 +83,7 @@ def setup_img2img_steps(p, steps=None):
 
 
 def sample_to_image(samples):
-    x_sample = shared.sd_model.decode_first_stage(samples[0:1].type(shared.sd_model.dtype))[0]
+    x_sample = processing.decode_first_stage(shared.sd_model, samples[0:1])[0]
     x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
     x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
     x_sample = x_sample.astype(np.uint8)
@@ -25,6 +25,7 @@ parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to director
 parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
 parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
 parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
+parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats")
 parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
 parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
 parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
@@ -65,6 +66,7 @@ parser.add_argument("--autolaunch", action='store_true', help="open the webui UR
 parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
 parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
 parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
+parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencoders model', default=None)
 parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
 
 
@@ -171,6 +173,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
 
     "use_original_name_batch": OptionInfo(False, "Use original name for output filename during batch process in extras tab"),
     "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
+    "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"),
 }))
 
 options_templates.update(options_section(('saving-paths', "Paths for saving"), {
@@ -259,6 +262,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
     's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
     's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
     's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
+    'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}),
 }))
 
 if cmd_opts.deepdanbooru:
@@ -10,6 +10,7 @@ from tqdm import tqdm
 from modules import modelloader
 from modules.shared import cmd_opts, opts, device
 from modules.swinir_model_arch import SwinIR as net
+from modules.swinir_model_arch_v2 import Swin2SR as net2
 from modules.upscaler import Upscaler, UpscalerData
 
 precision_scope = (
@@ -57,22 +58,42 @@ class UpscalerSwinIR(Upscaler):
             filename = path
         if filename is None or not os.path.exists(filename):
             return None
-        model = net(
-            upscale=scale,
-            in_chans=3,
-            img_size=64,
-            window_size=8,
-            img_range=1.0,
-            depths=[6, 6, 6, 6, 6, 6, 6, 6, 6],
-            embed_dim=240,
-            num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
-            mlp_ratio=2,
-            upsampler="nearest+conv",
-            resi_connection="3conv",
-        )
+        if filename.endswith(".v2.pth"):
+            model = net2(
+                upscale=scale,
+                in_chans=3,
+                img_size=64,
+                window_size=8,
+                img_range=1.0,
+                depths=[6, 6, 6, 6, 6, 6],
+                embed_dim=180,
+                num_heads=[6, 6, 6, 6, 6, 6],
+                mlp_ratio=2,
+                upsampler="nearest+conv",
+                resi_connection="1conv",
+            )
+            params = None
+        else:
+            model = net(
+                upscale=scale,
+                in_chans=3,
+                img_size=64,
+                window_size=8,
+                img_range=1.0,
+                depths=[6, 6, 6, 6, 6, 6, 6, 6, 6],
+                embed_dim=240,
+                num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
+                mlp_ratio=2,
+                upsampler="nearest+conv",
+                resi_connection="3conv",
+            )
+            params = "params_ema"
 
         pretrained_model = torch.load(filename)
-        model.load_state_dict(pretrained_model["params_ema"], strict=True)
+        if params is not None:
+            model.load_state_dict(pretrained_model[params], strict=True)
+        else:
+            model.load_state_dict(pretrained_model, strict=True)
         if not cmd_opts.no_half:
             model = model.half()
         return model
modules/swinir_model_arch_v2.py (new file, 1017 lines; diff not shown because it is too large)
@@ -15,11 +15,10 @@ re_tag = re.compile(r"[a-zA-Z][_\w\d()]+")
 
 
 class PersonalizedBase(Dataset):
-    def __init__(self, data_root, size=None, repeats=100, flip_p=0.5, placeholder_token="*", width=512, height=512, model=None, device=None, template_file=None):
+    def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, device=None, template_file=None):
 
         self.placeholder_token = placeholder_token
 
-        self.size = size
         self.width = width
         self.height = height
         self.flip = transforms.RandomHorizontalFlip(p=flip_p)
@@ -10,8 +10,9 @@ from modules.shared import opts, cmd_opts
 if cmd_opts.deepdanbooru:
     import modules.deepbooru as deepbooru
 
-def preprocess(process_src, process_dst, process_flip, process_split, process_caption, process_caption_deepbooru=False):
-    size = 512
+def preprocess(process_src, process_dst, process_width, process_height, process_flip, process_split, process_caption, process_caption_deepbooru=False):
+    width = process_width
+    height = process_height
     src = os.path.abspath(process_src)
     dst = os.path.abspath(process_dst)
 
@@ -69,23 +70,23 @@ def preprocess(process_src, process_dst, process_flip, process_split, process_ca
         is_wide = ratio < 1 / 1.35
 
         if process_split and is_tall:
-            img = img.resize((size, size * img.height // img.width))
+            img = img.resize((width, height * img.height // img.width))
 
-            top = img.crop((0, 0, size, size))
+            top = img.crop((0, 0, width, height))
             save_pic(top, index)
 
-            bot = img.crop((0, img.height - size, size, img.height))
+            bot = img.crop((0, img.height - height, width, img.height))
             save_pic(bot, index)
         elif process_split and is_wide:
-            img = img.resize((size * img.width // img.height, size))
+            img = img.resize((width * img.width // img.height, height))
 
-            left = img.crop((0, 0, size, size))
+            left = img.crop((0, 0, width, height))
             save_pic(left, index)
 
-            right = img.crop((img.width - size, 0, img.width, size))
+            right = img.crop((img.width - width, 0, img.width, height))
             save_pic(right, index)
         else:
-            img = images.resize_image(1, img, size, size)
+            img = images.resize_image(1, img, width, height)
             save_pic(img, index)
 
         shared.state.nextjob()
@@ -156,7 +156,7 @@ def create_embedding(name, num_vectors_per_token, init_text='*'):
     return fn
 
 
-def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps, create_image_every, save_embedding_every, template_file):
+def train_embedding(embedding_name, learn_rate, data_root, log_directory, training_width, training_height, steps, num_repeats, create_image_every, save_embedding_every, template_file):
     assert embedding_name, 'embedding not selected'
 
     shared.state.textinfo = "Initializing textual inversion training..."
@@ -182,7 +182,7 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps,
 
     shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
     with torch.autocast("cuda"):
-        ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, size=512, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file)
+        ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=num_repeats, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file)
 
     hijack = sd_hijack.model_hijack
 
@@ -200,6 +200,9 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps,
     if ititial_step > steps:
         return embedding, filename
 
+    tr_img_len = len([os.path.join(data_root, file_path) for file_path in os.listdir(data_root)])
+    epoch_len = (tr_img_len * num_repeats) + tr_img_len
+
    pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step)
    for i, (x, text) in pbar:
        embedding.step = i + ititial_step
@@ -223,7 +226,10 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps,
         loss.backward()
         optimizer.step()
 
-        pbar.set_description(f"loss: {losses.mean():.7f}")
+        epoch_num = embedding.step // epoch_len
+        epoch_step = embedding.step - (epoch_num * epoch_len) + 1
+
+        pbar.set_description(f"[Epoch {epoch_num}: {epoch_step}/{epoch_len}]loss: {losses.mean():.7f}")
 
         if embedding.step > 0 and embedding_dir is not None and embedding.step % save_embedding_every == 0:
             last_saved_file = os.path.join(embedding_dir, f'{embedding_name}-{embedding.step}.pt')
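A quick worked example of the epoch bookkeeping introduced above (not part of the commit; the image count and step value are made-up numbers, only the formulas come from the diff):

    tr_img_len = 10      # assumed: 10 images found in the dataset directory
    num_repeats = 100    # "Number of repeats for a single input image per epoch"

    epoch_len = (tr_img_len * num_repeats) + tr_img_len      # 1010 steps per epoch
    step = 2525
    epoch_num = step // epoch_len                            # 2
    epoch_step = step - (epoch_num * epoch_len) + 1          # 506
    print(f"[Epoch {epoch_num}: {epoch_step}/{epoch_len}]")  # [Epoch 2: 506/1010]
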
@@ -236,6 +242,8 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps,
                 sd_model=shared.sd_model,
                 prompt=text,
                 steps=20,
+                height=training_height,
+                width=training_width,
                 do_not_save_grid=True,
                 do_not_save_samples=True,
             )
@@ -524,7 +524,7 @@ def create_ui(wrap_gradio_gpu_call):
                 denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
 
             with gr.Row():
-                batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
+                batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1)
                 batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
 
             cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
@@ -710,7 +710,7 @@ def create_ui(wrap_gradio_gpu_call):
                 tiling = gr.Checkbox(label='Tiling', value=False)
 
             with gr.Row():
-                batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
+                batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1)
                 batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
 
             with gr.Group():
@@ -961,7 +961,7 @@ def create_ui(wrap_gradio_gpu_call):
 
         extras_send_to_inpaint.click(
             fn=lambda x: image_from_url_text(x),
-            _js="extract_image_from_gallery_img2img",
+            _js="extract_image_from_gallery_inpaint",
             inputs=[result_images],
             outputs=[init_img_with_mask],
         )
@@ -1029,6 +1029,8 @@ def create_ui(wrap_gradio_gpu_call):
 
                 process_src = gr.Textbox(label='Source directory')
                 process_dst = gr.Textbox(label='Destination directory')
+                process_width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
+                process_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
 
                 with gr.Row():
                     process_flip = gr.Checkbox(label='Create flipped copies')
@@ -1047,13 +1049,16 @@ def create_ui(wrap_gradio_gpu_call):
                 run_preprocess = gr.Button(value="Preprocess", variant='primary')
 
             with gr.Group():
-                gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding; must specify a directory with a set of 512x512 images</p>")
+                gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding; must specify a directory with a set of 1:1 ratio images</p>")
                 train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
                 learn_rate = gr.Number(label='Learning rate', value=5.0e-03)
                 dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images")
                 log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
                 template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"))
+                training_width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
+                training_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
                 steps = gr.Number(label='Max steps', value=100000, precision=0)
+                num_repeats = gr.Number(label='Number of repeats for a single input image per epoch', value=100, precision=0)
                 create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0)
                 save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0)
 
@@ -1096,6 +1101,8 @@ def create_ui(wrap_gradio_gpu_call):
             inputs=[
                 process_src,
                 process_dst,
+                process_width,
+                process_height,
                 process_flip,
                 process_split,
                 process_caption,
@@ -1115,7 +1122,10 @@ def create_ui(wrap_gradio_gpu_call):
                 learn_rate,
                 dataset_directory,
                 log_directory,
+                training_width,
+                training_height,
                 steps,
+                num_repeats,
                 create_image_every,
                 save_embedding_every,
                 template_file,
@@ -23,4 +23,3 @@ resize-right
 torchdiffeq
 kornia
 lark
-functorch
@@ -22,4 +22,3 @@ resize-right==0.0.2
 torchdiffeq==0.2.3
 kornia==0.6.7
 lark==1.1.2
-functorch==0.2.1
script.js (16 lines changed)

@@ -40,6 +40,22 @@ document.addEventListener("DOMContentLoaded", function() {
     mutationObserver.observe( gradioApp(), { childList:true, subtree:true })
 });
 
+/**
+ * Add a ctrl+enter as a shortcut to start a generation
+ */
+document.addEventListener('keydown', function(e) {
+    var handled = false;
+    if (e.key !== undefined) {
+        if((e.key == "Enter" && (e.metaKey || e.ctrlKey))) handled = true;
+    } else if (e.keyCode !== undefined) {
+        if((e.keyCode == 13 && (e.metaKey || e.ctrlKey))) handled = true;
+    }
+    if (handled) {
+        gradioApp().querySelector("#txt2img_generate").click();
+        e.preventDefault();
+    }
+})
+
 /**
  * checks that a UI element is not in another hidden element or tab content
  */
@@ -205,7 +205,10 @@ class Script(scripts.Script):
         if not no_fixed_seeds:
             modules.processing.fix_seed(p)
 
-        p.batch_size = 1
+        if not opts.return_grid:
+            p.batch_size = 1
 
         CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers
 
         def process_axis(opt, vals):
style.css (11 lines changed)

@@ -1,3 +1,7 @@
+.container {
+    max-width: 100%;
+}
+
 .output-html p {margin: 0 0.5em;}
 
 .row > *,
@@ -463,3 +467,10 @@ input[type="range"]{
     max-width: 32em;
     padding: 0;
 }
+
+canvas[key="mask"] {
+    z-index: 12 !important;
+    filter: invert();
+    mix-blend-mode: multiply;
+    pointer-events: none;
+}