Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git (synced 2024-06-07 21:20:49 +00:00)
Merge pull request #9445 from gakada/master
Add [batch_number] and [generation_number] filename patterns
Commit 8651943cf9
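For context, these tags are intended for the filename pattern setting used when saving images; a hypothetical pattern (the surrounding tags are assumptions, not taken from this diff) could look like:

[seed]-[generation_number]-[prompt_words]

With a batch size of 1 and a single iteration, the new tags (and the separator text before them) are omitted from the resulting filename, as the second hunk below shows.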
modules/images.py

@@ -352,6 +352,8 @@ class FilenameGenerator:
         'prompt_no_styles': lambda self: self.prompt_no_style(),
         'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
         'prompt_words': lambda self: self.prompt_words(),
+        'batch_number': lambda self: self.p.batch_index + 1,
+        'generation_number': lambda self: self.p.iteration * self.p.batch_size + self.p.batch_index + 1,
         'hasprompt': lambda self, *args: self.hasprompt(*args), #accept formats:[hasprompt<prompt1|default><prompt2>..]
         'clip_skip': lambda self: opts.data["CLIP_stop_at_last_layers"],
     }
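A minimal standalone sketch of what the two new entries evaluate to; the stub object and its values below are assumptions for illustration, not webui code:

from types import SimpleNamespace

# Stand-in for the processing object self.p that FilenameGenerator reads.
p = SimpleNamespace(iteration=1, batch_size=4, batch_index=2)  # assumed values

batch_number = p.batch_index + 1                                    # 1-based position within the current batch -> 3
generation_number = p.iteration * p.batch_size + p.batch_index + 1  # 1-based position across the whole job -> 7

print(batch_number, generation_number)  # 3 7

Both counters are 1-based, so the first image of the first batch is numbered 1 rather than 0.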
@@ -421,6 +423,10 @@ class FilenameGenerator:
 
         for m in re_pattern.finditer(x):
             text, pattern = m.groups()
+
+            if pattern is not None and (pattern.lower() == 'batch_number' and self.p.batch_size == 1 or pattern.lower() == 'generation_number' and self.p.n_iter == 1 and self.p.batch_size == 1):
+                continue
+
             res += text
 
             if pattern is None:
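A rough sketch of the guard's effect on pattern expansion; the regex and helper below are illustrative assumptions rather than the exact webui implementation:

import re

# Simplified stand-in for re_pattern: literal text followed by an optional [tag].
re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")

def apply_sketch(x, batch_size=1, n_iter=1):
    # Mirrors the guard: with only one image per batch (or per job), the tag and
    # the literal text preceding it (e.g. a separating dash) are skipped entirely.
    res = ''
    for m in re_pattern.finditer(x):
        text, pattern = m.groups()
        if pattern is not None and (pattern.lower() == 'batch_number' and batch_size == 1 or pattern.lower() == 'generation_number' and n_iter == 1 and batch_size == 1):
            continue
        res += text
        if pattern is None:
            continue
        res += f"<{pattern}>"  # placeholder for the real tag substitution
    return res

print(apply_sketch("[seed]-[batch_number]", batch_size=1))  # <seed>                 (tag and dash dropped)
print(apply_sketch("[seed]-[batch_number]", batch_size=4))  # <seed>-<batch_number>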
modules/processing.py

@@ -683,6 +683,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
                 p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)
 
             for i, x_sample in enumerate(x_samples_ddim):
+                p.batch_index = i
+
                 x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
                 x_sample = x_sample.astype(np.uint8)
 
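A sketch of why the assignment sits inside the per-image loop: batch_index has to be updated before each image is saved so the filename pattern sees a distinct value for every image in the batch. The class and loop below are a toy illustration with assumed values, not the webui save path:

# Toy setup: 2 iterations ("batches") of 3 images each.
class P:
    n_iter = 2
    batch_size = 3
    iteration = 0
    batch_index = 0

p = P()
for n in range(p.n_iter):              # outer loop: one pass per batch
    p.iteration = n
    for i in range(p.batch_size):      # inner loop: one pass per image
        p.batch_index = i              # updated before the image would be saved
        batch_number = p.batch_index + 1
        generation_number = p.iteration * p.batch_size + p.batch_index + 1
        print(f"batch {n}, image {i}: batch_number={batch_number}, generation_number={generation_number}")
# Without the per-image assignment, every image in a batch would report the same batch_number.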
@@ -731,7 +733,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
 
                     if opts.return_mask:
                         output_images.append(image_mask)
 
                     if opts.return_mask_composite:
                         output_images.append(image_mask_composite)
 