From 3f43d8a966ba8462ba019a5ad573f94508cd45f8 Mon Sep 17 00:00:00 2001
From: Vladimir Mandic
Date: Wed, 11 Jan 2023 10:28:55 -0500
Subject: [PATCH] set descriptions

---
 modules/hypernetworks/hypernetwork.py          | 4 +++-
 modules/textual_inversion/preprocess.py        | 7 ++++++-
 modules/textual_inversion/textual_inversion.py | 4 +++-
 3 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 300d3975f..194679e89 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -619,7 +619,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
                 epoch_num = hypernetwork.step // steps_per_epoch
                 epoch_step = hypernetwork.step % steps_per_epoch
 
-                pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}")
+                description = f"Training hypernetwork [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}"
+                pbar.set_description(description)
+                shared.state.textinfo = description
                 if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0:
                     # Before saving, change name to match current checkpoint.
                     hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
index feb876c62..3c1042ada 100644
--- a/modules/textual_inversion/preprocess.py
+++ b/modules/textual_inversion/preprocess.py
@@ -135,7 +135,8 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
     params.process_caption_deepbooru = process_caption_deepbooru
     params.preprocess_txt_action = preprocess_txt_action
 
-    for index, imagefile in enumerate(tqdm.tqdm(files)):
+    pbar = tqdm.tqdm(files)
+    for index, imagefile in enumerate(pbar):
         params.subindex = 0
         filename = os.path.join(src, imagefile)
         try:
@@ -143,6 +144,10 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
         except Exception:
             continue
 
+        description = f"Preprocessing [Image {index}/{len(files)}]"
+        pbar.set_description(description)
+        shared.state.textinfo = description
+
         params.src = filename
 
         existing_caption = None
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 3866c1546..b915b091c 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -476,7 +476,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
                 epoch_num = embedding.step // steps_per_epoch
                 epoch_step = embedding.step % steps_per_epoch
 
-                pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}")
+                description = f"Training textual inversion [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}"
+                pbar.set_description(description)
+                shared.state.textinfo = description
                 if embedding_dir is not None and steps_done % save_embedding_every == 0:
                     # Before saving, change name to match current checkpoint.
                     embedding_name_every = f'{embedding_name}-{steps_done}'
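All three hunks apply the same pattern: build the status string once, feed it to the tqdm progress bar, and mirror it into `shared.state.textinfo` so the UI can display the same text. Below is a minimal, self-contained sketch of that pattern; the `SharedState` class and the `train` loop are hypothetical stand-ins for the webui's `shared.state` object and its training loops, not code from this patch.

```python
import tqdm


class SharedState:
    """Hypothetical stand-in for the webui's shared.state object."""
    textinfo = ""


state = SharedState()


def train(steps=100, steps_per_epoch=25):
    pbar = tqdm.tqdm(range(steps))
    for step in pbar:
        loss_step = 1.0 / (step + 1)  # placeholder loss value
        epoch_num = step // steps_per_epoch
        epoch_step = step % steps_per_epoch

        # Build the description once, then reuse it for both the console
        # progress bar and the UI-visible status field.
        description = f"Training [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}"
        pbar.set_description(description)
        state.textinfo = description


if __name__ == "__main__":
    train()
```

Reusing the single `description` string is what keeps the console progress bar and the status text shown to the user in sync.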