From 4a216ded433ded315106e2989c5ff7dec1c49304 Mon Sep 17 00:00:00 2001
From: Ljzd-PRO <63289359+Ljzd-PRO@users.noreply.github.com>
Date: Thu, 13 Oct 2022 02:07:49 +0800
Subject: [PATCH] load models to VRAM when using `--lowram` param

load models to VRAM instead of RAM (for machines which have more VRAM
than RAM, such as the free Google Colab server)

---
 modules/sd_models.py | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/modules/sd_models.py b/modules/sd_models.py
index 0a55b4c32..78a198b9c 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -134,7 +134,12 @@ def load_model_weights(model, checkpoint_info):
 
     print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
 
-    pl_sd = torch.load(checkpoint_file, map_location="cpu")
+    if shared.cmd_opts.lowram:
+        print("Load to VRAM if GPU is available (low RAM)")
+        pl_sd = torch.load(checkpoint_file)
+    else:
+        pl_sd = torch.load(checkpoint_file, map_location="cpu")
+
     if "global_step" in pl_sd:
         print(f"Global Step: {pl_sd['global_step']}")
 
@@ -158,7 +163,13 @@ def load_model_weights(model, checkpoint_info):
 
     if os.path.exists(vae_file):
         print(f"Loading VAE weights from: {vae_file}")
-        vae_ckpt = torch.load(vae_file, map_location="cpu")
+
+        if shared.cmd_opts.lowram:
+            print("Load to VRAM if GPU is available (low RAM)")
+            vae_ckpt = torch.load(vae_file)
+        else:
+            vae_ckpt = torch.load(vae_file, map_location="cpu")
+
         vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
 
         model.first_stage_model.load_state_dict(vae_dict)
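
For context, the patch hinges on PyTorch's default device mapping: torch.load() without map_location restores each tensor to the device it was serialized from (typically CUDA for released checkpoints), while map_location="cpu" remaps everything into system RAM first. Below is a minimal sketch of the two paths, not part of the patch; the checkpoint path is hypothetical, a plain boolean stands in for shared.cmd_opts.lowram, and the torch.cuda.is_available() guard is an extra safeguard that the patch itself does not perform.

import torch

checkpoint_file = "model.ckpt"   # hypothetical path, for illustration only
lowram = True                    # stands in for shared.cmd_opts.lowram

if lowram and torch.cuda.is_available():
    # No map_location: tensors saved from a GPU are restored straight into
    # VRAM, keeping peak system-RAM usage low (the --lowram behaviour).
    pl_sd = torch.load(checkpoint_file)
else:
    # Default path: remap every tensor to CPU, i.e. into system RAM.
    pl_sd = torch.load(checkpoint_file, map_location="cpu")

# Stable Diffusion .ckpt files wrap the weights in a "state_dict" key.
state_dict = pl_sd.get("state_dict", pl_sd)
print(f"Loaded {len(state_dict)} tensors")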