Merge pull request #108 from xeonvs/mps-support

Added support for launching on Apple Silicon M1/M2
commit 296d012423
AUTOMATIC1111 committed 2022-09-07 22:29:44 +03:00 (via GitHub)
4 changed files with 18 additions and 9 deletions


@@ -14,8 +14,11 @@ import modules.images
 def load_model(filename):
     # this code is adapted from https://github.com/xinntao/ESRGAN
-    pretrained_net = torch.load(filename)
+    if torch.has_mps:
+        map_l = 'cpu'
+    else:
+        map_l = None
+    pretrained_net = torch.load(filename, map_location=map_l)
     crt_model = arch.RRDBNet(3, 3, 64, 23, gc=32)
     if 'conv_first.weight' in pretrained_net:
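The map_location switch matters because torch.load with map_location=None restores each tensor onto the device it was saved from, which fails on machines without CUDA when the checkpoint was saved from CUDA tensors. A minimal sketch of the same idea, assuming the current PyTorch API (torch.has_mps was later deprecated in favor of torch.backends.mps.is_available()) and a placeholder checkpoint path:

```python
import torch

# Placeholder path for illustration; not a file from this repo.
CHECKPOINT = "weights.pth"

# On Apple Silicon, deserialize to CPU first; tensors can then be moved
# onto the "mps" device afterwards with .to("mps").
map_l = 'cpu' if torch.backends.mps.is_available() else None
state_dict = torch.load(CHECKPOINT, map_location=map_l)
```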


@@ -2,9 +2,12 @@ import torch
 module_in_gpu = None
 cpu = torch.device("cpu")
-gpu = torch.device("cuda")
-device = gpu if torch.cuda.is_available() else cpu
+if torch.has_cuda:
+    device = gpu = torch.device("cuda")
+elif torch.has_mps:
+    device = gpu = torch.device("mps")
+else:
+    device = gpu = torch.device("cpu")
 def setup_for_low_vram(sd_model, use_medvram):
     parents = {}
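The chained `device = gpu = ...` keeps this module's existing `gpu` name pointing at whichever backend was selected, so the rest of the file works unchanged. For context, a hedged sketch (not the repo's actual implementation) of what setup_for_low_vram is about: keeping weights on the CPU and moving each module to the accelerator only while it runs, via forward hooks:

```python
import torch

cpu = torch.device("cpu")
device = torch.device("mps") if torch.backends.mps.is_available() else cpu

def to_accelerator(module, inputs):
    module.to(device)                           # move weights just in time
    return tuple(t.to(device) for t in inputs)  # keep inputs on the same device

def back_to_cpu(module, _inputs, _output):
    module.to(cpu)                              # free accelerator memory again

layer = torch.nn.Linear(8, 8)
layer.register_forward_pre_hook(to_accelerator)
layer.register_forward_hook(back_to_cpu)
out = layer(torch.randn(1, 8))  # weights sit on `device` only during this call
```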


@@ -232,7 +232,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
         z = outputs.last_hidden_state
         # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
-        batch_multipliers = torch.asarray(np.array(batch_multipliers)).to(device)
+        batch_multipliers = torch.asarray(batch_multipliers).to(device)
         original_mean = z.mean()
         z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
         new_mean = z.mean()
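The likely motivation for dropping the np.array round-trip: np.array builds a float64 array from Python floats, and torch.asarray preserves that dtype, but the MPS backend does not support float64. Passing the nested list directly yields PyTorch's default float32. A quick illustration of the dtype difference:

```python
import numpy as np
import torch

mults = [[1.0, 1.1], [1.0, 0.9]]
print(torch.asarray(np.array(mults)).dtype)  # torch.float64 -- rejected by MPS
print(torch.asarray(mults).dtype)            # torch.float32 -- PyTorch default
```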


@@ -36,9 +36,12 @@ parser.add_argument("--opt-split-attention", action='store_true', help="enable o
 parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
 cmd_opts = parser.parse_args()
 cpu = torch.device("cpu")
-gpu = torch.device("cuda")
-device = gpu if torch.cuda.is_available() else cpu
+if torch.has_cuda:
+    device = torch.device("cuda")
+elif torch.has_mps:
+    device = torch.device("mps")
+else:
+    device = torch.device("cpu")
 batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
 parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
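One caveat worth flagging when reading this diff: torch.has_cuda and torch.has_mps report whether PyTorch was built with those backends, not whether a usable device is actually present at runtime, unlike the torch.cuda.is_available() check this replaces. A sketch of the equivalent selection with runtime checks (the form later PyTorch versions recommend):

```python
import torch

# Runtime availability checks, rather than build-time flags.
if torch.cuda.is_available():
    device = torch.device("cuda")
elif torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")
```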