This commit is contained in:
wangshuai09 2024-01-31 10:46:53 +08:00
parent 74ff85a1a1
commit cc3f604310
8 changed files with 22 additions and 20 deletions

View File

@@ -88,9 +88,16 @@ def torch_gc():
        xpu_specific.torch_xpu_gc()

    if npu_specific.has_npu:
+        torch_npu_set_device()
        npu_specific.torch_npu_gc()


+def torch_npu_set_device():
+    # Work around due to bug in torch_npu, revert me after fixed, @see https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue
+    if npu_specific.has_npu:
+        torch.npu.set_device(0)
+
+
 def enable_tf32():
     if torch.cuda.is_available():

View File

@@ -143,10 +143,7 @@ def initialize_rest(*, reload_script_modules=False):
     by that time, so we apply optimization again.
     """
     from modules import devices
-    # Work around due to bug in torch_npu, revert me after fixed, @see https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue
-    if devices.npu_specific.has_npu:
-        import torch
-        torch.npu.set_device(0)
+    devices.torch_npu_set_device()
     shared.sd_model  # noqa: B018

View File

@@ -338,6 +338,7 @@ def prepare_environment():
     torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://pytorch-extension.intel.com/release-whl/stable/xpu/us/")
     torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.0a0 intel-extension-for-pytorch==2.0.110+gitba7f6c1 --extra-index-url {torch_index_url}")
     requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
+    requirements_file_for_npu = os.environ.get('REQS_FILE_FOR_NPU', "requirements_npu.txt")
     xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.23.post1')
     clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip")
@@ -421,6 +422,13 @@ def prepare_environment():
         run_pip(f"install -r \"{requirements_file}\"", "requirements")
         startup_timer.record("install requirements")

+    if not os.path.isfile(requirements_file_for_npu):
+        requirements_file_for_npu = os.path.join(script_path, requirements_file_for_npu)
+
+    if "torch_npu" in torch_command and not requirements_met(requirements_file_for_npu):
+        run_pip(f"install -r \"{requirements_file_for_npu}\"", "requirements_for_npu")
+        startup_timer.record("install requirements_for_npu")
+
     if not args.skip_install:
         run_extensions_installers(settings_file=args.ui_settings_file)

View File

@@ -8,11 +8,10 @@ def check_for_npu():
     if importlib.util.find_spec("torch_npu") is None:
         return False
     import torch_npu
+    torch_npu.npu.set_device(0)
     try:
         # Will raise a RuntimeError if no NPU is found
-        _ = torch.npu.device_count()
+        _ = torch_npu.npu.device_count()
         return torch.npu.is_available()
     except RuntimeError:
         return False
@@ -25,8 +24,6 @@ def get_npu_device_string():
 def torch_npu_gc():
-    # Work around due to bug in torch_npu, revert me after fixed, @see https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue
-    torch.npu.set_device(0)
     with torch.npu.device(get_npu_device_string()):
         torch.npu.empty_cache()

View File

@@ -150,10 +150,7 @@ class EmbeddingDatabase:
         return embedding

     def get_expected_shape(self):
-        # workaround
-        if devices.npu_specific.has_npu:
-            import torch
-            torch.npu.set_device(0)
+        devices.torch_npu_set_device()
         vec = shared.sd_model.cond_stage_model.encode_embedding_init_text(",", 1)
         return vec.shape[1]

View File

@@ -4,8 +4,6 @@ accelerate
 blendmodes
 clean-fid
-cloudpickle
-decorator
 einops
 facexlib
 fastapi>=0.90.1
@@ -26,10 +24,8 @@ resize-right
 safetensors
 scikit-image>=0.19
-synr==0.5.0
 tomesd
 torch
 torchdiffeq
 torchsde
-tornado
 transformers==4.30.2

4
requirements_npu.txt Normal file
View File

@@ -0,0 +1,4 @@
+cloudpickle
+decorator
+synr==0.5.0
+tornado

View File

@@ -3,8 +3,6 @@ Pillow==9.5.0
 accelerate==0.21.0
 blendmodes==2022
 clean-fid==0.1.35
-cloudpickle==3.0.0
-decorator==5.1.1
 einops==0.4.1
 facexlib==0.3.0
 fastapi==0.94.0
@@ -23,12 +21,10 @@ pytorch_lightning==1.9.4
 resize-right==0.0.2
 safetensors==0.4.2
 scikit-image==0.21.0
-synr==0.5.0
 spandrel==0.1.6
 tomesd==0.1.3
 torch
 torchdiffeq==0.2.3
 torchsde==0.2.6
-tornado==6.4
 transformers==4.30.2
 httpx==0.24.1