From 605c3191570de9facba5c20af083d5fd13fca88a Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Mon, 4 Sep 2023 19:38:38 +0200
Subject: [PATCH] feat(diffusers): don't set seed in params and respect device
 (#1010)

**Description**

Follow up of #998

- respect the device used to load the model and do not specify a seed in
  the parameters, but rather just configure the generator as described in
  https://huggingface.co/docs/diffusers/using-diffusers/reusing_seeds

Signed-off-by: Ettore Di Giacinto
---
 extra/grpc/diffusers/backend_diffusers.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/extra/grpc/diffusers/backend_diffusers.py b/extra/grpc/diffusers/backend_diffusers.py
index bb7d991a..c23fc22a 100755
--- a/extra/grpc/diffusers/backend_diffusers.py
+++ b/extra/grpc/diffusers/backend_diffusers.py
@@ -221,8 +221,9 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
                 modelFileBase = os.path.dirname(request.ModelFile)
                 # modify LoraAdapter to be relative to modelFileBase
                 request.LoraAdapter = os.path.join(modelFileBase, request.LoraAdapter)
+        device = "cpu" if not request.CUDA else "cuda"
+        self.device = device
         if request.LoraAdapter:
-            device = "cpu" if not request.CUDA else "cuda"
             # Check if its a local file and not a directory ( we load lora differently for a safetensor file )
             if os.path.exists(request.LoraAdapter) and not os.path.isdir(request.LoraAdapter):
                 self.load_lora_weights(request.LoraAdapter, 1, device, torchType)
@@ -300,7 +301,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
             "width": request.width,
             "height": request.height,
             "num_inference_steps": request.step,
-            "seed": request.seed,
         }

         if request.src != "":
@@ -321,7 +321,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):

         # Set seed
         if request.seed > 0:
-            kwargs["generator"] = torch.Generator(device="cuda").manual_seed(
+            kwargs["generator"] = torch.Generator(device=self.device).manual_seed(
                 request.seed
             )
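
For context, below is a minimal standalone sketch of the generator-based
seeding pattern from the linked diffusers docs that this patch adopts. It is
not part of the patch itself; the model id, prompt, and seed value are
placeholder assumptions for illustration only.

    # Sketch: seed via a torch.Generator on the pipeline's device instead of
    # passing "seed" as a pipeline kwarg (which diffusers does not accept).
    import torch
    from diffusers import StableDiffusionPipeline

    # Pick the device the same way the backend does: CUDA if available,
    # otherwise CPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Placeholder checkpoint, not from this patch.
    pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe = pipe.to(device)

    # The generator must live on the same device the pipeline runs on;
    # this is the bug the patch fixes (it was hardcoded to "cuda").
    generator = torch.Generator(device=device).manual_seed(1024)
    image = pipe("a photo of an astronaut", generator=generator).images[0]

Reusing the same seeded generator (or re-seeding it with the same value)
reproduces the same image for a given prompt and settings, which is why the
seed belongs on the generator rather than in the pipeline parameters.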