Commit d1a7a6a

fix(diffusers/xpu): Set device to XPU and ignore CUDA request when on Intel
Signed-off-by: Richard Palethorpe <[email protected]>
1 parent: d99530a

2 files changed: +14 −9 lines

backend/python/diffusers/backend.py

Lines changed: 7 additions & 8 deletions
@@ -38,9 +38,7 @@
 FRAMES = os.environ.get("FRAMES", "64")
 
 if XPU:
-    import intel_extension_for_pytorch as ipex
-
-    print(ipex.xpu.get_device_name(0))
+    print(torch.xpu.get_device_name(0))
 
 # If MAX_WORKERS are specified in the environment use it, otherwise default to 1
 MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
@@ -336,6 +334,8 @@ def LoadModel(self, request, context):
                 request.LoraAdapter = os.path.join(request.ModelPath, request.LoraAdapter)
 
             device = "cpu" if not request.CUDA else "cuda"
+            if XPU:
+                device = "xpu"
             self.device = device
             if request.LoraAdapter:
                 # Check if its a local file and not a directory ( we load lora differently for a safetensor file )
@@ -359,12 +359,11 @@ def LoadModel(self, request, context):
 
             self.pipe.set_adapters(adapters_name, adapter_weights=adapters_weights)
 
-            if request.CUDA:
-                self.pipe.to('cuda')
+            if device != "cpu":
+                self.pipe.to(device)
                 if self.controlnet:
-                    self.controlnet.to('cuda')
-            if XPU:
-                self.pipe = self.pipe.to("xpu")
+                    self.controlnet.to(device)
+
         except Exception as err:
            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
        # Implement your logic here for the LoadModel service
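Taken together, the two hunks collapse device selection into a single variable: CUDA is used only when requested and no XPU is present, and the code that previously hard-coded 'cuda' now follows that variable. Below is a minimal sketch of the resulting logic, pulled out of the diff for clarity; pipe and controlnet stand in for the diffusers pipeline objects, and the environment-based XPU check is an assumption based on how run.sh (next file) exports the flag, not a verbatim copy of backend.py.

import os

# Assumption: the XPU flag arrives via the environment; the run.sh change
# below exports XPU=1 when an Intel oneAPI install is detected.
XPU = os.environ.get("XPU", "0") == "1"

def select_device(cuda_requested: bool) -> str:
    """Pick the device string, letting XPU override a CUDA request."""
    device = "cuda" if cuda_requested else "cpu"
    if XPU:
        # On Intel hardware the CUDA request is ignored in favour of XPU.
        device = "xpu"
    return device

def place_pipeline(pipe, controlnet, device: str) -> None:
    """Move the pipeline (and its optional controlnet) off the CPU."""
    if device != "cpu":
        pipe.to(device)
        if controlnet:
            controlnet.to(device)

Routing both pipe and controlnet through the same device string is what removes the old trailing `if XPU:` special case: there is now exactly one place where the target device is decided.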

backend/python/diffusers/run.sh

Lines changed: 7 additions & 1 deletion
@@ -6,4 +6,10 @@ else
 source $backend_dir/../common/libbackend.sh
 fi
 
-startBackend $@
+if [ -d "/opt/intel" ]; then
+    # Assumes we are using the Intel oneAPI container image
+    # https://github.com/intel/intel-extension-for-pytorch/issues/538
+    export XPU=1
+fi
+
+startBackend $@
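For readers following along in backend.py, the same detection heuristic sketched in Python; the /opt/intel probe is only the container-image assumption the shell comment notes, not a general XPU capability check, and running_in_oneapi_container is a hypothetical name introduced here for illustration.

import os

def running_in_oneapi_container() -> bool:
    # run.sh assumes the Intel oneAPI container image, where the
    # toolkit is installed under /opt/intel.
    return os.path.isdir("/opt/intel")

if running_in_oneapi_container():
    # Mirrors `export XPU=1` so the Python backend takes the XPU path.
    os.environ["XPU"] = "1"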
