Update inference_webui.py

Change the inference device to accelerate inference on Mac and reduce a memory leak
XXXXRT666 2024-02-20 16:03:08 +00:00 committed by GitHub
parent 3180294710
commit 861658050b


@@ -73,7 +73,7 @@ os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # ensure this is also set when launching the inference UI directly
 if torch.cuda.is_available():
     device = "cuda"
 elif torch.backends.mps.is_available():
-    device = "mps"
+    device = "cpu"
 else:
     device = "cpu"
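For context, a minimal sketch of how the device selection reads after this commit, assuming only a plain PyTorch install (the rest of inference_webui.py is omitted). On Apple Silicon, torch.backends.mps.is_available() returns True, but the device is now deliberately set to "cpu", which per the commit message is faster on Mac and reduces a memory leak.

# Sketch of the post-commit device selection (assumption: standalone script,
# not the full webui module).
import os
import torch

# Kept from the surrounding context: let unsupported MPS ops fall back to CPU.
os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'

if torch.cuda.is_available():
    device = "cuda"
elif torch.backends.mps.is_available():
    device = "cpu"  # deliberately not "mps" after this commit
else:
    device = "cpu"

print(f"Inference device: {device}")

Note that the elif branch now assigns the same value as the else branch; keeping the branch makes the Mac-specific decision explicit rather than silently folding it into the default case.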