From 31802947108cb12d708404fb621f287fd5d13716 Mon Sep 17 00:00:00 2001 From: XXXXRT666 <157766680+XXXXRT666@users.noreply.github.com> Date: Tue, 20 Feb 2024 15:57:58 +0000 Subject: [PATCH] Update config.py Change the inference device for Mac to accelerate inference and reduce memory leaks --- config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.py b/config.py index 3e9e951..caaadd4 100644 --- a/config.py +++ b/config.py @@ -20,7 +20,7 @@ python_exec = sys.executable or "python" if torch.cuda.is_available(): infer_device = "cuda" elif torch.backends.mps.is_available(): - infer_device = "mps" + infer_device = "cpu" else: infer_device = "cpu"