From 910741a104aba7b99bc9385e8880a9711a46438a Mon Sep 17 00:00:00 2001
From: XXXXRT666 <157766680+XXXXRT666@users.noreply.github.com>
Date: Wed, 21 Feb 2024 13:26:30 +0000
Subject: [PATCH] Update inference_webui.py

---
 GPT_SoVITS/inference_webui.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py
index d2f3f94..2693647 100644
--- a/GPT_SoVITS/inference_webui.py
+++ b/GPT_SoVITS/inference_webui.py
@@ -16,6 +16,7 @@ logging.getLogger("asyncio").setLevel(logging.ERROR)
 logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
 logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
 import pdb
+import torch
 
 if os.path.exists("./gweight.txt"):
     with open("./gweight.txt", 'r', encoding="utf-8") as file:
@@ -48,11 +49,11 @@ is_share = os.environ.get("is_share", "False")
 is_share = eval(is_share)
 if "_CUDA_VISIBLE_DEVICES" in os.environ:
     os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
-is_half = eval(os.environ.get("is_half", "True"))
+is_half = eval(os.environ.get("is_half", "True")) and not torch.backends.mps.is_available()
 import gradio as gr
 from transformers import AutoModelForMaskedLM, AutoTokenizer
 import numpy as np
-import librosa, torch
+import librosa
 from feature_extractor import cnhubert
 
 cnhubert.cnhubert_base_path = cnhubert_base_path