fixed feature_extractor issue

Author: YongKang Zhou
Date:   2024-01-31 08:56:56 +00:00
Parent: 956ae58677
Commit: 72dca99288

api.py

@@ -104,8 +104,13 @@ RESP: 无
import argparse
import os
import signal
import sys
now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append("%s/GPT_SoVITS" % (now_dir))
import signal
from time import time as ttime
import torch
import librosa
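
For context, the new lines in this hunk appear to add both the repository root and the GPT_SoVITS subdirectory to sys.path, which is the usual way to make the feature_extractor package (assumed to live at GPT_SoVITS/feature_extractor) importable as a top-level module when api.py is launched from the repository root. A minimal sketch of that idea, with the package location and the import style treated as assumptions rather than facts from this diff:

# Minimal sketch (not the commit itself): make both the repo root and the
# GPT_SoVITS subdirectory importable before loading any model code.
import os
import sys

now_dir = os.getcwd()
sys.path.append(now_dir)                              # repo root
sys.path.append(os.path.join(now_dir, "GPT_SoVITS"))  # assumed location of feature_extractor/

# With that path in place, an import like the following can resolve
# (assumed import style; the exact import in api.py may differ):
# from feature_extractor import cnhubert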
@@ -440,6 +445,7 @@ def handle(refer_wav_path, prompt_text, prompt_language, text, text_language):
    torch.cuda.empty_cache()
    if device == "mps":
        print('executed torch.mps.empty_cache()')
        torch.mps.empty_cache()
    return StreamingResponse(wav, media_type="audio/wav")
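
The second hunk extends the post-inference cleanup so that Apple-silicon (MPS) runs also release cached allocator memory, mirroring the existing torch.cuda.empty_cache() call. A hedged sketch of that backend-aware cleanup, assuming `device` holds the backend string used elsewhere in api.py ("cuda", "mps", or "cpu"); clear_backend_cache is a hypothetical helper, not a function defined in api.py:

# Hedged sketch of backend-aware cache clearing.
import torch

def clear_backend_cache(device: str) -> None:
    # Release cached allocator memory for the active backend, if supported.
    if device == "cuda" and torch.cuda.is_available():
        torch.cuda.empty_cache()
    elif device == "mps" and torch.backends.mps.is_available():
        # torch.mps.empty_cache() is available in recent PyTorch releases.
        torch.mps.empty_cache()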