mirror of
https://github.com/RVC-Boss/GPT-SoVITS.git
synced 2025-04-05 19:41:56 +08:00
added the ability to automatically switch to CPU
if faster-whisper was not compiled with CUDA support
This commit is contained in:
parent
b75b5dcf6b
commit
93075f52dd
@ -4,6 +4,7 @@ os.environ["HF_ENDPOINT"]="https://hf-mirror.com"
|
|||||||
import traceback
|
import traceback
|
||||||
import requests
|
import requests
|
||||||
from glob import glob
|
from glob import glob
|
||||||
|
import torch
|
||||||
|
|
||||||
from faster_whisper import WhisperModel
|
from faster_whisper import WhisperModel
|
||||||
from tqdm import tqdm
|
from tqdm import tqdm
|
||||||
@ -45,8 +46,9 @@ def execute_asr(input_folder, output_folder, model_size, language,precision):
|
|||||||
if language == 'auto':
|
if language == 'auto':
|
||||||
language = None #不设置语种由模型自动输出概率最高的语种
|
language = None #不设置语种由模型自动输出概率最高的语种
|
||||||
print("loading faster whisper model:",model_size,model_path)
|
print("loading faster whisper model:",model_size,model_path)
|
||||||
|
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
||||||
try:
|
try:
|
||||||
model = WhisperModel(model_path, device="cuda", compute_type=precision)
|
model = WhisperModel(model_path, device=device, compute_type=precision)
|
||||||
except:
|
except:
|
||||||
return print(traceback.format_exc())
|
return print(traceback.format_exc())
|
||||||
output = []
|
output = []
|
||||||
|
Loading…
x
Reference in New Issue
Block a user