From 93075f52ddc80c70e634534a2ad960d1f2b66e58 Mon Sep 17 00:00:00 2001
From: Yuze Wang
Date: Tue, 5 Mar 2024 15:19:32 +0800
Subject: [PATCH] add the ability to automatically switch to CPU if
 faster-whisper is not compiled with CUDA

---
 tools/asr/fasterwhisper_asr.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tools/asr/fasterwhisper_asr.py b/tools/asr/fasterwhisper_asr.py
index 5f49de7..9371324 100644
--- a/tools/asr/fasterwhisper_asr.py
+++ b/tools/asr/fasterwhisper_asr.py
@@ -4,6 +4,7 @@ os.environ["HF_ENDPOINT"]="https://hf-mirror.com"
 import traceback
 import requests
 from glob import glob
+import torch
 from faster_whisper import WhisperModel
 from tqdm import tqdm
 
@@ -45,8 +46,9 @@ def execute_asr(input_folder, output_folder, model_size, language,precision):
     if language == 'auto':
         language = None #不设置语种由模型自动输出概率最高的语种
     print("loading faster whisper model:",model_size,model_path)
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
     try:
-        model = WhisperModel(model_path, device="cuda", compute_type=precision)
+        model = WhisperModel(model_path, device=device, compute_type=precision)
     except:
         return print(traceback.format_exc())
     output = []