mirror of https://github.com/RVC-Boss/GPT-SoVITS.git (synced 2025-08-12 11:33:29 +08:00)

Add Distil

commit e4b9354534, parent 2d09bbe63a
tools/asr/config.py

@@ -6,15 +6,10 @@ def check_fw_local_models():
     启动时检查本地是否有 Faster Whisper 模型.
     """
     model_size_list = [
-        "tiny",
-        "tiny.en",
-        "base",
-        "base.en",
-        "small",
-        "small.en",
         "medium",
         "medium.en",
-        "large",
+        "distil-large-v2",
+        "distil-large-v3",
         "large-v1",
         "large-v2",
         "large-v3",
@@ -25,11 +20,24 @@ def check_fw_local_models():
     return model_size_list
 
 
+def get_models():
+    model_size_list = [
+        "medium",
+        "medium.en",
+        "distil-large-v2",
+        "distil-large-v3",
+        "large-v1",
+        "large-v2",
+        "large-v3",
+    ]
+    return model_size_list
+
+
 asr_dict = {
     "达摩 ASR (中文)": {"lang": ["zh", "yue"], "size": ["large"], "path": "funasr_asr.py", "precision": ["float32"]},
     "Faster Whisper (多语种)": {
         "lang": ["auto", "zh", "en", "ja", "ko", "yue"],
-        "size": check_fw_local_models(),
+        "size": get_models(),
         "path": "fasterwhisper_asr.py",
         "precision": ["float32", "float16", "int8"],
     },
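Note: get_models() is a static copy of the trimmed size list, so the "Faster Whisper (多语种)" (multilingual) entry in asr_dict no longer depends on check_fw_local_models() (whose docstring says it checks at startup for local Faster Whisper models) scanning the models directory; the distil sizes are offered before anything has been downloaded. A minimal sketch of consuming this config, using only names shown in this diff:

# Sketch: how a frontend could populate its dropdowns from asr_dict.
from tools.asr.config import asr_dict, get_models

fw = asr_dict["Faster Whisper (多语种)"]
print(fw["size"])       # static list, includes "distil-large-v2"/"distil-large-v3"
print(fw["precision"])  # ["float32", "float16", "int8"]
assert "distil-large-v3" in get_models()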
tools/asr/fasterwhisper_asr.py

@@ -1,15 +1,16 @@
 import argparse
 import os
+import time
 import traceback
 
-os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
-os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
-
 import torch
 from faster_whisper import WhisperModel
+from huggingface_hub import snapshot_download
+from huggingface_hub.errors import LocalEntryNotFoundError
 from tqdm import tqdm
 
-from tools.asr.config import check_fw_local_models
+from tools.asr.config import get_models
+from tools.asr.funasr_asr import only_asr
 from tools.my_utils import load_cudnn
 
 # fmt: off
@@ -38,20 +39,54 @@ language_code_list = [
 # fmt: on
 
 
-def execute_asr(input_folder, output_folder, model_size, language, precision):
-    if "-local" in model_size:
-        model_size = model_size[:-6]
-        model_path = f"tools/asr/models/faster-whisper-{model_size}"
+def download_model(model_size: str):
+    if "distil" in model_size:
+        repo_id = "Systran/faster-{}-whisper-{}".format(*model_size.split("-", maxsplit=1))
     else:
-        model_path = model_size
+        repo_id = f"Systran/faster-whisper-{model_size}"
+    model_path = f"tools/asr/models/{repo_id.strip('Systran/')}"
+
+    files: list[str] = [
+        "config.json",
+        "model.bin",
+        "tokenizer.json",
+        "vocabulary.txt",
+    ]
+    if model_size == "large-v3" or "distil" in model_size:
+        files.append("preprocessor_config.json")
+        files.append("vocabulary.json")
+
+        files.remove("vocabulary.txt")
+
+    for attempt in range(2):
+        try:
+            snapshot_download(
+                repo_id=repo_id,
+                allow_patterns=files,
+                local_dir=model_path,
+            )
+            break
+        except LocalEntryNotFoundError:
+            if attempt < 1:
+                time.sleep(2)
+            else:
+                print("[ERROR] LocalEntryNotFoundError and no fallback.")
+                traceback.print_exc()
+                exit(1)
+        except Exception as e:
+            print(f"[ERROR] Unexpected error on attempt {attempt + 1}: {e}")
+            traceback.print_exc()
+            exit(1)
+
+    return model_path
+
+
+def execute_asr(input_folder, output_folder, model_path, language, precision):
     if language == "auto":
         language = None  # 不设置语种由模型自动输出概率最高的语种
-    print("loading faster whisper model:", model_size, model_path)
+    print("loading faster whisper model:", model_path)
     device = "cuda" if torch.cuda.is_available() else "cpu"
-    try:
-        model = WhisperModel(model_path, device=device, compute_type=precision)
-    except:
-        return print(traceback.format_exc())
+    model = WhisperModel(model_path, device=device, compute_type=precision)
 
     input_file_names = os.listdir(input_folder)
     input_file_names.sort()
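Note on the naming logic above: a distil size is split once on "-", so "distil-large-v3" maps to the repo Systran/faster-distil-whisper-large-v3, while plain sizes map to Systran/faster-whisper-<size>. One caveat: str.strip("Systran/") strips a character set, not a prefix; it yields the expected folder name for every size here except "medium.en", which gets clipped to "faster-whisper-medium.e" because the trailing "n" is in the stripped set, so removeprefix("Systran/") would state the intent directly. A standalone sketch that mirrors (not imports) the mapping above:

def repo_for(model_size: str) -> str:
    # Same naming rule as download_model, reproduced for illustration.
    if "distil" in model_size:
        prefix, rest = model_size.split("-", maxsplit=1)  # "distil", "large-v3"
        return f"Systran/faster-{prefix}-whisper-{rest}"
    return f"Systran/faster-whisper-{model_size}"

assert repo_for("distil-large-v3") == "Systran/faster-distil-whisper-large-v3"
assert repo_for("large-v2") == "Systran/faster-whisper-large-v2"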
@@ -73,16 +108,15 @@ def execute_asr(input_folder, output_folder, model_size, language, precision):
 
             if info.language == "zh":
                 print("检测为中文文本, 转 FunASR 处理")
-                if "only_asr" not in globals():
-                    from tools.asr.funasr_asr import only_asr  # 如果用英文就不需要导入下载模型
                 text = only_asr(file_path, language=info.language.lower())
 
             if text == "":
                 for segment in segments:
                     text += segment.text
             output.append(f"{file_path}|{output_file_name}|{info.language.upper()}|{text}")
-        except:
-            print(traceback.format_exc())
+        except Exception as e:
+            print(e)
+            traceback.print_exc()
 
     output_folder = output_folder or "output/asr_opt"
     os.makedirs(output_folder, exist_ok=True)
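Each output line is pipe-delimited (source file path, output name, detected language, transcript), matching the f-string above. A sketch of reading the resulting .list file back; the path is illustrative:

# Illustrative path only; the fields per line follow the f-string in execute_asr.
with open("output/asr_opt/asr_opt.list", encoding="utf-8") as f:
    for line in f:
        # maxsplit=3 keeps any "|" inside the transcript intact.
        file_path, name, lang, text = line.rstrip("\n").split("|", maxsplit=3)
        print(lang, file_path, text[:40])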
@@ -107,7 +141,7 @@ if __name__ == "__main__":
         "--model_size",
         type=str,
         default="large-v3",
-        choices=check_fw_local_models(),
+        choices=get_models(),
         help="Model Size of Faster Whisper",
     )
     parser.add_argument(
@@ -123,10 +157,14 @@ if __name__ == "__main__":
     )
 
     cmd = parser.parse_args()
+    model_size = cmd.model_size
+    if model_size == "large":
+        model_size = "large-v3"
+    model_path = download_model(model_size)
     output_file_path = execute_asr(
         input_folder=cmd.input_folder,
         output_folder=cmd.output_folder,
-        model_size=cmd.model_size,
+        model_path=model_path,
         language=cmd.language,
         precision=cmd.precision,
     )
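Taken together, the __main__ block now resolves "large" to "large-v3", downloads (or reuses) the checkpoint, and hands the local path to execute_asr. A programmatic equivalent as a sketch, with illustrative folder values:

# Equivalent to the CLI path above; folder values are illustrative assumptions.
from tools.asr.fasterwhisper_asr import download_model, execute_asr

model_path = download_model("distil-large-v3")
# -> "tools/asr/models/faster-distil-whisper-large-v3"
execute_asr(
    input_folder="output/slicer_opt",
    output_folder="output/asr_opt",
    model_path=model_path,
    language="auto",
    precision="float16",
)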
webui.py

@@ -86,13 +86,10 @@ from config import (
 from tools import my_utils
 from tools.my_utils import check_details, check_for_existance
 
-# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 当遇到mps不支持的步骤时使用cpu
-try:
-    import gradio.analytics as analytics
-
-    analytics.version_check = lambda: None
-except:
-    ...
+os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
+os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
+
+# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 当遇到mps不支持的步骤时使用cpu
 import gradio as gr
 
 n_cpu = cpu_count()
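Note: the HF_ENDPOINT and KMP_DUPLICATE_LIB_OK settings removed from fasterwhisper_asr.py now live here, so every subprocess the webui spawns inherits them. HF_ENDPOINT only takes effect if set before huggingface_hub is first imported in a process, since the endpoint is read into huggingface_hub.constants at import time. A quick standalone check:

import os

# Must run before any huggingface_hub import in this process.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

from huggingface_hub import constants

print(constants.ENDPOINT)  # https://hf-mirror.com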