mirror of
https://github.com/RVC-Boss/GPT-SoVITS.git
synced 2025-04-05 04:22:46 +08:00
add ASR_Model Select
This commit is contained in:
parent
3a03e8115d
commit
67f87d6306
31
tools/asr/config.py
Normal file
31
tools/asr/config.py
Normal file
@ -0,0 +1,31 @@
|
||||
import os
|
||||
|
||||
def check_fw_local_models():
    """Probe for locally downloaded Faster Whisper models at startup.

    Returns the list of supported model-size names, in fixed order; any size
    that has a snapshot directory under tools/asr/models/faster-whisper-<size>
    is suffixed with "(local)" so the UI can distinguish it.
    """
    sizes = [
        "tiny", "tiny.en",
        "base", "base.en",
        "small", "small.en",
        "medium", "medium.en",
        "large", "large-v1",
        "large-v2", "large-v3",
    ]
    return [
        size + '(local)' if os.path.exists(f'tools/asr/models/faster-whisper-{size}') else size
        for size in sizes
    ]
|
||||
|
||||
# Registry of available ASR backends, keyed by the display name shown in the
# WebUI dropdown. Each entry declares the selectable languages ('lang'), the
# selectable model sizes ('size'), and the CLI script under tools/asr/ that
# implements the backend ('path').
asr_dict = {
    "达摩 ASR (中文)": {
        'lang': ['zh'],
        'size': ['large'],
        'path': 'funasr_asr.py',
    },
    "Faster Whisper (多语种)": {
        'lang': ['auto', 'zh', 'en', 'ja'],
        # Probed at import time; locally downloaded sizes carry a "(local)" suffix.
        'size': check_fw_local_models(),
        'path': 'fasterwhisper_asr.py'
    }
}
|
||||
|
97
tools/asr/fasterwhisper_asr.py
Normal file
97
tools/asr/fasterwhisper_asr.py
Normal file
@ -0,0 +1,97 @@
|
||||
import argparse
|
||||
import os
|
||||
import traceback
|
||||
import requests
|
||||
from glob import glob
|
||||
|
||||
from faster_whisper import WhisperModel
|
||||
from tqdm import tqdm
|
||||
|
||||
from config import check_fw_local_models
|
||||
|
||||
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
|
||||
|
||||
# Language codes offered for the -l/--language CLI flag; "auto" is mapped to
# None in execute_asr() so the model picks the most probable language itself.
language_code_list = [
    "af", "am", "ar", "as", "az",
    "ba", "be", "bg", "bn", "bo",
    "br", "bs", "ca", "cs", "cy",
    "da", "de", "el", "en", "es",
    "et", "eu", "fa", "fi", "fo",
    "fr", "gl", "gu", "ha", "haw",
    "he", "hi", "hr", "ht", "hu",
    "hy", "id", "is", "it", "ja",
    "jw", "ka", "kk", "km", "kn",
    "ko", "la", "lb", "ln", "lo",
    "lt", "lv", "mg", "mi", "mk",
    "ml", "mn", "mr", "ms", "mt",
    "my", "ne", "nl", "nn", "no",
    "oc", "pa", "pl", "ps", "pt",
    "ro", "ru", "sa", "sd", "si",
    "sk", "sl", "sn", "so", "sq",
    "sr", "su", "sv", "sw", "ta",
    "te", "tg", "th", "tk", "tl",
    "tr", "tt", "uk", "ur", "uz",
    "vi", "yi", "yo", "zh", "yue",
    "auto"]
|
||||
|
||||
def execute_asr(input_folder, output_folder, model_size, language):
    """Transcribe every .wav under input_folder with Faster Whisper.

    Writes one "<wav path>|<folder name>|<LANG>|<text>" line per file to
    <output_folder>/<basename of input_folder>.list.

    Returns the absolute path of the .list file, or None when the model
    could not be loaded. Files that fail to transcribe are logged and
    skipped instead of aborting the whole batch.
    """
    if 'local' in model_size:
        # A "(local)" suffix means a snapshot under tools/asr/models.
        model_size = model_size.split('(')[0]
        model_path = f'tools/asr/models/faster-whisper-{model_size}'
    else:
        model_path = model_size
    if language == 'auto':
        language = None  # 不设置语种由模型自动输出概率最高的语种

    try:
        model = WhisperModel(model_path, device="cuda", compute_type="float16")
    except Exception:
        print(traceback.format_exc())
        return None

    output = []
    output_file_name = os.path.basename(input_folder)
    output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')

    os.makedirs(output_folder, exist_ok=True)

    for file in tqdm(glob(os.path.join(input_folder, '**/*.wav'), recursive=True)):
        try:
            segments, info = model.transcribe(
                audio = file,
                beam_size = 5,
                vad_filter = True,
                vad_parameters = dict(min_silence_duration_ms=700),
                language = language)
            text = ''.join(segment.text for segment in segments)
            output.append(f"{file}|{output_file_name}|{info.language.upper()}|{text}")
        except Exception:
            # One unreadable file must not lose the results of all the others.
            print(traceback.format_exc())

    with open(output_file_path, "w", encoding="utf-8") as f:
        f.write("\n".join(output))
        print(f"ASR 任务完成->标注文件路径: {output_file_path}\n")
    return output_file_path
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: transcribe a folder of WAVs into a .list label file.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-i", "--input_folder", type=str, required=True,
        help="Path to the folder containing WAV files.")
    arg_parser.add_argument(
        "-o", "--output_folder", type=str, required=True,
        help="Output folder to store transcriptions.")
    arg_parser.add_argument(
        "-s", "--model_size", type=str, default='large-v3',
        choices=check_fw_local_models(),
        help="Model Size of Faster Whisper")
    arg_parser.add_argument(
        "-l", "--language", type=str, default='zh',
        choices=language_code_list,
        help="Language of the audio files.")

    args = arg_parser.parse_args()
    output_file_path = execute_asr(
        input_folder=args.input_folder,
        output_folder=args.output_folder,
        model_size=args.model_size,
        language=args.language,
    )
|
66
tools/asr/funasr_asr.py
Normal file
66
tools/asr/funasr_asr.py
Normal file
@ -0,0 +1,66 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import traceback
|
||||
from tqdm import tqdm
|
||||
|
||||
from funasr import AutoModel
|
||||
|
||||
# Prefer locally downloaded model snapshots under tools/damo_asr/models;
# fall back to ModelScope hub IDs ("iic/...") so FunASR can fetch the models
# on first use when no local copy exists.
path_asr = 'tools/damo_asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'
path_vad = 'tools/damo_asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch'
path_punc = 'tools/damo_asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch'
path_asr = path_asr if os.path.exists(path_asr) else "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
path_vad = path_vad if os.path.exists(path_vad) else "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
path_punc = path_punc if os.path.exists(path_punc) else "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"

# ASR model plus VAD and punctuation-restoration models, built once at import
# time and shared by execute_asr().
model = AutoModel(
    model = path_asr,
    model_revision = "v2.0.4",
    vad_model = path_vad,
    vad_model_revision = "v2.0.4",
    punc_model = path_punc,
    punc_model_revision = "v2.0.4",
)
|
||||
|
||||
def execute_asr(input_folder, output_folder, model_size, language):
    """Transcribe every file in input_folder with the module-level FunASR model.

    model_size is accepted for interface parity with the other ASR backends
    but is not used here (FunASR offers a single model in this setup).
    Writes one "<path>|<folder name>|<LANG>|<text>" line per file to
    <output_folder>/<basename of input_folder>.list and returns that path.
    Files that fail to transcribe are logged and skipped instead of aborting
    the whole batch.
    """
    input_file_names = os.listdir(input_folder)
    input_file_names.sort()

    output = []
    output_file_name = os.path.basename(input_folder)

    for name in tqdm(input_file_names):
        try:
            text = model.generate(input="%s/%s"%(input_folder, name))[0]["text"]
            output.append(f"{input_folder}/{name}|{output_file_name}|{language.upper()}|{text}")
        except Exception:
            # One bad file must not lose the results of all the others.
            print(traceback.format_exc())

    output_folder = output_folder or "output/asr_opt"
    os.makedirs(output_folder, exist_ok=True)
    output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')

    with open(output_file_path, "w", encoding="utf-8") as f:
        f.write("\n".join(output))
        print(f"ASR 任务完成->标注文件路径: {output_file_path}\n")
    return output_file_path
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point for batch FunASR (Chinese-only) transcription.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-i", "--input_folder", type=str, required=True,
        help="Path to the folder containing WAV files.")
    arg_parser.add_argument(
        "-o", "--output_folder", type=str, required=True,
        help="Output folder to store transcriptions.")
    arg_parser.add_argument(
        "-s", "--model_size", type=str, default='large',
        help="Model Size of FunASR is Large")
    arg_parser.add_argument(
        "-l", "--language", type=str, default='zh', choices=['zh'],
        help="Language of the audio files.")

    args = arg_parser.parse_args()
    execute_asr(
        input_folder=args.input_folder,
        output_folder=args.output_folder,
        model_size=args.model_size,
        language=args.language,
    )
|
@ -1,42 +0,0 @@
|
||||
import os
|
||||
import argparse
|
||||
import os
|
||||
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
|
||||
from glob import glob
|
||||
from faster_whisper import WhisperModel
|
||||
|
||||
def main(input_folder, output_folder, output_filename, language):
    """Transcribe all .wav files under input_folder into one text file.

    Writes "<wav path>|<LANG>|<text>" lines to output_folder/output_filename.
    """
    model = WhisperModel("large-v3", device="cuda", compute_type="float16")

    output_file = os.path.join(output_folder, output_filename)
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    with open(output_file, 'w', encoding='utf-8') as f:
        for file in glob(os.path.join(input_folder, '**/*.wav'), recursive=True):
            segments, _ = model.transcribe(file, beam_size=10, vad_filter=True,
                            vad_parameters=dict(min_silence_duration_ms=700), language=language)
            segments = list(segments)

            filename = os.path.basename(file).replace('.wav', '')  # computed but unused
            directory = os.path.dirname(file)  # computed but unused

            # NOTE(review): only the first segment's text is written, so text
            # beyond the first VAD segment of a long file is dropped — confirm
            # this is intended.
            result_line = f"{file}|{language.upper()}|{segments[0].text}\n"
            f.write(result_line)
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: parse arguments and run the batch transcription.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_folder", type=str, required=True,
                        help="Path to the folder containing WAV files.")
    parser.add_argument("-o", "--output_folder", type=str, required=True, help="Output folder to store transcriptions.")
    parser.add_argument("-f", "--output_filename", type=str, default="transcriptions.txt", help="Name of the output text file.")
    # NOTE(review): the literal `...` here is Python's Ellipsis object, not a
    # language code — argparse will reject any value besides 'zh'/'en'.
    # Confirm whether a fuller choices list was intended.
    parser.add_argument("-l", "--language", type=str, default='zh', choices=['zh', 'en', ...],
                        help="Language of the audio files.")

    cmd = parser.parse_args()

    input_folder = cmd.input_folder
    output_folder = cmd.output_folder
    output_filename = cmd.output_filename
    language = cmd.language
    main(input_folder, output_folder, output_filename, language)
|
@ -1,39 +0,0 @@
|
||||
# -*- coding:utf-8 -*-
# Flat batch-ASR script: transcribes every file in the folder given as the
# single CLI argument with FunASR and writes a .list label file.

import sys,os,traceback

from funasr import AutoModel

# Strip a trailing slash so os.path.basename() yields the folder name.
dir=sys.argv[1]
if(dir[-1]=="/"):dir=dir[:-1]
# opt_name=dir.split("\\")[-1].split("/")[-1]
opt_name=os.path.basename(dir)

# Prefer local model snapshots; fall back to ModelScope hub IDs ("iic/...")
# so the models can be downloaded on first use.
path_asr='tools/damo_asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'
path_vad='tools/damo_asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch'
path_punc='tools/damo_asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch'
path_asr=path_asr if os.path.exists(path_asr)else "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
path_vad=path_vad if os.path.exists(path_vad)else "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
path_punc=path_punc if os.path.exists(path_punc)else "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"

# ASR model plus VAD and punctuation-restoration models.
model = AutoModel(model=path_asr, model_revision="v2.0.4",
                  vad_model=path_vad,
                  vad_model_revision="v2.0.4",
                  punc_model=path_punc,
                  punc_model_revision="v2.0.4",
                  )


# Transcribe each file in sorted order; failures are logged and skipped so
# one bad file does not abort the batch.
opt=[]
file_names = os.listdir(dir)
file_names.sort()
for name in file_names:
    try:
        text = model.generate(input="%s/%s"%(dir,name))[0]["text"]
        opt.append("%s/%s|%s|ZH|%s"%(dir,name,opt_name,text))
    except:
        print(traceback.format_exc())

# Write one "<path>|<folder name>|ZH|<text>" line per transcribed file.
opt_dir="output/asr_opt"
os.makedirs(opt_dir,exist_ok=True)
with open("%s/%s.list"%(opt_dir,opt_name),"w",encoding="utf-8")as f:f.write("\n".join(opt))
|
62
webui.py
62
webui.py
@ -192,20 +192,26 @@ def change_tts_inference(if_tts,bert_path,cnhubert_base_path,gpu_number,gpt_path
|
||||
p_tts_inference=None
|
||||
yield i18n("TTS推理进程已关闭")
|
||||
|
||||
|
||||
def open_asr(asr_inp_dir):
|
||||
from tools.asr.config import asr_dict
|
||||
def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang):
|
||||
global p_asr
|
||||
if(p_asr==None):
|
||||
asr_inp_dir=my_utils.clean_path(asr_inp_dir)
|
||||
cmd = '"%s" tools/damo_asr/cmd-asr.py "%s"'%(python_exec,asr_inp_dir)
|
||||
cmd = f"{python_exec} tools/asr/{asr_dict[asr_model]['path']}"
|
||||
cmd += f" -i {asr_inp_dir}"
|
||||
cmd += f" -o {asr_opt_dir}"
|
||||
cmd += f" -s {asr_model_size}"
|
||||
cmd += f" -l {asr_lang}"
|
||||
|
||||
yield "ASR任务开启:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True}
|
||||
print(cmd)
|
||||
p_asr = Popen(cmd, shell=True)
|
||||
p_asr.wait()
|
||||
p_asr=None
|
||||
yield "ASR任务完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
|
||||
yield f"ASR任务完成, 查看终端进行下一步",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
|
||||
else:
|
||||
yield "已有正在进行的ASR任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True}
|
||||
return None
|
||||
|
||||
def close_asr():
|
||||
global p_asr
|
||||
@ -674,12 +680,44 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
|
||||
with gr.Row():
|
||||
open_asr_button = gr.Button(i18n("开启离线批量ASR"), variant="primary",visible=True)
|
||||
close_asr_button = gr.Button(i18n("终止ASR进程"), variant="primary",visible=False)
|
||||
asr_inp_dir = gr.Textbox(
|
||||
label=i18n("批量ASR(中文only)输入文件夹路径"),
|
||||
value="D:\\RVC1006\\GPT-SoVITS\\raw\\xxx",
|
||||
interactive=True,
|
||||
)
|
||||
asr_info = gr.Textbox(label=i18n("ASR进程输出信息"))
|
||||
with gr.Column():
|
||||
with gr.Row():
|
||||
asr_inp_dir = gr.Textbox(
|
||||
label=i18n("输入文件夹路径"),
|
||||
value="D:\\RVC1006\\GPT-SoVITS\\raw\\xxx",
|
||||
interactive=True,
|
||||
)
|
||||
asr_opt_dir = gr.Textbox(
|
||||
label = i18n("输出文件夹路径"),
|
||||
value = "output/asr_opt",
|
||||
interactive = False,
|
||||
)
|
||||
with gr.Row():
|
||||
asr_model = gr.Dropdown(
|
||||
label = i18n("ASR 模型"),
|
||||
choices = list(asr_dict.keys()),
|
||||
interactive = True,
|
||||
)
|
||||
asr_size = gr.Dropdown(
|
||||
label = i18n("ASR 模型尺寸"),
|
||||
choices = [],
|
||||
interactive = True,
|
||||
)
|
||||
asr_lang = gr.Dropdown(
|
||||
label = i18n("ASR 语言设置"),
|
||||
choices = [],
|
||||
interactive = True,
|
||||
)
|
||||
with gr.Row():
|
||||
asr_info = gr.Textbox(label=i18n("ASR进程输出信息"))
|
||||
|
||||
def change_lang_choices(key): #根据选择的模型修改可选的语言
|
||||
return gr.Dropdown(choices=asr_dict[key]['lang'])
|
||||
def change_size_choices(key): # 根据选择的模型修改可选的模型尺寸
|
||||
return gr.Dropdown(choices=asr_dict[key]['size'])
|
||||
asr_model.change(change_lang_choices, asr_model, asr_lang)
|
||||
asr_model.change(change_size_choices, asr_model, asr_size)
|
||||
|
||||
gr.Markdown(value=i18n("0d-语音文本校对标注工具"))
|
||||
with gr.Row():
|
||||
if_label = gr.Checkbox(label=i18n("是否开启打标WebUI"),show_label=True)
|
||||
@ -691,7 +729,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
|
||||
label_info = gr.Textbox(label=i18n("打标工具进程输出信息"))
|
||||
if_label.change(change_label, [if_label,path_list], [label_info])
|
||||
if_uvr5.change(change_uvr5, [if_uvr5], [uvr5_info])
|
||||
open_asr_button.click(open_asr, [asr_inp_dir], [asr_info,open_asr_button,close_asr_button])
|
||||
open_asr_button.click(open_asr, [asr_inp_dir, asr_opt_dir, asr_model, asr_size, asr_lang], [asr_info,open_asr_button,close_asr_button])
|
||||
close_asr_button.click(close_asr, [], [asr_info,open_asr_button,close_asr_button])
|
||||
open_slicer_button.click(open_slice, [slice_inp_path,slice_opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_process], [slicer_info,open_slicer_button,close_slicer_button])
|
||||
close_slicer_button.click(close_slice, [], [slicer_info,open_slicer_button,close_slicer_button])
|
||||
@ -788,7 +826,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
|
||||
tts_info = gr.Textbox(label=i18n("TTS推理WebUI进程输出信息"))
|
||||
if_tts.change(change_tts_inference, [if_tts,bert_pretrained_dir,cnhubert_base_dir,gpu_number_1C,GPT_dropdown,SoVITS_dropdown], [tts_info])
|
||||
with gr.TabItem(i18n("2-GPT-SoVITS-变声")):gr.Markdown(value=i18n("施工中,请静候佳音"))
|
||||
app.queue(concurrency_count=511, max_size=1022).launch(
|
||||
app.queue(max_size=1022).launch(
|
||||
server_name="0.0.0.0",
|
||||
inbrowser=True,
|
||||
share=is_share,
|
||||
|
Loading…
x
Reference in New Issue
Block a user