Merge pull request #135 from LauraGPT/main

damo asr funasr1.0
RVC-Boss, 2024-01-29 23:26:09 +08:00 (committed via GitHub)
commit 056e9d8a2d
3 changed files with 16 additions and 15 deletions


@@ -65,7 +65,6 @@ bash install.sh
 pip install -r requirements.txt
 ```
 #### FFmpeg
 ##### Conda Users

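The hunk above only trims the README's install guide around the FFmpeg section. Since the ASR script updated later in this diff consumes raw audio files, a small pre-flight check that ffmpeg is actually on PATH can save a confusing failure later; this is a hypothetical sketch, not part of the PR, and the assumption that the ASR step wants ffmpeg comes from the project's ffmpeg-python requirement rather than from this diff.

```python
# Hypothetical pre-flight check (not part of this PR): confirm that ffmpeg,
# installed per the README's "FFmpeg / Conda Users" section, is reachable
# before running the tools/damo_asr transcription step.
import shutil
import sys

ffmpeg_path = shutil.which("ffmpeg")
if ffmpeg_path is None:
    sys.exit("ffmpeg not found on PATH; install it first (e.g. `conda install ffmpeg`)")
print("using ffmpeg at:", ffmpeg_path)
```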

@@ -8,7 +8,7 @@ gradio==3.38.0
 ffmpeg-python
 onnxruntime
 tqdm
-funasr==0.8.7
+funasr>=1.0.0
 cn2an
 pypinyin
 pyopenjtalk

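The only change to requirements.txt is the funasr pin, which moves the project from the 0.8.x releases (driven through the ModelScope pipeline API in the old script) to the 1.x line that ships the AutoModel API used below. A hedged guard like the following, not part of this PR, can catch an environment that was installed before this bump:

```python
# Hypothetical version guard (not part of this PR): fail early if the
# environment still carries funasr 0.8.x, which the old modelscope-pipeline
# code path expected and the updated script below no longer supports.
from importlib.metadata import PackageNotFoundError, version

try:
    installed = version("funasr")
except PackageNotFoundError:
    raise SystemExit("funasr is not installed; run `pip install -r requirements.txt` first")

if int(installed.split(".")[0]) < 1:
    raise SystemExit(f"funasr {installed} found, but the updated ASR script needs funasr>=1.0.0")
print(f"funasr {installed} satisfies funasr>=1.0.0")
```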

@@ -1,8 +1,9 @@
 # -*- coding:utf-8 -*-
-from modelscope.pipelines import pipeline
-from modelscope.utils.constant import Tasks
 import sys,os,traceback
+from funasr import AutoModel
 dir=sys.argv[1]
 # opt_name=dir.split("\\")[-1].split("/")[-1]
 opt_name=os.path.basename(dir)
@@ -10,20 +11,22 @@ opt_name=os.path.basename(dir)
 path_asr='tools/damo_asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'
 path_vad='tools/damo_asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch'
 path_punc='tools/damo_asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch'
-path_asr=path_asr if os.path.exists(path_asr)else "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-path_vad=path_vad if os.path.exists(path_vad)else "damo/speech_fsmn_vad_zh-cn-16k-common-pytorch"
-path_punc=path_punc if os.path.exists(path_punc)else "damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
+path_asr=path_asr if os.path.exists(path_asr)else "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+path_vad=path_vad if os.path.exists(path_vad)else "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
+path_punc=path_punc if os.path.exists(path_punc)else "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
-inference_pipeline = pipeline(
-    task=Tasks.auto_speech_recognition,
-    model=path_asr,
-    vad_model=path_vad,
-    punc_model=path_punc,
-)
+model = AutoModel(model=path_asr, model_revision="v2.0.4",
+                  vad_model=path_vad,
+                  vad_model_revision="v2.0.4",
+                  punc_model=path_punc,
+                  punc_model_revision="v2.0.4",
+)
 opt=[]
 for name in os.listdir(dir):
     try:
-        text = inference_pipeline(audio_in="%s/%s"%(dir,name))["text"]
+        text = model.generate(input="%s/%s"%(dir,name))[0]["text"]
         opt.append("%s/%s|%s|ZH|%s"%(dir,name,opt_name,text))
     except:
         print(traceback.format_exc())
@@ -31,4 +34,3 @@ for name in os.listdir(dir):
 opt_dir="output/asr_opt"
 os.makedirs(opt_dir,exist_ok=True)
 with open("%s/%s.list"%(opt_dir,opt_name),"w",encoding="utf-8")as f:f.write("\n".join(opt))
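
Pulled out of the diff context, the funasr>=1.0 code path introduced above reduces to the following standalone sketch. The model directories, ModelScope hub ids ("iic/…"), and revisions are exactly those in the diff; the input audio path is a placeholder.

```python
# Standalone sketch of the funasr>=1.0 usage introduced in this PR: build an
# AutoModel with Paraformer ASR plus VAD and punctuation models, falling back
# to the ModelScope hub ids when the local copies under tools/damo_asr/models/
# are absent, then transcribe a single file (placeholder path).
import os
from funasr import AutoModel

path_asr  = "tools/damo_asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
path_vad  = "tools/damo_asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch"
path_punc = "tools/damo_asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"

path_asr  = path_asr  if os.path.exists(path_asr)  else "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
path_vad  = path_vad  if os.path.exists(path_vad)  else "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
path_punc = path_punc if os.path.exists(path_punc) else "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"

model = AutoModel(
    model=path_asr, model_revision="v2.0.4",
    vad_model=path_vad, vad_model_revision="v2.0.4",
    punc_model=path_punc, punc_model_revision="v2.0.4",
)

# model.generate() returns a list of result dicts, one per input; the
# recognized text (with punctuation restored) sits under the "text" key.
result = model.generate(input="my_audio_dir/example.wav")  # placeholder path
print(result[0]["text"])
```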