From c36d0a93fe9760efd390a916650019d09171f4a1 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Sat, 27 Apr 2024 01:27:57 +0800
Subject: [PATCH] API inference: add multi-process requests
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Batch inference now fans requests out across multiple processes:
generate_audio_files is replaced by generate_audio_files_parallel, which
splits the emotion list across a ProcessPoolExecutor. Timing and progress
logs include the process ID, and the webui startup code is wrapped in an
`if __name__ == '__main__':` guard so that child processes can re-import
the module safely.
---
 Ref_Audio_Selector/common/time_util.py        |   3 +-
 Ref_Audio_Selector/config_param/log_config.py |   1 +
 .../ref_audio_selector_webui.py               | 340 +++++++++---------
 Ref_Audio_Selector/tool/audio_inference.py    |  37 +-
 4 files changed, 209 insertions(+), 172 deletions(-)

diff --git a/Ref_Audio_Selector/common/time_util.py b/Ref_Audio_Selector/common/time_util.py
index 82d63b8..b58ce3e 100644
--- a/Ref_Audio_Selector/common/time_util.py
+++ b/Ref_Audio_Selector/common/time_util.py
@@ -1,4 +1,5 @@
 import time
+import os
 from Ref_Audio_Selector.config_param.log_config import p_logger
 import Ref_Audio_Selector.config_param.config_params as params
 
@@ -26,7 +27,7 @@ def timeit_decorator(func):
         elapsed_time = end_time - start_time  # 计算执行耗时
 
         # 记录日志内容
-        log_message = f"{func.__name__} 执行耗时: {elapsed_time:.6f} 秒"
+        log_message = f"进程ID: {os.getpid()}, {func.__name__} 执行耗时: {elapsed_time:.6f} 秒"
         p_logger.info(log_message)
 
         return func_result

diff --git a/Ref_Audio_Selector/config_param/log_config.py b/Ref_Audio_Selector/config_param/log_config.py
index b10e5c9..1cb1fd8 100644
--- a/Ref_Audio_Selector/config_param/log_config.py
+++ b/Ref_Audio_Selector/config_param/log_config.py
@@ -13,6 +13,7 @@ def create_general_logger():
     # 可以设置控制台输出的格式
     console_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
     console_handler.setFormatter(console_formatter)
+    console_handler.encoding = 'utf-8'  # 设置字符编码为utf-8
 
     # 创建一个用于常规日志的处理器
     general_handler = logging.FileHandler(f"{params.log_dir}/{current_date}.log", mode='a', encoding='utf-8')

diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py
index f8bc8eb..0589c75 100644
--- a/Ref_Audio_Selector/ref_audio_selector_webui.py
+++ b/Ref_Audio_Selector/ref_audio_selector_webui.py
@@ -184,9 +184,10 @@ def model_inference(text_work_space_dir, text_role, text_model_inference_voice_d
     if len(ref_audio_manager.get_audio_list()) == 0:
         raise Exception("待推理的参考音频不能为空")
-    time_consuming, _ = time_util.time_monitor(audio_inference.generate_audio_files)(url_composer, text_list,
-                                                                                     ref_audio_manager.get_ref_audio_list(),
-                                                                                     inference_dir)
+    time_consuming, _ = time_util.time_monitor(audio_inference.generate_audio_files_parallel)(url_composer,
+                                                                                              text_list,
+                                                                                              ref_audio_manager.get_ref_audio_list(),
+                                                                                              inference_dir, 3)
 
     text_model_inference_info = f"耗时:{time_consuming:0.1f}秒;推理成功:生成目录{inference_dir}"
 
@@ -449,171 +450,176 @@ def save_role(text_role):
     rw_param.write(rw_param.role, text_role)
 
 
-default_work_space_dir = rw_param.read(rw_param.work_dir)
-default_role = rw_param.read(rw_param.role)
-default_base_dir = os.path.join(default_work_space_dir, default_role)
+if __name__ == '__main__':
+    default_work_space_dir = rw_param.read(rw_param.work_dir)
+    default_role = rw_param.read(rw_param.role)
+    default_base_dir = os.path.join(default_work_space_dir, default_role)
 
-with gr.Blocks() as app:
-    gr.Markdown(value=i18n("基本介绍:这是一个从训练素材中,批量提取参考音频,并进行效果评估与配置生成的工具"))
-    with gr.Row():
-        text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"),
-                                      value=default_work_space_dir)
-        text_role = gr.Text(label=i18n("角色名称"), value=default_role)
-        text_work_space_dir.input(save_work_dir, 
[text_work_space_dir, text_role], [text_role]) - text_role.input(save_role, [text_role], []) - with gr.Tab(label=i18n("第一步:基于训练素材,生成待选参考音频列表"), open=False): - gr.Markdown(value=i18n("1.1:选择list文件,并提取3-10秒的素材作为参考候选")) - text_list_input = gr.Text(label=i18n("请输入list文件路径"), value="") + with gr.Blocks() as app: + gr.Markdown(value=i18n("基本介绍:这是一个从训练素材中,批量提取参考音频,并进行效果评估与配置生成的工具")) with gr.Row(): - button_convert_from_list = gr.Button(i18n("开始生成待参考列表"), variant="primary") - text_convert_from_list_info = gr.Text(label=i18n("参考列表生成结果"), value="", interactive=False) - gr.Markdown(value=i18n("1.2:选择基准音频,执行相似度匹配,并分段随机抽样")) - default_sample_dir = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.list_to_convert_reference_audio_dir)) - text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value=default_sample_dir, interactive=True) - button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_role, text_list_input], - [text_convert_from_list_info, text_sample_dir]) - with gr.Row(): - text_base_voice_path = gr.Text(label=i18n("请输入基准音频路径"), value="") - text_subsection_num = gr.Text(label=i18n("请输入分段数"), value="10") - text_sample_num = gr.Text(label=i18n("请输入每段随机抽样个数"), value="4") - checkbox_similarity_output = gr.Checkbox(label=i18n("是否将相似度匹配结果输出到临时目录?"), show_label=True) - with gr.Row(): - button_sample = gr.Button(i18n("开始分段随机抽样"), variant="primary") - text_sample_info = gr.Text(label=i18n("分段随机抽样结果"), value="", interactive=False) - with gr.Tab(label=i18n("第二步:基于参考音频和测试文本,执行批量推理"), open=False): - gr.Markdown(value=i18n("2.1:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步," - "将参考音频打包配置到推理服务下,在推理前,请确认完整请求地址是否与正常使用时的一致,包括角色名称,尤其是文本分隔符是否正确")) - default_model_inference_voice_dir = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.reference_audio_dir)) - text_model_inference_voice_dir = gr.Text(label=i18n("待推理的参考音频所在目录"), - value=default_model_inference_voice_dir, interactive=True) - text_url = gr.Text(label=i18n("请输入推理服务请求地址与参数"), value=rw_param.read(rw_param.generate_audio_url)) - with gr.Row(): - text_text = gr.Text(label=i18n("请输入文本参数名"), value=rw_param.read(rw_param.text_param)) - text_ref_path = gr.Text(label=i18n("请输入参考音频路径参数名"), - value=rw_param.read(rw_param.ref_path_param)) - text_ref_text = gr.Text(label=i18n("请输入参考音频文本参数名"), - value=rw_param.read(rw_param.ref_text_param)) - text_emotion = gr.Text(label=i18n("请输入角色情绪参数名"), value=rw_param.read(rw_param.emotion_param)) - text_whole_url = gr.Text(label=i18n("完整地址"), value="", interactive=False) - text_url.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], - [text_whole_url]) - text_url.blur(save_generate_audio_url, [text_url], []) - text_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], - [text_whole_url]) - text_text.blur(save_text_param, [text_text], []) - text_ref_path.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], - [text_whole_url]) - text_ref_path.blur(save_ref_path_param, [text_ref_path], []) - text_ref_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], - [text_whole_url]) - text_ref_text.blur(save_ref_text_param, [text_ref_text], []) - text_emotion.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], - [text_whole_url]) - text_emotion.blur(save_emotion_param, [text_emotion], []) - gr.Markdown(value=i18n("2.2:配置待推理文本,一句一行,不要太多,10条即可")) - default_test_content_path = params.default_test_text_path - 
text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=default_test_content_path) - gr.Markdown(value=i18n("2.3:启动推理服务,如果还没启动的话")) - gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的")) - with gr.Row(): - button_model_inference = gr.Button(i18n("开启批量推理"), variant="primary") - text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) - with gr.Tab(label=i18n("第三步:进行参考音频效果校验与筛选"), open=False): - gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本")) - default_asr_audio_dir = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.inference_audio_dir, params.inference_audio_text_aggregation_dir)) - text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value=default_asr_audio_dir, interactive=True) - with gr.Row(): - dropdown_asr_model = gr.Dropdown( - label=i18n("ASR 模型"), - choices=[], - interactive=True, - value="达摩 ASR (中文)" - ) - dropdown_asr_size = gr.Dropdown( - label=i18n("ASR 模型尺寸"), - choices=["large"], - interactive=True, - value="large" - ) - dropdown_asr_lang = gr.Dropdown( - label=i18n("ASR 语言设置"), - choices=["zh"], - interactive=True, - value="zh" - ) - with gr.Row(): - button_asr = gr.Button(i18n("启动asr"), variant="primary") - text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False) - gr.Markdown(value=i18n("3.2:启动文本相似度分析")) - default_text_similarity_analysis_path = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.asr_filename + '.list')) - text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), - value=default_text_similarity_analysis_path, interactive=True) - button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, - dropdown_asr_size, dropdown_asr_lang], - [text_asr_info, text_text_similarity_analysis_path]) - with gr.Row(): - button_text_similarity_analysis = gr.Button(i18n("启动文本相似度分析"), variant="primary") - text_text_similarity_analysis_info = gr.Text(label=i18n("文本相似度分析结果"), value="", interactive=False) - button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_role, - text_text_similarity_analysis_path], - [text_text_similarity_analysis_info]) - gr.Markdown(value=i18n("3.3:根据相似度分析结果,重点检查最后几条是否存在复读等问题")) - gr.Markdown(value=i18n("3.4:对结果按音频相似度排序,筛选低音质音频")) - with gr.Row(): - text_base_audio_path = gr.Text(label=i18n("请输入基准音频"), value="") - text_compare_audio_dir = gr.Text(label=i18n("请输入待比较的音频文件目录"), value="") - with gr.Row(): - button_similarity_audio_output = gr.Button(i18n("输出相似度-参考音频到临时目录"), variant="primary") - text_similarity_audio_output_info = gr.Text(label=i18n("输出结果"), value="", interactive=False) - button_similarity_audio_output.click(similarity_audio_output, - [text_work_space_dir, text_role, text_base_audio_path, - text_compare_audio_dir], [text_similarity_audio_output_info]) - with gr.Row(): - default_sync_ref_audio_dir = common.check_path_existence_and_return( + text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"), + value=default_work_space_dir) + text_role = gr.Text(label=i18n("角色名称"), value=default_role) + text_work_space_dir.input(save_work_dir, [text_work_space_dir, text_role], [text_role]) + text_role.input(save_role, [text_role], []) + with gr.Tab(label=i18n("第一步:基于训练素材,生成待选参考音频列表"), open=False): + gr.Markdown(value=i18n("1.1:选择list文件,并提取3-10秒的素材作为参考候选")) + text_list_input = gr.Text(label=i18n("请输入list文件路径"), value="") + with gr.Row(): + button_convert_from_list = gr.Button(i18n("开始生成待参考列表"), variant="primary") + text_convert_from_list_info = 
gr.Text(label=i18n("参考列表生成结果"), value="", interactive=False) + gr.Markdown(value=i18n("1.2:选择基准音频,执行相似度匹配,并分段随机抽样")) + default_sample_dir = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.list_to_convert_reference_audio_dir)) + text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value=default_sample_dir, interactive=True) + button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_role, text_list_input], + [text_convert_from_list_info, text_sample_dir]) + with gr.Row(): + text_base_voice_path = gr.Text(label=i18n("请输入基准音频路径"), value="") + text_subsection_num = gr.Text(label=i18n("请输入分段数"), value="10") + text_sample_num = gr.Text(label=i18n("请输入每段随机抽样个数"), value="4") + checkbox_similarity_output = gr.Checkbox(label=i18n("是否将相似度匹配结果输出到临时目录?"), + show_label=True) + with gr.Row(): + button_sample = gr.Button(i18n("开始分段随机抽样"), variant="primary") + text_sample_info = gr.Text(label=i18n("分段随机抽样结果"), value="", interactive=False) + with gr.Tab(label=i18n("第二步:基于参考音频和测试文本,执行批量推理"), open=False): + gr.Markdown(value=i18n("2.1:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步," + "将参考音频打包配置到推理服务下,在推理前,请确认完整请求地址是否与正常使用时的一致,包括角色名称,尤其是文本分隔符是否正确")) + default_model_inference_voice_dir = common.check_path_existence_and_return( os.path.join(default_base_dir, params.reference_audio_dir)) - text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir, - interactive=True) - default_sync_inference_audio_dir = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.inference_audio_dir)) - text_sync_inference_audio_dir = gr.Text(label=i18n("被同步的推理音频路径"), - value=default_sync_inference_audio_dir, interactive=True) - with gr.Row(): - button_sync_ref_audio = gr.Button(i18n("将参考音频的删除情况,同步到推理音频目录"), variant="primary") - text_sync_ref_info = gr.Text(label=i18n("同步结果"), value="", interactive=False) - button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_role, text_sync_ref_audio_dir, - text_sync_inference_audio_dir], [text_sync_ref_info]) - with gr.Tab("第四步:生成参考音频配置文本", open=False): - gr.Markdown(value=i18n("4.1:编辑模板")) - default_template_path = params.default_template_path - default_template_content = common.read_file(default_template_path) - text_template_path = gr.Text(label=i18n("模板文件路径"), value=default_template_path, interactive=True) - text_template = gr.Text(label=i18n("模板内容"), value=default_template_content, lines=10) - gr.Markdown(value=i18n("4.2:生成配置")) - default_sync_ref_audio_dir2 = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.reference_audio_dir)) - text_sync_ref_audio_dir2 = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir2, - interactive=True) - with gr.Row(): - button_create_config = gr.Button(i18n("生成配置"), variant="primary") - text_create_config_info = gr.Text(label=i18n("生成结果"), value="", interactive=False) - button_create_config.click(create_config, - [text_work_space_dir, text_role, text_template, text_sync_ref_audio_dir2], - [text_create_config_info]) - button_sample.click(sample, [text_work_space_dir, text_role, text_sample_dir, text_base_voice_path, - text_subsection_num, text_sample_num, checkbox_similarity_output], - [text_sample_info, text_model_inference_voice_dir, text_sync_ref_audio_dir, - text_sync_ref_audio_dir2]) - button_model_inference.click(model_inference, - [text_work_space_dir, text_role, text_model_inference_voice_dir, text_url, - text_text, text_ref_path, text_ref_text, text_emotion, - text_test_content], - 
[text_model_inference_info, text_asr_audio_dir, text_sync_inference_audio_dir]) + text_model_inference_voice_dir = gr.Text(label=i18n("待推理的参考音频所在目录"), + value=default_model_inference_voice_dir, interactive=True) + text_url = gr.Text(label=i18n("请输入推理服务请求地址与参数"), + value=rw_param.read(rw_param.generate_audio_url)) + with gr.Row(): + text_text = gr.Text(label=i18n("请输入文本参数名"), value=rw_param.read(rw_param.text_param)) + text_ref_path = gr.Text(label=i18n("请输入参考音频路径参数名"), + value=rw_param.read(rw_param.ref_path_param)) + text_ref_text = gr.Text(label=i18n("请输入参考音频文本参数名"), + value=rw_param.read(rw_param.ref_text_param)) + text_emotion = gr.Text(label=i18n("请输入角色情绪参数名"), value=rw_param.read(rw_param.emotion_param)) + text_whole_url = gr.Text(label=i18n("完整地址"), value="", interactive=False) + text_url.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + [text_whole_url]) + text_url.blur(save_generate_audio_url, [text_url], []) + text_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + [text_whole_url]) + text_text.blur(save_text_param, [text_text], []) + text_ref_path.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + [text_whole_url]) + text_ref_path.blur(save_ref_path_param, [text_ref_path], []) + text_ref_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + [text_whole_url]) + text_ref_text.blur(save_ref_text_param, [text_ref_text], []) + text_emotion.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + [text_whole_url]) + text_emotion.blur(save_emotion_param, [text_emotion], []) + gr.Markdown(value=i18n("2.2:配置待推理文本,一句一行,不要太多,10条即可")) + default_test_content_path = params.default_test_text_path + text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=default_test_content_path) + gr.Markdown(value=i18n("2.3:启动推理服务,如果还没启动的话")) + gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的")) + with gr.Row(): + button_model_inference = gr.Button(i18n("开启批量推理"), variant="primary") + text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) + with gr.Tab(label=i18n("第三步:进行参考音频效果校验与筛选"), open=False): + gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本")) + default_asr_audio_dir = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.inference_audio_dir, params.inference_audio_text_aggregation_dir)) + text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value=default_asr_audio_dir, + interactive=True) + with gr.Row(): + dropdown_asr_model = gr.Dropdown( + label=i18n("ASR 模型"), + choices=[], + interactive=True, + value="达摩 ASR (中文)" + ) + dropdown_asr_size = gr.Dropdown( + label=i18n("ASR 模型尺寸"), + choices=["large"], + interactive=True, + value="large" + ) + dropdown_asr_lang = gr.Dropdown( + label=i18n("ASR 语言设置"), + choices=["zh"], + interactive=True, + value="zh" + ) + with gr.Row(): + button_asr = gr.Button(i18n("启动asr"), variant="primary") + text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False) + gr.Markdown(value=i18n("3.2:启动文本相似度分析")) + default_text_similarity_analysis_path = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.asr_filename + '.list')) + text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), + value=default_text_similarity_analysis_path, interactive=True) + button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, + dropdown_asr_size, dropdown_asr_lang], + 
[text_asr_info, text_text_similarity_analysis_path]) + with gr.Row(): + button_text_similarity_analysis = gr.Button(i18n("启动文本相似度分析"), variant="primary") + text_text_similarity_analysis_info = gr.Text(label=i18n("文本相似度分析结果"), value="", + interactive=False) + button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_role, + text_text_similarity_analysis_path], + [text_text_similarity_analysis_info]) + gr.Markdown(value=i18n("3.3:根据相似度分析结果,重点检查最后几条是否存在复读等问题")) + gr.Markdown(value=i18n("3.4:对结果按音频相似度排序,筛选低音质音频")) + with gr.Row(): + text_base_audio_path = gr.Text(label=i18n("请输入基准音频"), value="") + text_compare_audio_dir = gr.Text(label=i18n("请输入待比较的音频文件目录"), value="") + with gr.Row(): + button_similarity_audio_output = gr.Button(i18n("输出相似度-参考音频到临时目录"), variant="primary") + text_similarity_audio_output_info = gr.Text(label=i18n("输出结果"), value="", interactive=False) + button_similarity_audio_output.click(similarity_audio_output, + [text_work_space_dir, text_role, text_base_audio_path, + text_compare_audio_dir], [text_similarity_audio_output_info]) + with gr.Row(): + default_sync_ref_audio_dir = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.reference_audio_dir)) + text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir, + interactive=True) + default_sync_inference_audio_dir = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.inference_audio_dir)) + text_sync_inference_audio_dir = gr.Text(label=i18n("被同步的推理音频路径"), + value=default_sync_inference_audio_dir, interactive=True) + with gr.Row(): + button_sync_ref_audio = gr.Button(i18n("将参考音频的删除情况,同步到推理音频目录"), variant="primary") + text_sync_ref_info = gr.Text(label=i18n("同步结果"), value="", interactive=False) + button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_role, text_sync_ref_audio_dir, + text_sync_inference_audio_dir], [text_sync_ref_info]) + with gr.Tab("第四步:生成参考音频配置文本", open=False): + gr.Markdown(value=i18n("4.1:编辑模板")) + default_template_path = params.default_template_path + default_template_content = common.read_file(default_template_path) + text_template_path = gr.Text(label=i18n("模板文件路径"), value=default_template_path, interactive=True) + text_template = gr.Text(label=i18n("模板内容"), value=default_template_content, lines=10) + gr.Markdown(value=i18n("4.2:生成配置")) + default_sync_ref_audio_dir2 = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.reference_audio_dir)) + text_sync_ref_audio_dir2 = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir2, + interactive=True) + with gr.Row(): + button_create_config = gr.Button(i18n("生成配置"), variant="primary") + text_create_config_info = gr.Text(label=i18n("生成结果"), value="", interactive=False) + button_create_config.click(create_config, + [text_work_space_dir, text_role, text_template, text_sync_ref_audio_dir2], + [text_create_config_info]) + button_sample.click(sample, [text_work_space_dir, text_role, text_sample_dir, text_base_voice_path, + text_subsection_num, text_sample_num, checkbox_similarity_output], + [text_sample_info, text_model_inference_voice_dir, text_sync_ref_audio_dir, + text_sync_ref_audio_dir2]) + button_model_inference.click(model_inference, + [text_work_space_dir, text_role, text_model_inference_voice_dir, text_url, + text_text, text_ref_path, text_ref_text, text_emotion, + text_test_content], + [text_model_inference_info, text_asr_audio_dir, text_sync_inference_audio_dir]) -app.launch( - 
server_port=9423, - quiet=True, -) + app.launch( + server_port=9423, + quiet=True, + ) diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index 7f6e3cc..b146e28 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -1,12 +1,20 @@ +import time import os import requests import itertools +import multiprocessing +from multiprocessing import Pool +from concurrent.futures import ProcessPoolExecutor +import numpy as np import Ref_Audio_Selector.config_param.config_params as params from Ref_Audio_Selector.common.time_util import timeit_decorator from urllib.parse import urlparse, parse_qs, urlencode, urlunparse, quote -from Ref_Audio_Selector.config_param.log_config import logger +from Ref_Audio_Selector.config_param.log_config import logger, p_logger +# 假设手动指定端口范围为9400-9500 +available_ports = list(range(9400, 9500)) + class URLComposer: def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param_name, ref_text_param_name): self.base_url = base_url @@ -74,8 +82,24 @@ def safe_encode_query_params(original_url): return encoded_url -@timeit_decorator -def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path): +def generate_audio_files_parallel(url_composer, text_list, emotion_list, output_dir_path, num_processes=None): + if num_processes is None: + num_processes = multiprocessing.cpu_count() + + num_processes = min(num_processes, len(available_ports)) # 限制进程数不超过可用端口数 + + # 将emotion_list均匀分成num_processes个子集 + emotion_groups = np.array_split(emotion_list, num_processes) + + with ProcessPoolExecutor(max_workers=num_processes) as executor: + futures = [executor.submit(generate_audio_files_for_emotion_group, url_composer, text_list, group, output_dir_path) + for group in emotion_groups] + for future in futures: + future.result() # 等待所有进程完成 + + +def generate_audio_files_for_emotion_group(url_composer, text_list, emotion_list, output_dir_path): + start_time = time.perf_counter() # 使用 perf_counter 获取高精度计时起点 # Ensure the output directory exists output_dir = os.path.abspath(output_dir_path) os.makedirs(output_dir, exist_ok=True) @@ -108,7 +132,7 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path) # 检查是否已经存在对应的音频文件,如果存在则跳过 if os.path.exists(text_subdir_text_file_path) and os.path.exists(emotion_subdir_emotion_file_path): has_generated_count += 1 - logger.info(f"进度: {has_generated_count}/{all_count}") + logger.info(f"进程ID: {os.getpid()}, 进度: {has_generated_count}/{all_count}") continue if url_composer.is_emotion(): @@ -126,6 +150,11 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path) has_generated_count += 1 logger.info(f"进度: {has_generated_count}/{all_count}") + end_time = time.perf_counter() # 获取计时终点 + elapsed_time = end_time - start_time # 计算执行耗时 + # 记录日志内容 + log_message = f"进程ID: {os.getpid()}, generate_audio_files_for_emotion_group 执行耗时: {elapsed_time:.6f} 秒" + p_logger.info(log_message) def inference_audio_from_api(url):
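
Usage note (supplementary sketch, not part of the commit): the new entry
point can also be driven directly, assuming the Ref_Audio_Selector package
is importable and a GPT-SoVITS-style inference service is reachable. The
base URL, parameter names, texts, and output directory below are
illustrative placeholders; only URLComposer, generate_audio_files_parallel,
and their signatures come from this patch.

    from Ref_Audio_Selector.tool.audio_inference import (
        URLComposer,
        generate_audio_files_parallel,
    )

    if __name__ == '__main__':
        # The guard matters: ProcessPoolExecutor starts child processes that
        # re-import __main__ (the same reason this patch wraps the webui code).
        url_composer = URLComposer(
            base_url='http://127.0.0.1:9880',  # hypothetical service address
            emotion_param_name='',             # unused here: ref path/text mode
            text_param_name='text',
            ref_path_param_name='ref_wav_path',
            ref_text_param_name='prompt_text',
        )
        text_list = ['今天天气不错。', '明天可能会下雨。']
        emotion_list = [...]  # ref-audio entries produced by the selector; structure elided
        generate_audio_files_parallel(url_composer, text_list, emotion_list,
                                      'output/inference_audio', num_processes=3)

Note that generate_audio_files_parallel defaults num_processes to
multiprocessing.cpu_count() when it is None and always caps it at
len(available_ports), so the effective worker count may be lower than
requested.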