api推理,添加多进程请求

This commit is contained in:
Downupanddownup 2024-04-27 01:27:57 +08:00
parent 2a23f95f61
commit c36d0a93fe
4 changed files with 209 additions and 172 deletions

View File

@ -1,4 +1,5 @@
import time
import os
from Ref_Audio_Selector.config_param.log_config import p_logger
import Ref_Audio_Selector.config_param.config_params as params
@ -26,7 +27,7 @@ def timeit_decorator(func):
elapsed_time = end_time - start_time # 计算执行耗时
# 记录日志内容
log_message = f"{func.__name__} 执行耗时: {elapsed_time:.6f}"
log_message = f"进程ID: {os.getpid()}, {func.__name__} 执行耗时: {elapsed_time:.6f}"
p_logger.info(log_message)
return func_result

View File

@ -13,6 +13,7 @@ def create_general_logger():
# 可以设置控制台输出的格式
console_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
console_handler.setFormatter(console_formatter)
console_handler.encoding = 'utf-8' # 设置字符编码为utf-8
# 创建一个用于常规日志的处理器
general_handler = logging.FileHandler(f"{params.log_dir}/{current_date}.log", mode='a', encoding='utf-8')

View File

@ -184,9 +184,10 @@ def model_inference(text_work_space_dir, text_role, text_model_inference_voice_d
if len(ref_audio_manager.get_audio_list()) == 0:
raise Exception("待推理的参考音频不能为空")
time_consuming, _ = time_util.time_monitor(audio_inference.generate_audio_files)(url_composer, text_list,
time_consuming, _ = time_util.time_monitor(audio_inference.generate_audio_files_parallel)(url_composer,
text_list,
ref_audio_manager.get_ref_audio_list(),
inference_dir)
inference_dir, 3)
text_model_inference_info = f"耗时:{time_consuming:0.1f}秒;推理成功:生成目录{inference_dir}"
@ -449,6 +450,7 @@ def save_role(text_role):
rw_param.write(rw_param.role, text_role)
if __name__ == '__main__':
default_work_space_dir = rw_param.read(rw_param.work_dir)
default_role = rw_param.read(rw_param.role)
default_base_dir = os.path.join(default_work_space_dir, default_role)
@ -477,7 +479,8 @@ with gr.Blocks() as app:
text_base_voice_path = gr.Text(label=i18n("请输入基准音频路径"), value="")
text_subsection_num = gr.Text(label=i18n("请输入分段数"), value="10")
text_sample_num = gr.Text(label=i18n("请输入每段随机抽样个数"), value="4")
checkbox_similarity_output = gr.Checkbox(label=i18n("是否将相似度匹配结果输出到临时目录?"), show_label=True)
checkbox_similarity_output = gr.Checkbox(label=i18n("是否将相似度匹配结果输出到临时目录?"),
show_label=True)
with gr.Row():
button_sample = gr.Button(i18n("开始分段随机抽样"), variant="primary")
text_sample_info = gr.Text(label=i18n("分段随机抽样结果"), value="", interactive=False)
@ -488,7 +491,8 @@ with gr.Blocks() as app:
os.path.join(default_base_dir, params.reference_audio_dir))
text_model_inference_voice_dir = gr.Text(label=i18n("待推理的参考音频所在目录"),
value=default_model_inference_voice_dir, interactive=True)
text_url = gr.Text(label=i18n("请输入推理服务请求地址与参数"), value=rw_param.read(rw_param.generate_audio_url))
text_url = gr.Text(label=i18n("请输入推理服务请求地址与参数"),
value=rw_param.read(rw_param.generate_audio_url))
with gr.Row():
text_text = gr.Text(label=i18n("请输入文本参数名"), value=rw_param.read(rw_param.text_param))
text_ref_path = gr.Text(label=i18n("请输入参考音频路径参数名"),
@ -524,7 +528,8 @@ with gr.Blocks() as app:
gr.Markdown(value=i18n("3.1启动asr获取推理音频文本"))
default_asr_audio_dir = common.check_path_existence_and_return(
os.path.join(default_base_dir, params.inference_audio_dir, params.inference_audio_text_aggregation_dir))
text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value=default_asr_audio_dir, interactive=True)
text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value=default_asr_audio_dir,
interactive=True)
with gr.Row():
dropdown_asr_model = gr.Dropdown(
label=i18n("ASR 模型"),
@ -557,7 +562,8 @@ with gr.Blocks() as app:
[text_asr_info, text_text_similarity_analysis_path])
with gr.Row():
button_text_similarity_analysis = gr.Button(i18n("启动文本相似度分析"), variant="primary")
text_text_similarity_analysis_info = gr.Text(label=i18n("文本相似度分析结果"), value="", interactive=False)
text_text_similarity_analysis_info = gr.Text(label=i18n("文本相似度分析结果"), value="",
interactive=False)
button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_role,
text_text_similarity_analysis_path],
[text_text_similarity_analysis_info])

View File

@ -1,12 +1,20 @@
import time
import os
import requests
import itertools
import multiprocessing
from multiprocessing import Pool
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import Ref_Audio_Selector.config_param.config_params as params
from Ref_Audio_Selector.common.time_util import timeit_decorator
from urllib.parse import urlparse, parse_qs, urlencode, urlunparse, quote
from Ref_Audio_Selector.config_param.log_config import logger
from Ref_Audio_Selector.config_param.log_config import logger, p_logger
# Manually designated port pool assumed free for worker use: 9400-9499 (100 ports).
# Its length is used to cap the number of parallel inference processes.
available_ports = list(range(9400, 9500))
class URLComposer:
def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param_name, ref_text_param_name):
self.base_url = base_url
@ -74,8 +82,24 @@ def safe_encode_query_params(original_url):
return encoded_url
@timeit_decorator
def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path):
def generate_audio_files_parallel(url_composer, text_list, emotion_list, output_dir_path, num_processes=None):
    """Distribute audio generation for `emotion_list` across worker processes.

    Each worker runs `generate_audio_files_for_emotion_group` on one contiguous
    chunk of `emotion_list`, generating every text in `text_list` for every
    emotion/reference-audio entry in its chunk.

    Args:
        url_composer: URLComposer used to build the inference request URLs.
        text_list: texts to synthesize for each emotion entry.
        emotion_list: emotion / reference-audio entries to split over workers.
        output_dir_path: directory the generated audio files are written into.
        num_processes: worker count; defaults to the CPU count. Always capped
            by the number of available ports and the number of work items.

    Raises:
        Whatever exception any worker raised (re-raised by `future.result()`).
    """
    if not emotion_list:
        return  # nothing to generate; avoid spawning idle workers
    if num_processes is None:
        num_processes = multiprocessing.cpu_count()
    # Never spawn more workers than ports or work items; at least one worker.
    num_processes = max(1, min(num_processes, len(available_ports), len(emotion_list)))

    # Evenly partition emotion_list into contiguous plain-list chunks
    # (the first `extra` chunks get one additional element). Plain slices
    # keep the original entry objects intact, unlike np.array_split, which
    # wraps them in object ndarrays (and yields empty float arrays when
    # there are more sections than elements).
    base, extra = divmod(len(emotion_list), num_processes)
    emotion_groups = []
    start = 0
    for i in range(num_processes):
        end = start + base + (1 if i < extra else 0)
        emotion_groups.append(emotion_list[start:end])
        start = end

    with ProcessPoolExecutor(max_workers=num_processes) as executor:
        futures = [
            executor.submit(generate_audio_files_for_emotion_group,
                            url_composer, text_list, group, output_dir_path)
            for group in emotion_groups
        ]
        for future in futures:
            future.result()  # propagate any worker exception to the caller
def generate_audio_files_for_emotion_group(url_composer, text_list, emotion_list, output_dir_path):
start_time = time.perf_counter() # 使用 perf_counter 获取高精度计时起点
# Ensure the output directory exists
output_dir = os.path.abspath(output_dir_path)
os.makedirs(output_dir, exist_ok=True)
@ -108,7 +132,7 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path)
# 检查是否已经存在对应的音频文件,如果存在则跳过
if os.path.exists(text_subdir_text_file_path) and os.path.exists(emotion_subdir_emotion_file_path):
has_generated_count += 1
logger.info(f"度: {has_generated_count}/{all_count}")
logger.info(f"程ID: {os.getpid()}, 进度: {has_generated_count}/{all_count}")
continue
if url_composer.is_emotion():
@ -126,6 +150,11 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path)
has_generated_count += 1
logger.info(f"进度: {has_generated_count}/{all_count}")
end_time = time.perf_counter() # 获取计时终点
elapsed_time = end_time - start_time # 计算执行耗时
# 记录日志内容
log_message = f"进程ID: {os.getpid()}, generate_audio_files_for_emotion_group 执行耗时: {elapsed_time:.6f}"
p_logger.info(log_message)
def inference_audio_from_api(url):