diff --git a/tools/uvr5/webui.py b/tools/uvr5/webui.py
index 60dfdaa7..d216f6b4 100644
--- a/tools/uvr5/webui.py
+++ b/tools/uvr5/webui.py
@@ -25,6 +25,16 @@ is_half=eval(sys.argv[2])
 webui_port_uvr5=int(sys.argv[3])
 is_share=eval(sys.argv[4])
 
+def html_left(text, label='p'):
+    return f"""<div style="text-align: left; margin: 0; padding: 0;">
+                <{label} style="margin: 0; padding: 0;">{text}</{label}>
+            </div>"""
+
+def html_center(text, label='p'):
+    return f"""<div style="text-align: center; margin: 0; padding: 0;">
+                <{label} style="margin: 0; padding: 0;">{text}</{label}>
+            </div>"""
+
 def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0):
     infos = []
     try:
@@ -116,11 +126,11 @@ with gr.Blocks(title="UVR5 WebUI") as app:
         value= i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
     )
-    with gr.Tabs():
-        with gr.TabItem(i18n("伴奏人声分离&去混响&去回声")):
-            with gr.Group():
+    with gr.Group():
+        gr.Markdown(html_center(i18n("伴奏人声分离&去混响&去回声"),'h2'))
+        with gr.Group():
                 gr.Markdown(
-                    value=i18n("人声伴奏分离批量处理, 使用UVR5模型。") + "<br>" + \
+                    value=html_left(i18n("人声伴奏分离批量处理, 使用UVR5模型。") + "<br>" + \
                         i18n("合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。")+ "<br>" + \
                         i18n("模型分为三类:") + "<br>" + \
                         i18n("1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;") + "<br>" + \
@@ -131,10 +141,11 @@ with gr.Blocks(title="UVR5 WebUI") as app:
                         i18n("去混响/去延迟,附:") + "<br>" + \
                         i18n("1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;") + "<br>" + \
                         i18n("2、MDX-Net-Dereverb模型挺慢的;") + "<br>" + \
-                        i18n("3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。")
+                        i18n("3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。"),'h4')
                 )
                 with gr.Row():
                     with gr.Column():
+                        model_choose = gr.Dropdown(label=i18n("模型"), choices=uvr5_names)
                         dir_wav_input = gr.Textbox(
                             label=i18n("输入待处理音频文件夹路径"),
                             placeholder="C:\\Users\\Desktop\\todo-songs",
@@ -143,7 +154,6 @@ with gr.Blocks(title="UVR5 WebUI") as app:
                             file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹")
                         )
                     with gr.Column():
-                        model_choose = gr.Dropdown(label=i18n("模型"), choices=uvr5_names)
                         agg = gr.Slider(
                             minimum=0,
                             maximum=20,
@@ -165,8 +175,11 @@ with gr.Blocks(title="UVR5 WebUI") as app:
                             value="flac",
                             interactive=True,
                         )
-                    but2 = gr.Button(i18n("转换"), variant="primary")
-                    vc_output4 = gr.Textbox(label=i18n("输出信息"))
+                with gr.Column():
+                    with gr.Row():
+                        but2 = gr.Button(i18n("转换"), variant="primary")
+                    with gr.Row():
+                        vc_output4 = gr.Textbox(label=i18n("输出信息"),lines=3)
                 but2.click(
                     uvr,
                     [
diff --git a/webui.py b/webui.py
index 199dcc19..73612bfa 100644
--- a/webui.py
+++ b/webui.py
@@ -59,8 +59,10 @@ from scipy.io import wavfile
 from tools.my_utils import load_audio
 from multiprocessing import cpu_count
 # os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 当遇到mps不支持的步骤时使用cpu
-import gradio.analytics as analytics
-analytics.version_check = lambda:None
+try:
+    import gradio.analytics as analytics
+    analytics.version_check = lambda:None
+except:...
 import gradio as gr
 n_cpu=cpu_count()