From 9f89f679c1e6e08f848a7e3e131fc7c219711ed6 Mon Sep 17 00:00:00 2001 From: XXXXRT666 <157766680+XXXXRT666@users.noreply.github.com> Date: Sat, 16 Aug 2025 18:34:35 +0800 Subject: [PATCH 1/2] Draft --- .github/build_windows_packages.ps1 | 20 +- .github/workflows/build_windows_packages.yaml | 10 +- .gitignore | 5 +- Docker/miniconda_install.sh | 22 +- GPT_SoVITS/AR/__init__.py | 0 GPT_SoVITS/AR/data/bucket_sampler.py | 6 +- GPT_SoVITS/AR/data/data_module.py | 4 +- GPT_SoVITS/AR/data/dataset.py | 4 +- GPT_SoVITS/AR/models/__init__.py | 0 GPT_SoVITS/AR/models/t2s_lightning_module.py | 12 +- .../AR/models/t2s_lightning_module_onnx.py | 97 +- GPT_SoVITS/AR/models/t2s_model.py | 16 +- GPT_SoVITS/AR/models/t2s_model_onnx.py | 4 +- GPT_SoVITS/AR/modules/__init__.py | 0 GPT_SoVITS/AR/modules/activation.py | 10 +- GPT_SoVITS/AR/modules/activation_onnx.py | 6 +- GPT_SoVITS/AR/modules/lr_schedulers.py | 9 +- GPT_SoVITS/AR/modules/optim.py | 1 + .../AR/modules/patched_mha_with_cache.py | 80 +- .../AR/modules/patched_mha_with_cache_onnx.py | 16 +- GPT_SoVITS/AR/modules/scaling.py | 4 +- GPT_SoVITS/AR/modules/transformer.py | 31 +- GPT_SoVITS/AR/modules/transformer_onnx.py | 15 +- GPT_SoVITS/AR/text_processing/__init__.py | 0 GPT_SoVITS/AR/text_processing/phonemizer.py | 72 - GPT_SoVITS/AR/text_processing/symbols.py | 12 - GPT_SoVITS/Accelerate/MLX/__init__.py | 12 + .../Accelerate/MLX/backends/mlx_quantized.py | 181 ++ .../Accelerate/MLX/backends/mlx_static.py | 99 + .../Accelerate/MLX/backends/mlx_varlen.py | 103 + GPT_SoVITS/Accelerate/MLX/sample_funcs_mlx.py | 65 + GPT_SoVITS/Accelerate/MLX/structs_mlx.py | 152 ++ GPT_SoVITS/Accelerate/MLX/t2s_engine_mlx.py | 238 ++ GPT_SoVITS/Accelerate/MLX/t2s_model_abc.py | 530 +++++ GPT_SoVITS/Accelerate/PyTorch/__init__.py | 30 + .../backends/flash_attn_varlen_cuda_graph.py | 158 ++ .../PyTorch/backends/mps_flash_attn_varlen.py | 166 ++ .../backends/sage_attn_varlen_cuda_graph.py | 175 ++ .../backends/torch_static_cuda_graph.py | 166 ++ .../PyTorch/backends/torch_varlen.py | 145 ++ GPT_SoVITS/Accelerate/PyTorch/nn.py | 69 + GPT_SoVITS/Accelerate/PyTorch/sample_funcs.py | 67 + GPT_SoVITS/Accelerate/PyTorch/structs.py | 151 ++ GPT_SoVITS/Accelerate/PyTorch/t2s_engine.py | 223 ++ .../Accelerate/PyTorch/t2s_model_abc.py | 671 ++++++ GPT_SoVITS/Accelerate/__init__.py | 30 + GPT_SoVITS/Accelerate/logger.py | 203 ++ GPT_SoVITS/BigVGAN/README.md | 266 --- GPT_SoVITS/BigVGAN/activations.py | 2 +- GPT_SoVITS/BigVGAN/bigvgan.py | 12 +- .../BigVGAN/configs/bigvgan_22khz_80band.json | 45 - .../configs/bigvgan_24khz_100band.json | 45 - .../configs/bigvgan_base_22khz_80band.json | 45 - .../configs/bigvgan_base_24khz_100band.json | 45 - .../configs/bigvgan_v2_22khz_80band_256x.json | 61 - .../bigvgan_v2_22khz_80band_fmax8k_256x.json | 61 - .../bigvgan_v2_44khz_128band_256x.json | 61 - .../bigvgan_v2_44khz_128band_512x.json | 61 - GPT_SoVITS/BigVGAN/discriminators.py | 625 ------ GPT_SoVITS/BigVGAN/inference.py | 85 - GPT_SoVITS/BigVGAN/inference_e2e.py | 100 - GPT_SoVITS/BigVGAN/loss.py | 238 -- GPT_SoVITS/BigVGAN/meldataset.py | 370 ---- GPT_SoVITS/BigVGAN/nv-modelcard++/.gitkeep | 1 - GPT_SoVITS/BigVGAN/nv-modelcard++/bias.md | 4 - .../BigVGAN/nv-modelcard++/explainability.md | 13 - GPT_SoVITS/BigVGAN/nv-modelcard++/overview.md | 126 -- GPT_SoVITS/BigVGAN/nv-modelcard++/privacy.md | 14 - GPT_SoVITS/BigVGAN/nv-modelcard++/safety.md | 6 - GPT_SoVITS/BigVGAN/requirements.txt | 13 - GPT_SoVITS/BigVGAN/tests/test_activation.py | 62 - .../tests/test_activation_snake_beta.py | 62 - 
.../BigVGAN/tests/test_cuda_vs_torch_model.py | 215 -- GPT_SoVITS/BigVGAN/train.py | 716 ------ GPT_SoVITS/BigVGAN/utils0.py | 43 +- GPT_SoVITS/TTS_infer_pack/TTS.py | 134 +- GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py | 52 +- GPT_SoVITS/TTS_infer_pack/__init__.py | 2 + .../text_segmentation_method.py | 2 +- GPT_SoVITS/download.py | 13 - GPT_SoVITS/eres2net/ERes2Net.py | 264 --- GPT_SoVITS/eres2net/ERes2NetV2.py | 30 +- GPT_SoVITS/eres2net/ERes2Net_huge.py | 289 --- GPT_SoVITS/export_torch_script.py | 36 +- GPT_SoVITS/export_torch_script_v3v4.py | 125 +- GPT_SoVITS/f5_tts/model/__init__.py | 14 +- GPT_SoVITS/f5_tts/model/backbones/dit.py | 13 +- GPT_SoVITS/f5_tts/model/backbones/mmdit.py | 10 +- GPT_SoVITS/f5_tts/model/backbones/unett.py | 15 +- GPT_SoVITS/f5_tts/model/modules.py | 1 - GPT_SoVITS/feature_extractor/cnhubert.py | 82 +- GPT_SoVITS/feature_extractor/whisper_enc.py | 11 +- GPT_SoVITS/inference_cli.py | 86 - GPT_SoVITS/inference_gui.py | 316 --- GPT_SoVITS/inference_webui.py | 1364 ++++++------ GPT_SoVITS/inference_webui_fast.py | 324 +-- GPT_SoVITS/module/__init__.py | 0 GPT_SoVITS/module/attentions.py | 17 +- GPT_SoVITS/module/attentions_onnx.py | 6 +- GPT_SoVITS/module/data_utils.py | 8 +- GPT_SoVITS/module/losses.py | 12 +- GPT_SoVITS/module/mel_processing.py | 9 +- GPT_SoVITS/module/models.py | 86 +- GPT_SoVITS/module/models_onnx.py | 30 +- GPT_SoVITS/module/modules.py | 25 +- GPT_SoVITS/module/mrte_model.py | 8 +- GPT_SoVITS/module/quantize.py | 4 +- GPT_SoVITS/onnx_export.py | 24 +- GPT_SoVITS/prepare_datasets/1-get-text.py | 403 +++- .../2-get-hubert-sv-wav32k.py | 423 ++++ .../prepare_datasets/2-get-hubert-wav32k.py | 134 -- GPT_SoVITS/prepare_datasets/2-get-sv.py | 115 - GPT_SoVITS/prepare_datasets/3-get-semantic.py | 395 +++- GPT_SoVITS/process_ckpt.py | 160 +- GPT_SoVITS/s1_train.py | 91 +- GPT_SoVITS/s2_train.py | 791 +++---- GPT_SoVITS/s2_train_v3.py | 564 +++-- GPT_SoVITS/s2_train_v3_lora.py | 454 ++-- GPT_SoVITS/sv.py | 27 +- GPT_SoVITS/text/LangSegmenter/__init__.py | 2 + .../text/LangSegmenter/langsegmenter.py | 162 +- GPT_SoVITS/text/__init__.py | 12 +- GPT_SoVITS/text/cantonese.py | 16 +- GPT_SoVITS/text/chinese.py | 24 +- GPT_SoVITS/text/chinese2.py | 34 +- GPT_SoVITS/text/cleaner.py | 25 +- GPT_SoVITS/text/en_normalization/expend.py | 7 +- GPT_SoVITS/text/english.py | 17 +- GPT_SoVITS/text/g2pw/__init__.py | 2 +- GPT_SoVITS/text/g2pw/onnx_api.py | 12 +- GPT_SoVITS/text/japanese.py | 8 +- GPT_SoVITS/text/korean.py | 19 +- GPT_SoVITS/text/zh_normalization/__init__.py | 2 +- GPT_SoVITS/text/zh_normalization/num.py | 3 +- GPT_SoVITS/utils.py | 71 +- README.md | 54 +- api.py | 217 +- api_v2.py | 25 +- config.py | 127 +- docs/cn/README.md | 53 +- docs/ja/README.md | 54 +- docs/ko/README.md | 40 +- docs/tr/README.md | 51 +- go-webui.bat | 3 +- go-webui.ps1 | 3 +- gpt-sovits_kaggle.ipynb | 243 --- install.ps1 | 31 +- install.sh | 57 +- requirements.txt | 72 +- .../24kto48k/readme.txt | 0 tools/{AP_BWE_main => AP_BWE}/LICENSE | 0 tools/{AP_BWE_main => AP_BWE}/README.md | 0 .../datasets1/__init__.py | 0 tools/AP_BWE/datasets1/dataset.py | 31 + .../models/__init__.py | 0 tools/{AP_BWE_main => AP_BWE}/models/model.py | 13 +- tools/AP_BWE_main/datasets1/dataset.py | 108 - tools/__init__.py | 0 tools/assets.py | 6 +- tools/audio_sr.py | 37 +- tools/i18n/i18n.py | 4 +- tools/i18n/locale/en_US.json | 19 +- tools/i18n/locale/es_ES.json | 14 +- tools/i18n/locale/fr_FR.json | 14 +- tools/i18n/locale/it_IT.json | 14 +- tools/i18n/locale/ja_JP.json | 14 +- 
tools/i18n/locale/ko_KR.json | 14 +- tools/i18n/locale/pt_BR.json | 14 +- tools/i18n/locale/ru_RU.json | 14 +- tools/i18n/locale/tr_TR.json | 14 +- tools/i18n/locale/zh_CN.json | 17 +- tools/i18n/locale/zh_HK.json | 14 +- tools/i18n/locale/zh_SG.json | 14 +- tools/i18n/locale/zh_TW.json | 14 +- tools/my_utils.py | 135 +- tools/subfix_webui.py | 41 +- tools/uvr5/bs_roformer/__init__.py | 0 tools/uvr5/bsroformer.py | 5 +- tools/uvr5/lib/lib_v5/dataset.py | 167 -- tools/uvr5/lib/lib_v5/layers.py | 106 - tools/uvr5/lib/lib_v5/layers_123812KB.py | 106 - tools/uvr5/lib/lib_v5/layers_33966KB.py | 110 - tools/uvr5/lib/lib_v5/layers_537227KB.py | 110 - tools/uvr5/lib/lib_v5/layers_537238KB.py | 110 - tools/uvr5/lib/lib_v5/layers_new.py | 111 - .../modelparams/1band_sr16000_hl512.json | 19 - .../modelparams/1band_sr32000_hl512.json | 19 - .../modelparams/1band_sr33075_hl384.json | 19 - .../modelparams/1band_sr44100_hl1024.json | 19 - .../modelparams/1band_sr44100_hl256.json | 19 - .../modelparams/1band_sr44100_hl512.json | 19 - .../modelparams/1band_sr44100_hl512_cut.json | 19 - .../lib/lib_v5/modelparams/2band_32000.json | 30 - .../lib_v5/modelparams/2band_44100_lofi.json | 30 - .../lib/lib_v5/modelparams/2band_48000.json | 30 - .../lib/lib_v5/modelparams/3band_44100.json | 42 - .../lib_v5/modelparams/3band_44100_mid.json | 43 - .../lib_v5/modelparams/3band_44100_msb2.json | 43 - .../lib/lib_v5/modelparams/4band_44100.json | 54 - .../lib_v5/modelparams/4band_44100_mid.json | 55 - .../lib_v5/modelparams/4band_44100_msb.json | 55 - .../lib_v5/modelparams/4band_44100_msb2.json | 55 - .../modelparams/4band_44100_reverse.json | 55 - .../lib_v5/modelparams/4band_44100_sw.json | 55 - .../lib/lib_v5/modelparams/4band_v2_sn.json | 55 - .../uvr5/lib/lib_v5/modelparams/ensemble.json | 43 - tools/uvr5/lib/lib_v5/nets.py | 121 - tools/uvr5/lib/lib_v5/nets_123812KB.py | 122 -- tools/uvr5/lib/lib_v5/nets_123821KB.py | 122 -- tools/uvr5/lib/lib_v5/nets_33966KB.py | 122 -- tools/uvr5/lib/lib_v5/nets_537227KB.py | 122 -- tools/uvr5/lib/lib_v5/nets_537238KB.py | 122 -- tools/uvr5/lib/lib_v5/nets_new.py | 125 -- tools/uvr5/lib/name_params.json | 263 --- tools/uvr5/mdxnet.py | 6 +- tools/uvr5/vr.py | 26 +- tools/uvr5/webui.py | 24 +- webui.py | 1940 +++++++++-------- 218 files changed, 8880 insertions(+), 12305 deletions(-) delete mode 100644 GPT_SoVITS/AR/__init__.py delete mode 100644 GPT_SoVITS/AR/models/__init__.py delete mode 100644 GPT_SoVITS/AR/modules/__init__.py delete mode 100644 GPT_SoVITS/AR/text_processing/__init__.py delete mode 100644 GPT_SoVITS/AR/text_processing/phonemizer.py delete mode 100644 GPT_SoVITS/AR/text_processing/symbols.py create mode 100644 GPT_SoVITS/Accelerate/MLX/__init__.py create mode 100644 GPT_SoVITS/Accelerate/MLX/backends/mlx_quantized.py create mode 100644 GPT_SoVITS/Accelerate/MLX/backends/mlx_static.py create mode 100644 GPT_SoVITS/Accelerate/MLX/backends/mlx_varlen.py create mode 100644 GPT_SoVITS/Accelerate/MLX/sample_funcs_mlx.py create mode 100644 GPT_SoVITS/Accelerate/MLX/structs_mlx.py create mode 100644 GPT_SoVITS/Accelerate/MLX/t2s_engine_mlx.py create mode 100644 GPT_SoVITS/Accelerate/MLX/t2s_model_abc.py create mode 100644 GPT_SoVITS/Accelerate/PyTorch/__init__.py create mode 100644 GPT_SoVITS/Accelerate/PyTorch/backends/flash_attn_varlen_cuda_graph.py create mode 100644 GPT_SoVITS/Accelerate/PyTorch/backends/mps_flash_attn_varlen.py create mode 100644 GPT_SoVITS/Accelerate/PyTorch/backends/sage_attn_varlen_cuda_graph.py create mode 100644 
GPT_SoVITS/Accelerate/PyTorch/backends/torch_static_cuda_graph.py create mode 100644 GPT_SoVITS/Accelerate/PyTorch/backends/torch_varlen.py create mode 100644 GPT_SoVITS/Accelerate/PyTorch/nn.py create mode 100644 GPT_SoVITS/Accelerate/PyTorch/sample_funcs.py create mode 100644 GPT_SoVITS/Accelerate/PyTorch/structs.py create mode 100644 GPT_SoVITS/Accelerate/PyTorch/t2s_engine.py create mode 100644 GPT_SoVITS/Accelerate/PyTorch/t2s_model_abc.py create mode 100644 GPT_SoVITS/Accelerate/__init__.py create mode 100644 GPT_SoVITS/Accelerate/logger.py delete mode 100644 GPT_SoVITS/BigVGAN/README.md delete mode 100644 GPT_SoVITS/BigVGAN/configs/bigvgan_22khz_80band.json delete mode 100644 GPT_SoVITS/BigVGAN/configs/bigvgan_24khz_100band.json delete mode 100644 GPT_SoVITS/BigVGAN/configs/bigvgan_base_22khz_80band.json delete mode 100644 GPT_SoVITS/BigVGAN/configs/bigvgan_base_24khz_100band.json delete mode 100644 GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_256x.json delete mode 100644 GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_fmax8k_256x.json delete mode 100644 GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_256x.json delete mode 100644 GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_512x.json delete mode 100644 GPT_SoVITS/BigVGAN/discriminators.py delete mode 100644 GPT_SoVITS/BigVGAN/inference.py delete mode 100644 GPT_SoVITS/BigVGAN/inference_e2e.py delete mode 100644 GPT_SoVITS/BigVGAN/loss.py delete mode 100644 GPT_SoVITS/BigVGAN/meldataset.py delete mode 100644 GPT_SoVITS/BigVGAN/nv-modelcard++/.gitkeep delete mode 100644 GPT_SoVITS/BigVGAN/nv-modelcard++/bias.md delete mode 100644 GPT_SoVITS/BigVGAN/nv-modelcard++/explainability.md delete mode 100644 GPT_SoVITS/BigVGAN/nv-modelcard++/overview.md delete mode 100644 GPT_SoVITS/BigVGAN/nv-modelcard++/privacy.md delete mode 100644 GPT_SoVITS/BigVGAN/nv-modelcard++/safety.md delete mode 100644 GPT_SoVITS/BigVGAN/requirements.txt delete mode 100644 GPT_SoVITS/BigVGAN/tests/test_activation.py delete mode 100644 GPT_SoVITS/BigVGAN/tests/test_activation_snake_beta.py delete mode 100644 GPT_SoVITS/BigVGAN/tests/test_cuda_vs_torch_model.py delete mode 100644 GPT_SoVITS/BigVGAN/train.py delete mode 100644 GPT_SoVITS/download.py delete mode 100644 GPT_SoVITS/eres2net/ERes2Net.py delete mode 100644 GPT_SoVITS/eres2net/ERes2Net_huge.py delete mode 100644 GPT_SoVITS/inference_cli.py delete mode 100644 GPT_SoVITS/inference_gui.py delete mode 100644 GPT_SoVITS/module/__init__.py create mode 100644 GPT_SoVITS/prepare_datasets/2-get-hubert-sv-wav32k.py delete mode 100644 GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py delete mode 100644 GPT_SoVITS/prepare_datasets/2-get-sv.py delete mode 100644 gpt-sovits_kaggle.ipynb rename tools/{AP_BWE_main => AP_BWE}/24kto48k/readme.txt (100%) rename tools/{AP_BWE_main => AP_BWE}/LICENSE (100%) rename tools/{AP_BWE_main => AP_BWE}/README.md (100%) rename tools/{AP_BWE_main => AP_BWE}/datasets1/__init__.py (100%) create mode 100644 tools/AP_BWE/datasets1/dataset.py rename tools/{AP_BWE_main => AP_BWE}/models/__init__.py (100%) rename tools/{AP_BWE_main => AP_BWE}/models/model.py (98%) delete mode 100644 tools/AP_BWE_main/datasets1/dataset.py delete mode 100644 tools/__init__.py delete mode 100644 tools/uvr5/bs_roformer/__init__.py delete mode 100644 tools/uvr5/lib/lib_v5/dataset.py delete mode 100644 tools/uvr5/lib/lib_v5/layers.py delete mode 100644 tools/uvr5/lib/lib_v5/layers_123812KB.py delete mode 100644 tools/uvr5/lib/lib_v5/layers_33966KB.py delete mode 100644 
tools/uvr5/lib/lib_v5/layers_537227KB.py delete mode 100644 tools/uvr5/lib/lib_v5/layers_537238KB.py delete mode 100644 tools/uvr5/lib/lib_v5/layers_new.py delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/1band_sr16000_hl512.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/1band_sr32000_hl512.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/1band_sr33075_hl384.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl1024.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl256.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl512.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl512_cut.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/2band_32000.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/2band_44100_lofi.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/2band_48000.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/3band_44100.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/3band_44100_mid.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/3band_44100_msb2.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/4band_44100.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/4band_44100_mid.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/4band_44100_msb.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/4band_44100_msb2.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/4band_44100_reverse.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/4band_44100_sw.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/4band_v2_sn.json delete mode 100644 tools/uvr5/lib/lib_v5/modelparams/ensemble.json delete mode 100644 tools/uvr5/lib/lib_v5/nets.py delete mode 100644 tools/uvr5/lib/lib_v5/nets_123812KB.py delete mode 100644 tools/uvr5/lib/lib_v5/nets_123821KB.py delete mode 100644 tools/uvr5/lib/lib_v5/nets_33966KB.py delete mode 100644 tools/uvr5/lib/lib_v5/nets_537227KB.py delete mode 100644 tools/uvr5/lib/lib_v5/nets_537238KB.py delete mode 100644 tools/uvr5/lib/lib_v5/nets_new.py delete mode 100644 tools/uvr5/lib/name_params.json diff --git a/.github/build_windows_packages.ps1 b/.github/build_windows_packages.ps1 index 2e4acb2a..8ab21496 100644 --- a/.github/build_windows_packages.ps1 +++ b/.github/build_windows_packages.ps1 @@ -31,8 +31,8 @@ $UVR5_URL = "$baseHF/uvr5_weights.zip" $NLTK_URL = "$baseHF/nltk_data.zip" $JTALK_URL = "$baseHF/open_jtalk_dic_utf_8-1.11.tar.gz" -$PYTHON_VERSION = "3.11.12" -$PY_RELEASE_VERSION = "20250409" +$PYTHON_VERSION = "3.10.18" +$PY_RELEASE_VERSION = "20250902" Write-Host "[INFO] Cleaning .git..." Remove-Item "$srcDir\.git" -Recurse -Force -ErrorAction SilentlyContinue @@ -115,12 +115,17 @@ Remove-Item $ffDir.FullName -Recurse -Force Write-Host "[INFO] Installing PyTorch..." 
& ".\runtime\python.exe" -m ensurepip & ".\runtime\python.exe" -m pip install --upgrade pip --no-warn-script-location + switch ($cuda) { - "cu124" { - & ".\runtime\python.exe" -m pip install torch==2.6 torchaudio --index-url https://download.pytorch.org/whl/cu124 --no-warn-script-location + "cu126" { + & ".\runtime\python.exe" -m pip install psutil ninja packaging wheel "setuptools>=42" --no-warn-script-location + & ".\runtime\python.exe" -m pip install torch --index-url https://download.pytorch.org/whl/cu126 --no-warn-script-location + & ".\runtime\python.exe" -m pip install flash-attn -i https://xxxxrt666.github.io/PIP-Index/ --no-build-isolation } "cu128" { - & ".\runtime\python.exe" -m pip install torch torchaudio --index-url https://download.pytorch.org/whl/cu128 --no-warn-script-location + & ".\runtime\python.exe" -m pip install psutil ninja packaging wheel "setuptools>=42" --no-warn-script-location + & ".\runtime\python.exe" -m pip install torch --index-url https://download.pytorch.org/whl/cu128 --no-warn-script-location + & ".\runtime\python.exe" -m pip install flash-attn -i https://xxxxrt666.github.io/PIP-Index/ --no-build-isolation } default { Write-Error "Unsupported CUDA version: $cuda" @@ -129,6 +134,7 @@ switch ($cuda) { } Write-Host "[INFO] Installing dependencies..." +& ".\runtime\python.exe" -m pip install --pre torchcodec --index-url https://download.pytorch.org/whl/nightly/cpu & ".\runtime\python.exe" -m pip install -r extra-req.txt --no-deps --no-warn-script-location & ".\runtime\python.exe" -m pip install -r requirements.txt --no-warn-script-location @@ -162,7 +168,7 @@ Copy-Item -Path $curr -Destination $pkgName -Recurse $7zPath = "$pkgName.7z" $start = Get-Date Write-Host "Compress Starting at $start" -& "C:\Program Files\7-Zip\7z.exe" a -t7z "$7zPath" "$pkgName" -m0=lzma2 -mx=9 -md=1g -ms=1g -mmc=500 -mfb=273 -mlc=0 -mlp=4 -mpb=4 -mc=8g -mmt=on -bsp1 +& "C:\Program Files\7-Zip\7z.exe" a -t7z "$7zPath" "$pkgName" -m0=lzma2 -mx=9 -mmt=on -bsp1 $end = Get-Date Write-Host "Elapsed time: $($end - $start)" Get-ChildItem . 
@@ -189,6 +195,6 @@ if (-not $hfUser -or -not $hfToken) { exit 1 } $env:HF_HUB_ENABLE_HF_TRANSFER = "1" -huggingface-cli upload "$hfUser/GPT-SoVITS-Packages" "$7zPath" "$7zPath" --repo-type model --token $hfToken +hf upload "$hfUser/GPT-SoVITS-Packages" "$7zPath" "$7zPath" --repo-type model --token $hfToken Write-Host "[SUCCESS] Uploaded: $7zPath to HuggingFace" diff --git a/.github/workflows/build_windows_packages.yaml b/.github/workflows/build_windows_packages.yaml index 32861463..ab3e99a1 100644 --- a/.github/workflows/build_windows_packages.yaml +++ b/.github/workflows/build_windows_packages.yaml @@ -17,7 +17,7 @@ jobs: runs-on: windows-latest strategy: matrix: - torch_cuda: [cu124, cu128] + torch_cuda: [cu126, cu128] env: TORCH_CUDA: ${{ matrix.torch_cuda }} MODELSCOPE_USERNAME: ${{ secrets.MODELSCOPE_USERNAME }} @@ -31,6 +31,14 @@ jobs: - name: Checkout uses: actions/checkout@v4 + - name: Install Windows CUDA 12.9 + uses: Jimver/cuda-toolkit@v0.2.24 + id: cuda-toolkit-win-129 + with: + cuda: 12.9.0 + method: "network" + sub-packages: '["nvcc", "cudart", "visual_studio_integration"]' + - name: Run Build and Upload Script shell: pwsh run: | diff --git a/.gitignore b/.gitignore index d280e459..37fcdd5a 100644 --- a/.gitignore +++ b/.gitignore @@ -16,8 +16,9 @@ ffprobe* cfg.json speakers.json ref_audios -tools/AP_BWE_main/24kto48k/* -!tools/AP_BWE_main/24kto48k/readme.txt +tools/AP_BWE/24kto48k/* +!tools/AP_BWE/24kto48k/readme.txt +onnx # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/Docker/miniconda_install.sh b/Docker/miniconda_install.sh index 001a2a46..cf2e3d6f 100644 --- a/Docker/miniconda_install.sh +++ b/Docker/miniconda_install.sh @@ -23,8 +23,10 @@ fi if [ "$TARGETPLATFORM" = "linux/amd64" ]; then "${WGET_CMD[@]}" -O miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-py311_25.3.1-1-Linux-x86_64.sh + SYSROOT_PKG="sysroot_linux-64>=2.28" elif [ "$TARGETPLATFORM" = "linux/arm64" ]; then "${WGET_CMD[@]}" -O miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-py311_25.3.1-1-Linux-aarch64.sh + SYSROOT_PKG="sysroot_linux-aarch64>=2.28" else exit 1 fi @@ -45,20 +47,36 @@ rm miniconda.sh source "$HOME/miniconda3/etc/profile.d/conda.sh" +"$HOME/miniconda3/bin/conda" init bash + +source "$HOME/.bashrc" + "$HOME/miniconda3/bin/conda" config --add channels conda-forge "$HOME/miniconda3/bin/conda" update -q --all -y 1>/dev/null "$HOME/miniconda3/bin/conda" install python=3.11 -q -y -"$HOME/miniconda3/bin/conda" install gcc=14 gxx ffmpeg cmake make unzip -q -y +"$HOME/miniconda3/bin/conda" install gcc=11 gxx ffmpeg cmake make unzip $SYSROOT_PKG "libstdcxx-ng>=11" -q -y if [ "$CUDA_VERSION" = "12.8" ]; then "$HOME/miniconda3/bin/pip" install torch torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/cu128 + "$HOME/miniconda3/bin/conda" install cuda-nvcc=12.8 -c nvidia elif [ "$CUDA_VERSION" = "12.6" ]; then - "$HOME/miniconda3/bin/pip" install torch==2.6 torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/cu126 + "$HOME/miniconda3/bin/pip" install torch torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/cu126 + "$HOME/miniconda3/bin/conda" install cuda-nvcc=12.6 -c nvidia fi +CUDA_PATH=$(echo "$HOME/miniconda3/targets/"*-linux | awk '{print $1}') + +export CUDA_HOME=$CUDA_PATH +export PATH="$HOME/miniconda3/bin:$PATH" +export PATH="$CUDA_HOME/bin:$PATH" +export PATH="$CUDA_HOME/nvvm/bin:$PATH" + +"$HOME/miniconda3/bin/pip" install psutil ninja packaging wheel "setuptools>=42" +"$HOME/miniconda3/bin/pip" 
install flash-attn -i https://xxxxrt666.github.io/PIP-Index/ --no-build-isolation + "$HOME/miniconda3/bin/pip" cache purge rm $LOG_PATH diff --git a/GPT_SoVITS/AR/__init__.py b/GPT_SoVITS/AR/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/GPT_SoVITS/AR/data/bucket_sampler.py b/GPT_SoVITS/AR/data/bucket_sampler.py index d8457334..4d2ed0b8 100644 --- a/GPT_SoVITS/AR/data/bucket_sampler.py +++ b/GPT_SoVITS/AR/data/bucket_sampler.py @@ -39,12 +39,12 @@ class DistributedBucketSampler(Sampler[T_co]): if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") - num_replicas = dist.get_world_size() if torch.cuda.is_available() else 1 + num_replicas = dist.get_world_size() if torch.cuda.device_count() > 1 else 1 if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") - rank = dist.get_rank() if torch.cuda.is_available() else 0 - if torch.cuda.is_available(): + rank = dist.get_rank() if torch.cuda.device_count() > 1 else 0 + if torch.cuda.device_count() > 1: torch.cuda.set_device(rank) if rank >= num_replicas or rank < 0: raise ValueError("Invalid rank {}, rank should be in the interval [0, {}]".format(rank, num_replicas - 1)) diff --git a/GPT_SoVITS/AR/data/data_module.py b/GPT_SoVITS/AR/data/data_module.py index f360503b..f0d62674 100644 --- a/GPT_SoVITS/AR/data/data_module.py +++ b/GPT_SoVITS/AR/data/data_module.py @@ -3,8 +3,8 @@ from pytorch_lightning import LightningDataModule from torch.utils.data import DataLoader -from AR.data.bucket_sampler import DistributedBucketSampler -from AR.data.dataset import Text2SemanticDataset +from GPT_SoVITS.AR.data.bucket_sampler import DistributedBucketSampler +from GPT_SoVITS.AR.data.dataset import Text2SemanticDataset class Text2SemanticDataModule(LightningDataModule): diff --git a/GPT_SoVITS/AR/data/dataset.py b/GPT_SoVITS/AR/data/dataset.py index 402483d9..e3164865 100644 --- a/GPT_SoVITS/AR/data/dataset.py +++ b/GPT_SoVITS/AR/data/dataset.py @@ -13,7 +13,7 @@ from torch.utils.data import DataLoader, Dataset version = os.environ.get("version", None) -from text import cleaned_text_to_sequence +from GPT_SoVITS.text import cleaned_text_to_sequence # from config import exp_dir @@ -220,7 +220,7 @@ class Text2SemanticDataset(Dataset): flag = 0 path_bert = "%s/%s.pt" % (self.path3, item_name) - if os.path.exists(path_bert) == True: + if os.path.exists(path_bert) is True: bert_feature = torch.load(path_bert, map_location="cpu") else: flag = 1 diff --git a/GPT_SoVITS/AR/models/__init__.py b/GPT_SoVITS/AR/models/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/GPT_SoVITS/AR/models/t2s_lightning_module.py b/GPT_SoVITS/AR/models/t2s_lightning_module.py index fd357b94..3daa8dbc 100644 --- a/GPT_SoVITS/AR/models/t2s_lightning_module.py +++ b/GPT_SoVITS/AR/models/t2s_lightning_module.py @@ -1,18 +1,14 @@ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py # reference: https://github.com/lifeiteng/vall-e -import os -import sys -now_dir = os.getcwd() -sys.path.append(now_dir) from typing import Dict import torch from pytorch_lightning import LightningModule -from AR.models.t2s_model import Text2SemanticDecoder -from AR.modules.lr_schedulers import WarmupCosineLRSchedule -from AR.modules.optim import ScaledAdam +from ..modules.lr_schedulers import WarmupCosineLRSchedule +from ..modules.optim import ScaledAdam +from .t2s_model import 
Text2SemanticDecoder class Text2SemanticLightningModule(LightningModule): @@ -42,7 +38,7 @@ class Text2SemanticLightningModule(LightningModule): def training_step(self, batch: Dict, batch_idx: int): opt = self.optimizers() scheduler = self.lr_schedulers() - forward = self.model.forward if self.config["train"].get("if_dpo", False) == True else self.model.forward_old + forward = self.model.forward if self.config["train"].get("if_dpo", False) is True else self.model.forward_old loss, acc = forward( batch["phoneme_ids"], batch["phoneme_ids_len"], diff --git a/GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py b/GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py index b0ab59c4..b87c4ad7 100644 --- a/GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py +++ b/GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py @@ -1,18 +1,10 @@ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py # reference: https://github.com/lifeiteng/vall-e -import os -import sys -now_dir = os.getcwd() -sys.path.append(now_dir) -from typing import Dict -import torch from pytorch_lightning import LightningModule -from AR.models.t2s_model_onnx import Text2SemanticDecoder -from AR.modules.lr_schedulers import WarmupCosineLRSchedule -from AR.modules.optim import ScaledAdam +from .t2s_model_onnx import Text2SemanticDecoder class Text2SemanticLightningModule(LightningModule): @@ -21,90 +13,3 @@ class Text2SemanticLightningModule(LightningModule): self.config = config self.top_k = 3 self.model = Text2SemanticDecoder(config=config, top_k=self.top_k) - pretrained_s1 = config.get("pretrained_s1") - if pretrained_s1 and is_train: - # print(self.load_state_dict(torch.load(pretrained_s1,map_location="cpu")["state_dict"])) - print( - self.load_state_dict( - torch.load( - pretrained_s1, - map_location="cpu", - )["weight"], - ), - ) - if is_train: - self.automatic_optimization = False - self.save_hyperparameters() - self.eval_dir = output_dir / "eval" - self.eval_dir.mkdir(parents=True, exist_ok=True) - - def training_step(self, batch: Dict, batch_idx: int): - opt = self.optimizers() - scheduler = self.lr_schedulers() - loss, acc = self.model.forward( - batch["phoneme_ids"], - batch["phoneme_ids_len"], - batch["semantic_ids"], - batch["semantic_ids_len"], - batch["bert_feature"], - ) - self.manual_backward(loss) - if batch_idx > 0 and batch_idx % 4 == 0: - opt.step() - opt.zero_grad() - scheduler.step() - - self.log( - "total_loss", - loss, - on_step=True, - on_epoch=True, - prog_bar=True, - sync_dist=True, - ) - self.log( - "lr", - scheduler.get_last_lr()[0], - on_epoch=True, - prog_bar=True, - sync_dist=True, - ) - self.log( - f"top_{self.top_k}_acc", - acc, - on_step=True, - on_epoch=True, - prog_bar=True, - sync_dist=True, - ) - - def validation_step(self, batch: Dict, batch_idx: int): - return - - def configure_optimizers(self): - model_parameters = self.model.parameters() - parameters_names = [] - parameters_names.append([name_param_pair[0] for name_param_pair in self.model.named_parameters()]) - lm_opt = ScaledAdam( - model_parameters, - lr=0.01, - betas=(0.9, 0.95), - clipping_scale=2.0, - parameters_names=parameters_names, - show_dominant_parameters=False, - clipping_update_period=1000, - ) - - return { - "optimizer": lm_opt, - "lr_scheduler": { - "scheduler": WarmupCosineLRSchedule( - lm_opt, - init_lr=self.config["optimizer"]["lr_init"], - peak_lr=self.config["optimizer"]["lr"], - end_lr=self.config["optimizer"]["lr_end"], - 
warmup_steps=self.config["optimizer"]["warmup_steps"], - total_steps=self.config["optimizer"]["decay_steps"], - ) - }, - } diff --git a/GPT_SoVITS/AR/models/t2s_model.py b/GPT_SoVITS/AR/models/t2s_model.py index 7196d6ab..8809460a 100644 --- a/GPT_SoVITS/AR/models/t2s_model.py +++ b/GPT_SoVITS/AR/models/t2s_model.py @@ -9,7 +9,7 @@ from torch.nn import functional as F from torchmetrics.classification import MulticlassAccuracy from tqdm import tqdm -from AR.models.utils import ( +from GPT_SoVITS.AR.models.utils import ( dpo_loss, get_batch_logps, make_pad_mask, @@ -18,8 +18,8 @@ from AR.models.utils import ( sample, topk_sampling, ) -from AR.modules.embedding import SinePositionalEmbedding, TokenEmbedding -from AR.modules.transformer import LayerNorm, TransformerEncoder, TransformerEncoderLayer +from GPT_SoVITS.AR.modules.embedding import SinePositionalEmbedding, TokenEmbedding +from GPT_SoVITS.AR.modules.transformer import LayerNorm, TransformerEncoder, TransformerEncoderLayer default_config = { "embedding_dim": 512, @@ -420,7 +420,7 @@ class Text2SemanticDecoder(nn.Module): mask=xy_attn_mask, ) x_len = x_lens.max() - logits = self.ar_predict_layer(xy_dec[:, x_len-1:]) + logits = self.ar_predict_layer(xy_dec[:, x_len - 1 :]) ###### DPO ############# reject_xy_pos, reject_xy_attn_mask, reject_targets = self.make_input_data( @@ -432,7 +432,7 @@ class Text2SemanticDecoder(nn.Module): mask=reject_xy_attn_mask, ) x_len = x_lens.max() - reject_logits = self.ar_predict_layer(reject_xy_dec[:, x_len-1:]) + reject_logits = self.ar_predict_layer(reject_xy_dec[:, x_len - 1 :]) # loss # from feiteng: 每次 duration 越多, 梯度更新也应该更多, 所以用 sum @@ -502,7 +502,7 @@ class Text2SemanticDecoder(nn.Module): (xy_pos, None), mask=xy_attn_mask, ) - logits = self.ar_predict_layer(xy_dec[:, x_len-1:]).permute(0, 2, 1) + logits = self.ar_predict_layer(xy_dec[:, x_len - 1 :]).permute(0, 2, 1) # loss # from feiteng: 每次 duration 越多, 梯度更新也应该更多, 所以用 sum loss = F.cross_entropy(logits, targets, reduction="sum") @@ -724,8 +724,8 @@ class Text2SemanticDecoder(nn.Module): l1 = samples[:, 0] == self.EOS l2 = tokens == self.EOS l = l1.logical_or(l2) - removed_idx_of_batch_for_y = torch.where(l == True)[0].tolist() - reserved_idx_of_batch_for_y = torch.where(l == False)[0] + removed_idx_of_batch_for_y = torch.where(l)[0].tolist() + reserved_idx_of_batch_for_y = torch.where(~l)[0] # batch_indexs = torch.tensor(batch_idx_map, device=y.device)[removed_idx_of_batch_for_y] for i in removed_idx_of_batch_for_y: batch_index = batch_idx_map[i] diff --git a/GPT_SoVITS/AR/models/t2s_model_onnx.py b/GPT_SoVITS/AR/models/t2s_model_onnx.py index 4f7b50a3..4548563c 100644 --- a/GPT_SoVITS/AR/models/t2s_model_onnx.py +++ b/GPT_SoVITS/AR/models/t2s_model_onnx.py @@ -5,8 +5,8 @@ from torch import nn from torch.nn import functional as F from torchmetrics.classification import MulticlassAccuracy -from AR.modules.embedding_onnx import SinePositionalEmbedding, TokenEmbedding -from AR.modules.transformer_onnx import LayerNorm, TransformerEncoder, TransformerEncoderLayer +from GPT_SoVITS.AR.modules.embedding_onnx import SinePositionalEmbedding, TokenEmbedding +from GPT_SoVITS.AR.modules.transformer_onnx import LayerNorm, TransformerEncoder, TransformerEncoderLayer default_config = { "embedding_dim": 512, diff --git a/GPT_SoVITS/AR/modules/__init__.py b/GPT_SoVITS/AR/modules/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/GPT_SoVITS/AR/modules/activation.py b/GPT_SoVITS/AR/modules/activation.py index
936f9c3f..bb254b03 100644 --- a/GPT_SoVITS/AR/modules/activation.py +++ b/GPT_SoVITS/AR/modules/activation.py @@ -9,7 +9,7 @@ from torch.nn.init import constant_, xavier_normal_, xavier_uniform_ from torch.nn.modules.linear import NonDynamicallyQuantizableLinear from torch.nn.parameter import Parameter -from AR.modules.patched_mha_with_cache import multi_head_attention_forward_patched +from .patched_mha_with_cache import multi_head_attention_forward_patched F.multi_head_attention_forward = multi_head_attention_forward_patched @@ -86,8 +86,8 @@ class MultiheadAttention(Module): kdim=None, vdim=None, batch_first=False, - linear1_cls=Linear, - linear2_cls=Linear, + linear1_cls: type[Module] = Linear, + linear2_cls: type[Module] = Linear, device=None, dtype=None, ) -> None: @@ -383,7 +383,7 @@ class MultiheadAttention(Module): k_proj_weight=self.k_proj_weight, v_proj_weight=self.v_proj_weight, average_attn_weights=average_attn_weights, - cache=cache, + cache=cache, # type: ignore ) else: attn_output, attn_output_weights = F.multi_head_attention_forward( @@ -405,7 +405,7 @@ class MultiheadAttention(Module): need_weights=need_weights, attn_mask=attn_mask, average_attn_weights=average_attn_weights, - cache=cache, + cache=cache, # type: ignore ) if self.batch_first and is_batched: return attn_output.transpose(1, 0), attn_output_weights diff --git a/GPT_SoVITS/AR/modules/activation_onnx.py b/GPT_SoVITS/AR/modules/activation_onnx.py index c14ce40c..6f2f8dab 100644 --- a/GPT_SoVITS/AR/modules/activation_onnx.py +++ b/GPT_SoVITS/AR/modules/activation_onnx.py @@ -1,5 +1,5 @@ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/activation.py -from typing import Optional, Tuple +from typing import Optional import torch from torch import Tensor @@ -8,7 +8,7 @@ from torch.nn.init import constant_, xavier_normal_, xavier_uniform_ from torch.nn.modules.linear import NonDynamicallyQuantizableLinear from torch.nn.parameter import Parameter -from AR.modules.patched_mha_with_cache_onnx import multi_head_attention_forward_patched +from .patched_mha_with_cache_onnx import multi_head_attention_forward_patched class MultiheadAttention(Module): @@ -161,7 +161,7 @@ class MultiheadAttention(Module): attn_mask: Optional[Tensor] = None, average_attn_weights: bool = True, cache=None, - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> Tensor: any_nested = query.is_nested or key.is_nested or value.is_nested query = key = value = query.transpose(1, 0) attn_output = multi_head_attention_forward_patched( diff --git a/GPT_SoVITS/AR/modules/lr_schedulers.py b/GPT_SoVITS/AR/modules/lr_schedulers.py index 707a911f..226823b4 100644 --- a/GPT_SoVITS/AR/modules/lr_schedulers.py +++ b/GPT_SoVITS/AR/modules/lr_schedulers.py @@ -1,6 +1,7 @@ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/modules/lr_schedulers.py # reference: https://github.com/lifeiteng/vall-e import math +from typing import Optional import torch from matplotlib import pyplot as plt @@ -38,10 +39,9 @@ class WarmupCosineLRSchedule(torch.optim.lr_scheduler._LRScheduler): def set_lr(self, lr): self._last_lr = [g["lr"] for g in self.optimizer.param_groups] for g in self.optimizer.param_groups: - # g['lr'] = lr - g["lr"] = self.end_lr ###锁定用线性 + g["lr"] = self.end_lr - def step(self): + def step(self, epoch: Optional[int] = None): if self._current_step < self.warmup_steps: lr = self.init_lr + self._warmup_rate * self._current_step @@ -55,11 +55,10 @@ class 
WarmupCosineLRSchedule(torch.optim.lr_scheduler._LRScheduler): coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) lr = self.end_lr + coeff * (self.peak_lr - self.end_lr) - self.lr = lr = self.end_lr = 0.002 ###锁定用线性###不听话,直接锁定! + self.lr = lr = self.end_lr = 0.002 self.set_lr(lr) self.lr = lr self._current_step += 1 - return self.lr if __name__ == "__main__": diff --git a/GPT_SoVITS/AR/modules/optim.py b/GPT_SoVITS/AR/modules/optim.py index fb878485..b0db48dd 100644 --- a/GPT_SoVITS/AR/modules/optim.py +++ b/GPT_SoVITS/AR/modules/optim.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import contextlib import logging from collections import defaultdict diff --git a/GPT_SoVITS/AR/modules/patched_mha_with_cache.py b/GPT_SoVITS/AR/modules/patched_mha_with_cache.py index 5bffcea6..e27eab8a 100644 --- a/GPT_SoVITS/AR/modules/patched_mha_with_cache.py +++ b/GPT_SoVITS/AR/modules/patched_mha_with_cache.py @@ -1,43 +1,46 @@ -from torch.nn.functional import * -from torch.nn.functional import ( - _mha_shape_check, - _canonical_mask, - _none_or_dtype, - _in_projection_packed, -) +import math +from typing import Optional, Tuple + import torch -# Tensor = torch.Tensor -# from typing import Callable, List, Optional, Tuple, Union +from torch.nn.functional import * # noqa: F403 +from torch.nn.functional import ( + _canonical_mask, + _in_projection_packed, # type: ignore + _mha_shape_check, # type: ignore + _none_or_dtype, +) + +Tensor = torch.Tensor def multi_head_attention_forward_patched( query, key, value, - embed_dim_to_check, - num_heads, + embed_dim_to_check: int, + num_heads: int, in_proj_weight, - in_proj_bias, - bias_k, - bias_v, - add_zero_attn, + in_proj_bias: Optional[Tensor], + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, dropout_p: float, - out_proj_weight, - out_proj_bias, - training=True, - key_padding_mask=None, - need_weights=True, - attn_mask=None, - use_separate_proj_weight=False, - q_proj_weight=None, - k_proj_weight=None, - v_proj_weight=None, - static_k=None, - static_v=None, - average_attn_weights=True, - is_causal=False, - cache=None, -): + out_proj_weight: Tensor, + out_proj_bias: Optional[Tensor], + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + average_attn_weights: bool = True, + is_causal: bool = False, + cache: dict | None = None, +) -> Tuple[Tensor, Tensor | None]: r""" Args: query, key, value: map a query and a set of key-value pairs to an output. 
@@ -250,27 +253,18 @@ def multi_head_attention_forward_patched( b_k, b_v, ) - if cache != None: + if cache is not None: if cache["first_infer"] == 1: cache["k"][cache["stage"]] = k - # print(0,cache["k"].shape) cache["v"][cache["stage"]] = v - else: ###12个layer每个都要留自己的cache_kv - # print(1,cache["k"].shape) - cache["k"][cache["stage"]] = torch.cat( - [cache["k"][cache["stage"]], k], 0 - ) ##本来时序是1,但是proj的时候可能transpose了所以时序到0维了 + else: + cache["k"][cache["stage"]] = torch.cat([cache["k"][cache["stage"]], k], 0) cache["v"][cache["stage"]] = torch.cat([cache["v"][cache["stage"]], v], 0) # print(2, cache["k"].shape) src_len = cache["k"][cache["stage"]].shape[0] k = cache["k"][cache["stage"]] v = cache["v"][cache["stage"]] - # if attn_mask is not None: - # attn_mask=attn_mask[-1:,] - # print(attn_mask.shape,attn_mask) cache["stage"] = (cache["stage"] + 1) % cache["all_stage"] - # print(2333,cache) - - # prep attention mask attn_mask = _canonical_mask( mask=attn_mask, diff --git a/GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py b/GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py index 8144c9c6..6f0220c3 100644 --- a/GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py +++ b/GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py @@ -1,7 +1,9 @@ -from torch.nn.functional import * -from torch.nn.functional import ( - _canonical_mask, -) +from typing import Optional + +import torch +from torch.nn.functional import _canonical_mask, linear, scaled_dot_product_attention + +Tensor = torch.Tensor def multi_head_attention_forward_patched( @@ -30,8 +32,8 @@ def multi_head_attention_forward_patched( static_v: Optional[Tensor] = None, average_attn_weights: bool = True, is_causal: bool = False, - cache=None, -) -> Tuple[Tensor, Optional[Tensor]]: + cache: dict | None = None, +) -> Tensor: # set up shape vars _, _, embed_dim = query.shape attn_mask = _canonical_mask( @@ -48,6 +50,7 @@ def multi_head_attention_forward_patched( proj_qkv = proj_qkv.unflatten(-1, (3, query.size(-1))).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous() q, k, v = proj_qkv[0], proj_qkv[1], proj_qkv[2] + assert cache is not None if cache["first_infer"] == 1: cache["k"][cache["stage"]] = k cache["v"][cache["stage"]] = v @@ -66,6 +69,7 @@ def multi_head_attention_forward_patched( target_type=q.dtype, check_other=False, ) + assert attn_mask is not None attn_mask = attn_mask.unsqueeze(0) q = q.view(-1, num_heads, head_dim).transpose(0, 1) diff --git a/GPT_SoVITS/AR/modules/scaling.py b/GPT_SoVITS/AR/modules/scaling.py index aae14533..b0108f45 100644 --- a/GPT_SoVITS/AR/modules/scaling.py +++ b/GPT_SoVITS/AR/modules/scaling.py @@ -14,8 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License.
import random -from typing import Optional -from typing import Tuple +from typing import Optional, Tuple import torch import torch.nn as nn @@ -41,7 +40,6 @@ class DoubleSwishFunction(torch.autograd.Function): @staticmethod def forward(ctx, x: Tensor) -> Tensor: requires_grad = x.requires_grad - x_dtype = x.dtype if x.dtype == torch.float16: x = x.to(torch.float32) diff --git a/GPT_SoVITS/AR/modules/transformer.py b/GPT_SoVITS/AR/modules/transformer.py index 1bf21cdb..a07f4fe6 100644 --- a/GPT_SoVITS/AR/modules/transformer.py +++ b/GPT_SoVITS/AR/modules/transformer.py @@ -2,20 +2,15 @@ import copy import numbers from functools import partial -from typing import Any -from typing import Callable -from typing import List -from typing import Optional -from typing import Tuple -from typing import Union +from typing import Any, Callable, List, Optional, Tuple, Union import torch -from AR.modules.activation import MultiheadAttention -from AR.modules.scaling import BalancedDoubleSwish -from torch import nn -from torch import Tensor +from torch import Tensor, nn from torch.nn import functional as F +from .activation import MultiheadAttention +from .scaling import BalancedDoubleSwish + _shape_t = Union[int, List[int], torch.Size] @@ -55,7 +50,7 @@ class LayerNorm(nn.Module): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) - def forward(self, input: Tensor, embedding: Any = None) -> Tensor: + def forward(self, input: Tensor, embedding: Any = None) -> tuple[Tensor, Any] | Tensor: if isinstance(input, tuple): input, embedding = input return ( @@ -128,7 +123,7 @@ class TransformerEncoder(nn.Module): src_key_padding_mask: Optional[Tensor] = None, return_layer_states: bool = False, cache=None, - ) -> Tensor: + ) -> Tensor | tuple[list[Tensor], Tensor]: r"""Pass the input through the encoder layers in turn. Args: @@ -186,11 +181,11 @@ class TransformerEncoderLayer(nn.Module): norm_first: bool = False, device=None, dtype=None, - linear1_self_attention_cls: nn.Module = nn.Linear, - linear2_self_attention_cls: nn.Module = nn.Linear, - linear1_feedforward_cls: nn.Module = nn.Linear, - linear2_feedforward_cls: nn.Module = nn.Linear, - layer_norm_cls: nn.Module = LayerNorm, + linear1_self_attention_cls: type[nn.Module] = nn.Linear, + linear2_self_attention_cls: type[nn.Module] = nn.Linear, + linear1_feedforward_cls: type[nn.Module] = nn.Linear, + linear2_feedforward_cls: type[nn.Module] = nn.Linear, + layer_norm_cls: type[nn.Module] = LayerNorm, layer_norm_eps: float = 1e-5, adaptive_layer_norm=False, ) -> None: @@ -260,7 +255,7 @@ class TransformerEncoderLayer(nn.Module): src_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None, cache=None, - ) -> Tensor: + ) -> Tensor | tuple[Tensor, Any]: r"""Pass the input through the encoder layer. 
Args: diff --git a/GPT_SoVITS/AR/modules/transformer_onnx.py b/GPT_SoVITS/AR/modules/transformer_onnx.py index fa170254..b90f9966 100644 --- a/GPT_SoVITS/AR/modules/transformer_onnx.py +++ b/GPT_SoVITS/AR/modules/transformer_onnx.py @@ -2,20 +2,15 @@ import copy import numbers from functools import partial -from typing import Any -from typing import Callable -from typing import List -from typing import Optional -from typing import Tuple -from typing import Union +from typing import Any, Callable, List, Optional, Tuple, Union import torch -from AR.modules.activation_onnx import MultiheadAttention -from AR.modules.scaling import BalancedDoubleSwish -from torch import nn -from torch import Tensor +from torch import Tensor, nn from torch.nn import functional as F +from GPT_SoVITS.AR.modules.activation_onnx import MultiheadAttention +from GPT_SoVITS.AR.modules.scaling import BalancedDoubleSwish + _shape_t = Union[int, List[int], torch.Size] diff --git a/GPT_SoVITS/AR/text_processing/__init__.py b/GPT_SoVITS/AR/text_processing/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/GPT_SoVITS/AR/text_processing/phonemizer.py b/GPT_SoVITS/AR/text_processing/phonemizer.py deleted file mode 100644 index 1003040e..00000000 --- a/GPT_SoVITS/AR/text_processing/phonemizer.py +++ /dev/null @@ -1,72 +0,0 @@ -# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/phonemizer.py -# reference: https://github.com/lifeiteng/vall-e -import itertools -import re -from typing import Dict -from typing import List - -import regex -from gruut import sentences -from gruut.const import Sentence -from gruut.const import Word -from AR.text_processing.symbols import SYMBOL_TO_ID - - -class GruutPhonemizer: - def __init__(self, language: str): - self._phonemizer = sentences - self.lang = language - self.symbol_to_id = SYMBOL_TO_ID - self._special_cases_dict: Dict[str] = { - r"\.\.\.": "... ", - ";": "; ", - ":": ": ", - ",": ", ", - r"\.": ". ", - "!": "! ", - r"\?": "? 
", - "—": "—", - "…": "… ", - "«": "«", - "»": "»", - } - self._punctuation_regexp: str = rf"([{''.join(self._special_cases_dict.keys())}])" - - def _normalize_punctuation(self, text: str) -> str: - text = regex.sub(rf"\pZ+{self._punctuation_regexp}", r"\1", text) - text = regex.sub(rf"{self._punctuation_regexp}(\pL)", r"\1 \2", text) - text = regex.sub(r"\pZ+", r" ", text) - return text.strip() - - def _convert_punctuation(self, word: Word) -> str: - if not word.phonemes: - return "" - if word.phonemes[0] in ["‖", "|"]: - return word.text.strip() - - phonemes = "".join(word.phonemes) - # remove modifier characters ˈˌː with regex - phonemes = re.sub(r"[ˈˌː͡]", "", phonemes) - return phonemes.strip() - - def phonemize(self, text: str, espeak: bool = False) -> str: - text_to_phonemize: str = self._normalize_punctuation(text) - sents: List[Sentence] = [sent for sent in self._phonemizer(text_to_phonemize, lang="en-us", espeak=espeak)] - words: List[str] = [self._convert_punctuation(word) for word in itertools.chain(*sents)] - return " ".join(words) - - def transform(self, phonemes): - # convert phonemes to ids - # dictionary is in symbols.py - return [self.symbol_to_id[p] for p in phonemes if p in self.symbol_to_id.keys()] - - -if __name__ == "__main__": - phonemizer = GruutPhonemizer("en-us") - # text -> IPA - phonemes = phonemizer.phonemize("Hello, wor-ld ?") - print("phonemes:", phonemes) - print("len(phonemes):", len(phonemes)) - phoneme_ids = phonemizer.transform(phonemes) - print("phoneme_ids:", phoneme_ids) - print("len(phoneme_ids):", len(phoneme_ids)) diff --git a/GPT_SoVITS/AR/text_processing/symbols.py b/GPT_SoVITS/AR/text_processing/symbols.py deleted file mode 100644 index f7ef57fa..00000000 --- a/GPT_SoVITS/AR/text_processing/symbols.py +++ /dev/null @@ -1,12 +0,0 @@ -# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/symbols.py -# reference: https://github.com/lifeiteng/vall-e -PAD = "_" -PUNCTUATION = ';:,.!?¡¿—…"«»“” ' -LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" -IPA_LETTERS = ( - "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ" -) -SYMBOLS = [PAD] + list(PUNCTUATION) + list(LETTERS) + list(IPA_LETTERS) -SPACE_ID = SYMBOLS.index(" ") -SYMBOL_TO_ID = {s: i for i, s in enumerate(SYMBOLS)} -ID_TO_SYMBOL = {i: s for i, s in enumerate(SYMBOLS)} diff --git a/GPT_SoVITS/Accelerate/MLX/__init__.py b/GPT_SoVITS/Accelerate/MLX/__init__.py new file mode 100644 index 00000000..20691fcf --- /dev/null +++ b/GPT_SoVITS/Accelerate/MLX/__init__.py @@ -0,0 +1,12 @@ +import importlib.util +import platform + +if importlib.util.find_spec("mlx") is not None and platform.system() == "Darwin": + from .sample_funcs_mlx import sample_naive as sample_naive_mlx + from .t2s_engine_mlx import T2SEngine as T2SEngineMLX + + backends = ["mlx_static", "mlx_quantized_mxfp4", "mlx_quantized_affine", "mlx_varlen"] +else: + backends = [] + +__all__ = ["T2SEngineMLX", "sample_naive_mlx", "backends"] diff --git a/GPT_SoVITS/Accelerate/MLX/backends/mlx_quantized.py b/GPT_SoVITS/Accelerate/MLX/backends/mlx_quantized.py new file mode 100644 index 00000000..a624b4a5 --- /dev/null +++ b/GPT_SoVITS/Accelerate/MLX/backends/mlx_quantized.py @@ -0,0 +1,181 @@ +from __future__ import annotations + +from typing import cast + +import mlx.core as mx +import mlx.nn as nn + +from ..structs_mlx import KVCacheQ +from ..t2s_model_abc import ( + AttentionABC, + KVCache, + KVCacheHND, + 
T2SDecoderABC, + TransformerBlockABC, + TransformerDecoderABC, +) + +Array = mx.array + + +class Attention(AttentionABC): + def __init__(self, n_head: int, hidden_dim: int, max_seq_length: int): + super().__init__(n_head, hidden_dim, max_seq_length) + self.kc_class = KVCacheHND + + @staticmethod + def quantized_scaled_dot_product_attention( + queries: Array, + q_keys: tuple[Array, Array, Array], + q_values: tuple[Array, Array, Array], + scale: float, + mask: Array, + group_size: int = 32, + bits: int = 8, + ) -> Array: + queries *= scale + + scores = mx.quantized_matmul(queries, *q_keys, transpose=True, group_size=group_size, bits=bits) + scores = mx.where(mask, scores, -mx.inf) + scores = mx.softmax(scores, axis=-1, precise=True) # type: ignore + out = mx.quantized_matmul(scores, *q_values, transpose=False, group_size=group_size, bits=bits) + + return out + + def __call__(self, x: Array, input_pos: Array, kv_cache: KVCache | KVCacheQ, cache_idx: Array, attn_mask: Array): + bsz, seqlen, _ = cast(tuple[int, ...], x.shape) + + q, k, v = self.in_proj(x).split(3, axis=-1) + + q, k, v = map(lambda x: x.reshape(bsz, seqlen, self.n_head, self.head_dim), (q, k, v)) + + q, k, v = map(lambda x: x.swapaxes(1, 2), (q, k, v)) + + kv_cache = self.kc_class.update_cache(input_pos, k, v, kv_cache, cache_idx) + assert len(kv_cache) == 2 + + max_idx = int(input_pos.max()) + + q, k, v = map(lambda x: x[..., :max_idx, :], (q, *kv_cache)) + + mask = attn_mask[..., :max_idx] + + attn = mx.fast.scaled_dot_product_attention(q, k, v, scale=self.scale, mask=mask) + + attn = attn.swapaxes(1, 2).reshape(bsz, seqlen, self.hidden_dim) + + attn = self.out_proj(attn) + + return attn + + # def __call__(self, x: Array, input_pos: Array, kv_cache: KVCache | KVCacheQ, cache_idx: Array, attn_mask: Array): + # bsz, seqlen, _ = cast(tuple[int, ...], x.shape) + + # q, k, v = self.in_proj(x).split(3, axis=-1) + + # q, k, v = map(lambda x: x.reshape(bsz, seqlen, self.n_head, self.head_dim), (q, k, v)) + + # q, k, v = map(lambda x: x.swapaxes(1, 2), (q, k, v)) + + # kv_cache = self.kc_class.update_cache(input_pos, k, v, kv_cache, cache_idx) + + # assert len(kv_cache) == 3 + # (k_q, k_s, k_b), (v_q, v_s, v_b), (group_size, bits) = kv_cache + + # k_q, k_s, k_b, v_q, v_s, v_b = map(lambda x: x[..., : int(input_pos.max()), :], (k_q, k_s, k_b, v_q, v_s, v_b)) + + # mask = attn_mask[..., : int(input_pos.max())] + + # attn = Attention.quantized_scaled_dot_product_attention( + # q, + # (k_q, k_s, k_b), + # (v_q, v_s, v_b), + # self.scale, + # mask, + # group_size, + # bits, + # ) + + # attn = attn.swapaxes(1, 2).reshape(bsz, seqlen, self.hidden_dim) + + # output = self.out_proj(attn) + + # return output + + +class TransformerBlock(TransformerBlockABC): + def __init__(self, n_head: int, ffn_dim: int, hidden_dim: int, max_seq_length: int, *args, **kwds) -> None: + super().__init__(n_head, ffn_dim, hidden_dim, max_seq_length, *args, **kwds) + + self.attention = Attention(n_head, hidden_dim, max_seq_length, *args, **kwds) + + +class TransformerDecoder(TransformerDecoderABC): + def __init__( + self, + hidden_dim: int, + n_layer: int, + n_head: int, + ffn_dim: int, + vocab_size: int, + max_seq_length: int, + max_batch_size: int, + *args, + **kwds, + ) -> None: + super().__init__( + hidden_dim, + n_layer, + n_head, + ffn_dim, + vocab_size, + max_seq_length, + max_batch_size, + *args, + **kwds, + ) + + self.layers = [ + TransformerBlock( + n_head, + ffn_dim, + hidden_dim, + max_seq_length, + *args, + **kwds, + ) + for _ in range(n_layer) + ] + + 
+class T2SDecoder(T2SDecoderABC): + def __init__( + self, + config: dict, + max_seq_length: int = 1800, + max_batch_size: int = 10, + ) -> None: + super().__init__(config, max_seq_length, max_batch_size) + + self.h = TransformerDecoder( + self.hidden_dim, self.n_layer, self.n_head, self.ffn_dim, self.vocab_size, max_seq_length, max_batch_size + ) + + self.kv_class = KVCacheHND + self.group_size = 32 + self.bits = 8 + self.mode = "affine" + + def set_mode(self, mode: str): + assert mode in ["affine", "mxfp4"] + self.mode = mode + if self.mode == "mxfp4": + self.bits = 4 + else: + self.bits = 8 + + def quantized(self): + nn.quantize(self, self.group_size, self.bits, mode=self.mode) + # for layer in self.h.layers: + # nn.quantize(layer.feed_forward, self.group_size, self.bits) + # nn.quantize(layer.attention, self.group_size, self.bits) diff --git a/GPT_SoVITS/Accelerate/MLX/backends/mlx_static.py b/GPT_SoVITS/Accelerate/MLX/backends/mlx_static.py new file mode 100644 index 00000000..6716fb32 --- /dev/null +++ b/GPT_SoVITS/Accelerate/MLX/backends/mlx_static.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +from typing import cast + +import mlx.core as mx + +from ..structs_mlx import KVCache, KVCacheQ +from ..t2s_model_abc import ( + AttentionABC, + KVCacheHND, + T2SDecoderABC, + TransformerBlockABC, + TransformerDecoderABC, +) + +Array = mx.array + + +class Attention(AttentionABC): + def __init__(self, n_head: int, hidden_dim: int, max_seq_length: int): + super().__init__(n_head, hidden_dim, max_seq_length) + self.kc_class = KVCacheHND + + def __call__(self, x: Array, input_pos: Array, kv_cache: KVCache | KVCacheQ, cache_idx: Array, attn_mask: Array): + bsz, seqlen, _ = cast(tuple[int, ...], x.shape) + + q, k, v = self.in_proj(x).split(3, axis=-1) + + q, k, v = map(lambda x: x.reshape(bsz, seqlen, self.n_head, self.head_dim), (q, k, v)) + + q, k, v = map(lambda x: x.swapaxes(1, 2), (q, k, v)) + + kv_cache = self.kc_class.update_cache(input_pos, k, v, kv_cache, cache_idx) + assert len(kv_cache) == 2 + + k, v = kv_cache + + attn = mx.fast.scaled_dot_product_attention(q, k, v, scale=self.scale, mask=attn_mask) + + attn = attn.swapaxes(1, 2).reshape(bsz, seqlen, self.hidden_dim) + + attn = self.out_proj(attn) + + return attn + + +class TransformerBlock(TransformerBlockABC): + def __init__(self, n_head: int, ffn_dim: int, hidden_dim: int, max_seq_length: int) -> None: + super().__init__(n_head, ffn_dim, hidden_dim, max_seq_length) + + self.attention = Attention(n_head, hidden_dim, max_seq_length) + + +class TransformerDecoder(TransformerDecoderABC): + def __init__( + self, + hidden_dim: int, + n_layer: int, + n_head: int, + ffn_dim: int, + vocab_size: int, + max_seq_length: int, + max_batch_size: int, + ) -> None: + super().__init__( + hidden_dim, + n_layer, + n_head, + ffn_dim, + vocab_size, + max_seq_length, + max_batch_size, + ) + + self.layers = [ + TransformerBlock( + n_head, + ffn_dim, + hidden_dim, + max_seq_length, + ) + for _ in range(n_layer) + ] + + +class T2SDecoder(T2SDecoderABC): + def __init__( + self, + config: dict, + max_seq_length: int = 1800, + max_batch_size: int = 10, + ) -> None: + super().__init__(config, max_seq_length, max_batch_size) + + self.h = TransformerDecoder( + self.hidden_dim, self.n_layer, self.n_head, self.ffn_dim, self.vocab_size, max_seq_length, max_batch_size + ) + + self.kv_class = KVCacheHND diff --git a/GPT_SoVITS/Accelerate/MLX/backends/mlx_varlen.py b/GPT_SoVITS/Accelerate/MLX/backends/mlx_varlen.py new file mode 100644 index 00000000..3f07f6e2 
--- /dev/null +++ b/GPT_SoVITS/Accelerate/MLX/backends/mlx_varlen.py @@ -0,0 +1,103 @@ +from __future__ import annotations + +from typing import cast + +import mlx.core as mx + +from ..structs_mlx import KVCache, KVCacheQ +from ..t2s_model_abc import ( + AttentionABC, + KVCacheHND, + T2SDecoderABC, + TransformerBlockABC, + TransformerDecoderABC, +) + +Array = mx.array + + +class Attention(AttentionABC): + def __init__(self, n_head: int, hidden_dim: int, max_seq_length: int): + super().__init__(n_head, hidden_dim, max_seq_length) + self.kc_class = KVCacheHND + + def __call__(self, x: Array, input_pos: Array, kv_cache: KVCache | KVCacheQ, cache_idx: Array, attn_mask: Array): + bsz, seqlen, _ = cast(tuple[int, ...], x.shape) + + q, k, v = self.in_proj(x).split(3, axis=-1) + + q, k, v = map(lambda x: x.reshape(bsz, seqlen, self.n_head, self.head_dim), (q, k, v)) + + q, k, v = map(lambda x: x.swapaxes(1, 2), (q, k, v)) + + kv_cache = self.kc_class.update_cache(input_pos, k, v, kv_cache, cache_idx) + assert len(kv_cache) == 2 + + max_idx = int(input_pos.max()) + + q, k, v = map(lambda x: x[..., :max_idx, :], (q, *kv_cache)) + + mask = attn_mask[..., :max_idx] + + attn = mx.fast.scaled_dot_product_attention(q, k, v, scale=self.scale, mask=mask) + + attn = attn.swapaxes(1, 2).reshape(bsz, seqlen, self.hidden_dim) + + attn = self.out_proj(attn) + + return attn + + +class TransformerBlock(TransformerBlockABC): + def __init__(self, n_head: int, ffn_dim: int, hidden_dim: int, max_seq_length: int) -> None: + super().__init__(n_head, ffn_dim, hidden_dim, max_seq_length) + + self.attention = Attention(n_head, hidden_dim, max_seq_length) + + +class TransformerDecoder(TransformerDecoderABC): + def __init__( + self, + hidden_dim: int, + n_layer: int, + n_head: int, + ffn_dim: int, + vocab_size: int, + max_seq_length: int, + max_batch_size: int, + ) -> None: + super().__init__( + hidden_dim, + n_layer, + n_head, + ffn_dim, + vocab_size, + max_seq_length, + max_batch_size, + ) + + self.layers = [ + TransformerBlock( + n_head, + ffn_dim, + hidden_dim, + max_seq_length, + ) + for _ in range(n_layer) + ] + + +class T2SDecoder(T2SDecoderABC): + def __init__( + self, + config: dict, + max_seq_length: int = 1800, + max_batch_size: int = 10, + ) -> None: + super().__init__(config, max_seq_length, max_batch_size) + + self.h = TransformerDecoder( + self.hidden_dim, self.n_layer, self.n_head, self.ffn_dim, self.vocab_size, max_seq_length, max_batch_size + ) + + self.kv_class = KVCacheHND diff --git a/GPT_SoVITS/Accelerate/MLX/sample_funcs_mlx.py b/GPT_SoVITS/Accelerate/MLX/sample_funcs_mlx.py new file mode 100644 index 00000000..01244a00 --- /dev/null +++ b/GPT_SoVITS/Accelerate/MLX/sample_funcs_mlx.py @@ -0,0 +1,65 @@ +from typing import Protocol, cast + +import mlx.core as mx + +Array = mx.array + + +class SampleProtocolMLX(Protocol): + @staticmethod + def __call__( + logits: Array, + previous_tokens: Array, + temperature: float, + top_k: int, + top_p: float, + repetition_penalty: float, + ) -> Array: ... 
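+
+# Naive sampling pipeline implemented below: greedy argmax when temperature is
+# effectively zero, otherwise repetition penalty on previously generated tokens,
+# top-p (nucleus) filtering, temperature scaling, a top-k cutoff, and finally the
+# Gumbel-max trick (argmax over logits + Gumbel noise), which draws from the
+# softmax distribution without materializing it.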
+
+
+class sample_naive(SampleProtocolMLX):
+    # @partial(mx.compile)
+    @staticmethod
+    def __call__(
+        logits,
+        previous_tokens,
+        temperature,
+        top_k,
+        top_p,
+        repetition_penalty,
+    ):
+        if temperature <= 1e-5:
+            probs = mx.softmax(logits, axis=-1)
+            return mx.argmax(probs, axis=-1, keepdims=True).astype(mx.int32)
+
+        if repetition_penalty != 1.0:
+            batch_idx = mx.arange(cast(tuple[int, ...], previous_tokens.shape)[0])
+            previous_tokens = previous_tokens.astype(mx.int64)
+            selected_logits = logits[batch_idx, previous_tokens]
+            selected_logits = mx.where(
+                selected_logits < 0, selected_logits * repetition_penalty, selected_logits / repetition_penalty
+            )
+            logits[batch_idx, previous_tokens] = selected_logits
+
+        if top_p < 1.0:
+            sorted_indices = mx.argsort(-logits, axis=-1)
+            sorted_logits = mx.take_along_axis(logits, sorted_indices, axis=-1)
+            cum_probs = mx.cumsum(mx.softmax(sorted_logits, axis=-1), axis=-1)
+            sorted_indices_to_remove = cum_probs > top_p
+            sorted_indices_to_remove[:, 0] = False  # keep at least one option
+            indices_to_remove = mx.zeros_like(logits).astype(mx.bool_)
+            batch_indices = mx.arange(cast(tuple[int, ...], logits.shape)[0])[:, None]
+            indices_to_remove[batch_indices, sorted_indices] = sorted_indices_to_remove
+            logits = mx.where(indices_to_remove, -mx.inf, logits)
+
+        if temperature < 1.0:
+            logits = logits / temperature
+
+        v = mx.topk(logits, top_k)
+        pivot = mx.expand_dims(v[:, 0], -1)
+        logits = mx.where(logits < pivot, -mx.inf, logits)
+
+        gumbel_noise = mx.random.gumbel(shape=cast(tuple[int, ...], logits.shape), dtype=logits.dtype)
+        idx_next = mx.argmax(logits + gumbel_noise, axis=-1, keepdims=True).astype(mx.int32)
+
+        return idx_next
diff --git a/GPT_SoVITS/Accelerate/MLX/structs_mlx.py b/GPT_SoVITS/Accelerate/MLX/structs_mlx.py
new file mode 100644
index 00000000..e02c9b4d
--- /dev/null
+++ b/GPT_SoVITS/Accelerate/MLX/structs_mlx.py
@@ -0,0 +1,152 @@
+"""
+Modified From https://github.com/XXXXRT666/GPT-SoVITS
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import List, MutableSequence, Protocol, TypeAlias, cast
+
+import mlx.core as mx
+import torch
+
+from ..PyTorch.structs import T2SRequest
+from .sample_funcs_mlx import SampleProtocolMLX, sample_naive
+
+Tensor = torch.Tensor
+Array = mx.array
+
+
+@dataclass(slots=True)
+class T2SRequestMLX:
+    x: List[Array]
+    x_lens: Array
+    prompts: Array
+    bert_feature: List[Array]
+    valid_length: int
+    top_k: int = 5
+    top_p: float = 1
+    early_stop_num: int = -1
+    temperature: float = 1.0
+    repetition_penalty: float = 1.35
+
+    @classmethod
+    def from_torch(cls, request: T2SRequest) -> T2SRequestMLX:
+        x = list(map(lambda tensor: mx.array(tensor.cpu()), request.x))
+        x_lens = mx.array(request.x_lens.cpu())
+        prompts = mx.array(request.prompts.cpu())
+        bert_feature = list(map(lambda tensor: mx.array(tensor.cpu()), request.bert_feature))
+
+        return cls(
+            x,
+            x_lens,
+            prompts,
+            bert_feature,
+            request.valid_length,
+            request.top_k,
+            request.top_p,
+            request.early_stop_num,
+            request.temperature,
+            request.repetition_penalty,
+        )
+
+
+KVCache: TypeAlias = tuple[Array, Array]
+KVCacheQ: TypeAlias = tuple[tuple[Array, Array, Array], tuple[Array, Array, Array], tuple[int, int]]
+
+
+class KVCacheProtocol(Protocol):
+    @staticmethod
+    def empty(kv_cache: KVCache | KVCacheQ) -> None: ...
+
+    @staticmethod
+    def update_cache(
+        input_pos: Array, k_val: Array, v_val: Array, kv_cache: KVCache | KVCacheQ, cache_idx: Array
+    ) -> KVCache | KVCacheQ: ...
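+
+    # Implementations back either a plain cache (KVCache: a (k, v) pair of arrays)
+    # or a quantized cache (KVCacheQ: packed uint32 data plus per-group scales and
+    # biases, together with the (group_size, bits) metadata). update_cache writes a
+    # single decode step at position input_pos - 1; prefill_kv bulk-writes the
+    # whole prompt.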
+ + @staticmethod + def prefill_kv(k_val: Array, v_val: Array, kv_cache: KVCache | KVCacheQ) -> None: ... + + @staticmethod + def init_cache( + batch_size: int, max_seq_length: int, n_heads: int, head_dim: int, dtype: mx.Dtype, *args, **kwds + ) -> KVCache | KVCacheQ: ... + + +class T2SDecoderProtocol(Protocol): + max_seq_length: int + EOS: int + n_head: int + + def embed(self, x: list[Array], y: Array, bert_features: list[Array]) -> Array: ... + + +class T2SSessionMLX: + def __init__( + self, + decoder: T2SDecoderProtocol, + request_torch: T2SRequest, + sample_func: type[SampleProtocolMLX] = sample_naive, + device: mx.Device = mx.Device(mx.cpu), + dtype: mx.Dtype = mx.float32, + ): + with mx.stream(device): + request = T2SRequestMLX.from_torch(request_torch) + + self.decoder = decoder + self.request = request + self.device = device + self.dtype = dtype + + bsz = len(request.x) + y_len: int = cast(tuple[int, ...], request.prompts.shape)[-1] + self.bsz = bsz + self.y_len = y_len + + # Cache + self.kv_cache: MutableSequence[KVCache | KVCacheQ] + self.sample = sample_func() + + # Forward args + self.x = [i.astype(mx.int32) for i in request.x] + self.x_lens = request.x_lens.astype(mx.int32) + self.y = mx.zeros((bsz, decoder.max_seq_length)).astype(mx.int32) + self.y[:, : cast(tuple[int, ...], request.prompts.shape)[-1]] = request.prompts.astype(mx.int32) + self.bert_feature = [i.astype(dtype) for i in request.bert_feature] + + self.prefill_len = self.x_lens + cast(tuple[int, ...], request.prompts.shape)[1] + + self.input_pos = mx.zeros_like(self.prefill_len) + self.input_pos += self.prefill_len + + # EOS + self.completed = mx.array([False] * len(self.x)).astype(mx.bool_) + self.y_results: List[Array] = [None] * len(self.x) # type: ignore + + self.xy_pos = decoder.embed(self.x, request.prompts, self.bert_feature) + + max_len = int(self.prefill_len.max(-1)) + attn_mask = mx.zeros(shape=(bsz, max_len, max_len), dtype=mx.bool_) + + for bs in range(bsz): + pos = int(self.x_lens[bs]) + seq_len = pos + y_len + + attn_mask[bs, :seq_len, :pos] = True + + ar_mask = ~mx.triu( + x=mx.ones( + shape=( + y_len, + y_len, + ), + dtype=mx.bool_, + ), + k=1, + ) + attn_mask[bs, pos:seq_len, pos:seq_len] = ar_mask + + attn_mask = mx.repeat(mx.expand_dims(attn_mask, 1), decoder.n_head, 1) + self.attn_mask = attn_mask + + mx.eval(self.attn_mask) diff --git a/GPT_SoVITS/Accelerate/MLX/t2s_engine_mlx.py b/GPT_SoVITS/Accelerate/MLX/t2s_engine_mlx.py new file mode 100644 index 00000000..390c57cb --- /dev/null +++ b/GPT_SoVITS/Accelerate/MLX/t2s_engine_mlx.py @@ -0,0 +1,238 @@ +import gc +import os +import time +import traceback +from typing import cast + +import mlx.core as mx +import torch +from rich.progress import BarColumn, Progress, TextColumn + +from ..logger import SpeedColumnToken, console, logger +from ..PyTorch.structs import T2SEngineProtocol, T2SRequest, T2SResult +from .backends import mlx_quantized, mlx_static, mlx_varlen +from .structs_mlx import T2SSessionMLX +from .t2s_model_abc import T2SDecoderABC + +Array = mx.array +Tensor = torch.Tensor + + +class T2SEngine(T2SEngineProtocol): + def __init__( + self, + decoder_model: T2SDecoderABC, + device: mx.Device | str = mx.Device(mx.cpu), + dtype: torch.dtype | mx.Dtype = torch.float32, + ) -> None: + if isinstance(device, str): + match device: + case "mx.cpu": + device = mx.Device(mx.cpu) + case "mx.gpu": + device = mx.Device(mx.gpu) + + match dtype: + case torch.float32: + dtype = mx.float32 + case torch.float16: + dtype = mx.float16 + case torch.bfloat16: 
+ dtype = mx.bfloat16 + + device = cast(mx.Device, device) + dtype = cast(mx.Dtype, dtype) + + assert device.type.value in {0, 1} + assert dtype in {mx.float16, mx.bfloat16, mx.float32} + + self.device = device + self.dtype = dtype + + mx.set_default_device(device) + decoder_model.set_dtype(self.dtype) + + self.decoder_model: T2SDecoderABC = decoder_model + self.decoder_model.compile() + + def _handle_request(self, request: T2SRequest): + decoder = self.decoder_model + session = T2SSessionMLX(decoder, request, device=self.device, dtype=self.dtype) + batch_idx = mx.arange(session.bsz) + + t1 = 0.0 + infer_speed = 0.0 + infer_time = 0.0 + + with ( + mx.stream(session.device), + Progress( + TextColumn("[cyan]{task.description}"), + BarColumn(), + TextColumn("{task.completed}/{task.total}"), + SpeedColumnToken(show_speed=True), + console=console, + transient=True, + ) as progress, + ): + max_token = min(1800 - int(session.input_pos.max()), 1500) + + task = progress.add_task("T2S Decoding", total=max_token) + for idx in range(1500): + progress.update(task, advance=1) + if idx == 0: + session.kv_cache = decoder.init_cache(session.bsz) + xy_dec = decoder.h.prefill( + session.xy_pos, + session.attn_mask, + session.kv_cache, + ) # bs, seq_len, embed_dim + xy_dec = xy_dec[None, batch_idx, session.input_pos - 1] + else: + args, kwds = decoder.pre_forward(session) + xy_dec = decoder.h( + session.input_pos, + session.xy_pos, + session.kv_cache, + batch_idx, + *args, + **kwds, + ) + + decoder.post_forward(idx, session) + logits = decoder.ar_predict_layer(xy_dec[:, -1]) + session.input_pos += 1 + + if idx == 0: + logits[:, -1] = -mx.inf + + samples = session.sample( + logits=logits, + previous_tokens=session.y[:, : session.y_len + idx], + top_k=request.top_k, + top_p=request.top_p, + repetition_penalty=request.repetition_penalty, + temperature=request.temperature, + ) + + session.y[batch_idx, session.y_len + idx] = samples + + argmax_token = mx.argmax(logits, axis=-1) + sample_token = samples.squeeze(1) + EOS_mask = (cast(Array, argmax_token == decoder.EOS)) | (sample_token == decoder.EOS) + + newly_done_mask = EOS_mask & (~session.completed) + newly_done_indices = mx.where(newly_done_mask, batch_idx, -1) + pos = mx.where(newly_done_indices != -1, batch_idx, session.bsz) + pos_sorted = mx.sort(pos, axis=0) + valid_count = session.bsz - mx.sum(cast(Array, pos_sorted == session.bsz)) + pos_final = pos_sorted[: int(valid_count)] + newly_done_indices = mx.expand_dims(newly_done_indices[pos_final], 0) + + if newly_done_indices.size > 0: + for i in newly_done_indices: + session.y_results[int(i)] = session.y[i, session.y_len : session.y_len + idx] + session.completed[newly_done_indices] = True + + if mx.all(session.completed).item(): + if session.y[:, session.y_len :].sum() == 0: + session.y_results = [mx.array([0]) for _ in range(session.bsz)] + logger.error("Bad Zero Prediction") + else: + logger.info( + f"T2S Decoding EOS {session.prefill_len.tolist().__str__().strip('[]')} -> {[cast(tuple[int, ...], i.shape)[-1] for i in session.y_results].__str__().strip('[]')}" + ) + logger.info(f"Infer Speed: {(idx - 1) / (time.perf_counter() - t1):.2f} token/s") + infer_time = time.perf_counter() - t1 + infer_speed = (idx - 1) / infer_time + break + + if (request.early_stop_num != -1 and idx >= request.early_stop_num) or idx == max_token - 1: + for j in range(session.bsz): + if not session.completed[j].item(): + session.y_results[j] = session.y[[j], session.y_len : session.y_len + 1499] + session.completed[j] = True + 
logger.error("Bad Full Prediction") + logger.info(f"Infer Speed: {(idx - 1) / (time.perf_counter() - t1):.2f} token/s") + infer_time = time.perf_counter() - t1 + infer_speed = (idx - 1) / infer_time + break + + y_emb = decoder.ar_audio_embedding(samples) + session.xy_pos = decoder.ar_audio_position(session.input_pos - session.x_lens, y_emb) + mx.eval(session.xy_pos, session.y) + + if idx == 1: + t1 = time.perf_counter() + + if idx % 100 == 0: + mx.clear_cache() + + match session.device: + case mx.gpu: + mx.clear_cache() + case mx.cpu: + gc.collect() + + result_mlx = session.y_results[: request.valid_length] + mx.eval(result_mlx) + result = [torch.tensor(k) for k in result_mlx] + return result, infer_speed, infer_time + + def generate(self, request: T2SRequest): + try: + result, infer_speed, infer_time = self._handle_request(request) + t2s_result = T2SResult(result=result, infer_speed=(infer_speed, infer_time), status="Success") + except Exception as e: + t2s_result = T2SResult(status="Error", exception=e, traceback=traceback.format_exc()) + return t2s_result + + @staticmethod + def replace_key(state_dict: dict[str, Tensor]): + state_dict_mlx: list[tuple[str, Array]] = [] + for key, value in state_dict.items(): + key = ( + key.replace("model.", "") + .replace("in_proj_", "in_proj.") + .replace("self_attn", "attention") + .replace("linear", "feed_forward.linear") + .replace("norm1", "attention_norm") + .replace("norm2", "ffn_norm") + ) + value_mlx = mx.array(value) + state_dict_mlx.append((key, value_mlx)) + return state_dict_mlx + + @staticmethod + def load_decoder(weights_path: os.PathLike, max_batch_size: int = 1, backend: str = "MLX-Varlen"): + logger.info(f"Loading Text2Semantic Weights from {weights_path} with {backend} Backend") + dict_s1 = torch.load(weights_path, map_location="cpu", weights_only=False, mmap=True) + config = dict_s1["config"] + match backend: + case "MLX-Varlen": + decoder_cls: type[T2SDecoderABC] = mlx_varlen.T2SDecoder + case "MLX-Static": + decoder_cls = mlx_static.T2SDecoder + case "MLX-Quantized-Affine" | "MLX-Quantized-MXFP4": + decoder_cls = mlx_quantized.T2SDecoder + case _: + raise RuntimeError(f"Backend {backend} Not Found") + + decoder: T2SDecoderABC = decoder_cls(config, max_batch_size=max_batch_size) + state_dict = dict_s1["weight"] + state_dict_mlx = T2SEngine.replace_key(state_dict) + decoder.load_weights(state_dict_mlx) + decoder.eval() + mx.eval(decoder) + + if "Quantized" in backend and isinstance(decoder, mlx_quantized.T2SDecoder): + if backend == "MLX-Quantized-Affine": + decoder.set_mode("affine") + elif backend == "MLX-Quantized-MXFP4": + decoder.set_mode("mxfp4") + else: + raise RuntimeError(f"Quantized Backend {backend} Not Supported") + decoder.quantized() + mx.eval(decoder) + + return decoder diff --git a/GPT_SoVITS/Accelerate/MLX/t2s_model_abc.py b/GPT_SoVITS/Accelerate/MLX/t2s_model_abc.py new file mode 100644 index 00000000..fb295179 --- /dev/null +++ b/GPT_SoVITS/Accelerate/MLX/t2s_model_abc.py @@ -0,0 +1,530 @@ +from __future__ import annotations + +import math +from abc import ABC, abstractmethod +from typing import MutableSequence, cast + +import mlx.core as mx +import mlx.nn as nn + +from .structs_mlx import KVCache, KVCacheProtocol, KVCacheQ, T2SDecoderProtocol, T2SSessionMLX + +Array = mx.array + + +class TokenEmbedding(nn.Module): + def __init__( + self, + embedding_dim: int, + vocab_size: int, + ): + super().__init__() + + self.vocab_size = vocab_size + self.embedding_dim = embedding_dim + + self.word_embeddings = 
nn.Embedding(self.vocab_size, self.embedding_dim) + + @property + def weight(self): + return self.word_embeddings.weight + + def embedding(self, index: int): + return self.word_embeddings.weight[index : index + 1] + + def __call__(self, x: Array): + x = self.word_embeddings(x) + return x + + +class SinePositionalEmbedding(nn.Module): + def __init__( + self, + embedding_dim: int, + scale: bool = False, + max_batch_size: int = 10, + max_seq_len: int = 1800, + ): + super().__init__() + self.embedding_dim = embedding_dim + self.x_scale = math.sqrt(embedding_dim) if scale else 1.0 + self.alpha = mx.ones(1) + self.max_batch_size = max_batch_size + self.max_seq_len = max_seq_len + + self.reverse = False + self._pe = mx.zeros((max_batch_size, max_seq_len, embedding_dim)) + self.compute_pe() + + def compute_pe(self): + """Reset the positional encodings.""" + + if self.reverse: + position = mx.expand_dims(mx.arange(self.max_seq_len - 1, -1, -1.0), axis=1) + else: + position = mx.expand_dims(mx.arange(self.max_seq_len), axis=1) + div_term = mx.exp( + mx.arange( + 0, + self.embedding_dim, + 2, + ) + * -(math.log(10000.0) / self.embedding_dim) + ) + pe = self._pe + pe[:, :, 0::2] = mx.sin(position * div_term) + pe[:, :, 1::2] = mx.cos(position * div_term) + + def __call__(self, input_pos: Array, x: Array): + """ + Args: + input_pos (Array): [batch_size, ] + x (Array): [batch_size, 1, embed_dim] + + Returns: + embedded_x (Array): [batch_size, 1, embed_dim] + """ + + batch_size = cast(tuple[int, ...], x.shape)[0] + pe_values = self._pe[mx.arange(batch_size), input_pos - 1] # (batch_size, embed_dim) + + return x * self.x_scale + self.alpha * mx.expand_dims(pe_values, 1) # (batch_size, 1, embed_dim) + + def prefill(self, x: Array): + """ + Args: + x (Array): [batch_size, seq_len, embed_dim] + + Returns: + embedded_x (Array): [batch_size, seq_len, embed_dim] + """ + pe_values = self._pe[:, : cast(tuple[int, ...], x.shape)[-2]] + return x * self.x_scale + self.alpha * pe_values + + +class KVCacheHND(KVCacheProtocol): + @staticmethod + def empty(kv_cache): + assert len(kv_cache) == 2 + k_cache, v_cache = kv_cache + + k_cache[:] = 0 + v_cache[:] = 0 + + @staticmethod + def update_cache(input_pos, k_val, v_val, kv_cache, cache_idx): + # input_pos: [B, ], k_val: [B, H, 1, D] + assert len(kv_cache) == 2 + k_out, v_out = kv_cache + ip0 = input_pos - 1 + + k_out[cache_idx, :, ip0, None] = k_val + v_out[cache_idx, :, ip0, None] = v_val + + return k_out, v_out + + @staticmethod + def prefill_kv(k_val, v_val, kv_cache): + # k_val: [B, S, H, D] + assert len(kv_cache) == 2 + k_cache, v_cache = kv_cache + + k_cache[..., : cast(tuple[int, ...], k_val.shape)[1], :] = k_val.swapaxes(1, 2) + v_cache[..., : cast(tuple[int, ...], v_val.shape)[1], :] = v_val.swapaxes(1, 2) + + @staticmethod + def init_cache(batch_size: int, max_seq_length: int, n_heads: int, head_dim: int, dtype: mx.Dtype) -> KVCache: + cache_shape = (batch_size, n_heads, max_seq_length, head_dim) + + return (mx.zeros(cache_shape, dtype=dtype), mx.zeros(cache_shape, dtype=dtype)) + + +class KVCacheHNDQuantized(KVCacheProtocol): + @staticmethod + def _el_per_int(bits: int) -> int: + return 32 // bits + + @staticmethod + def _packed_dim(head_dim: int, bits: int = 8) -> int: + el_per_int = KVCacheHNDQuantized._el_per_int(bits) + if head_dim % el_per_int != 0: + raise ValueError(f"{head_dim=} is not divisible by {el_per_int=} ({bits=})") + return head_dim // el_per_int + + @staticmethod + def _group_count(head_dim: int, group_size: int = 32) -> int: + assert 
group_size in {32, 64, 128} + if head_dim % group_size != 0: + raise ValueError(f"{head_dim} is not divisible by {group_size=}") + return head_dim // group_size + + @staticmethod + def empty(kv_cache) -> None: + assert len(kv_cache) == 3 + (k_q, k_s, k_b), (v_q, v_s, v_b), (_, __) = kv_cache + + k_q[:] = 0 + k_s[:] = 0 + k_b[:] = 0 + v_q[:] = 0 + v_s[:] = 0 + v_b[:] = 0 + + @staticmethod + def update_cache( + input_pos, + k_val, + v_val, + kv_cache, + cache_idx, + ): + # input_pos: [B, ], k_val: [B, H, 1, D] + + assert len(kv_cache) == 3 + (k_q_out, k_s_out, k_b_out), (v_q_out, v_s_out, v_b_out), (group_size, bits) = kv_cache + + k_q, k_s, k_b = mx.quantize(k_val, group_size=group_size, bits=bits) + v_q, v_s, v_b = mx.quantize(v_val, group_size=group_size, bits=bits) + + ip0 = input_pos - 1 + + k_q_out[cache_idx, :, ip0, None] = k_q + k_s_out[cache_idx, :, ip0, None] = k_s + k_b_out[cache_idx, :, ip0, None] = k_b + + v_q_out[cache_idx, :, ip0, None] = v_q + v_s_out[cache_idx, :, ip0, None] = v_s + v_b_out[cache_idx, :, ip0, None] = v_b + + return (k_q_out, k_s_out, k_b_out), (v_q_out, v_s_out, v_b_out), (group_size, bits) + + @staticmethod + def prefill_kv( + k_val, + v_val, + kv_cache, + ) -> None: + assert len(kv_cache) == 3 + (k_q_out, k_s_out, k_b_out), (v_q_out, v_s_out, v_b_out), (group_size, bits) = kv_cache + + S = cast(tuple[int, ...], k_val.shape)[1] + + k_sw = k_val.swapaxes(1, 2) + v_sw = v_val.swapaxes(1, 2) + + k_q, k_s, k_b = mx.quantize(k_sw, group_size=group_size, bits=bits) + v_q, v_s, v_b = mx.quantize(v_sw, group_size=group_size, bits=bits) + + k_q_out[..., :S, :] = k_q + k_s_out[..., :S, :] = k_s + k_b_out[..., :S, :] = k_b + + v_q_out[..., :S, :] = v_q + v_s_out[..., :S, :] = v_s + v_b_out[..., :S, :] = v_b + + @staticmethod + def init_cache( + batch_size: int, + max_seq_length: int, + n_heads: int, + head_dim: int, + dtype: mx.Dtype, + *, + group_size: int = 32, + bits: int = 8, + ) -> KVCacheQ: + packed_dim = KVCacheHNDQuantized._packed_dim(head_dim, bits=bits) + group_cnt = KVCacheHNDQuantized._group_count(head_dim, group_size=group_size) + + packed_shape = (batch_size, n_heads, max_seq_length, packed_dim) + group_shape = (batch_size, n_heads, max_seq_length, group_cnt) + + k_q = mx.zeros(packed_shape, dtype=mx.uint32) + k_s = mx.zeros(group_shape, dtype=dtype) + k_b = mx.zeros(group_shape, dtype=dtype) + + v_q = mx.zeros(packed_shape, dtype=mx.uint32) + v_s = mx.zeros(group_shape, dtype=dtype) + v_b = mx.zeros(group_shape, dtype=dtype) + + return (k_q, k_s, k_b), (v_q, v_s, v_b), (group_size, bits) + + +class AttentionABC(ABC, nn.Module): + def __init__(self, n_head: int, hidden_dim: int, max_seq_length: int, *args, **kwds): + super().__init__() + + self.n_head = n_head + self.hidden_dim = hidden_dim + assert hidden_dim % n_head == 0 + self.head_dim = hidden_dim // n_head + + self.max_seq_length = max_seq_length + + # key, query, value projections for all heads, but in a batch + self.in_proj = nn.Linear(hidden_dim, hidden_dim * 3, bias=True) + self.out_proj = nn.Linear(hidden_dim, hidden_dim, bias=True) + + self.scale = 1 / math.sqrt(self.head_dim) + + self.kc_class: KVCacheProtocol + + @abstractmethod + def __call__( + self, x: Array, input_pos: Array, kv_cache: KVCache | KVCacheQ, cache_idx: Array, attn_mask: Array + ) -> Array: ... 
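+
+    # Subclasses implement __call__ for incremental decoding against the KV cache.
+    # prefill below encodes the whole padded prompt in one pass, writes K/V into
+    # the cache via kc_class.prefill_kv, and relies on nan_to_num to zero rows that
+    # attn_mask leaves fully masked (padding positions).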
+ + def prefill(self, x: Array, kv_cache: KVCache | KVCacheQ, attn_mask: Array): + bsz, seqlen, _ = cast(tuple[int, ...], x.shape) + + q, k, v = self.in_proj(mx.expand_dims(x, 0)).split(3, axis=-1) + + q, k, v = map(lambda x: x.reshape(bsz, seqlen, self.n_head, self.head_dim), (q, k, v)) + + self.kc_class.prefill_kv(k, v, kv_cache) + + q, k, v = map(lambda x: x.swapaxes(1, 2), (q, k, v)) + + attn = mx.fast.scaled_dot_product_attention(q, k, v, mask=attn_mask, scale=self.scale) + + attn = mx.nan_to_num(attn) + + attn = attn.swapaxes(1, 2).reshape(1, -1, self.hidden_dim) + + output = self.out_proj(attn) + + return output + + +class FeedForward(nn.Module): + def __init__(self, dim: int, hidden_dim: int) -> None: + super().__init__() + + self.linear1 = nn.Linear(dim, hidden_dim, bias=True) + self.linear2 = nn.Linear(hidden_dim, dim, bias=True) + + def __call__(self, x: Array): + return self.linear2(nn.relu(self.linear1(x))) + + +class TransformerBlockABC(nn.Module): + def __init__(self, n_head: int, ffn_dim: int, hidden_dim: int, max_seq_length: int, *args, **kwds) -> None: + super().__init__() + + self.hidden_dim = hidden_dim + self.max_seq_length = max_seq_length + + self.attention: AttentionABC + + self.feed_forward = FeedForward(hidden_dim, ffn_dim) + self.attention_norm = nn.LayerNorm(self.hidden_dim) + self.ffn_norm = nn.LayerNorm(self.hidden_dim) + + def __call__(self, x: Array, input_pos: Array, kv_cache: KVCache | KVCacheQ, cache_idx: Array, attn_mask: Array): + h = self.attention_norm( + x + + self.attention( + x, + input_pos, + kv_cache, + cache_idx, + attn_mask, + ) + ) + out = self.ffn_norm(h + self.feed_forward(h)) + return out + + def prefill(self, x: Array, attn_mask: Array, kv_cache: KVCache | KVCacheQ): + h = self.attention_norm( + x + + self.attention.prefill( + x, + kv_cache, + attn_mask, + ) + ) + out = self.ffn_norm(h + self.feed_forward(h)) + + return out + + +class TransformerDecoderABC(nn.Module): + def __init__( + self, + hidden_dim: int, + n_layer: int, + n_head: int, + ffn_dim: int, + vocab_size: int, + max_seq_length: int, + max_batch_size: int, + *args, + **kwds, + ) -> None: + super().__init__() + + self.hidden_dim = hidden_dim + self.n_head = n_head + assert hidden_dim % n_head == 0 + + self.head_dim = hidden_dim // n_head + self.vocab_size = vocab_size + + self.n_layer = n_layer + + self.layers: MutableSequence[TransformerBlockABC] + + self.max_seq_length = max_seq_length + self.max_batch_size = max_batch_size + + def __call__( + self, + input_pos: Array, + x: Array, + kv_caches: MutableSequence[KVCache | KVCacheQ], + cache_idx: Array, + *args, + **kwds, + ): + for layer, kv_cache in zip(self.layers, kv_caches): + x = layer( + x, + input_pos, + kv_cache, + cache_idx, + *args, + **kwds, + ) + + return x + + def prefill(self, x: Array, mask: Array, kv_caches: MutableSequence[KVCache | KVCacheQ]): + for layer, kv_cache in zip(self.layers, kv_caches): + x = layer.prefill( + x, + mask, + kv_cache, + ) + return x + + +class T2SDecoderABC(nn.Module, T2SDecoderProtocol): + def __init__( + self, + config: dict, + max_seq_length: int = 1800, + max_batch_size: int = 10, + ) -> None: + super().__init__() + + hidden_dim: int = config["model"]["hidden_dim"] + embedding_dim: int = config["model"]["embedding_dim"] + n_head: int = config["model"]["head"] + n_layer: int = config["model"]["n_layer"] + vocab_size: int = config["model"]["vocab_size"] + phoneme_vocab_size: int = config["model"]["phoneme_vocab_size"] + EOS: int = config["model"]["EOS"] + ffn_dim: int = hidden_dim * 4 
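+
+        # Hyperparameters above are read from the checkpoint's config dict; the
+        # feed-forward width follows the usual 4x hidden_dim expansion.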
+ + self.n_layer = int(n_layer) + self.hidden_dim = int(hidden_dim) + self.n_head = int(n_head) + assert hidden_dim % n_head == 0 + + self.head_dim = int(hidden_dim // n_head) + self.embedding_dim = int(embedding_dim) + self.ffn_dim = int(ffn_dim) + self.vocab_size = int(vocab_size) + self.phoneme_vocab_size = int(phoneme_vocab_size) + self.max_seq_length = max_seq_length + self.max_batch_size = max_batch_size + self.EOS = EOS + assert self.EOS == self.vocab_size - 1 + + self.bert_proj = nn.Linear(1024, self.embedding_dim) + self.ar_predict_layer = nn.Linear(self.hidden_dim, self.vocab_size, bias=False) + self.h: TransformerDecoderABC + + self.ar_text_embedding = TokenEmbedding(self.embedding_dim, self.phoneme_vocab_size) + self.ar_text_position = SinePositionalEmbedding( + self.embedding_dim, + scale=False, + max_batch_size=max_batch_size, + max_seq_len=max_seq_length, + ) + self.ar_audio_embedding = TokenEmbedding(self.embedding_dim, self.vocab_size) + self.ar_audio_position = SinePositionalEmbedding( + self.embedding_dim, + scale=False, + max_batch_size=max_batch_size, + max_seq_len=max_seq_length, + ) + + self.kv_class: KVCacheProtocol + + def init_cache(self, bsz: int = 0, *args, **kwds) -> MutableSequence[KVCache | KVCacheQ]: + bsz = bsz or self.h.max_batch_size + assert bsz <= self.h.max_batch_size + seq_lens = self.h.max_seq_length + dtype = self.bert_proj.bias.dtype + cache: MutableSequence[KVCache | KVCacheQ] = [ + self.kv_class.init_cache(bsz, seq_lens, self.n_head, self.head_dim, dtype, *args, **kwds) + for _ in range(self.n_layer) + ] + mx.eval(cache) + return cache + + def embed( + self, + x: list[Array], + y: Array, + bert_features: list[Array], + ): + x_len: list[int] = [cast(tuple[int, ...], i.shape)[0] for i in x] + x_len_max = max(x_len) + xy_pos = mx.zeros((len(x), x_len_max + cast(tuple[int, ...], y.shape)[1], self.embedding_dim)).astype( + bert_features[0].dtype + ) + + bert_features = list(map(lambda x: x.swapaxes(0, 1), bert_features)) + + y_len = cast(tuple[int, ...], y.shape)[1] + y_emb = self.ar_audio_embedding(y) + y_pos = self.ar_audio_position.prefill(y_emb) + + for bs, (x_, len_, bert_feature) in enumerate(zip(x, x_len, bert_features)): + x_emb = self.ar_text_embedding(x_) + bert = self.bert_proj(bert_feature) + x_emb = x_emb + bert + x_pos = self.ar_text_position.prefill(mx.expand_dims(x_emb, 0)) + xy_pos[[bs], :len_] = x_pos + xy_pos[[bs], len_ : len_ + y_len] = y_pos + + mx.eval(xy_pos) + return xy_pos + + def compile(self): + setattr(self.h, "__call__", mx.compile(self.h.__call__)) + # setattr(self.h, "prefill", mx.compile(self.h.prefill, shapeless=True)) + + def pre_forward(self, session: T2SSessionMLX): + attn_mask = session.attn_mask + return list(), dict(attn_mask=attn_mask) + + def post_forward(self, idx: int, session: T2SSessionMLX) -> None: + if idx == 0: + prefill_len = session.prefill_len + bsz = session.bsz + + range_tensor = mx.arange(self.max_seq_length).reshape(1, 1, 1, self.max_seq_length) + prefill_len_expanded = prefill_len.reshape(bsz, 1, 1, 1) + attn_mask = range_tensor < prefill_len_expanded + attn_mask = mx.repeat(attn_mask, self.n_head, 1) + + session.attn_mask = attn_mask + + attn_mask = session.attn_mask + input_pos = session.input_pos + attn_mask[mx.arange(session.bsz), :, :, input_pos] = True + mx.eval(attn_mask) diff --git a/GPT_SoVITS/Accelerate/PyTorch/__init__.py b/GPT_SoVITS/Accelerate/PyTorch/__init__.py new file mode 100644 index 00000000..0265e2b4 --- /dev/null +++ b/GPT_SoVITS/Accelerate/PyTorch/__init__.py @@ -0,0 +1,30 
@@ +import importlib.util + +import torch + +from .sample_funcs import sample_naive +from .structs import T2SRequest, T2SResult +from .t2s_engine import T2SEngine as T2SEngineTorch + +torch.set_grad_enabled(False) + +backends = ["torch_varlen"] +if torch.cuda.is_available(): + backends.append("torch_static_cuda_graph") + # if importlib.util.find_spec("sageattention") is not None: + # for i in range(torch.cuda.device_count()): + # major, minor = torch.cuda.get_device_capability(i) + # sm_version = major + minor / 10.0 + # if sm_version >= 7.0: + # backends.append("sage_attn_varlen_cuda_graph") + if importlib.util.find_spec("flash_attn") is not None: + for i in range(torch.cuda.device_count()): + major, minor = torch.cuda.get_device_capability(i) + sm_version = major + minor / 10.0 + if sm_version >= 7.5: + backends.append("flash_attn_varlen_cuda_graph") +# if torch.mps.is_available(): +# backends.append("mps_flash_attn_varlen") + + +__all__ = ["T2SEngineTorch", "T2SRequest", "sample_naive", "T2SResult", "backends"] diff --git a/GPT_SoVITS/Accelerate/PyTorch/backends/flash_attn_varlen_cuda_graph.py b/GPT_SoVITS/Accelerate/PyTorch/backends/flash_attn_varlen_cuda_graph.py new file mode 100644 index 00000000..62a187da --- /dev/null +++ b/GPT_SoVITS/Accelerate/PyTorch/backends/flash_attn_varlen_cuda_graph.py @@ -0,0 +1,158 @@ +""" +Modified From https://github.com/XXXXRT666/GPT-SoVITS +""" + +from typing import Dict, List, Tuple + +import kernels +import torch + +from .. import nn +from ..structs import T2SSession +from ..t2s_model_abc import ( + AttentionABC, + CUDAGraphCacheABC, + FeedForward, + KVCacheNHD, + KVCacheProtocol, + T2SDecoderABC, + TransformerBlockABC, + TransformerDecoderABC, +) + +flash_attn_kernel = None +try: + import flash_attn_interface as flash_attn # type: ignore + + flash_attn_kernel = flash_attn.flash_attn_with_kvcache +except ModuleNotFoundError: + try: + import flash_attn # type: ignore + + flash_attn_kernel = flash_attn.flash_attn_with_kvcache + + except ModuleNotFoundError: + pass + +if flash_attn_kernel is None: + flash_attn_kernel = kernels.get_kernel("kernels-community/flash-attn").flash_attn_with_kvcache + + +Tensor = torch.Tensor + + +class Attention(AttentionABC): + def __init__(self, n_head, hidden_dim, max_seq_length): + super().__init__(n_head, hidden_dim, max_seq_length) + + self.in_proj = nn.Linear(hidden_dim, hidden_dim * 3, bias=True) + self.out_proj = nn.Linear(hidden_dim, hidden_dim, bias=True) + + def __call__(self, x: Tensor, input_pos: Tensor, kv_cache: KVCacheProtocol, *args, **kwds) -> Tensor: + bsz, seqlen, _ = x.shape + + q, k, v = self.in_proj(x).chunk(3, dim=-1) + + q = q.view(bsz, seqlen, self.n_head, self.head_dim) + k = k.view(bsz, seqlen, self.n_head, self.head_dim) + v = v.view(bsz, seqlen, self.n_head, self.head_dim) + + attn: Tensor = flash_attn.flash_attn_with_kvcache( # type: ignore + q, kv_cache.k_cache, kv_cache.v_cache, k, v, cache_seqlens=input_pos - 1 + ) + + attn = attn.view(bsz, seqlen, self.hidden_dim) + + attn = self.out_proj(attn) + + return attn + + +class TransformerBlock(TransformerBlockABC): + def __init__(self, n_head, ffn_dim, hidden_dim, max_seq_length) -> None: + super().__init__(n_head, ffn_dim, hidden_dim, max_seq_length) + + self.attention = Attention(n_head, hidden_dim, max_seq_length) + self.feed_forward = FeedForward(hidden_dim, ffn_dim) + self.attention_norm = nn.LayerNorm([self.hidden_dim]) + self.ffn_norm = nn.LayerNorm([self.hidden_dim]) + + +class TransformerDecoder(TransformerDecoderABC): + def __init__( 
+ self, + hidden_dim, + n_layer, + n_head, + ffn_dim, + vocab_size, + max_seq_length, + max_batch_size, + ) -> None: + super().__init__(hidden_dim, n_layer, n_head, ffn_dim, vocab_size, max_seq_length, max_batch_size) + + self.layers = nn.ModuleList( # type: ignore + TransformerBlock(n_head, ffn_dim, hidden_dim, max_seq_length) for _ in range(n_layer) + ) + + +class T2SDecoder(T2SDecoderABC): + def __init__( + self, + config, + max_seq_length=1800, + max_batch_size=10, + ) -> None: + assert torch.cuda.is_available() + super().__init__(config, max_seq_length, max_batch_size) + + self.bert_proj = nn.Linear(1024, self.embedding_dim) + self.ar_predict_layer = nn.Linear(self.hidden_dim, self.vocab_size, bias=False) + self.h: TransformerDecoderABC = TransformerDecoder( + self.hidden_dim, self.n_layer, self.n_head, self.ffn_dim, self.vocab_size, max_seq_length, max_batch_size + ) + + self.kv_class = KVCacheNHD + + def post_forward(self, idx: int, session: T2SSession) -> None: + return super().post_forward(idx, session) + + def pre_forward(self, session: T2SSession) -> Tuple[List, Dict]: + return super().pre_forward(session) + + +class CUDAGraphCache(CUDAGraphCacheABC): + def __init__( + self, + decoder: T2SDecoder, + ) -> None: + self.is_applicable = True + super().__init__(decoder) + + def release_graph(self, session: T2SSession): + if session.id == self.id: + self.assigned = False + else: + del session.graph, session.xy_pos_, session.xy_dec_, session.input_pos, session.kv_cache + + def get_cache_graph(self, session: T2SSession): + assert self.graph + session.graph = self.graph + session.stream = self.stream + + session.xy_pos_ = self.xy_pos + session.xy_dec_ = self.xy_dec + session.input_pos = self.input_pos.copy_(session.input_pos) + + for cache, cache_ in zip(self.kv_cache, session.kv_cache): + cache.sync_cache(cache_) + + def capture_new_graph(self, session: T2SSession): + session.xy_pos_ = self.xy_pos.clone() + session.xy_dec_ = self.xy_dec.clone() + session.input_pos = self.input_pos.clone().copy_(session.input_pos) + + args, kwds = self.decoder.pre_forward(session) + graph = self.decoder.capture(self.input_pos, self.xy_pos, self.xy_dec, self.kv_cache, *args, **kwds) + session.graph = graph + session.stream = torch.cuda.Stream() # type: ignore diff --git a/GPT_SoVITS/Accelerate/PyTorch/backends/mps_flash_attn_varlen.py b/GPT_SoVITS/Accelerate/PyTorch/backends/mps_flash_attn_varlen.py new file mode 100644 index 00000000..7d50dae0 --- /dev/null +++ b/GPT_SoVITS/Accelerate/PyTorch/backends/mps_flash_attn_varlen.py @@ -0,0 +1,166 @@ +import torch +from torch.nn import functional as F + +from .. 
import nn +from ..structs import KVCacheProtocol, T2SSession +from ..t2s_model_abc import ( + AttentionABC, + CUDAGraphCacheABC, + FeedForward, + KVCacheHND, + T2SDecoderABC, + TransformerBlockABC, + TransformerDecoderABC, +) + +Tensor = torch.Tensor + + +class Attention(AttentionABC): + def __init__(self, n_head, hidden_dim, max_seq_length): + super().__init__(n_head, hidden_dim, max_seq_length) + + # key, query, value projections for all heads, but in a batch + self.in_proj = nn.Linear(hidden_dim, hidden_dim * 3, bias=True) + self.out_proj = nn.Linear(hidden_dim, hidden_dim, bias=True) + + def __call__(self, x: Tensor, input_pos: Tensor, kv_cache: KVCacheProtocol, attn_mask: Tensor): + bsz, seqlen, _ = x.shape + + q, k, v = self.in_proj(x).chunk(3, dim=-1) + + q = q.view(bsz, seqlen, self.n_head, self.head_dim) + k = k.view(bsz, seqlen, self.n_head, self.head_dim) + v = v.view(bsz, seqlen, self.n_head, self.head_dim) + + q, k, v = map(lambda x: x.transpose(1, 2), (q, k, v)) + + k, v = kv_cache.update(input_pos, k, v) + + attn = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask) + + attn = attn.transpose(1, 2).contiguous().view(bsz, seqlen, self.hidden_dim) + + attn = self.out_proj(attn) + + return attn + + +class TransformerBlock(TransformerBlockABC): + def __init__(self, n_head: int, ffn_dim: int, hidden_dim: int, max_seq_length: int) -> None: + super().__init__(n_head, ffn_dim, hidden_dim, max_seq_length) + + self.attention = Attention(n_head, hidden_dim, max_seq_length) + self.feed_forward = FeedForward(hidden_dim, ffn_dim) + self.attention_norm = nn.LayerNorm([self.hidden_dim]) + self.ffn_norm = nn.LayerNorm([self.hidden_dim]) + + +class TransformerDecoder(TransformerDecoderABC): + def __init__( + self, + hidden_dim, + n_layer, + n_head, + ffn_dim, + vocab_size, + max_seq_length, + max_batch_size, + ) -> None: + super().__init__(hidden_dim, n_layer, n_head, ffn_dim, vocab_size, max_seq_length, max_batch_size) + + self.layers = nn.ModuleList( # type: ignore + TransformerBlock(n_head, ffn_dim, hidden_dim, max_seq_length) for _ in range(n_layer) + ) + + +class T2SDecoder(T2SDecoderABC): + def __init__( + self, + config, + max_seq_length=1800, + max_batch_size=10, + ) -> None: + super().__init__(config, max_seq_length, max_batch_size) + + self.bert_proj = nn.Linear(1024, self.embedding_dim) + self.ar_predict_layer = nn.Linear(self.hidden_dim, self.vocab_size, bias=False) + self.h: TransformerDecoderABC = TransformerDecoder( + self.hidden_dim, self.n_layer, self.n_head, self.ffn_dim, self.vocab_size, max_seq_length, max_batch_size + ) + + self.kv_class = KVCacheHND + + def pre_forward(self, session: T2SSession): + attn_mask = session.attn_mask + return list(), dict(attn_mask=attn_mask) + + def post_forward(self, idx: int, session: T2SSession) -> None: + if idx == 0: + prefill_len = session.prefill_len + bsz = session.bsz + + range_tensor = torch.arange(self.max_seq_length).view(1, 1, 1, self.max_seq_length) + prefill_len_expanded = prefill_len.view(bsz, 1, 1, 1) + attn_mask = range_tensor < prefill_len_expanded + attn_mask = attn_mask.expand(-1, self.n_head, -1, -1) + + session.attn_mask = attn_mask + + attn_mask = session.attn_mask + input_pos = session.input_pos + attn_mask[torch.arange(session.bsz), :, :, input_pos] = True + + +class CUDAGraphCache(CUDAGraphCacheABC): + def __init__( + self, + decoder, + ) -> None: + self.is_applicable = False + super().__init__(decoder) + if torch.cuda.is_available(): + self.attn_mask = ( + torch.randint(0, 2, (decoder.max_batch_size, 
decoder.n_head, 1, decoder.max_seq_length)) + .bool() + .to(self.device, self.dtype) + ) + + def release_graph(self, session: T2SSession): + if session.id == self.id: + self.assigned = False + else: + del ( + session.graph, + session.xy_pos_, + session.xy_dec_, + session.input_pos, + session.kv_cache, + session.attn_mask, + ) + + def get_cache_graph(self, session: T2SSession): + assert self.graph + session.graph = self.graph + session.stream = self.stream + + session.xy_pos_ = self.xy_pos + session.xy_dec_ = self.xy_dec + session.input_pos = self.input_pos.copy_(session.input_pos) + + session.attn_mask = self.attn_mask + + for cache, cache_ in zip(self.kv_cache, session.kv_cache): + cache.sync_cache(cache_) + + def capture_new_graph(self, session: T2SSession): + session.xy_pos_ = self.xy_pos.clone() + session.xy_dec_ = self.xy_dec.clone() + session.input_pos = self.input_pos.clone().copy_(session.input_pos) + + session.attn_mask = self.attn_mask.clone().copy_(session.attn_mask) + + args, kwds = self.decoder.pre_forward(session) + graph = self.decoder.capture(self.input_pos, self.xy_pos, self.xy_dec, self.kv_cache, *args, **kwds) + session.graph = graph + session.stream = torch.cuda.Stream() # type: ignore diff --git a/GPT_SoVITS/Accelerate/PyTorch/backends/sage_attn_varlen_cuda_graph.py b/GPT_SoVITS/Accelerate/PyTorch/backends/sage_attn_varlen_cuda_graph.py new file mode 100644 index 00000000..05db87ee --- /dev/null +++ b/GPT_SoVITS/Accelerate/PyTorch/backends/sage_attn_varlen_cuda_graph.py @@ -0,0 +1,175 @@ +import sageattention # type: ignore +import torch + +from .. import nn +from ..structs import T2SSession +from ..t2s_model_abc import ( + AttentionABC, + CUDAGraphCacheABC, + FeedForward, + KVCacheHND, + KVCacheProtocol, + T2SDecoderABC, + TransformerBlockABC, + TransformerDecoderABC, +) + +Tensor = torch.Tensor + + +class Attention(AttentionABC): + def __init__(self, n_head, hidden_dim, max_seq_length): + super().__init__(n_head, hidden_dim, max_seq_length) + + # key, query, value projections for all heads, but in a batch + self.in_proj = nn.Linear(hidden_dim, hidden_dim * 3, bias=True) + self.out_proj = nn.Linear(hidden_dim, hidden_dim, bias=True) + + def __call__( + self, + x: Tensor, + input_pos: Tensor, + kv_cache: KVCacheProtocol, + cu_seqlens_q: Tensor, + cu_seqlens_kv: Tensor, + ) -> Tensor: + bsz, seqlen, _ = x.shape + + q, k, v = self.in_proj(x).chunk(3, dim=-1) + + q = q.view(bsz, seqlen, self.n_head, self.head_dim) + k = k.view(bsz, seqlen, self.n_head, self.head_dim) + v = v.view(bsz, seqlen, self.n_head, self.head_dim) + + q, k, v = map(lambda x: x.transpose(1, 2), (q, k, v)) + + k, v = kv_cache.update(input_pos, k, v) + + attn: Tensor = sageattention.sageattn_varlen( + q, + k, + v, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_kv=cu_seqlens_kv, + max_seqlen_q=1, + max_seqlen_k=self.max_seq_length, + ) + + attn = attn.transpose(1, 2).contiguous().view(bsz, seqlen, self.hidden_dim) + + attn = self.out_proj(attn) + + return attn + + +class TransformerBlock(TransformerBlockABC): + def __init__(self, n_head, ffn_dim, hidden_dim, max_seq_length) -> None: + super().__init__(n_head, ffn_dim, hidden_dim, max_seq_length) + + self.attention = Attention(n_head, hidden_dim, max_seq_length) + self.feed_forward = FeedForward(hidden_dim, ffn_dim) + self.attention_norm = nn.LayerNorm([self.hidden_dim]) + self.ffn_norm = nn.LayerNorm([self.hidden_dim]) + + +class TransformerDecoder(TransformerDecoderABC): + def __init__( + self, + hidden_dim, + n_layer, + n_head, + ffn_dim, + vocab_size, + 
max_seq_length, + max_batch_size, + ) -> None: + super().__init__(hidden_dim, n_layer, n_head, ffn_dim, vocab_size, max_seq_length, max_batch_size) + + self.layers = nn.ModuleList( # type: ignore + TransformerBlock(n_head, ffn_dim, hidden_dim, max_seq_length) for _ in range(n_layer) + ) + + +class T2SDecoder(T2SDecoderABC): + def __init__( + self, + config, + max_seq_length=1800, + max_batch_size=10, + ) -> None: + super().__init__(config, max_seq_length, max_batch_size) + + self.bert_proj = nn.Linear(1024, self.embedding_dim) + self.ar_predict_layer = nn.Linear(self.hidden_dim, self.vocab_size, bias=False) + self.h: TransformerDecoderABC = TransformerDecoder( + self.hidden_dim, self.n_layer, self.n_head, self.ffn_dim, self.vocab_size, max_seq_length, max_batch_size + ) + + self.kv_class = KVCacheHND + + def pre_forward(self, session: T2SSession) -> tuple[list[Tensor], dict[str, Tensor]]: + return list(), dict(cu_seqlens_q=session.cu_seqlens_q, cu_seqlens_kv=session.cu_seqlens_kv) + + def post_forward(self, idx: int, session: T2SSession): + if idx == 0: + session.cu_seqlens_q = torch.arange(0, session.bsz + 1, dtype=torch.int32) + session.cu_seqlens_kv = torch.cat([torch.tensor(0, dtype=torch.int32), session.input_pos]) + else: + cu_seqlens_q = session.cu_seqlens_q + cu_seqlens_kv = session.cu_seqlens_kv + cu_seqlens_kv.add_(cu_seqlens_q) + + +class CUDAGraphCache(CUDAGraphCacheABC): + def __init__( + self, + decoder: T2SDecoder, + ) -> None: + self.is_applicable = False + super().__init__(decoder) + + if torch.cuda.is_available(): + self.cu_seqlens_q = torch.arange(0, decoder.max_batch_size + 1, dtype=torch.int32).to(self.device) + self.cu_seqlens_kv = torch.cat([torch.tensor(0, dtype=torch.int32), self.input_pos]).to(self.device) + + def release_graph(self, session: T2SSession): + if session.id == self.id: + self.assigned = False + else: + del ( + session.graph, + session.xy_pos_, + session.xy_dec_, + session.input_pos, + session.kv_cache, + session.cu_seqlens_q, + session.cu_seqlens_kv, + ) + + def get_cache_graph(self, session: T2SSession): + assert self.graph + session.graph = self.graph + session.stream = self.stream + + session.xy_pos_ = self.xy_pos + session.xy_dec_ = self.xy_dec + session.input_pos = self.input_pos.copy_(session.input_pos) + + session.cu_seqlens_q = self.cu_seqlens_q + session.cu_seqlens_kv = self.cu_seqlens_kv + + for cache, cache_ in zip(self.kv_cache, session.kv_cache): + cache.sync_cache(cache_) + + def capture_new_graph(self, session: T2SSession): + session.xy_pos_ = self.xy_pos.clone() + session.xy_dec_ = self.xy_dec.clone() + session.input_pos = self.input_pos.clone().copy_(session.input_pos) + + session.cu_seqlens_q = self.cu_seqlens_q.clone().copy_(session.cu_seqlens_q) + session.cu_seqlens_kv = self.cu_seqlens_kv.clone().copy_(session.cu_seqlens_kv) + + args, kwds = self.decoder.pre_forward(session) + graph = self.decoder.capture(self.input_pos, self.xy_pos, self.xy_dec, self.kv_cache, *args, **kwds) + session.graph = graph + session.stream = torch.cuda.Stream() # type: ignore diff --git a/GPT_SoVITS/Accelerate/PyTorch/backends/torch_static_cuda_graph.py b/GPT_SoVITS/Accelerate/PyTorch/backends/torch_static_cuda_graph.py new file mode 100644 index 00000000..f9ac2cd5 --- /dev/null +++ b/GPT_SoVITS/Accelerate/PyTorch/backends/torch_static_cuda_graph.py @@ -0,0 +1,166 @@ +import torch +from torch.nn import functional as F + +from .. 
import nn +from ..structs import KVCacheProtocol, T2SSession +from ..t2s_model_abc import ( + AttentionABC, + CUDAGraphCacheABC, + FeedForward, + KVCacheHND, + T2SDecoderABC, + TransformerBlockABC, + TransformerDecoderABC, +) + +Tensor = torch.Tensor + + +class Attention(AttentionABC): + def __init__(self, n_head, hidden_dim, max_seq_length): + super().__init__(n_head, hidden_dim, max_seq_length) + + # key, query, value projections for all heads, but in a batch + self.in_proj = nn.Linear(hidden_dim, hidden_dim * 3, bias=True) + self.out_proj = nn.Linear(hidden_dim, hidden_dim, bias=True) + + def __call__(self, x: Tensor, input_pos: Tensor, kv_cache: KVCacheProtocol, attn_mask: Tensor): + bsz, seqlen, _ = x.shape + + q, k, v = self.in_proj(x).chunk(3, dim=-1) + + q = q.view(bsz, seqlen, self.n_head, self.head_dim) + k = k.view(bsz, seqlen, self.n_head, self.head_dim) + v = v.view(bsz, seqlen, self.n_head, self.head_dim) + + q, k, v = map(lambda x: x.transpose(1, 2), (q, k, v)) + + k, v = kv_cache.update(input_pos, k, v) + + attn = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask) + + attn = attn.transpose(1, 2).contiguous().view(bsz, seqlen, self.hidden_dim) + + attn = self.out_proj(attn) + + return attn + + +class TransformerBlock(TransformerBlockABC): + def __init__(self, n_head: int, ffn_dim: int, hidden_dim: int, max_seq_length: int) -> None: + super().__init__(n_head, ffn_dim, hidden_dim, max_seq_length) + + self.attention = Attention(n_head, hidden_dim, max_seq_length) + self.feed_forward = FeedForward(hidden_dim, ffn_dim) + self.attention_norm = nn.LayerNorm([self.hidden_dim]) + self.ffn_norm = nn.LayerNorm([self.hidden_dim]) + + +class TransformerDecoder(TransformerDecoderABC): + def __init__( + self, + hidden_dim, + n_layer, + n_head, + ffn_dim, + vocab_size, + max_seq_length, + max_batch_size, + ) -> None: + super().__init__(hidden_dim, n_layer, n_head, ffn_dim, vocab_size, max_seq_length, max_batch_size) + + self.layers = nn.ModuleList( # type: ignore + TransformerBlock(n_head, ffn_dim, hidden_dim, max_seq_length) for _ in range(n_layer) + ) + + +class T2SDecoder(T2SDecoderABC): + def __init__( + self, + config, + max_seq_length=1800, + max_batch_size=10, + ) -> None: + super().__init__(config, max_seq_length, max_batch_size) + + self.bert_proj = nn.Linear(1024, self.embedding_dim) + self.ar_predict_layer = nn.Linear(self.hidden_dim, self.vocab_size, bias=False) + self.h: TransformerDecoderABC = TransformerDecoder( + self.hidden_dim, self.n_layer, self.n_head, self.ffn_dim, self.vocab_size, max_seq_length, max_batch_size + ) + + self.kv_class = KVCacheHND + + def pre_forward(self, session: T2SSession): + attn_mask = session.attn_mask + return list(), dict(attn_mask=attn_mask) + + def post_forward(self, idx: int, session: T2SSession) -> None: + if idx == 0: + prefill_len = session.prefill_len + bsz = session.bsz + + range_tensor = torch.arange(self.max_seq_length).view(1, 1, 1, self.max_seq_length) + prefill_len_expanded = prefill_len.view(bsz, 1, 1, 1) + attn_mask = range_tensor < prefill_len_expanded + attn_mask = attn_mask.expand(-1, self.n_head, -1, -1) + + session.attn_mask = attn_mask + + attn_mask = session.attn_mask + input_pos = session.input_pos + attn_mask[torch.arange(session.bsz), :, :, input_pos] = True + + +class CUDAGraphCache(CUDAGraphCacheABC): + def __init__( + self, + decoder, + ) -> None: + self.is_applicable = True + super().__init__(decoder) + if torch.cuda.is_available(): + self.attn_mask = ( + torch.randint(0, 2, (decoder.max_batch_size, 
decoder.n_head, 1, decoder.max_seq_length)) + .bool() + .to(self.device, self.dtype) + ) + + def release_graph(self, session: T2SSession): + if session.id == self.id: + self.assigned = False + else: + del ( + session.graph, + session.xy_pos_, + session.xy_dec_, + session.input_pos, + session.kv_cache, + session.attn_mask, + ) + + def get_cache_graph(self, session: T2SSession): + assert self.graph + session.graph = self.graph + session.stream = self.stream + + session.xy_pos_ = self.xy_pos + session.xy_dec_ = self.xy_dec + session.input_pos = self.input_pos.copy_(session.input_pos) + + session.attn_mask = self.attn_mask + + for cache, cache_ in zip(self.kv_cache, session.kv_cache): + cache.sync_cache(cache_) + + def capture_new_graph(self, session: T2SSession): + session.xy_pos_ = self.xy_pos.clone() + session.xy_dec_ = self.xy_dec.clone() + session.input_pos = self.input_pos.clone().copy_(session.input_pos) + + session.attn_mask = self.attn_mask.clone().copy_(session.attn_mask) + + args, kwds = self.decoder.pre_forward(session) + graph = self.decoder.capture(self.input_pos, self.xy_pos, self.xy_dec, self.kv_cache, *args, **kwds) + session.graph = graph + session.stream = torch.cuda.Stream() # type: ignore diff --git a/GPT_SoVITS/Accelerate/PyTorch/backends/torch_varlen.py b/GPT_SoVITS/Accelerate/PyTorch/backends/torch_varlen.py new file mode 100644 index 00000000..a0d1be61 --- /dev/null +++ b/GPT_SoVITS/Accelerate/PyTorch/backends/torch_varlen.py @@ -0,0 +1,145 @@ +from typing import NoReturn + +import torch +from torch.nn import functional as F + +from .. import nn +from ..structs import KVCacheProtocol, T2SSession +from ..t2s_model_abc import ( + AttentionABC, + CUDAGraphCacheABC, + FeedForward, + KVCacheHNDVarlen, + T2SDecoderABC, + TransformerBlockABC, + TransformerDecoderABC, +) + +Tensor = torch.Tensor + + +class Attention(AttentionABC): + def __init__(self, n_head, hidden_dim, max_seq_length): + super().__init__(n_head, hidden_dim, max_seq_length) + + # key, query, value projections for all heads, but in a batch + self.in_proj = nn.Linear(hidden_dim, hidden_dim * 3, bias=True) + self.out_proj = nn.Linear(hidden_dim, hidden_dim, bias=True) + + def __call__(self, x: Tensor, input_pos: Tensor, kv_cache: KVCacheProtocol, attn_mask: Tensor): + bsz, seqlen, _ = x.shape + + q, k, v = self.in_proj(x).chunk(3, dim=-1) + + q = q.view(bsz, seqlen, self.n_head, self.head_dim) + k = k.view(bsz, seqlen, self.n_head, self.head_dim) + v = v.view(bsz, seqlen, self.n_head, self.head_dim) + + q, k, v = map(lambda x: x.transpose(1, 2), (q, k, v)) + + k, v = kv_cache.update(input_pos, k, v) + + max_idx = input_pos.max() + + q, k, v = map(lambda x: x[..., :max_idx, :], (q, k, v)) + + mask = attn_mask[..., :max_idx] + + attn = F.scaled_dot_product_attention(q, k, v, mask) + + attn = attn.transpose(1, 2).contiguous().view(bsz, seqlen, self.hidden_dim) + + attn = self.out_proj(attn) + + return attn + + +class TransformerBlock(TransformerBlockABC): + def __init__(self, n_head: int, ffn_dim: int, hidden_dim: int, max_seq_length: int) -> None: + super().__init__(n_head, ffn_dim, hidden_dim, max_seq_length) + + self.attention = Attention(n_head, hidden_dim, max_seq_length) + self.feed_forward = FeedForward(hidden_dim, ffn_dim) + self.attention_norm = nn.LayerNorm([self.hidden_dim]) + self.ffn_norm = nn.LayerNorm([self.hidden_dim]) + + +class TransformerDecoder(TransformerDecoderABC): + def __init__( + self, + hidden_dim, + n_layer, + n_head, + ffn_dim, + vocab_size, + max_seq_length, + max_batch_size, + ) -> 
None: + super().__init__(hidden_dim, n_layer, n_head, ffn_dim, vocab_size, max_seq_length, max_batch_size) + + self.layers = nn.ModuleList( # type: ignore + TransformerBlock(n_head, ffn_dim, hidden_dim, max_seq_length) for _ in range(n_layer) + ) + + +class T2SDecoder(T2SDecoderABC): + def __init__( + self, + config, + max_seq_length=1800, + max_batch_size=10, + ) -> None: + super().__init__(config, max_seq_length, max_batch_size) + + self.bert_proj = nn.Linear(1024, self.embedding_dim) + self.ar_predict_layer = nn.Linear(self.hidden_dim, self.vocab_size, bias=False) + self.h: TransformerDecoderABC = TransformerDecoder( + self.hidden_dim, self.n_layer, self.n_head, self.ffn_dim, self.vocab_size, max_seq_length, max_batch_size + ) + + self.kv_class = KVCacheHNDVarlen + + def capture( + self, + *args, + **kwds, + ) -> NoReturn: + raise NotImplementedError("Cuda Graph Is Not Supported For Varlen Model") + + def pre_forward(self, session: T2SSession): + attn_mask = session.attn_mask + return list(), dict(attn_mask=attn_mask) + + def post_forward(self, idx: int, session: T2SSession) -> None: + if idx == 0: + prefill_len = session.prefill_len + bsz = session.bsz + + range_tensor = torch.arange(self.max_seq_length).view(1, 1, 1, self.max_seq_length) + prefill_len_expanded = prefill_len.view(bsz, 1, 1, 1) + attn_mask = range_tensor < prefill_len_expanded + attn_mask = attn_mask.expand(-1, self.n_head, -1, -1) + + session.attn_mask = attn_mask + + attn_mask = session.attn_mask + input_pos = session.input_pos + attn_mask[torch.arange(session.bsz), :, :, input_pos] = True + + +class CUDAGraphCache(CUDAGraphCacheABC): + def __init__( + self, + decoder, + ) -> None: + self.is_applicable = False + super().__init__(decoder) + + def release_graph(self, session: T2SSession): + raise NotImplementedError("Cuda Graph Is Not Supported For Varlen Model") + + def get_cache_graph(self, session: T2SSession): + raise NotImplementedError("Cuda Graph Is Not Supported For Varlen Model") + + def capture_new_graph(self, session: T2SSession): + raise NotImplementedError("Cuda Graph Is Not Supported For Varlen Model") diff --git a/GPT_SoVITS/Accelerate/PyTorch/nn.py b/GPT_SoVITS/Accelerate/PyTorch/nn.py new file mode 100644 index 00000000..1e69e056 --- /dev/null +++ b/GPT_SoVITS/Accelerate/PyTorch/nn.py @@ -0,0 +1,69 @@ +""" +Enhanced Type Hint nn.Module +Modified From https://github.com/labmlai/labml/blob/master/helpers/labml_helpers/module.py +""" + +from typing import Any + +import torch.nn +from torch.nn import ( + functional as functional, +) +from torch.nn import ( + utils as utils, +) +from torch.nn.modules import * # type: ignore # noqa: F403 +from torch.nn.parameter import ( + Parameter as Parameter, +) + +Tensor = torch.Tensor + + +class Module(torch.nn.Module): + r""" + Wraps ``torch.nn.Module`` to overload ``__call__`` instead of + ``forward`` for better type checking. 
+ + `PyTorch Github issue for clarification `_ + """ + + def _forward_unimplemented(self, *input: Any) -> None: + # To stop PyTorch from giving abstract methods warning + pass + + def __init_subclass__(cls, **kwargs): + if cls.__dict__.get("__call__", None) is None: + return + + setattr(cls, "forward", cls.__dict__["__call__"]) + delattr(cls, "__call__") + + @property + def device(self) -> torch.device: + params = self.parameters() + try: + sample_param = next(params) + return sample_param.device + except StopIteration: + raise RuntimeError(f"Unable to determine device of {self.__class__.__name__}") from None + + +class Linear(torch.nn.Linear): + def __call__(self, input: Tensor) -> Tensor: + return super().__call__(input) + + +class Dropout(torch.nn.Dropout): + def __call__(self, input: Tensor) -> Tensor: + return super().__call__(input) + + +class Embedding(torch.nn.Embedding): + def __call__(self, input: Tensor) -> Tensor: + return super().__call__(input) + + +class LayerNorm(torch.nn.LayerNorm): + def __call__(self, input: Tensor) -> Tensor: + return super().__call__(input) diff --git a/GPT_SoVITS/Accelerate/PyTorch/sample_funcs.py b/GPT_SoVITS/Accelerate/PyTorch/sample_funcs.py new file mode 100644 index 00000000..0b9eec0c --- /dev/null +++ b/GPT_SoVITS/Accelerate/PyTorch/sample_funcs.py @@ -0,0 +1,67 @@ +from typing import Protocol + +import torch +import torch.nn.functional as F + +Tensor = torch.Tensor + + +class SampleProtocol(Protocol): + @staticmethod + def __call__( + logits: Tensor, + previous_tokens: Tensor, + temperature: float, + top_k: int, + top_p: float, + repetition_penalty: float, + ) -> Tensor: ... + + +class sample_naive(SampleProtocol): + @staticmethod + def __call__( + logits: Tensor, + previous_tokens: Tensor, + temperature: float, + top_k: int, + top_p: float, + repetition_penalty: float, + ): + if temperature <= 1e-5: + probs = F.softmax(logits, dim=-1) + return torch.argmax(probs, dim=-1, keepdim=True).to(dtype=torch.int32) + + if repetition_penalty != 1.0: + previous_tokens = previous_tokens.long() + score = torch.gather(logits, dim=1, index=previous_tokens) + score = torch.where( + score < 0, + score * repetition_penalty, + score / repetition_penalty, + ) + logits.scatter_(dim=1, index=previous_tokens, src=score) + + if top_p < 1.0: + sorted_logits, sorted_indices = torch.sort(logits, descending=True) + cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) + cum_probs[cum_probs > 1] = 1 + sorted_indices_to_remove = cum_probs > top_p + sorted_indices_to_remove[:, 0] = False # keep at least one option + indices_to_remove = sorted_indices_to_remove.scatter( + dim=1, index=sorted_indices, src=sorted_indices_to_remove + ) + logits = logits.masked_fill(indices_to_remove, -float("Inf")) + + if temperature < 1.0: + logits /= temperature + + v, _ = torch.topk(logits, top_k) + pivot = v[:, -1].unsqueeze(-1) + logits = torch.where(logits < pivot, -float("Inf"), logits) + + probs = F.softmax(logits, dim=-1) + q = -torch.log(torch.rand_like(probs)) + idx_next = torch.argmax(probs / q, dim=-1, keepdim=True).to(dtype=torch.int32) + + return idx_next diff --git a/GPT_SoVITS/Accelerate/PyTorch/structs.py b/GPT_SoVITS/Accelerate/PyTorch/structs.py new file mode 100644 index 00000000..1822acdc --- /dev/null +++ b/GPT_SoVITS/Accelerate/PyTorch/structs.py @@ -0,0 +1,151 @@ +""" +Modified From https://github.com/XXXXRT666/GPT-SoVITS +""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Literal, MutableSequence, Optional, 
Protocol
+
+import torch
+
+from .sample_funcs import SampleProtocol, sample_naive
+
+Tensor = torch.Tensor
+
+
+@dataclass
+class T2SResult:
+    result: list[Tensor] | None = None
+    infer_speed: tuple[float, float] = (0.0, 0.0)
+    status: Literal["Success", "Error"] = "Success"
+    exception: Optional[Exception] = None
+    traceback: Optional[str] = None
+
+
+@dataclass
+class T2SRequest:
+    x: list[torch.Tensor]
+    x_lens: Tensor
+    prompts: torch.Tensor
+    bert_feature: list[Tensor]
+    valid_length: int
+    top_k: int = 5
+    top_p: float = 1
+    early_stop_num: int = -1
+    temperature: float = 1.0
+    repetition_penalty: float = 1.35
+    use_cuda_graph: bool = False
+    debug: bool = False
+
+
+class KVCacheProtocol(Protocol):
+    k_cache: Tensor
+    v_cache: Tensor
+
+    def __init__(self, batch_size: int, max_seq_length: int, n_heads: int, head_dim: int) -> None: ...
+
+    def empty(self) -> None: ...
+
+    def update(self, input_pos: Tensor, k_val: Tensor, v_val: Tensor, *args, **kwds) -> tuple[Tensor, Tensor]: ...
+
+    def prefill_kv(self, k_val: Tensor, v_val: Tensor) -> None: ...
+
+    def sync_cache(self, kv_cache: KVCacheProtocol) -> None: ...
+
+
+class T2SDecoderProtocol(Protocol):
+    max_seq_length: int
+    EOS: int
+    n_head: int
+
+    @property
+    def device(self) -> torch.device: ...
+
+    def embed(self, x: list[Tensor], y: Tensor, bert_features: list[Tensor]) -> Tensor: ...
+
+
+class T2SEngineProtocol(Protocol):
+    def _handle_request(self, request: T2SRequest) -> tuple[list[Tensor], float, float]: ...
+
+    def generate(self, request: T2SRequest) -> T2SResult: ...
+
+
+class T2SSession:
+    def __init__(
+        self,
+        decoder: T2SDecoderProtocol,
+        request: T2SRequest,
+        sample_func: type[SampleProtocol] = sample_naive,
+        device: torch.device = torch.device("cpu"),
+        dtype: torch.dtype = torch.float32,
+    ):
+        with device:
+            self.decoder = decoder
+            self.request = request
+            self.device = device
+            self.dtype = dtype
+
+            bsz = len(request.x)
+            y_len = request.prompts.size(-1)
+            self.bsz = bsz
+            self.y_len = y_len
+            request.prompts = request.prompts.to(device, torch.int32)
+
+            # Cache
+            self.kv_cache: MutableSequence[KVCacheProtocol]
+            self.sample = sample_func()
+
+            # Forward args
+            self.x = [i.to(device) for i in request.x]
+            self.x_lens = request.x_lens.to(torch.int32)
+            self.y = torch.zeros((bsz, decoder.max_seq_length)).to(torch.int32)
+            self.y[:, : request.prompts.shape[-1]] = request.prompts
+            self.bert_feature = [i.to(device, dtype) for i in request.bert_feature]
+
+            self.prefill_len = self.x_lens + request.prompts.size(1)
+
+            self.input_pos = torch.zeros_like(self.prefill_len)
+            self.input_pos.add_(self.prefill_len)
+
+            # CUDA Graph
+            self.stream: Optional[torch.cuda.Stream] = None
+            self.graph: Optional[torch.cuda.CUDAGraph] = None
+            self.xy_pos_: Tensor
+            self.xy_dec_: Tensor
+
+            # EOS
+            self.completed = torch.Tensor([False] * len(self.x)).bool().to(device)
+            self.y_results: list[Tensor] = [None] * len(self.x)  # type: ignore
+
+            self.xy_pos = decoder.embed(self.x, request.prompts, self.bert_feature)
+
+            max_len = int(self.prefill_len.max().item())
+            attn_mask = torch.zeros(size=(bsz, max_len, max_len), dtype=torch.bool)
+
+            for bs in range(bsz):
+                pos = int(self.x_lens[bs])
+                seq_len = pos + y_len
+
+                attn_mask[bs, :seq_len, :pos] = True
+
+                ar_mask = ~torch.triu(
+                    input=torch.ones(
+                        size=(
+                            y_len,
+                            y_len,
+                        ),
+                        dtype=torch.bool,
+                    ),
+                    diagonal=1,
+                )
+                attn_mask[bs, pos:seq_len, pos:seq_len] = ar_mask
+
+            self.attn_mask = attn_mask
+            self.attn_mask = attn_mask.unsqueeze(0).expand(-1, decoder.n_head, -1, -1)
+
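+            # CUDA-graph ownership marker: CUDAGraphCacheABC.assign_graph sets this to the
+            # cache's id when the session borrows the shared graph; release_graph compares it
+            # to decide whether to free per-session copies.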
+ self.id: int = -1 + + # Sage Attn & Transformer Engine Impl + self.cu_seqlens_q: Tensor + self.cu_seqlens_kv: Tensor diff --git a/GPT_SoVITS/Accelerate/PyTorch/t2s_engine.py b/GPT_SoVITS/Accelerate/PyTorch/t2s_engine.py new file mode 100644 index 00000000..47961c05 --- /dev/null +++ b/GPT_SoVITS/Accelerate/PyTorch/t2s_engine.py @@ -0,0 +1,223 @@ +import contextlib +import gc +import os +import sys +import time +import traceback +from importlib import import_module + +import torch +from rich.progress import BarColumn, Progress, TextColumn + +from ..logger import SpeedColumnToken, console, logger +from .structs import T2SEngineProtocol, T2SRequest, T2SResult, T2SSession +from .t2s_model_abc import ( + CUDAGraphCacheABC, + T2SDecoderABC, + TorchProfiler, +) + + +class T2SEngine(T2SEngineProtocol): + def __init__( + self, + decoder_model: T2SDecoderABC, + device: torch.device = torch.device("cpu"), + dtype: torch.dtype = torch.float32, + ) -> None: + assert device.type in {"cpu", "cuda", "mps", "xpu", "mtia"} + assert dtype in {torch.float16, torch.bfloat16, torch.float32} + + self.device = device + self.dtype = dtype + + self.decoder_model: T2SDecoderABC = decoder_model.to(self.device, self.dtype) + + self.graphcache: CUDAGraphCacheABC = self.init_cache() + + def _handle_request(self, request: T2SRequest): + with self.device: + decoder = self.decoder_model + session = T2SSession(decoder, request, device=self.device, dtype=self.dtype) + batch_idx = torch.arange(session.bsz) + + t1 = 0.0 + infer_speed = 0.0 + infer_time = 0.0 + + torch_profiler = TorchProfiler(request.debug) + with ( + torch_profiler.profiler(), + Progress( + TextColumn("[cyan]{task.description}"), + BarColumn(), + TextColumn("{task.completed}/{task.total} tokens"), + SpeedColumnToken(show_speed=True), + console=console, + transient=True, + ) as progress, + ): + max_token = int(min(1800 - session.input_pos.max(), 1500)) + task = progress.add_task("T2S Decoding", total=max_token) + + for idx in range(max_token): + progress.update(task, advance=1) + if idx == 0: + session.kv_cache = decoder.init_cache(session.bsz) + xy_dec = decoder.h.prefill(session.xy_pos, session.kv_cache, session.attn_mask) + xy_dec = xy_dec[None, batch_idx, session.input_pos - 1] + else: + if ( + request.use_cuda_graph + and session.graph is None + and self.graphcache.is_applicable + and torch.cuda.is_available() + ): + self.graphcache.assign_graph(session) + + with torch_profiler.record("AR"): + if session.graph: + assert session.stream + session.stream.wait_stream(torch.cuda.default_stream()) + with torch.cuda.stream(session.stream): + session.xy_pos_.copy_(session.xy_pos) + session.graph.replay() + xy_dec = session.xy_dec_.clone() + else: + args, kwds = decoder.pre_forward(session) + xy_dec = decoder.h( + session.input_pos, + session.xy_pos, + session.kv_cache, + *args, + **kwds, + ) + + with torch.cuda.stream(session.stream) if session.stream is not None else contextlib.nullcontext(): + decoder.post_forward(idx, session) + logits = decoder.ar_predict_layer(xy_dec[:, -1]) + + if idx == 0: + logits[:, -1] = float("-inf") + + with torch_profiler.record("Sampling"): + samples = session.sample( + logits=logits, + previous_tokens=session.y[:, : session.y_len + idx], + top_k=request.top_k, + top_p=request.top_p, + repetition_penalty=request.repetition_penalty, + temperature=request.temperature, + ) + session.y[batch_idx, session.y_len + idx] = samples + session.input_pos.add_(1) + + with torch_profiler.record("EOS"): + argmax_token = torch.argmax(logits, 
dim=-1) + sample_token = samples.squeeze(1) + EOS_mask = (argmax_token == decoder.EOS) | (sample_token == decoder.EOS) + + newly_done_mask = EOS_mask & (~session.completed) + newly_done_indices = newly_done_mask.nonzero() + + if newly_done_indices.numel() > 0: + for i in newly_done_indices: + session.y_results[i] = session.y[i, session.y_len : session.y_len + idx] + session.completed[newly_done_indices] = True + + if torch.all(session.completed).item(): + if session.y[:, session.y_len :].sum() == 0: + session.y_results = [torch.tensor(0) for _ in range(session.bsz)] + logger.error("Bad Zero Prediction") + else: + logger.info( + f"T2S Decoding EOS {session.prefill_len.tolist().__str__().strip('[]')} -> {[i.size(-1) for i in session.y_results].__str__().strip('[]')}" + ) + logger.info(f"Infer Speed: {(idx - 1) / (time.perf_counter() - t1):.2f} token/s") + infer_time = time.perf_counter() - t1 + infer_speed = (idx - 1) / infer_time + break + + if (request.early_stop_num != -1 and idx >= request.early_stop_num) or idx == max_token - 1: + for i in range(session.bsz): + if not session.completed[i].item(): + session.y_results[i] = session.y[i, session.y_len : session.y_len + 1499] + session.completed[i] = True + logger.error("Bad Full Prediction") + break + + with torch_profiler.record("NextPos"): + y_emb = decoder.ar_audio_embedding(samples) + session.xy_pos = decoder.ar_audio_position(session.input_pos - session.x_lens, y_emb) + + if idx == 1: + torch_profiler.start() + t1 = time.perf_counter() + + if idx == 51: + torch_profiler.end() + + if idx % 100 == 0: + match session.device.type: + case "cuda": + torch.cuda.empty_cache() + case "mps": + torch.mps.empty_cache() + case "xpu": + torch.xpu.empty_cache() + case "mtia": + torch.mtia.empty_cache() + + match session.device.type: + case "cuda": + if session.stream is not None: + torch.cuda.current_stream().wait_stream(session.stream) + torch.cuda.empty_cache() + case "mps": + torch.mps.empty_cache() + case "xpu": + torch.xpu.empty_cache() + case "mtia": + torch.mtia.empty_cache() + case "cpu": + gc.collect() + + torch_profiler.end() + if request.use_cuda_graph and torch.cuda.is_available(): + self.graphcache.release_graph(session) + + return session.y_results[: request.valid_length], infer_speed, infer_time + + def generate(self, request: T2SRequest): + try: + result, infer_speed, infer_time = self._handle_request(request) + t2s_result = T2SResult(result=result, infer_speed=(infer_speed, infer_time), status="Success") + except Exception as e: + t2s_result = T2SResult(status="Error", exception=e, traceback=traceback.format_exc()) + return t2s_result + + @staticmethod + def load_decoder(weights_path: os.PathLike, max_batch_size: int = 1, backend: str = "Flash-Attn-Varlen-CUDAGraph"): + logger.info(f"Loading Text2Semantic Weights from {weights_path} with {backend} Backend") + module_path = f".backends.{backend.lower().replace('-', '_').replace('cudagraph', 'cuda_graph')}" + decoder_cls_name = "T2SDecoder" + decoder_mod = import_module(module_path, package=__package__) + decoder_cls: type[T2SDecoderABC] = getattr(decoder_mod, decoder_cls_name) + dict_s1 = torch.load(weights_path, map_location="cpu", weights_only=False, mmap=True) + config = dict_s1["config"] + decoder: T2SDecoderABC = decoder_cls(config, max_batch_size=max_batch_size) + state_dict = dict_s1["weight"] + decoder.load_state_dict(state_dict) + + return decoder.eval() + + def init_cache(self): + assert self.decoder_model + + module_name = self.decoder_model.__class__.__module__ + module = 
sys.modules.get(module_name) + assert module + + target_class: type[CUDAGraphCacheABC] = getattr(module, "CUDAGraphCache") + + return target_class(self.decoder_model) diff --git a/GPT_SoVITS/Accelerate/PyTorch/t2s_model_abc.py b/GPT_SoVITS/Accelerate/PyTorch/t2s_model_abc.py new file mode 100644 index 00000000..56032753 --- /dev/null +++ b/GPT_SoVITS/Accelerate/PyTorch/t2s_model_abc.py @@ -0,0 +1,671 @@ +""" +Modified From https://github.com/XXXXRT666/GPT-SoVITS +""" + +from __future__ import annotations + +import math +import os +import random +from abc import ABC, abstractmethod +from contextlib import nullcontext +from typing import MutableSequence + +import torch +import torch._inductor.config +import torch.nn.functional as F +from torch.cuda.graphs import CUDAGraph +from torch.profiler import ProfilerAction, tensorboard_trace_handler + +from . import nn +from .structs import KVCacheProtocol, T2SDecoderProtocol, T2SSession + +Tensor = torch.Tensor + + +class TokenEmbedding(nn.Module): + def __init__( + self, + embedding_dim: int, + vocab_size: int, + ): + super().__init__() + + self.vocab_size = vocab_size + self.embedding_dim = embedding_dim + + self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim) + + @property + def weight(self) -> Tensor: + return self.word_embeddings.weight + + def embedding(self, index: int) -> Tensor: + return self.word_embeddings.weight[index : index + 1] + + def __call__(self, x: Tensor): + x = self.word_embeddings(x) + return x + + +class SinePositionalEmbedding(nn.Module): + def __init__( + self, + embedding_dim: int, + scale: bool = False, + alpha: bool = False, + max_batch_size: int = 10, + max_seq_len: int = 1800, + ): + super().__init__() + self.embedding_dim = embedding_dim + self.x_scale = math.sqrt(embedding_dim) if scale else 1.0 + self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha) + self.max_batch_size = max_batch_size + self.max_seq_len = max_seq_len + + self.reverse = False + self.register_buffer("pe", torch.zeros(max_batch_size, max_seq_len, embedding_dim), persistent=False) + self.pe: torch.Tensor + self.compute_pe() + + def compute_pe(self): + """Reset the positional encodings.""" + if self.reverse: + position = torch.arange(self.max_seq_len - 1, -1, -1.0, dtype=torch.float32).unsqueeze(1) + else: + position = torch.arange(self.max_seq_len, dtype=torch.float32).unsqueeze(1) + div_term = torch.exp( + torch.arange(0, self.embedding_dim, 2, dtype=torch.float32) * -(math.log(10000.0) / self.embedding_dim) + ) + pe = self.pe + pe[:, :, 0::2] = torch.sin(position * div_term) + pe[:, :, 1::2] = torch.cos(position * div_term) + + def __call__(self, input_pos: Tensor, x: Tensor) -> Tensor: + """ + Args: + input_pos (Tensor): [batch_size, ] + x (Tensor): [batch_size, 1, embed_dim] + + Returns: + embedded_x (Tensor): [batch_size, 1, embed_dim] + """ + + batch_size = x.shape[0] + pe_values = self.pe[torch.arange(batch_size), input_pos - 1] # (batch_size, embed_dim) + + return x * self.x_scale + self.alpha * pe_values.unsqueeze(1) # (batch_size, 1, embed_dim) + + def prefill(self, x: Tensor) -> Tensor: + """ + Args: + x (Tensor): [batch_size, seq_len, embed_dim] + + Returns: + embedded_x (Tensor): [batch_size, seq_len, embed_dim] + """ + + pe_values = self.pe[:, : x.shape[-2]] + return x * self.x_scale + self.alpha.item() * pe_values + + +class KVCacheABC(nn.Module, ABC, KVCacheProtocol): + def __init__(self, batch_size: int, max_seq_length: int, n_heads: int, head_dim: int) -> None: + super().__init__() + + self.n_head = 
n_heads + self.head_dim = head_dim + self.batch_size = batch_size + self.max_seq_length = max_seq_length + + self.k_cache: Tensor + self.v_cache: Tensor + + def empty(self): + self.k_cache.zero_() + self.v_cache.zero_() + + @abstractmethod + def update(self, input_pos: Tensor, k_val: Tensor, v_val: Tensor, *args, **kwds) -> tuple[Tensor, Tensor]: ... + + @abstractmethod + def prefill_kv(self, k_val: Tensor, v_val: Tensor) -> None: ... + + def sync_cache(self, kv_cache: KVCacheProtocol): + self.k_cache.copy_(kv_cache.k_cache) + self.v_cache.copy_(kv_cache.v_cache) + + +class KVCacheNHD(KVCacheABC): + def __init__(self, batch_size, max_seq_length, n_heads, head_dim): + super().__init__(batch_size, max_seq_length, n_heads, head_dim) + + assert batch_size > 0 + cache_shape = (batch_size, max_seq_length, n_heads, head_dim) + + self.register_buffer("k_cache", torch.zeros(size=cache_shape), persistent=False) + self.register_buffer("v_cache", torch.zeros(size=cache_shape), persistent=False) + + def update(self, input_pos: Tensor, k_val: Tensor, v_val: Tensor): + # input_pos: [B, ], k_val: [B, 1, H, D] + + index = ( + (input_pos - 1) + .unsqueeze(-1) + .unsqueeze(-1) + .unsqueeze(-1) + .expand( + -1, + -1, + self.n_head, + self.head_dim, + ) + .to(torch.int64) + ) # (bs, 1, num_head, head_dim) + + k_out = self.k_cache + v_out = self.v_cache + k_out.scatter_(1, index, k_val) + v_out.scatter_(1, index, v_val) + + return k_out, v_out + + def empty(self): + self.k_cache.zero_() + self.v_cache.zero_() + + def prefill_kv(self, k_val: Tensor, v_val: Tensor): + # input_pos: int, k_val: [B, S, H, D] + + self.k_cache[:, : k_val.shape[1]] = k_val + self.v_cache[:, : v_val.shape[1]] = v_val + + +class KVCacheHND(KVCacheABC): + def __init__(self, batch_size, max_seq_length, n_heads, head_dim): + super().__init__(batch_size, max_seq_length, n_heads, head_dim) + + cache_shape = (batch_size, n_heads, max_seq_length, head_dim) + + self.register_buffer("k_cache", torch.zeros(size=cache_shape), persistent=False) + self.register_buffer("v_cache", torch.zeros(size=cache_shape), persistent=False) + + def update(self, input_pos: Tensor, k_val: Tensor, v_val: Tensor): + # input_pos: [B, ], k_val: [B, H, 1, D] + + index = ( + (input_pos - 1) + .unsqueeze(-1) + .unsqueeze(-1) + .unsqueeze(-1) + .expand( + -1, + self.n_head, + -1, + self.head_dim, + ) + .to(torch.int64) + ) # (bs, num_head, 1, head_dim) + + k_out = self.k_cache + v_out = self.v_cache + k_out.scatter_(2, index, k_val) + v_out.scatter_(2, index, v_val) + + return k_out, v_out + + def empty(self): + self.k_cache.zero_() + self.v_cache.zero_() + + def prefill_kv(self, k_val: Tensor, v_val: Tensor): + # input_pos: int, k_val: [B, S, H, D] + + self.k_cache[..., : k_val.shape[1], :] = k_val.transpose(1, 2) + self.v_cache[..., : v_val.shape[1], :] = v_val.transpose(1, 2) + + +class KVCacheHNDVarlen(KVCacheABC): + def __init__(self, batch_size, max_seq_length, n_heads, head_dim): + super().__init__(batch_size, max_seq_length, n_heads, head_dim) + + cache_shape = (batch_size, n_heads, max_seq_length, head_dim) + self.cache_idx: Tensor + + self.register_buffer("cache_idx", torch.arange(batch_size), persistent=False) + self.register_buffer("k_cache", torch.zeros(size=cache_shape), persistent=False) + self.register_buffer("v_cache", torch.zeros(size=cache_shape), persistent=False) + + def update(self, input_pos: Tensor, k_val: Tensor, v_val: Tensor): + # input_pos: [B, ], k_val: [B, H, 1, D] + + k_out = self.k_cache + v_out = self.v_cache + + ip0 = input_pos - 1 + + 
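+        # Batched scatter via advanced indexing: for each batch index b this writes
+        # k_cache[b, :, input_pos[b] - 1, :] = k_val[b, :, 0, :] (and likewise for v),
+        # i.e. the single new timestep lands at that sequence's own write position.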
k_out[self.cache_idx, :, ip0, None] = k_val + v_out[self.cache_idx, :, ip0, None] = v_val + + return k_out, v_out + + def empty(self): + self.k_cache.zero_() + self.v_cache.zero_() + + def prefill_kv(self, k_val: Tensor, v_val: Tensor): + # input_pos: int, k_val: [B, S, H, D] + + self.k_cache[..., : k_val.shape[1], :] = k_val.transpose(1, 2) + self.v_cache[..., : v_val.shape[1], :] = v_val.transpose(1, 2) + + +class AttentionABC(nn.Module, ABC): + def __init__(self, n_head: int, hidden_dim: int, max_seq_length: int): + super().__init__() + + self.n_head = n_head + self.hidden_dim = hidden_dim + assert hidden_dim % n_head == 0 + self.head_dim = hidden_dim // n_head + + self.max_seq_length = max_seq_length + + # key, query, value projections for all heads, but in a batch + self.in_proj: nn.Linear + self.out_proj: nn.Linear + + self._register_load_state_dict_pre_hook(self.load_hook) + + def load_hook(self, state_dict: dict[str, Tensor], prefix, *args): + keys_to_modify = [key for key in state_dict if "in_proj_" in key] + for key in keys_to_modify: + new_key = key.replace("in_proj_", "in_proj.") # in_proj_ -> in_proj. + state_dict[new_key] = state_dict.pop(key) + + @abstractmethod + def __call__(self, x: Tensor, input_pos: Tensor, kv_cache: KVCacheProtocol, *args, **kwds) -> Tensor: ... + + def prefill(self, x: Tensor, kv_cache: KVCacheProtocol, attn_mask: Tensor) -> Tensor: + bsz, seqlen, _ = x.shape + + q, k, v = self.in_proj(x.unsqueeze(0)).chunk(3, dim=-1) + + q, k, v = map(lambda x: x.contiguous().view(bsz, seqlen, self.n_head, self.head_dim), (q, k, v)) + + kv_cache.prefill_kv(k, v) + + q, k, v = map(lambda x: x.transpose(1, 2), (q, k, v)) + + attn = F.scaled_dot_product_attention(q, k, v, attn_mask) + + attn = attn.transpose(1, 2).contiguous().view(1, -1, self.hidden_dim) + + output = self.out_proj(attn) + + return output + + +class FeedForward(nn.Module): + def __init__(self, dim: int, hidden_dim: int) -> None: + super().__init__() + + self.linear1 = nn.Linear(dim, hidden_dim, bias=True) + self.linear2 = nn.Linear(hidden_dim, dim, bias=True) + + def __call__(self, x: Tensor): + return self.linear2(F.relu(self.linear1(x), inplace=True)) + + +class TransformerBlockABC(nn.Module, ABC): + def __init__(self, n_head: int, ffn_dim: int, hidden_dim: int, max_seq_length: int) -> None: + super().__init__() + + self.hidden_dim = hidden_dim + self.max_seq_length = max_seq_length + + self.attention: AttentionABC + self.feed_forward: FeedForward + self.attention_norm: nn.LayerNorm + self.ffn_norm: nn.LayerNorm + + self._register_load_state_dict_pre_hook(self.load_hook) + + def load_hook(self, state_dict: dict[str, Tensor], prefix, *args): + for key in list(state_dict.keys()): + new_key = ( + key.replace("self_attn", "attention") + .replace("linear", "feed_forward.linear") + .replace("norm1", "attention_norm") + .replace("norm2", "ffn_norm") + ) + state_dict[new_key] = state_dict.pop(key) + + def __call__(self, x: Tensor, input_pos: Tensor, kv_cache: KVCacheProtocol, *args, **kwds): + h = self.attention_norm( + x + + self.attention( + x, + input_pos, + kv_cache, + *args, + **kwds, + ) + ) + out = self.ffn_norm(h + self.feed_forward(h)) + return out + + def prefill( + self, + x: Tensor, + kv_cache: KVCacheProtocol, + attn_mask: Tensor, + ) -> Tensor: + h = self.attention_norm( + x + + self.attention.prefill( + x, + kv_cache, + attn_mask, + ) + ) + out = self.ffn_norm(h + self.feed_forward(h)) + return out + + +class TransformerDecoderABC(nn.Module, ABC): + def __init__( + self, + hidden_dim: int, + 
n_layer: int, + n_head: int, + ffn_dim: int, + vocab_size: int, + max_seq_length: int, + max_batch_size: int, + ) -> None: + super().__init__() + + self.hidden_dim = hidden_dim + self.n_head = n_head + assert hidden_dim % n_head == 0 + + self.head_dim = hidden_dim // n_head + self.vocab_size = vocab_size + + self.n_layer = n_layer + + self.layers: MutableSequence[TransformerBlockABC] + + self.max_seq_length = max_seq_length + self.max_batch_size = max_batch_size + + def __call__(self, input_pos: Tensor, x: Tensor, kv_caches: MutableSequence[KVCacheProtocol], *args, **kwds): + for layer, kv_cache in zip(self.layers, kv_caches): + x = layer(x, input_pos, kv_cache, *args, **kwds) + return x + + def prefill(self, x: Tensor, kv_caches: MutableSequence[KVCacheProtocol], attn_mask: Tensor): + for layer, kv_cache in zip(self.layers, kv_caches): + x = layer.prefill(x, kv_cache, attn_mask) + return x + + +class T2SDecoderABC(nn.Module, ABC, T2SDecoderProtocol): + def __init__( + self, + config: dict, + max_seq_length: int = 1800, + max_batch_size: int = 10, + ) -> None: + super().__init__() + + hidden_dim: int = config["model"]["hidden_dim"] + embedding_dim: int = config["model"]["embedding_dim"] + n_head: int = config["model"]["head"] + n_layer: int = config["model"]["n_layer"] + vocab_size: int = config["model"]["vocab_size"] + phoneme_vocab_size: int = config["model"]["phoneme_vocab_size"] + EOS: int = config["model"]["EOS"] + ffn_dim: int = hidden_dim * 4 + + self.n_layer = int(n_layer) + self.hidden_dim = int(hidden_dim) + self.n_head = int(n_head) + assert hidden_dim % n_head == 0 + + self.head_dim = int(hidden_dim // n_head) + self.embedding_dim = int(embedding_dim) + self.ffn_dim = int(ffn_dim) + self.vocab_size = int(vocab_size) + self.phoneme_vocab_size = int(phoneme_vocab_size) + self.max_seq_length = max_seq_length + self.max_batch_size = max_batch_size + self.EOS = EOS + assert self.EOS == self.vocab_size - 1 + + self.bert_proj: nn.Linear + self.ar_predict_layer: nn.Linear + self.h: TransformerDecoderABC + + self.kv_class: type[KVCacheABC] + + self.GraphCache: CUDAGraphCacheABC | None + + self.ar_text_embedding = TokenEmbedding(self.embedding_dim, self.phoneme_vocab_size) + self.ar_text_position = SinePositionalEmbedding( + self.embedding_dim, + scale=False, + alpha=True, + max_batch_size=max_batch_size, + max_seq_len=max_seq_length, + ) + self.ar_audio_embedding = TokenEmbedding(self.embedding_dim, self.vocab_size) + self.ar_audio_position = SinePositionalEmbedding( + self.embedding_dim, + scale=False, + alpha=True, + max_batch_size=max_batch_size, + max_seq_len=max_seq_length, + ) + + self._register_load_state_dict_pre_hook(self.load_hook) + + def load_hook(self, state_dict: dict[str, Tensor], prefix, *args): + model_keys = [key for key in state_dict if key.startswith("model.")] + for key in model_keys: + new_key = key[len("model.") :] + state_dict[new_key] = state_dict.pop(key) + + def init_cache(self, bsz: int = 0) -> MutableSequence[KVCacheProtocol]: + bsz = bsz or self.h.max_batch_size + assert bsz <= self.h.max_batch_size + seq_lens = self.h.max_seq_length + dtype = self.bert_proj.bias.dtype + kvclass = self.kv_class + + return nn.ModuleList( + [kvclass(bsz, seq_lens, self.n_head, self.head_dim) for _ in range(self.n_layer)], + ).to(self.device, dtype) # type: ignore + + def embed( + self, + x: list[torch.Tensor], + y: torch.Tensor, + bert_features: list[torch.Tensor], + ): + x_len: list[int] = [i.shape[0] for i in x] + x_len_max = max(x_len) + xy_pos = torch.zeros((len(x), 
x_len_max + y.shape[1], self.embedding_dim)).to(bert_features[0].dtype) + + bert_features = list(map(lambda x: x.transpose(0, 1), bert_features)) + + y_len = y.shape[1] + y_emb = self.ar_audio_embedding(y) + y_pos = self.ar_audio_position.prefill(y_emb) + + for bs, (x_, len_, bert_feature) in enumerate(zip(x, x_len, bert_features)): + x_emb = self.ar_text_embedding(x_) + bert = self.bert_proj(bert_feature) + x_emb = x_emb + bert + x_pos = self.ar_text_position.prefill(x_emb.unsqueeze(0)) + xy_pos[[bs], :len_] = x_pos + xy_pos[[bs], len_ : len_ + y_len] = y_pos + + return xy_pos + + def compile(self, *args, **kwds): + # Experimental features to reduce compilation times, will be on by default in future + torch._inductor.config.triton.cudagraph_skip_dynamic_graphs = True + torch._inductor.config.coordinate_descent_tuning = True + torch._inductor.config.triton.unique_kernel_names = True + torch._inductor.config.fx_graph_cache = True + torch._inductor.config.triton.cudagraph_trees = True + torch._inductor.config.triton.cudagraph_support_input_mutation = True + self.h.compile(fullgraph=True, mode="reduce-overhead") + + def capture( + self, input_pos: Tensor, x: Tensor, x_dec: Tensor, kv_caches: MutableSequence[KVCacheProtocol], *args, **kwds + ) -> CUDAGraph: + assert torch.cuda.is_available() + s = torch.cuda.Stream() + s.wait_stream(torch.cuda.current_stream()) + + graph = torch.cuda.CUDAGraph() + + with torch.cuda.stream(s): + for _ in range(5): + self.h(input_pos, x, kv_caches, *args, **kwds) + torch.cuda.current_stream().wait_stream(s) + + with torch.cuda.graph(graph): + x_dec.copy_(self.h(input_pos, x, kv_caches, *args, **kwds)) + torch.cuda.synchronize() + + return graph + + @abstractmethod + def pre_forward(self, session: T2SSession) -> tuple[list[Tensor], dict[str, Tensor]]: + return list(), dict() + + @abstractmethod + def post_forward(self, idx: int, session: T2SSession) -> None: + return + + +class CUDAGraphCacheABC(ABC): + def __init__( + self, + decoder: T2SDecoderABC, + ) -> None: + self.is_applicable: bool + + if torch.cuda.is_available() and self.is_applicable: + self.device: torch.device = decoder.device + self.dtype = decoder.bert_proj.bias.dtype + + self.assigned: bool = False + + self.decoder: T2SDecoderABC = decoder + self.kv_cache: MutableSequence[KVCacheProtocol] = decoder.init_cache(decoder.max_batch_size) + self.xy_pos = torch.rand(size=(decoder.max_batch_size, 1, decoder.embedding_dim), device=self.device).to( + self.dtype + ) + self.xy_dec = self.xy_pos.clone() + + self.input_pos = torch.tensor([10] * decoder.max_batch_size, device=self.device).int() + self.graph: torch.cuda.CUDAGraph | None = None + self.stream: torch.cuda.Stream | None + + self.id: int = random.randint(1, 2**32 - 1) + + def assign_graph(self, session: T2SSession): + if self.graph is None: + args, kwds = self.decoder.pre_forward(session) + graph = self.decoder.capture(self.input_pos, self.xy_pos, self.xy_dec, self.kv_cache, *args, **kwds) + self.graph = graph + self.stream = torch.cuda.Stream() + + if self.assigned is False: + self.get_cache_graph(session) + session.id = self.id + self.assigned = True + else: + self.capture_new_graph(session) + + @abstractmethod + def release_graph(self, session: T2SSession): ... 
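+    # Concrete backends either reuse the pre-captured graph (get_cache_graph) or capture a
+    # fresh one per session (capture_new_graph); assign_graph above picks between the two.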
+ + @abstractmethod + def get_cache_graph(self, session: T2SSession): + pass + + @abstractmethod + def capture_new_graph(self, session: T2SSession): + pass + + +class TorchProfiler: + def __init__(self, debug: bool, log_dir: str = "./profiler") -> None: + self.debug = debug + self.log_dir = log_dir + self.__profiler: torch.profiler.profile + + if self.debug and not os.path.exists(self.log_dir): + os.makedirs(self.log_dir) + + self.tensorboard_handler = tensorboard_trace_handler(self.log_dir) + + def profiler_callback(self, prof: torch.profiler.profile): + print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=30)) + print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=30)) + self.tensorboard_handler(prof) + + @staticmethod + def three_step_schedule(step: int) -> ProfilerAction: + if step == 0: + return ProfilerAction.NONE + elif step == 1: + return ProfilerAction.RECORD + elif step == 2: + return ProfilerAction.RECORD_AND_SAVE + else: + return ProfilerAction.NONE + + def start(self): + if not self.debug: + return + assert self.__profiler is not None + self.__profiler.step() + + def end(self): + if not self.debug: + return + assert self.__profiler is not None + self.__profiler.step() + + def profiler(self): + if self.debug: + activities_list = [torch.profiler.ProfilerActivity.CPU] + if torch.cuda.is_available(): + activities_list.append(torch.profiler.ProfilerActivity.CUDA) + + self.__profiler = torch.profiler.profile( + activities=activities_list, + record_shapes=True, + with_stack=True, + with_modules=True, + profile_memory=True, + schedule=self.three_step_schedule, + on_trace_ready=self.profiler_callback, + ) + return self.__profiler + else: + return nullcontext() + + def record(self, func_name: str): + if self.debug: + return torch.profiler.record_function(func_name) + else: + return nullcontext() diff --git a/GPT_SoVITS/Accelerate/__init__.py b/GPT_SoVITS/Accelerate/__init__.py new file mode 100644 index 00000000..797fe1d0 --- /dev/null +++ b/GPT_SoVITS/Accelerate/__init__.py @@ -0,0 +1,30 @@ +from . 
import MLX, PyTorch +from .logger import console, logger, tb +from .PyTorch import T2SEngineTorch, T2SRequest, T2SResult +from .PyTorch.structs import T2SEngineProtocol + +backends = PyTorch.backends + MLX.backends + +backends = [ + b.replace("_", "-") + .title() + .replace("Mlx", "MLX") + .replace("Mps", "MPS") + .replace("Cuda", "CUDA") + .replace("Mxfp4", "MXFP4") + for b in backends +] + + +__all__ = [ + "T2SEngineTorch", + "T2SRequest", + "T2SResult", + "backends", + "MLX", + "PyTorch", + "logger", + "console", + "tb", + "T2SEngineProtocol", +] diff --git a/GPT_SoVITS/Accelerate/logger.py b/GPT_SoVITS/Accelerate/logger.py new file mode 100644 index 00000000..021e72e3 --- /dev/null +++ b/GPT_SoVITS/Accelerate/logger.py @@ -0,0 +1,203 @@ +import sys +from typing import Optional + +from loguru import logger +from rich.console import Console, JustifyMethod +from rich.highlighter import Highlighter +from rich.logging import RichHandler +from rich.progress import Task, TextColumn +from rich.style import StyleType +from rich.table import Column +from rich.text import Text +from rich.traceback import Traceback, install + +console = Console(stderr=False) +install(console=console) + + +def loguru_format(record): + level = record["level"].name + color = { + "DEBUG": "green", + "INFO": "blue", + "WARNING": "yellow", + "ERROR": "red", + "CRITICAL": "bright_red", + }.get(level, "white") + + return f"[bold {color}][{level}][/bold {color}] " + "{message}" + + +handler_with_locals = RichHandler( + console=console, + show_time=False, + show_path=False, + rich_tracebacks=True, + tracebacks_show_locals=True, + show_level=False, + markup=True, +) +handler_without_locals = RichHandler( + console=console, + show_time=False, + show_path=False, + rich_tracebacks=True, + tracebacks_show_locals=False, + show_level=False, + markup=True, +) + + +def local_filter(r): + return r["extra"].get("show_locals", True) + + +logger.remove() +logger.add(handler_with_locals, format=loguru_format, filter=local_filter) +logger.add(handler_without_locals, format=loguru_format, filter=lambda x: not local_filter(x)) + + +class SpeedColumnToken(TextColumn): + """Show task progress as a percentage. + + Args: + text_format (str, optional): Format for percentage display. Defaults to "[progress.percentage]{task.percentage:>3.0f}%". + text_format_no_percentage (str, optional): Format if percentage is unknown. Defaults to "". + style (StyleType, optional): Style of output. Defaults to "none". + justify (JustifyMethod, optional): Text justification. Defaults to "left". + markup (bool, optional): Enable markup. Defaults to True. + highlighter (Optional[Highlighter], optional): Highlighter to apply to output. Defaults to None. + table_column (Optional[Column], optional): Table Column to use. Defaults to None. + show_speed (bool, optional): Show speed if total is unknown. Defaults to False. 
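+
+    Note: this project constructs the column with ``show_speed=True``, so it renders
+    decoding speed in token/s rather than a percentage.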
+ """ + + def __init__( + self, + text_format: str = "[progress.percentage]{task.percentage:>3.0f}%", + text_format_no_percentage: str = "", + style: StyleType = "none", + justify: JustifyMethod = "left", + markup: bool = True, + highlighter: Optional[Highlighter] = None, + table_column: Optional[Column] = None, + show_speed: bool = True, + ) -> None: + self.text_format_no_percentage = text_format_no_percentage + self.show_speed = show_speed + super().__init__( + text_format=text_format, + style=style, + justify=justify, + markup=markup, + highlighter=highlighter, + table_column=table_column, + ) + + @classmethod + def render_speed(cls, speed: Optional[float]) -> Text: + """Render the speed in iterations per second. + + Args: + task (Task): A Task object. + + Returns: + Text: Text object containing the task speed. + """ + if speed is None: + return Text("", style="progress.percentage") + return Text(f"{speed:.1f} token/s", style="progress.percentage") + + def render(self, task: Task) -> Text: + if self.show_speed: + return self.render_speed(task.finished_speed or task.speed) + text_format = self.text_format_no_percentage if task.total is None else self.text_format + _text = text_format.format(task=task) + if self.markup: + text = Text.from_markup(_text, style=self.style, justify=self.justify) + else: + text = Text(_text, style=self.style, justify=self.justify) + if self.highlighter: + self.highlighter.highlight(text) + return text + + +class SpeedColumnIteration(TextColumn): + """Show task progress as a percentage. + + Args: + text_format (str, optional): Format for percentage display. Defaults to "[progress.percentage]{task.percentage:>3.0f}%". + text_format_no_percentage (str, optional): Format if percentage is unknown. Defaults to "". + style (StyleType, optional): Style of output. Defaults to "none". + justify (JustifyMethod, optional): Text justification. Defaults to "left". + markup (bool, optional): Enable markup. Defaults to True. + highlighter (Optional[Highlighter], optional): Highlighter to apply to output. Defaults to None. + table_column (Optional[Column], optional): Table Column to use. Defaults to None. + show_speed (bool, optional): Show speed if total is unknown. Defaults to False. + """ + + def __init__( + self, + text_format: str = "[progress.percentage]{task.percentage:>3.0f}%", + text_format_no_percentage: str = "", + style: StyleType = "none", + justify: JustifyMethod = "left", + markup: bool = True, + highlighter: Optional[Highlighter] = None, + table_column: Optional[Column] = None, + show_speed: bool = True, + ) -> None: + self.text_format_no_percentage = text_format_no_percentage + self.show_speed = show_speed + super().__init__( + text_format=text_format, + style=style, + justify=justify, + markup=markup, + highlighter=highlighter, + table_column=table_column, + ) + + @classmethod + def render_speed(cls, speed: Optional[float]) -> Text: + """Render the speed in iterations per second. + + Args: + task (Task): A Task object. + + Returns: + Text: Text object containing the task speed. 
+ """ + if speed is None: + return Text("", style="progress.percentage") + return Text(f"{speed:.1f} it/s", style="progress.percentage") + + def render(self, task: Task) -> Text: + if self.show_speed: + return self.render_speed(task.finished_speed or task.speed) + text_format = self.text_format_no_percentage if task.total is None else self.text_format + _text = text_format.format(task=task) + if self.markup: + text = Text.from_markup(_text, style=self.style, justify=self.justify) + else: + text = Text(_text, style=self.style, justify=self.justify) + if self.highlighter: + self.highlighter.highlight(text) + return text + + +def tb(show_locals: bool = True): + exc_type, exc_value, exc_tb = sys.exc_info() + assert exc_type + assert exc_value + tb = Traceback.from_exception(exc_type, exc_value, exc_tb, show_locals=show_locals) + + return tb + + +__all__ = ["logger", "console", "tb", "SpeedColumnToken", "SpeedColumnIteration"] + +if __name__ == "__main__": + try: + raise RuntimeError() + except Exception: + logger.bind(show_locals=False).exception("TEST") diff --git a/GPT_SoVITS/BigVGAN/README.md b/GPT_SoVITS/BigVGAN/README.md deleted file mode 100644 index 2fa70cee..00000000 --- a/GPT_SoVITS/BigVGAN/README.md +++ /dev/null @@ -1,266 +0,0 @@ -## BigVGAN: A Universal Neural Vocoder with Large-Scale Training - -#### Sang-gil Lee, Wei Ping, Boris Ginsburg, Bryan Catanzaro, Sungroh Yoon - -[[Paper]](https://arxiv.org/abs/2206.04658) - [[Code]](https://github.com/NVIDIA/BigVGAN) - [[Showcase]](https://bigvgan-demo.github.io/) - [[Project Page]](https://research.nvidia.com/labs/adlr/projects/bigvgan/) - [[Weights]](https://huggingface.co/collections/nvidia/bigvgan-66959df3d97fd7d98d97dc9a) - [[Demo]](https://huggingface.co/spaces/nvidia/BigVGAN) - -[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/bigvgan-a-universal-neural-vocoder-with-large/speech-synthesis-on-libritts)](https://paperswithcode.com/sota/speech-synthesis-on-libritts?p=bigvgan-a-universal-neural-vocoder-with-large) - -
- -## News -- **Sep 2024 (v2.4):** - - We have updated the pretrained checkpoints trained for 5M steps. This is final release of the BigVGAN-v2 checkpoints. - -- **Jul 2024 (v2.3):** - - General refactor and code improvements for improved readability. - - Fully fused CUDA kernel of anti-alised activation (upsampling + activation + downsampling) with inference speed benchmark. - -- **Jul 2024 (v2.2):** The repository now includes an interactive local demo using gradio. - -- **Jul 2024 (v2.1):** BigVGAN is now integrated with 🤗 Hugging Face Hub with easy access to inference using pretrained checkpoints. We also provide an interactive demo on Hugging Face Spaces. - -- **Jul 2024 (v2):** We release BigVGAN-v2 along with pretrained checkpoints. Below are the highlights: - - Custom CUDA kernel for inference: we provide a fused upsampling + activation kernel written in CUDA for accelerated inference speed. Our test shows 1.5 - 3x faster speed on a single A100 GPU. - - Improved discriminator and loss: BigVGAN-v2 is trained using a [multi-scale sub-band CQT discriminator](https://arxiv.org/abs/2311.14957) and a [multi-scale mel spectrogram loss](https://arxiv.org/abs/2306.06546). - - Larger training data: BigVGAN-v2 is trained using datasets containing diverse audio types, including speech in multiple languages, environmental sounds, and instruments. - - We provide pretrained checkpoints of BigVGAN-v2 using diverse audio configurations, supporting up to 44 kHz sampling rate and 512x upsampling ratio. - -## Installation - -The codebase has been tested on Python `3.10` and PyTorch `2.3.1` conda packages with either `pytorch-cuda=12.1` or `pytorch-cuda=11.8`. Below is an example command to create the conda environment: - -```shell -conda create -n bigvgan python=3.10 pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvidia -conda activate bigvgan -``` - -Clone the repository and install dependencies: - -```shell -git clone https://github.com/NVIDIA/BigVGAN -cd BigVGAN -pip install -r requirements.txt -``` - -## Inference Quickstart using 🤗 Hugging Face Hub - -Below example describes how you can use BigVGAN: load the pretrained BigVGAN generator from Hugging Face Hub, compute mel spectrogram from input waveform, and generate synthesized waveform using the mel spectrogram as the model's input. - -```python -device = 'cuda' - -import torch -import bigvgan -import librosa -from meldataset import get_mel_spectrogram - -# instantiate the model. You can optionally set use_cuda_kernel=True for faster inference. 
-model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_24khz_100band_256x', use_cuda_kernel=False) - -# remove weight norm in the model and set to eval mode -model.remove_weight_norm() -model = model.eval().to(device) - -# load wav file and compute mel spectrogram -wav_path = '/path/to/your/audio.wav' -wav, sr = librosa.load(wav_path, sr=model.h.sampling_rate, mono=True) # wav is np.ndarray with shape [T_time] and values in [-1, 1] -wav = torch.FloatTensor(wav).unsqueeze(0) # wav is FloatTensor with shape [B(1), T_time] - -# compute mel spectrogram from the ground truth audio -mel = get_mel_spectrogram(wav, model.h).to(device) # mel is FloatTensor with shape [B(1), C_mel, T_frame] - -# generate waveform from mel -with torch.inference_mode(): - wav_gen = model(mel) # wav_gen is FloatTensor with shape [B(1), 1, T_time] and values in [-1, 1] -wav_gen_float = wav_gen.squeeze(0).cpu() # wav_gen is FloatTensor with shape [1, T_time] - -# you can convert the generated waveform to 16 bit linear PCM -wav_gen_int16 = (wav_gen_float * 32767.0).numpy().astype('int16') # wav_gen is now np.ndarray with shape [1, T_time] and int16 dtype -``` - -## Local gradio demo - -You can run a local gradio demo using below command: - -```python -pip install -r demo/requirements.txt -python demo/app.py -``` - -## Training - -Create symbolic link to the root of the dataset. The codebase uses filelist with the relative path from the dataset. Below are the example commands for LibriTTS dataset: - -```shell -cd filelists/LibriTTS && \ -ln -s /path/to/your/LibriTTS/train-clean-100 train-clean-100 && \ -ln -s /path/to/your/LibriTTS/train-clean-360 train-clean-360 && \ -ln -s /path/to/your/LibriTTS/train-other-500 train-other-500 && \ -ln -s /path/to/your/LibriTTS/dev-clean dev-clean && \ -ln -s /path/to/your/LibriTTS/dev-other dev-other && \ -ln -s /path/to/your/LibriTTS/test-clean test-clean && \ -ln -s /path/to/your/LibriTTS/test-other test-other && \ -cd ../.. -``` - -Train BigVGAN model. Below is an example command for training BigVGAN-v2 using LibriTTS dataset at 24kHz with a full 100-band mel spectrogram as input: - -```shell -python train.py \ ---config configs/bigvgan_v2_24khz_100band_256x.json \ ---input_wavs_dir filelists/LibriTTS \ ---input_training_file filelists/LibriTTS/train-full.txt \ ---input_validation_file filelists/LibriTTS/val-full.txt \ ---list_input_unseen_wavs_dir filelists/LibriTTS filelists/LibriTTS \ ---list_input_unseen_validation_file filelists/LibriTTS/dev-clean.txt filelists/LibriTTS/dev-other.txt \ ---checkpoint_path exp/bigvgan_v2_24khz_100band_256x -``` - -## Synthesis - -Synthesize from BigVGAN model. Below is an example command for generating audio from the model. -It computes mel spectrograms using wav files from `--input_wavs_dir` and saves the generated audio to `--output_dir`. - -```shell -python inference.py \ ---checkpoint_file /path/to/your/bigvgan_v2_24khz_100band_256x/bigvgan_generator.pt \ ---input_wavs_dir /path/to/your/input_wav \ ---output_dir /path/to/your/output_wav -``` - -`inference_e2e.py` supports synthesis directly from the mel spectrogram saved in `.npy` format, with shapes `[1, channel, frame]` or `[channel, frame]`. -It loads mel spectrograms from `--input_mels_dir` and saves the generated audio to `--output_dir`. - -Make sure that the STFT hyperparameters for mel spectrogram are the same as the model, which are defined in `config.json` of the corresponding model. 
- -```shell -python inference_e2e.py \ ---checkpoint_file /path/to/your/bigvgan_v2_24khz_100band_256x/bigvgan_generator.pt \ ---input_mels_dir /path/to/your/input_mel \ ---output_dir /path/to/your/output_wav -``` - -## Using Custom CUDA Kernel for Synthesis - -You can apply the fast CUDA inference kernel by using a parameter `use_cuda_kernel` when instantiating BigVGAN: - -```python -generator = BigVGAN(h, use_cuda_kernel=True) -``` - -You can also pass `--use_cuda_kernel` to `inference.py` and `inference_e2e.py` to enable this feature. - -When applied for the first time, it builds the kernel using `nvcc` and `ninja`. If the build succeeds, the kernel is saved to `alias_free_activation/cuda/build` and the model automatically loads the kernel. The codebase has been tested using CUDA `12.1`. - -Please make sure that both are installed in your system and `nvcc` installed in your system matches the version your PyTorch build is using. - -We recommend running `test_cuda_vs_torch_model.py` first to build and check the correctness of the CUDA kernel. See below example command and its output, where it returns `[Success] test CUDA fused vs. plain torch BigVGAN inference`: - -```python -python tests/test_cuda_vs_torch_model.py \ ---checkpoint_file /path/to/your/bigvgan_generator.pt -``` - -```shell -loading plain Pytorch BigVGAN -... -loading CUDA kernel BigVGAN with auto-build -Detected CUDA files, patching ldflags -Emitting ninja build file /path/to/your/BigVGAN/alias_free_activation/cuda/build/build.ninja.. -Building extension module anti_alias_activation_cuda... -... -Loading extension module anti_alias_activation_cuda... -... -Loading '/path/to/your/bigvgan_generator.pt' -... -[Success] test CUDA fused vs. plain torch BigVGAN inference - > mean_difference=0.0007238413265440613 -... -``` - -If you see `[Fail] test CUDA fused vs. plain torch BigVGAN inference`, it means that the CUDA kernel inference is incorrect. Please check if `nvcc` installed in your system is compatible with your PyTorch version. - -## Pretrained Models - -We provide the [pretrained models on Hugging Face Collections](https://huggingface.co/collections/nvidia/bigvgan-66959df3d97fd7d98d97dc9a). -One can download the checkpoints of the generator weight (named `bigvgan_generator.pt`) and its discriminator/optimizer states (named `bigvgan_discriminator_optimizer.pt`) within the listed model repositories. 
- -| Model Name | Sampling Rate | Mel band | fmax | Upsampling Ratio | Params | Dataset | Steps | Fine-Tuned | -|:--------------------------------------------------------------------------------------------------------:|:-------------:|:--------:|:-----:|:----------------:|:------:|:--------------------------:|:-----:|:----------:| -| [bigvgan_v2_44khz_128band_512x](https://huggingface.co/nvidia/bigvgan_v2_44khz_128band_512x) | 44 kHz | 128 | 22050 | 512 | 122M | Large-scale Compilation | 5M | No | -| [bigvgan_v2_44khz_128band_256x](https://huggingface.co/nvidia/bigvgan_v2_44khz_128band_256x) | 44 kHz | 128 | 22050 | 256 | 112M | Large-scale Compilation | 5M | No | -| [bigvgan_v2_24khz_100band_256x](https://huggingface.co/nvidia/bigvgan_v2_24khz_100band_256x) | 24 kHz | 100 | 12000 | 256 | 112M | Large-scale Compilation | 5M | No | -| [bigvgan_v2_22khz_80band_256x](https://huggingface.co/nvidia/bigvgan_v2_22khz_80band_256x) | 22 kHz | 80 | 11025 | 256 | 112M | Large-scale Compilation | 5M | No | -| [bigvgan_v2_22khz_80band_fmax8k_256x](https://huggingface.co/nvidia/bigvgan_v2_22khz_80band_fmax8k_256x) | 22 kHz | 80 | 8000 | 256 | 112M | Large-scale Compilation | 5M | No | -| [bigvgan_24khz_100band](https://huggingface.co/nvidia/bigvgan_24khz_100band) | 24 kHz | 100 | 12000 | 256 | 112M | LibriTTS | 5M | No | -| [bigvgan_base_24khz_100band](https://huggingface.co/nvidia/bigvgan_base_24khz_100band) | 24 kHz | 100 | 12000 | 256 | 14M | LibriTTS | 5M | No | -| [bigvgan_22khz_80band](https://huggingface.co/nvidia/bigvgan_22khz_80band) | 22 kHz | 80 | 8000 | 256 | 112M | LibriTTS + VCTK + LJSpeech | 5M | No | -| [bigvgan_base_22khz_80band](https://huggingface.co/nvidia/bigvgan_base_22khz_80band) | 22 kHz | 80 | 8000 | 256 | 14M | LibriTTS + VCTK + LJSpeech | 5M | No | - -The paper results are based on the original 24kHz BigVGAN models (`bigvgan_24khz_100band` and `bigvgan_base_24khz_100band`) trained on LibriTTS dataset. -We also provide 22kHz BigVGAN models with band-limited setup (i.e., fmax=8000) for TTS applications. -Note that the checkpoints use `snakebeta` activation with log scale parameterization, which have the best overall quality. - -You can fine-tune the models by: - -1. downloading the checkpoints (both the generator weight and its discriminator/optimizer states) -2. resuming training using your audio dataset by specifying `--checkpoint_path` that includes the checkpoints when launching `train.py` - -## Training Details of BigVGAN-v2 - -Comapred to the original BigVGAN, the pretrained checkpoints of BigVGAN-v2 used `batch_size=32` with a longer `segment_size=65536` and are trained using 8 A100 GPUs. - -Note that the BigVGAN-v2 `json` config files in `./configs` use `batch_size=4` as default to fit in a single A100 GPU for training. You can fine-tune the models adjusting `batch_size` depending on your GPUs. - -When training BigVGAN-v2 from scratch with small batch size, it can potentially encounter the early divergence problem mentioned in the paper. In such case, we recommend lowering the `clip_grad_norm` value (e.g. `100`) for the early training iterations (e.g. 20000 steps) and increase the value to the default `500`. - -## Evaluation Results of BigVGAN-v2 - -Below are the objective results of the 24kHz model (`bigvgan_v2_24khz_100band_256x`) obtained from the LibriTTS `dev` sets. BigVGAN-v2 shows noticeable improvements of the metrics. The model also exhibits reduced perceptual artifacts, especially for non-speech audio. 
- -| Model | Dataset | Steps | PESQ(↑) | M-STFT(↓) | MCD(↓) | Periodicity(↓) | V/UV F1(↑) | -|:----------:|:-----------------------:|:-----:|:---------:|:----------:|:----------:|:--------------:|:----------:| -| BigVGAN | LibriTTS | 1M | 4.027 | 0.7997 | 0.3745 | 0.1018 | 0.9598 | -| BigVGAN | LibriTTS | 5M | 4.256 | 0.7409 | 0.2988 | 0.0809 | 0.9698 | -| BigVGAN-v2 | Large-scale Compilation | 3M | 4.359 | 0.7134 | 0.3060 | 0.0621 | 0.9777 | -| BigVGAN-v2 | Large-scale Compilation | 5M | **4.362** | **0.7026** | **0.2903** | **0.0593** | **0.9793** | - -## Speed Benchmark - -Below are the speed and VRAM usage benchmark results of BigVGAN from `tests/test_cuda_vs_torch_model.py`, using `bigvgan_v2_24khz_100band_256x` as a reference model. - -| GPU | num_mel_frame | use_cuda_kernel | Speed (kHz) | Real-time Factor | VRAM (GB) | -|:--------------------------:|:-------------:|:---------------:|:-----------:|:----------------:|:---------:| -| NVIDIA A100 | 256 | False | 1672.1 | 69.7x | 1.3 | -| | | True | 3916.5 | 163.2x | 1.3 | -| | 2048 | False | 1899.6 | 79.2x | 1.7 | -| | | True | 5330.1 | 222.1x | 1.7 | -| | 16384 | False | 1973.8 | 82.2x | 5.0 | -| | | True | 5761.7 | 240.1x | 4.4 | -| NVIDIA GeForce RTX 3080 | 256 | False | 841.1 | 35.0x | 1.3 | -| | | True | 1598.1 | 66.6x | 1.3 | -| | 2048 | False | 929.9 | 38.7x | 1.7 | -| | | True | 1971.3 | 82.1x | 1.6 | -| | 16384 | False | 943.4 | 39.3x | 5.0 | -| | | True | 2026.5 | 84.4x | 3.9 | -| NVIDIA GeForce RTX 2080 Ti | 256 | False | 515.6 | 21.5x | 1.3 | -| | | True | 811.3 | 33.8x | 1.3 | -| | 2048 | False | 576.5 | 24.0x | 1.7 | -| | | True | 1023.0 | 42.6x | 1.5 | -| | 16384 | False | 589.4 | 24.6x | 5.0 | -| | | True | 1068.1 | 44.5x | 3.2 | - -## Acknowledgements - -We thank Vijay Anand Korthikanti and Kevin J. Shih for their generous support in implementing the CUDA kernel for inference. - -## References - -- [HiFi-GAN](https://github.com/jik876/hifi-gan) (for generator and multi-period discriminator) -- [Snake](https://github.com/EdwardDixon/snake) (for periodic activation) -- [Alias-free-torch](https://github.com/junjun3518/alias-free-torch) (for anti-aliasing) -- [Julius](https://github.com/adefossez/julius) (for low-pass filter) -- [UnivNet](https://github.com/mindslab-ai/univnet) (for multi-resolution discriminator) -- [descript-audio-codec](https://github.com/descriptinc/descript-audio-codec) and [vocos](https://github.com/gemelo-ai/vocos) (for multi-band multi-scale STFT discriminator and multi-scale mel spectrogram loss) -- [Amphion](https://github.com/open-mmlab/Amphion) (for multi-scale sub-band CQT discriminator) diff --git a/GPT_SoVITS/BigVGAN/activations.py b/GPT_SoVITS/BigVGAN/activations.py index abe3ad9e..6dfda595 100644 --- a/GPT_SoVITS/BigVGAN/activations.py +++ b/GPT_SoVITS/BigVGAN/activations.py @@ -2,7 +2,7 @@ # LICENSE is in incl_licenses directory. import torch -from torch import nn, sin, pow +from torch import nn, pow, sin from torch.nn import Parameter diff --git a/GPT_SoVITS/BigVGAN/bigvgan.py b/GPT_SoVITS/BigVGAN/bigvgan.py index febdf165..1470c8fd 100644 --- a/GPT_SoVITS/BigVGAN/bigvgan.py +++ b/GPT_SoVITS/BigVGAN/bigvgan.py @@ -4,22 +4,22 @@ # Adapted from https://github.com/jik876/hifi-gan under the MIT license. # LICENSE is in incl_licenses directory. 
-import os import json +import os from pathlib import Path -from typing import Optional, Union, Dict +from typing import Dict, Optional, Union import torch import torch.nn as nn +from huggingface_hub import PyTorchModelHubMixin, hf_hub_download from torch.nn import Conv1d, ConvTranspose1d -from torch.nn.utils import weight_norm, remove_weight_norm +from torch.nn.utils import remove_weight_norm +from torch.nn.utils import weight_norm from . import activations -from .utils0 import init_weights, get_padding from .alias_free_activation.torch.act import Activation1d as TorchActivation1d from .env import AttrDict - -from huggingface_hub import PyTorchModelHubMixin, hf_hub_download +from .utils0 import get_padding, init_weights def load_hparams_from_json(path) -> AttrDict: diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_22khz_80band.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_22khz_80band.json deleted file mode 100644 index 64bca784..00000000 --- a/GPT_SoVITS/BigVGAN/configs/bigvgan_22khz_80band.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "resblock": "1", - "num_gpus": 0, - "batch_size": 32, - "learning_rate": 0.0001, - "adam_b1": 0.8, - "adam_b2": 0.99, - "lr_decay": 0.9999996, - "seed": 1234, - - "upsample_rates": [4,4,2,2,2,2], - "upsample_kernel_sizes": [8,8,4,4,4,4], - "upsample_initial_channel": 1536, - "resblock_kernel_sizes": [3,7,11], - "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], - - "activation": "snakebeta", - "snake_logscale": true, - - "resolutions": [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]], - "mpd_reshapes": [2, 3, 5, 7, 11], - "use_spectral_norm": false, - "discriminator_channel_mult": 1, - - "segment_size": 8192, - "num_mels": 80, - "num_freq": 1025, - "n_fft": 1024, - "hop_size": 256, - "win_size": 1024, - - "sampling_rate": 22050, - - "fmin": 0, - "fmax": 8000, - "fmax_for_loss": null, - - "num_workers": 4, - - "dist_config": { - "dist_backend": "nccl", - "dist_url": "tcp://localhost:54321", - "world_size": 1 - } -} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_24khz_100band.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_24khz_100band.json deleted file mode 100644 index e7f7ff08..00000000 --- a/GPT_SoVITS/BigVGAN/configs/bigvgan_24khz_100band.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "resblock": "1", - "num_gpus": 0, - "batch_size": 32, - "learning_rate": 0.0001, - "adam_b1": 0.8, - "adam_b2": 0.99, - "lr_decay": 0.9999996, - "seed": 1234, - - "upsample_rates": [4,4,2,2,2,2], - "upsample_kernel_sizes": [8,8,4,4,4,4], - "upsample_initial_channel": 1536, - "resblock_kernel_sizes": [3,7,11], - "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], - - "activation": "snakebeta", - "snake_logscale": true, - - "resolutions": [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]], - "mpd_reshapes": [2, 3, 5, 7, 11], - "use_spectral_norm": false, - "discriminator_channel_mult": 1, - - "segment_size": 8192, - "num_mels": 100, - "num_freq": 1025, - "n_fft": 1024, - "hop_size": 256, - "win_size": 1024, - - "sampling_rate": 24000, - - "fmin": 0, - "fmax": 12000, - "fmax_for_loss": null, - - "num_workers": 4, - - "dist_config": { - "dist_backend": "nccl", - "dist_url": "tcp://localhost:54321", - "world_size": 1 - } -} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_base_22khz_80band.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_base_22khz_80band.json deleted file mode 100644 index fd244848..00000000 --- a/GPT_SoVITS/BigVGAN/configs/bigvgan_base_22khz_80band.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "resblock": "1", - "num_gpus": 0, - "batch_size": 32, - "learning_rate": 0.0001, - 
"adam_b1": 0.8, - "adam_b2": 0.99, - "lr_decay": 0.9999996, - "seed": 1234, - - "upsample_rates": [8,8,2,2], - "upsample_kernel_sizes": [16,16,4,4], - "upsample_initial_channel": 512, - "resblock_kernel_sizes": [3,7,11], - "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], - - "activation": "snakebeta", - "snake_logscale": true, - - "resolutions": [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]], - "mpd_reshapes": [2, 3, 5, 7, 11], - "use_spectral_norm": false, - "discriminator_channel_mult": 1, - - "segment_size": 8192, - "num_mels": 80, - "num_freq": 1025, - "n_fft": 1024, - "hop_size": 256, - "win_size": 1024, - - "sampling_rate": 22050, - - "fmin": 0, - "fmax": 8000, - "fmax_for_loss": null, - - "num_workers": 4, - - "dist_config": { - "dist_backend": "nccl", - "dist_url": "tcp://localhost:54321", - "world_size": 1 - } -} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_base_24khz_100band.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_base_24khz_100band.json deleted file mode 100644 index 0911508c..00000000 --- a/GPT_SoVITS/BigVGAN/configs/bigvgan_base_24khz_100band.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "resblock": "1", - "num_gpus": 0, - "batch_size": 32, - "learning_rate": 0.0001, - "adam_b1": 0.8, - "adam_b2": 0.99, - "lr_decay": 0.9999996, - "seed": 1234, - - "upsample_rates": [8,8,2,2], - "upsample_kernel_sizes": [16,16,4,4], - "upsample_initial_channel": 512, - "resblock_kernel_sizes": [3,7,11], - "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], - - "activation": "snakebeta", - "snake_logscale": true, - - "resolutions": [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]], - "mpd_reshapes": [2, 3, 5, 7, 11], - "use_spectral_norm": false, - "discriminator_channel_mult": 1, - - "segment_size": 8192, - "num_mels": 100, - "num_freq": 1025, - "n_fft": 1024, - "hop_size": 256, - "win_size": 1024, - - "sampling_rate": 24000, - - "fmin": 0, - "fmax": 12000, - "fmax_for_loss": null, - - "num_workers": 4, - - "dist_config": { - "dist_backend": "nccl", - "dist_url": "tcp://localhost:54321", - "world_size": 1 - } -} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_256x.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_256x.json deleted file mode 100644 index e96bd5fd..00000000 --- a/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_256x.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "resblock": "1", - "num_gpus": 0, - "batch_size": 4, - "learning_rate": 0.0001, - "adam_b1": 0.8, - "adam_b2": 0.99, - "lr_decay": 0.9999996, - "seed": 1234, - - "upsample_rates": [4,4,2,2,2,2], - "upsample_kernel_sizes": [8,8,4,4,4,4], - "upsample_initial_channel": 1536, - "resblock_kernel_sizes": [3,7,11], - "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], - - "use_tanh_at_final": false, - "use_bias_at_final": false, - - "activation": "snakebeta", - "snake_logscale": true, - - "use_cqtd_instead_of_mrd": true, - "cqtd_filters": 128, - "cqtd_max_filters": 1024, - "cqtd_filters_scale": 1, - "cqtd_dilations": [1, 2, 4], - "cqtd_hop_lengths": [512, 256, 256], - "cqtd_n_octaves": [9, 9, 9], - "cqtd_bins_per_octaves": [24, 36, 48], - - "mpd_reshapes": [2, 3, 5, 7, 11], - "use_spectral_norm": false, - "discriminator_channel_mult": 1, - - "use_multiscale_melloss": true, - "lambda_melloss": 15, - - "clip_grad_norm": 500, - - "segment_size": 65536, - "num_mels": 80, - "num_freq": 1025, - "n_fft": 1024, - "hop_size": 256, - "win_size": 1024, - - "sampling_rate": 22050, - - "fmin": 0, - "fmax": null, - "fmax_for_loss": null, - - "num_workers": 4, - - "dist_config": { - "dist_backend": 
"nccl", - "dist_url": "tcp://localhost:54321", - "world_size": 1 - } -} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_fmax8k_256x.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_fmax8k_256x.json deleted file mode 100644 index a3c9699f..00000000 --- a/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_fmax8k_256x.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "resblock": "1", - "num_gpus": 0, - "batch_size": 4, - "learning_rate": 0.0001, - "adam_b1": 0.8, - "adam_b2": 0.99, - "lr_decay": 0.9999996, - "seed": 1234, - - "upsample_rates": [4,4,2,2,2,2], - "upsample_kernel_sizes": [8,8,4,4,4,4], - "upsample_initial_channel": 1536, - "resblock_kernel_sizes": [3,7,11], - "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], - - "use_tanh_at_final": false, - "use_bias_at_final": false, - - "activation": "snakebeta", - "snake_logscale": true, - - "use_cqtd_instead_of_mrd": true, - "cqtd_filters": 128, - "cqtd_max_filters": 1024, - "cqtd_filters_scale": 1, - "cqtd_dilations": [1, 2, 4], - "cqtd_hop_lengths": [512, 256, 256], - "cqtd_n_octaves": [9, 9, 9], - "cqtd_bins_per_octaves": [24, 36, 48], - - "mpd_reshapes": [2, 3, 5, 7, 11], - "use_spectral_norm": false, - "discriminator_channel_mult": 1, - - "use_multiscale_melloss": true, - "lambda_melloss": 15, - - "clip_grad_norm": 500, - - "segment_size": 65536, - "num_mels": 80, - "num_freq": 1025, - "n_fft": 1024, - "hop_size": 256, - "win_size": 1024, - - "sampling_rate": 22050, - - "fmin": 0, - "fmax": 8000, - "fmax_for_loss": null, - - "num_workers": 4, - - "dist_config": { - "dist_backend": "nccl", - "dist_url": "tcp://localhost:54321", - "world_size": 1 - } -} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_256x.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_256x.json deleted file mode 100644 index b6999d30..00000000 --- a/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_256x.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "resblock": "1", - "num_gpus": 0, - "batch_size": 4, - "learning_rate": 0.0001, - "adam_b1": 0.8, - "adam_b2": 0.99, - "lr_decay": 0.9999996, - "seed": 1234, - - "upsample_rates": [4,4,2,2,2,2], - "upsample_kernel_sizes": [8,8,4,4,4,4], - "upsample_initial_channel": 1536, - "resblock_kernel_sizes": [3,7,11], - "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], - - "use_tanh_at_final": false, - "use_bias_at_final": false, - - "activation": "snakebeta", - "snake_logscale": true, - - "use_cqtd_instead_of_mrd": true, - "cqtd_filters": 128, - "cqtd_max_filters": 1024, - "cqtd_filters_scale": 1, - "cqtd_dilations": [1, 2, 4], - "cqtd_hop_lengths": [512, 256, 256], - "cqtd_n_octaves": [9, 9, 9], - "cqtd_bins_per_octaves": [24, 36, 48], - - "mpd_reshapes": [2, 3, 5, 7, 11], - "use_spectral_norm": false, - "discriminator_channel_mult": 1, - - "use_multiscale_melloss": true, - "lambda_melloss": 15, - - "clip_grad_norm": 500, - - "segment_size": 65536, - "num_mels": 128, - "num_freq": 1025, - "n_fft": 1024, - "hop_size": 256, - "win_size": 1024, - - "sampling_rate": 44100, - - "fmin": 0, - "fmax": null, - "fmax_for_loss": null, - - "num_workers": 4, - - "dist_config": { - "dist_backend": "nccl", - "dist_url": "tcp://localhost:54321", - "world_size": 1 - } -} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_512x.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_512x.json deleted file mode 100644 index 2d7176c9..00000000 --- a/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_512x.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "resblock": "1", - "num_gpus": 0, - 
"batch_size": 4, - "learning_rate": 0.0001, - "adam_b1": 0.8, - "adam_b2": 0.99, - "lr_decay": 0.9999996, - "seed": 1234, - - "upsample_rates": [8,4,2,2,2,2], - "upsample_kernel_sizes": [16,8,4,4,4,4], - "upsample_initial_channel": 1536, - "resblock_kernel_sizes": [3,7,11], - "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], - - "use_tanh_at_final": false, - "use_bias_at_final": false, - - "activation": "snakebeta", - "snake_logscale": true, - - "use_cqtd_instead_of_mrd": true, - "cqtd_filters": 128, - "cqtd_max_filters": 1024, - "cqtd_filters_scale": 1, - "cqtd_dilations": [1, 2, 4], - "cqtd_hop_lengths": [512, 256, 256], - "cqtd_n_octaves": [9, 9, 9], - "cqtd_bins_per_octaves": [24, 36, 48], - - "mpd_reshapes": [2, 3, 5, 7, 11], - "use_spectral_norm": false, - "discriminator_channel_mult": 1, - - "use_multiscale_melloss": true, - "lambda_melloss": 15, - - "clip_grad_norm": 500, - - "segment_size": 65536, - "num_mels": 128, - "num_freq": 2049, - "n_fft": 2048, - "hop_size": 512, - "win_size": 2048, - - "sampling_rate": 44100, - - "fmin": 0, - "fmax": null, - "fmax_for_loss": null, - - "num_workers": 4, - - "dist_config": { - "dist_backend": "nccl", - "dist_url": "tcp://localhost:54321", - "world_size": 1 - } -} diff --git a/GPT_SoVITS/BigVGAN/discriminators.py b/GPT_SoVITS/BigVGAN/discriminators.py deleted file mode 100644 index 2d44c798..00000000 --- a/GPT_SoVITS/BigVGAN/discriminators.py +++ /dev/null @@ -1,625 +0,0 @@ -# Copyright (c) 2024 NVIDIA CORPORATION. -# Licensed under the MIT license. - -# Adapted from https://github.com/jik876/hifi-gan under the MIT license. -# LICENSE is in incl_licenses directory. - - -import torch -import torch.nn.functional as F -import torch.nn as nn -from torch.nn import Conv2d -from torch.nn.utils import weight_norm, spectral_norm -from torchaudio.transforms import Spectrogram, Resample - -from env import AttrDict -from utils import get_padding -import typing -from typing import List, Tuple - - -class DiscriminatorP(torch.nn.Module): - def __init__( - self, - h: AttrDict, - period: List[int], - kernel_size: int = 5, - stride: int = 3, - use_spectral_norm: bool = False, - ): - super().__init__() - self.period = period - self.d_mult = h.discriminator_channel_mult - norm_f = weight_norm if not use_spectral_norm else spectral_norm - - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - int(32 * self.d_mult), - (kernel_size, 1), - (stride, 1), - padding=(get_padding(5, 1), 0), - ) - ), - norm_f( - Conv2d( - int(32 * self.d_mult), - int(128 * self.d_mult), - (kernel_size, 1), - (stride, 1), - padding=(get_padding(5, 1), 0), - ) - ), - norm_f( - Conv2d( - int(128 * self.d_mult), - int(512 * self.d_mult), - (kernel_size, 1), - (stride, 1), - padding=(get_padding(5, 1), 0), - ) - ), - norm_f( - Conv2d( - int(512 * self.d_mult), - int(1024 * self.d_mult), - (kernel_size, 1), - (stride, 1), - padding=(get_padding(5, 1), 0), - ) - ), - norm_f( - Conv2d( - int(1024 * self.d_mult), - int(1024 * self.d_mult), - (kernel_size, 1), - 1, - padding=(2, 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(int(1024 * self.d_mult), 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, 0.1) - fmap.append(x) - x = 
self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, h: AttrDict): - super().__init__() - self.mpd_reshapes = h.mpd_reshapes - print(f"mpd_reshapes: {self.mpd_reshapes}") - self.discriminators = nn.ModuleList( - [DiscriminatorP(h, rs, use_spectral_norm=h.use_spectral_norm) for rs in self.mpd_reshapes] - ) - - def forward( - self, y: torch.Tensor, y_hat: torch.Tensor - ) -> Tuple[ - List[torch.Tensor], - List[torch.Tensor], - List[List[torch.Tensor]], - List[List[torch.Tensor]], - ]: - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorR(nn.Module): - def __init__(self, cfg: AttrDict, resolution: List[List[int]]): - super().__init__() - - self.resolution = resolution - assert len(self.resolution) == 3, f"MRD layer requires list with len=3, got {self.resolution}" - self.lrelu_slope = 0.1 - - norm_f = weight_norm if cfg.use_spectral_norm == False else spectral_norm - if hasattr(cfg, "mrd_use_spectral_norm"): - print(f"[INFO] overriding MRD use_spectral_norm as {cfg.mrd_use_spectral_norm}") - norm_f = weight_norm if cfg.mrd_use_spectral_norm == False else spectral_norm - self.d_mult = cfg.discriminator_channel_mult - if hasattr(cfg, "mrd_channel_mult"): - print(f"[INFO] overriding mrd channel multiplier as {cfg.mrd_channel_mult}") - self.d_mult = cfg.mrd_channel_mult - - self.convs = nn.ModuleList( - [ - norm_f(nn.Conv2d(1, int(32 * self.d_mult), (3, 9), padding=(1, 4))), - norm_f( - nn.Conv2d( - int(32 * self.d_mult), - int(32 * self.d_mult), - (3, 9), - stride=(1, 2), - padding=(1, 4), - ) - ), - norm_f( - nn.Conv2d( - int(32 * self.d_mult), - int(32 * self.d_mult), - (3, 9), - stride=(1, 2), - padding=(1, 4), - ) - ), - norm_f( - nn.Conv2d( - int(32 * self.d_mult), - int(32 * self.d_mult), - (3, 9), - stride=(1, 2), - padding=(1, 4), - ) - ), - norm_f( - nn.Conv2d( - int(32 * self.d_mult), - int(32 * self.d_mult), - (3, 3), - padding=(1, 1), - ) - ), - ] - ) - self.conv_post = norm_f(nn.Conv2d(int(32 * self.d_mult), 1, (3, 3), padding=(1, 1))) - - def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: - fmap = [] - - x = self.spectrogram(x) - x = x.unsqueeze(1) - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, self.lrelu_slope) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - def spectrogram(self, x: torch.Tensor) -> torch.Tensor: - n_fft, hop_length, win_length = self.resolution - x = F.pad( - x, - (int((n_fft - hop_length) / 2), int((n_fft - hop_length) / 2)), - mode="reflect", - ) - x = x.squeeze(1) - x = torch.stft( - x, - n_fft=n_fft, - hop_length=hop_length, - win_length=win_length, - center=False, - return_complex=True, - ) - x = torch.view_as_real(x) # [B, F, TT, 2] - mag = torch.norm(x, p=2, dim=-1) # [B, F, TT] - - return mag - - -class MultiResolutionDiscriminator(nn.Module): - def __init__(self, cfg, debug=False): - super().__init__() - self.resolutions = cfg.resolutions - assert len(self.resolutions) == 3, ( - f"MRD requires list of list with len=3, each element having a list with len=3. 
Got {self.resolutions}" - ) - self.discriminators = nn.ModuleList([DiscriminatorR(cfg, resolution) for resolution in self.resolutions]) - - def forward( - self, y: torch.Tensor, y_hat: torch.Tensor - ) -> Tuple[ - List[torch.Tensor], - List[torch.Tensor], - List[List[torch.Tensor]], - List[List[torch.Tensor]], - ]: - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(x=y) - y_d_g, fmap_g = d(x=y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -# Method based on descript-audio-codec: https://github.com/descriptinc/descript-audio-codec -# Modified code adapted from https://github.com/gemelo-ai/vocos under the MIT license. -# LICENSE is in incl_licenses directory. -class DiscriminatorB(nn.Module): - def __init__( - self, - window_length: int, - channels: int = 32, - hop_factor: float = 0.25, - bands: Tuple[Tuple[float, float], ...] = ( - (0.0, 0.1), - (0.1, 0.25), - (0.25, 0.5), - (0.5, 0.75), - (0.75, 1.0), - ), - ): - super().__init__() - self.window_length = window_length - self.hop_factor = hop_factor - self.spec_fn = Spectrogram( - n_fft=window_length, - hop_length=int(window_length * hop_factor), - win_length=window_length, - power=None, - ) - n_fft = window_length // 2 + 1 - bands = [(int(b[0] * n_fft), int(b[1] * n_fft)) for b in bands] - self.bands = bands - convs = lambda: nn.ModuleList( - [ - weight_norm(nn.Conv2d(2, channels, (3, 9), (1, 1), padding=(1, 4))), - weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))), - weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))), - weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))), - weight_norm(nn.Conv2d(channels, channels, (3, 3), (1, 1), padding=(1, 1))), - ] - ) - self.band_convs = nn.ModuleList([convs() for _ in range(len(self.bands))]) - - self.conv_post = weight_norm(nn.Conv2d(channels, 1, (3, 3), (1, 1), padding=(1, 1))) - - def spectrogram(self, x: torch.Tensor) -> List[torch.Tensor]: - # Remove DC offset - x = x - x.mean(dim=-1, keepdims=True) - # Peak normalize the volume of input audio - x = 0.8 * x / (x.abs().max(dim=-1, keepdim=True)[0] + 1e-9) - x = self.spec_fn(x) - x = torch.view_as_real(x) - x = x.permute(0, 3, 2, 1) # [B, F, T, C] -> [B, C, T, F] - # Split into bands - x_bands = [x[..., b[0] : b[1]] for b in self.bands] - return x_bands - - def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: - x_bands = self.spectrogram(x.squeeze(1)) - fmap = [] - x = [] - - for band, stack in zip(x_bands, self.band_convs): - for i, layer in enumerate(stack): - band = layer(band) - band = torch.nn.functional.leaky_relu(band, 0.1) - if i > 0: - fmap.append(band) - x.append(band) - - x = torch.cat(x, dim=-1) - x = self.conv_post(x) - fmap.append(x) - - return x, fmap - - -# Method based on descript-audio-codec: https://github.com/descriptinc/descript-audio-codec -# Modified code adapted from https://github.com/gemelo-ai/vocos under the MIT license. -# LICENSE is in incl_licenses directory. -class MultiBandDiscriminator(nn.Module): - def __init__( - self, - h, - ): - """ - Multi-band multi-scale STFT discriminator, with the architecture based on https://github.com/descriptinc/descript-audio-codec. - and the modified code adapted from https://github.com/gemelo-ai/vocos. - """ - super().__init__() - # fft_sizes (list[int]): Tuple of window lengths for FFT. 
Defaults to [2048, 1024, 512] if not set in h. - self.fft_sizes = h.get("mbd_fft_sizes", [2048, 1024, 512]) - self.discriminators = nn.ModuleList([DiscriminatorB(window_length=w) for w in self.fft_sizes]) - - def forward( - self, y: torch.Tensor, y_hat: torch.Tensor - ) -> Tuple[ - List[torch.Tensor], - List[torch.Tensor], - List[List[torch.Tensor]], - List[List[torch.Tensor]], - ]: - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - - for d in self.discriminators: - y_d_r, fmap_r = d(x=y) - y_d_g, fmap_g = d(x=y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -# Adapted from https://github.com/open-mmlab/Amphion/blob/main/models/vocoders/gan/discriminator/mssbcqtd.py under the MIT license. -# LICENSE is in incl_licenses directory. -class DiscriminatorCQT(nn.Module): - def __init__(self, cfg: AttrDict, hop_length: int, n_octaves: int, bins_per_octave: int): - super().__init__() - self.cfg = cfg - - self.filters = cfg["cqtd_filters"] - self.max_filters = cfg["cqtd_max_filters"] - self.filters_scale = cfg["cqtd_filters_scale"] - self.kernel_size = (3, 9) - self.dilations = cfg["cqtd_dilations"] - self.stride = (1, 2) - - self.in_channels = cfg["cqtd_in_channels"] - self.out_channels = cfg["cqtd_out_channels"] - self.fs = cfg["sampling_rate"] - self.hop_length = hop_length - self.n_octaves = n_octaves - self.bins_per_octave = bins_per_octave - - # Lazy-load - from nnAudio import features - - self.cqt_transform = features.cqt.CQT2010v2( - sr=self.fs * 2, - hop_length=self.hop_length, - n_bins=self.bins_per_octave * self.n_octaves, - bins_per_octave=self.bins_per_octave, - output_format="Complex", - pad_mode="constant", - ) - - self.conv_pres = nn.ModuleList() - for _ in range(self.n_octaves): - self.conv_pres.append( - nn.Conv2d( - self.in_channels * 2, - self.in_channels * 2, - kernel_size=self.kernel_size, - padding=self.get_2d_padding(self.kernel_size), - ) - ) - - self.convs = nn.ModuleList() - - self.convs.append( - nn.Conv2d( - self.in_channels * 2, - self.filters, - kernel_size=self.kernel_size, - padding=self.get_2d_padding(self.kernel_size), - ) - ) - - in_chs = min(self.filters_scale * self.filters, self.max_filters) - for i, dilation in enumerate(self.dilations): - out_chs = min((self.filters_scale ** (i + 1)) * self.filters, self.max_filters) - self.convs.append( - weight_norm( - nn.Conv2d( - in_chs, - out_chs, - kernel_size=self.kernel_size, - stride=self.stride, - dilation=(dilation, 1), - padding=self.get_2d_padding(self.kernel_size, (dilation, 1)), - ) - ) - ) - in_chs = out_chs - out_chs = min( - (self.filters_scale ** (len(self.dilations) + 1)) * self.filters, - self.max_filters, - ) - self.convs.append( - weight_norm( - nn.Conv2d( - in_chs, - out_chs, - kernel_size=(self.kernel_size[0], self.kernel_size[0]), - padding=self.get_2d_padding((self.kernel_size[0], self.kernel_size[0])), - ) - ) - ) - - self.conv_post = weight_norm( - nn.Conv2d( - out_chs, - self.out_channels, - kernel_size=(self.kernel_size[0], self.kernel_size[0]), - padding=self.get_2d_padding((self.kernel_size[0], self.kernel_size[0])), - ) - ) - - self.activation = torch.nn.LeakyReLU(negative_slope=0.1) - self.resample = Resample(orig_freq=self.fs, new_freq=self.fs * 2) - - self.cqtd_normalize_volume = self.cfg.get("cqtd_normalize_volume", False) - if self.cqtd_normalize_volume: - print( - "[INFO] cqtd_normalize_volume set to True. Will apply DC offset removal & peak volume normalization in CQTD!" 
- ) - - def get_2d_padding( - self, - kernel_size: typing.Tuple[int, int], - dilation: typing.Tuple[int, int] = (1, 1), - ): - return ( - ((kernel_size[0] - 1) * dilation[0]) // 2, - ((kernel_size[1] - 1) * dilation[1]) // 2, - ) - - def forward(self, x: torch.tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: - fmap = [] - - if self.cqtd_normalize_volume: - # Remove DC offset - x = x - x.mean(dim=-1, keepdims=True) - # Peak normalize the volume of input audio - x = 0.8 * x / (x.abs().max(dim=-1, keepdim=True)[0] + 1e-9) - - x = self.resample(x) - - z = self.cqt_transform(x) - - z_amplitude = z[:, :, :, 0].unsqueeze(1) - z_phase = z[:, :, :, 1].unsqueeze(1) - - z = torch.cat([z_amplitude, z_phase], dim=1) - z = torch.permute(z, (0, 1, 3, 2)) # [B, C, W, T] -> [B, C, T, W] - - latent_z = [] - for i in range(self.n_octaves): - latent_z.append( - self.conv_pres[i]( - z[ - :, - :, - :, - i * self.bins_per_octave : (i + 1) * self.bins_per_octave, - ] - ) - ) - latent_z = torch.cat(latent_z, dim=-1) - - for i, l in enumerate(self.convs): - latent_z = l(latent_z) - - latent_z = self.activation(latent_z) - fmap.append(latent_z) - - latent_z = self.conv_post(latent_z) - - return latent_z, fmap - - -class MultiScaleSubbandCQTDiscriminator(nn.Module): - def __init__(self, cfg: AttrDict): - super().__init__() - - self.cfg = cfg - # Using get with defaults - self.cfg["cqtd_filters"] = self.cfg.get("cqtd_filters", 32) - self.cfg["cqtd_max_filters"] = self.cfg.get("cqtd_max_filters", 1024) - self.cfg["cqtd_filters_scale"] = self.cfg.get("cqtd_filters_scale", 1) - self.cfg["cqtd_dilations"] = self.cfg.get("cqtd_dilations", [1, 2, 4]) - self.cfg["cqtd_in_channels"] = self.cfg.get("cqtd_in_channels", 1) - self.cfg["cqtd_out_channels"] = self.cfg.get("cqtd_out_channels", 1) - # Multi-scale params to loop over - self.cfg["cqtd_hop_lengths"] = self.cfg.get("cqtd_hop_lengths", [512, 256, 256]) - self.cfg["cqtd_n_octaves"] = self.cfg.get("cqtd_n_octaves", [9, 9, 9]) - self.cfg["cqtd_bins_per_octaves"] = self.cfg.get("cqtd_bins_per_octaves", [24, 36, 48]) - - self.discriminators = nn.ModuleList( - [ - DiscriminatorCQT( - self.cfg, - hop_length=self.cfg["cqtd_hop_lengths"][i], - n_octaves=self.cfg["cqtd_n_octaves"][i], - bins_per_octave=self.cfg["cqtd_bins_per_octaves"][i], - ) - for i in range(len(self.cfg["cqtd_hop_lengths"])) - ] - ) - - def forward( - self, y: torch.Tensor, y_hat: torch.Tensor - ) -> Tuple[ - List[torch.Tensor], - List[torch.Tensor], - List[List[torch.Tensor]], - List[List[torch.Tensor]], - ]: - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - - for disc in self.discriminators: - y_d_r, fmap_r = disc(y) - y_d_g, fmap_g = disc(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class CombinedDiscriminator(nn.Module): - """ - Wrapper of chaining multiple discrimiantor architectures. 
- Example: combine mbd and cqtd as a single class - """ - - def __init__(self, list_discriminator: List[nn.Module]): - super().__init__() - self.discrimiantor = nn.ModuleList(list_discriminator) - - def forward( - self, y: torch.Tensor, y_hat: torch.Tensor - ) -> Tuple[ - List[torch.Tensor], - List[torch.Tensor], - List[List[torch.Tensor]], - List[List[torch.Tensor]], - ]: - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - - for disc in self.discrimiantor: - y_d_r, y_d_g, fmap_r, fmap_g = disc(y, y_hat) - y_d_rs.extend(y_d_r) - fmap_rs.extend(fmap_r) - y_d_gs.extend(y_d_g) - fmap_gs.extend(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs diff --git a/GPT_SoVITS/BigVGAN/inference.py b/GPT_SoVITS/BigVGAN/inference.py deleted file mode 100644 index 5f892a3c..00000000 --- a/GPT_SoVITS/BigVGAN/inference.py +++ /dev/null @@ -1,85 +0,0 @@ -# Adapted from https://github.com/jik876/hifi-gan under the MIT license. -# LICENSE is in incl_licenses directory. - -from __future__ import absolute_import, division, print_function, unicode_literals - -import os -import argparse -import json -import torch -import librosa -from utils import load_checkpoint -from meldataset import get_mel_spectrogram -from scipy.io.wavfile import write -from env import AttrDict -from meldataset import MAX_WAV_VALUE -from bigvgan import BigVGAN as Generator - -h = None -device = None -torch.backends.cudnn.benchmark = False - - -def inference(a, h): - generator = Generator(h, use_cuda_kernel=a.use_cuda_kernel).to(device) - - state_dict_g = load_checkpoint(a.checkpoint_file, device) - generator.load_state_dict(state_dict_g["generator"]) - - filelist = os.listdir(a.input_wavs_dir) - - os.makedirs(a.output_dir, exist_ok=True) - - generator.eval() - generator.remove_weight_norm() - with torch.no_grad(): - for i, filname in enumerate(filelist): - # Load the ground truth audio and resample if necessary - wav, sr = librosa.load(os.path.join(a.input_wavs_dir, filname), sr=h.sampling_rate, mono=True) - wav = torch.FloatTensor(wav).to(device) - # Compute mel spectrogram from the ground truth audio - x = get_mel_spectrogram(wav.unsqueeze(0), generator.h) - - y_g_hat = generator(x) - - audio = y_g_hat.squeeze() - audio = audio * MAX_WAV_VALUE - audio = audio.cpu().numpy().astype("int16") - - output_file = os.path.join(a.output_dir, os.path.splitext(filname)[0] + "_generated.wav") - write(output_file, h.sampling_rate, audio) - print(output_file) - - -def main(): - print("Initializing Inference Process..") - - parser = argparse.ArgumentParser() - parser.add_argument("--input_wavs_dir", default="test_files") - parser.add_argument("--output_dir", default="generated_files") - parser.add_argument("--checkpoint_file", required=True) - parser.add_argument("--use_cuda_kernel", action="store_true", default=False) - - a = parser.parse_args() - - config_file = os.path.join(os.path.split(a.checkpoint_file)[0], "config.json") - with open(config_file) as f: - data = f.read() - - global h - json_config = json.loads(data) - h = AttrDict(json_config) - - torch.manual_seed(h.seed) - global device - if torch.cuda.is_available(): - torch.cuda.manual_seed(h.seed) - device = torch.device("cuda") - else: - device = torch.device("cpu") - - inference(a, h) - - -if __name__ == "__main__": - main() diff --git a/GPT_SoVITS/BigVGAN/inference_e2e.py b/GPT_SoVITS/BigVGAN/inference_e2e.py deleted file mode 100644 index 9c0df774..00000000 --- a/GPT_SoVITS/BigVGAN/inference_e2e.py +++ /dev/null @@ -1,100 +0,0 @@ -# Adapted from https://github.com/jik876/hifi-gan 
under the MIT license. -# LICENSE is in incl_licenses directory. - -from __future__ import absolute_import, division, print_function, unicode_literals - -import glob -import os -import numpy as np -import argparse -import json -import torch -from scipy.io.wavfile import write -from env import AttrDict -from meldataset import MAX_WAV_VALUE -from bigvgan import BigVGAN as Generator - -h = None -device = None -torch.backends.cudnn.benchmark = False - - -def load_checkpoint(filepath, device): - assert os.path.isfile(filepath) - print(f"Loading '{filepath}'") - checkpoint_dict = torch.load(filepath, map_location=device) - print("Complete.") - return checkpoint_dict - - -def scan_checkpoint(cp_dir, prefix): - pattern = os.path.join(cp_dir, prefix + "*") - cp_list = glob.glob(pattern) - if len(cp_list) == 0: - return "" - return sorted(cp_list)[-1] - - -def inference(a, h): - generator = Generator(h, use_cuda_kernel=a.use_cuda_kernel).to(device) - - state_dict_g = load_checkpoint(a.checkpoint_file, device) - generator.load_state_dict(state_dict_g["generator"]) - - filelist = os.listdir(a.input_mels_dir) - - os.makedirs(a.output_dir, exist_ok=True) - - generator.eval() - generator.remove_weight_norm() - with torch.no_grad(): - for i, filname in enumerate(filelist): - # Load the mel spectrogram in .npy format - x = np.load(os.path.join(a.input_mels_dir, filname)) - x = torch.FloatTensor(x).to(device) - if len(x.shape) == 2: - x = x.unsqueeze(0) - - y_g_hat = generator(x) - - audio = y_g_hat.squeeze() - audio = audio * MAX_WAV_VALUE - audio = audio.cpu().numpy().astype("int16") - - output_file = os.path.join(a.output_dir, os.path.splitext(filname)[0] + "_generated_e2e.wav") - write(output_file, h.sampling_rate, audio) - print(output_file) - - -def main(): - print("Initializing Inference Process..") - - parser = argparse.ArgumentParser() - parser.add_argument("--input_mels_dir", default="test_mel_files") - parser.add_argument("--output_dir", default="generated_files_from_mel") - parser.add_argument("--checkpoint_file", required=True) - parser.add_argument("--use_cuda_kernel", action="store_true", default=False) - - a = parser.parse_args() - - config_file = os.path.join(os.path.split(a.checkpoint_file)[0], "config.json") - with open(config_file) as f: - data = f.read() - - global h - json_config = json.loads(data) - h = AttrDict(json_config) - - torch.manual_seed(h.seed) - global device - if torch.cuda.is_available(): - torch.cuda.manual_seed(h.seed) - device = torch.device("cuda") - else: - device = torch.device("cpu") - - inference(a, h) - - -if __name__ == "__main__": - main() diff --git a/GPT_SoVITS/BigVGAN/loss.py b/GPT_SoVITS/BigVGAN/loss.py deleted file mode 100644 index c295a144..00000000 --- a/GPT_SoVITS/BigVGAN/loss.py +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright (c) 2024 NVIDIA CORPORATION. -# Licensed under the MIT license. - -# Adapted from https://github.com/jik876/hifi-gan under the MIT license. -# LICENSE is in incl_licenses directory. - - -import torch -import torch.nn as nn -from librosa.filters import mel as librosa_mel_fn -from scipy import signal - -import typing -from typing import List, Tuple -from collections import namedtuple -import math -import functools - - -# Adapted from https://github.com/descriptinc/descript-audio-codec/blob/main/dac/nn/loss.py under the MIT license. -# LICENSE is in incl_licenses directory. -class MultiScaleMelSpectrogramLoss(nn.Module): - """Compute distance between mel spectrograms. Can be used - in a multi-scale way. 
- - Parameters - ---------- - n_mels : List[int] - Number of mels per STFT, by default [5, 10, 20, 40, 80, 160, 320], - window_lengths : List[int], optional - Length of each window of each STFT, by default [32, 64, 128, 256, 512, 1024, 2048] - loss_fn : typing.Callable, optional - How to compare each loss, by default nn.L1Loss() - clamp_eps : float, optional - Clamp on the log magnitude, below, by default 1e-5 - mag_weight : float, optional - Weight of raw magnitude portion of loss, by default 0.0 (no ampliciation on mag part) - log_weight : float, optional - Weight of log magnitude portion of loss, by default 1.0 - pow : float, optional - Power to raise magnitude to before taking log, by default 1.0 - weight : float, optional - Weight of this loss, by default 1.0 - match_stride : bool, optional - Whether to match the stride of convolutional layers, by default False - - Implementation copied from: https://github.com/descriptinc/lyrebird-audiotools/blob/961786aa1a9d628cca0c0486e5885a457fe70c1a/audiotools/metrics/spectral.py - Additional code copied and modified from https://github.com/descriptinc/audiotools/blob/master/audiotools/core/audio_signal.py - """ - - def __init__( - self, - sampling_rate: int, - n_mels: List[int] = [5, 10, 20, 40, 80, 160, 320], - window_lengths: List[int] = [32, 64, 128, 256, 512, 1024, 2048], - loss_fn: typing.Callable = nn.L1Loss(), - clamp_eps: float = 1e-5, - mag_weight: float = 0.0, - log_weight: float = 1.0, - pow: float = 1.0, - weight: float = 1.0, - match_stride: bool = False, - mel_fmin: List[float] = [0, 0, 0, 0, 0, 0, 0], - mel_fmax: List[float] = [None, None, None, None, None, None, None], - window_type: str = "hann", - ): - super().__init__() - self.sampling_rate = sampling_rate - - STFTParams = namedtuple( - "STFTParams", - ["window_length", "hop_length", "window_type", "match_stride"], - ) - - self.stft_params = [ - STFTParams( - window_length=w, - hop_length=w // 4, - match_stride=match_stride, - window_type=window_type, - ) - for w in window_lengths - ] - self.n_mels = n_mels - self.loss_fn = loss_fn - self.clamp_eps = clamp_eps - self.log_weight = log_weight - self.mag_weight = mag_weight - self.weight = weight - self.mel_fmin = mel_fmin - self.mel_fmax = mel_fmax - self.pow = pow - - @staticmethod - @functools.lru_cache(None) - def get_window( - window_type, - window_length, - ): - return signal.get_window(window_type, window_length) - - @staticmethod - @functools.lru_cache(None) - def get_mel_filters(sr, n_fft, n_mels, fmin, fmax): - return librosa_mel_fn(sr=sr, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax) - - def mel_spectrogram( - self, - wav, - n_mels, - fmin, - fmax, - window_length, - hop_length, - match_stride, - window_type, - ): - """ - Mirrors AudioSignal.mel_spectrogram used by BigVGAN-v2 training from: - https://github.com/descriptinc/audiotools/blob/master/audiotools/core/audio_signal.py - """ - B, C, T = wav.shape - - if match_stride: - assert hop_length == window_length // 4, "For match_stride, hop must equal n_fft // 4" - right_pad = math.ceil(T / hop_length) * hop_length - T - pad = (window_length - hop_length) // 2 - else: - right_pad = 0 - pad = 0 - - wav = torch.nn.functional.pad(wav, (pad, pad + right_pad), mode="reflect") - - window = self.get_window(window_type, window_length) - window = torch.from_numpy(window).to(wav.device).float() - - stft = torch.stft( - wav.reshape(-1, T), - n_fft=window_length, - hop_length=hop_length, - window=window, - return_complex=True, - center=True, - ) - _, nf, nt = stft.shape - stft 
= stft.reshape(B, C, nf, nt) - if match_stride: - """ - Drop first two and last two frames, which are added, because of padding. Now num_frames * hop_length = num_samples. - """ - stft = stft[..., 2:-2] - magnitude = torch.abs(stft) - - nf = magnitude.shape[2] - mel_basis = self.get_mel_filters(self.sampling_rate, 2 * (nf - 1), n_mels, fmin, fmax) - mel_basis = torch.from_numpy(mel_basis).to(wav.device) - mel_spectrogram = magnitude.transpose(2, -1) @ mel_basis.T - mel_spectrogram = mel_spectrogram.transpose(-1, 2) - - return mel_spectrogram - - def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: - """Computes mel loss between an estimate and a reference - signal. - - Parameters - ---------- - x : torch.Tensor - Estimate signal - y : torch.Tensor - Reference signal - - Returns - ------- - torch.Tensor - Mel loss. - """ - - loss = 0.0 - for n_mels, fmin, fmax, s in zip(self.n_mels, self.mel_fmin, self.mel_fmax, self.stft_params): - kwargs = { - "n_mels": n_mels, - "fmin": fmin, - "fmax": fmax, - "window_length": s.window_length, - "hop_length": s.hop_length, - "match_stride": s.match_stride, - "window_type": s.window_type, - } - - x_mels = self.mel_spectrogram(x, **kwargs) - y_mels = self.mel_spectrogram(y, **kwargs) - x_logmels = torch.log(x_mels.clamp(min=self.clamp_eps).pow(self.pow)) / torch.log(torch.tensor(10.0)) - y_logmels = torch.log(y_mels.clamp(min=self.clamp_eps).pow(self.pow)) / torch.log(torch.tensor(10.0)) - - loss += self.log_weight * self.loss_fn(x_logmels, y_logmels) - loss += self.mag_weight * self.loss_fn(x_logmels, y_logmels) - - return loss - - -# Loss functions -def feature_loss(fmap_r: List[List[torch.Tensor]], fmap_g: List[List[torch.Tensor]]) -> torch.Tensor: - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 # This equates to lambda=2.0 for the feature matching loss - - -def discriminator_loss( - disc_real_outputs: List[torch.Tensor], disc_generated_outputs: List[torch.Tensor] -) -> Tuple[torch.Tensor, List[torch.Tensor], List[torch.Tensor]]: - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg**2) - loss += r_loss + g_loss - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss( - disc_outputs: List[torch.Tensor], -) -> Tuple[torch.Tensor, List[torch.Tensor]]: - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses diff --git a/GPT_SoVITS/BigVGAN/meldataset.py b/GPT_SoVITS/BigVGAN/meldataset.py deleted file mode 100644 index dc12c987..00000000 --- a/GPT_SoVITS/BigVGAN/meldataset.py +++ /dev/null @@ -1,370 +0,0 @@ -# Copyright (c) 2024 NVIDIA CORPORATION. -# Licensed under the MIT license. - -# Adapted from https://github.com/jik876/hifi-gan under the MIT license. -# LICENSE is in incl_licenses directory. 
- -import math -import os -import random -import torch -import torch.utils.data -import numpy as np -import librosa -from librosa.filters import mel as librosa_mel_fn -import pathlib -from tqdm import tqdm -from typing import List, Tuple, Optional -from .env import AttrDict - -MAX_WAV_VALUE = 32767.0 # NOTE: 32768.0 -1 to prevent int16 overflow (results in popping sound in corner cases) - - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - return dynamic_range_compression_torch(magnitudes) - - -def spectral_de_normalize_torch(magnitudes): - return dynamic_range_decompression_torch(magnitudes) - - -mel_basis_cache = {} -hann_window_cache = {} - - -def mel_spectrogram( - y: torch.Tensor, - n_fft: int, - num_mels: int, - sampling_rate: int, - hop_size: int, - win_size: int, - fmin: int, - fmax: int = None, - center: bool = False, -) -> torch.Tensor: - """ - Calculate the mel spectrogram of an input signal. - This function uses slaney norm for the librosa mel filterbank (using librosa.filters.mel) and uses Hann window for STFT (using torch.stft). - - Args: - y (torch.Tensor): Input signal. - n_fft (int): FFT size. - num_mels (int): Number of mel bins. - sampling_rate (int): Sampling rate of the input signal. - hop_size (int): Hop size for STFT. - win_size (int): Window size for STFT. - fmin (int): Minimum frequency for mel filterbank. - fmax (int): Maximum frequency for mel filterbank. If None, defaults to half the sampling rate (fmax = sr / 2.0) inside librosa_mel_fn - center (bool): Whether to pad the input to center the frames. Default is False. - - Returns: - torch.Tensor: Mel spectrogram. - """ - if torch.min(y) < -1.0: - print(f"[WARNING] Min value of input waveform signal is {torch.min(y)}") - if torch.max(y) > 1.0: - print(f"[WARNING] Max value of input waveform signal is {torch.max(y)}") - - device = y.device - key = f"{n_fft}_{num_mels}_{sampling_rate}_{hop_size}_{win_size}_{fmin}_{fmax}_{device}" - - if key not in mel_basis_cache: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) - mel_basis_cache[key] = torch.from_numpy(mel).float().to(device) - hann_window_cache[key] = torch.hann_window(win_size).to(device) - - mel_basis = mel_basis_cache[key] - hann_window = hann_window_cache[key] - - padding = (n_fft - hop_size) // 2 - y = torch.nn.functional.pad(y.unsqueeze(1), (padding, padding), mode="reflect").squeeze(1) - - spec = torch.stft( - y, - n_fft, - hop_length=hop_size, - win_length=win_size, - window=hann_window, - center=center, - pad_mode="reflect", - normalized=False, - onesided=True, - return_complex=True, - ) - spec = torch.sqrt(torch.view_as_real(spec).pow(2).sum(-1) + 1e-9) - - mel_spec = torch.matmul(mel_basis, spec) - mel_spec = spectral_normalize_torch(mel_spec) - - return mel_spec - - -def get_mel_spectrogram(wav, h): - """ - Generate mel spectrogram from a waveform using given hyperparameters. - - Args: - wav (torch.Tensor): Input waveform. - h: Hyperparameters object with attributes n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax. - - Returns: - torch.Tensor: Mel spectrogram. 
- """ - return mel_spectrogram( - wav, - h.n_fft, - h.num_mels, - h.sampling_rate, - h.hop_size, - h.win_size, - h.fmin, - h.fmax, - ) - - -def get_dataset_filelist(a): - training_files = [] - validation_files = [] - list_unseen_validation_files = [] - - with open(a.input_training_file, "r", encoding="utf-8") as fi: - training_files = [ - os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0 - ] - print(f"first training file: {training_files[0]}") - - with open(a.input_validation_file, "r", encoding="utf-8") as fi: - validation_files = [ - os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0 - ] - print(f"first validation file: {validation_files[0]}") - - for i in range(len(a.list_input_unseen_validation_file)): - with open(a.list_input_unseen_validation_file[i], "r", encoding="utf-8") as fi: - unseen_validation_files = [ - os.path.join(a.list_input_unseen_wavs_dir[i], x.split("|")[0] + ".wav") - for x in fi.read().split("\n") - if len(x) > 0 - ] - print(f"first unseen {i}th validation fileset: {unseen_validation_files[0]}") - list_unseen_validation_files.append(unseen_validation_files) - - return training_files, validation_files, list_unseen_validation_files - - -class MelDataset(torch.utils.data.Dataset): - def __init__( - self, - training_files: List[str], - hparams: AttrDict, - segment_size: int, - n_fft: int, - num_mels: int, - hop_size: int, - win_size: int, - sampling_rate: int, - fmin: int, - fmax: Optional[int], - split: bool = True, - shuffle: bool = True, - device: str = None, - fmax_loss: Optional[int] = None, - fine_tuning: bool = False, - base_mels_path: str = None, - is_seen: bool = True, - ): - self.audio_files = training_files - random.seed(1234) - if shuffle: - random.shuffle(self.audio_files) - self.hparams = hparams - self.is_seen = is_seen - if self.is_seen: - self.name = pathlib.Path(self.audio_files[0]).parts[0] - else: - self.name = "-".join(pathlib.Path(self.audio_files[0]).parts[:2]).strip("/") - - self.segment_size = segment_size - self.sampling_rate = sampling_rate - self.split = split - self.n_fft = n_fft - self.num_mels = num_mels - self.hop_size = hop_size - self.win_size = win_size - self.fmin = fmin - self.fmax = fmax - self.fmax_loss = fmax_loss - self.device = device - self.fine_tuning = fine_tuning - self.base_mels_path = base_mels_path - - print("[INFO] checking dataset integrity...") - for i in tqdm(range(len(self.audio_files))): - assert os.path.exists(self.audio_files[i]), f"{self.audio_files[i]} not found" - - def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor, str, torch.Tensor]: - try: - filename = self.audio_files[index] - - # Use librosa.load that ensures loading waveform into mono with [-1, 1] float values - # Audio is ndarray with shape [T_time]. 
Disable auto-resampling here to minimize overhead - # The on-the-fly resampling during training will be done only for the obtained random chunk - audio, source_sampling_rate = librosa.load(filename, sr=None, mono=True) - - # Main logic that uses pair for training BigVGAN - if not self.fine_tuning: - if self.split: # Training step - # Obtain randomized audio chunk - if source_sampling_rate != self.sampling_rate: - # Adjust segment size to crop if the source sr is different - target_segment_size = math.ceil(self.segment_size * (source_sampling_rate / self.sampling_rate)) - else: - target_segment_size = self.segment_size - - # Compute upper bound index for the random chunk - random_chunk_upper_bound = max(0, audio.shape[0] - target_segment_size) - - # Crop or pad audio to obtain random chunk with target_segment_size - if audio.shape[0] >= target_segment_size: - audio_start = random.randint(0, random_chunk_upper_bound) - audio = audio[audio_start : audio_start + target_segment_size] - else: - audio = np.pad( - audio, - (0, target_segment_size - audio.shape[0]), - mode="constant", - ) - - # Resample audio chunk to self.sampling rate - if source_sampling_rate != self.sampling_rate: - audio = librosa.resample( - audio, - orig_sr=source_sampling_rate, - target_sr=self.sampling_rate, - ) - if audio.shape[0] > self.segment_size: - # trim last elements to match self.segment_size (e.g., 16385 for 44khz downsampled to 24khz -> 16384) - audio = audio[: self.segment_size] - - else: # Validation step - # Resample full audio clip to target sampling rate - if source_sampling_rate != self.sampling_rate: - audio = librosa.resample( - audio, - orig_sr=source_sampling_rate, - target_sr=self.sampling_rate, - ) - # Trim last elements to match audio length to self.hop_size * n for evaluation - if (audio.shape[0] % self.hop_size) != 0: - audio = audio[: -(audio.shape[0] % self.hop_size)] - - # BigVGAN is trained using volume-normalized waveform - audio = librosa.util.normalize(audio) * 0.95 - - # Cast ndarray to torch tensor - audio = torch.FloatTensor(audio) - audio = audio.unsqueeze(0) # [B(1), self.segment_size] - - # Compute mel spectrogram corresponding to audio - mel = mel_spectrogram( - audio, - self.n_fft, - self.num_mels, - self.sampling_rate, - self.hop_size, - self.win_size, - self.fmin, - self.fmax, - center=False, - ) # [B(1), self.num_mels, self.segment_size // self.hop_size] - - # Fine-tuning logic that uses pre-computed mel. 
Example: Using TTS model-generated mel as input - else: - # For fine-tuning, assert that the waveform is in the defined sampling_rate - # Fine-tuning won't support on-the-fly resampling to be fool-proof (the dataset should have been prepared properly) - assert source_sampling_rate == self.sampling_rate, ( - f"For fine_tuning, waveform must be in the spcified sampling rate {self.sampling_rate}, got {source_sampling_rate}" - ) - - # Cast ndarray to torch tensor - audio = torch.FloatTensor(audio) - audio = audio.unsqueeze(0) # [B(1), T_time] - - # Load pre-computed mel from disk - mel = np.load( - os.path.join( - self.base_mels_path, - os.path.splitext(os.path.split(filename)[-1])[0] + ".npy", - ) - ) - mel = torch.from_numpy(mel) - - if len(mel.shape) < 3: - mel = mel.unsqueeze(0) # ensure [B, C, T] - - if self.split: - frames_per_seg = math.ceil(self.segment_size / self.hop_size) - - if audio.size(1) >= self.segment_size: - mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1) - mel = mel[:, :, mel_start : mel_start + frames_per_seg] - audio = audio[ - :, - mel_start * self.hop_size : (mel_start + frames_per_seg) * self.hop_size, - ] - - # Pad pre-computed mel and audio to match length to ensuring fine-tuning without error. - # NOTE: this may introduce a single-frame misalignment of the - # To remove possible misalignment, it is recommended to prepare the pair where the audio length is the integer multiple of self.hop_size - mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), "constant") - audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), "constant") - - # Compute mel_loss used by spectral regression objective. Uses self.fmax_loss instead (usually None) - mel_loss = mel_spectrogram( - audio, - self.n_fft, - self.num_mels, - self.sampling_rate, - self.hop_size, - self.win_size, - self.fmin, - self.fmax_loss, - center=False, - ) # [B(1), self.num_mels, self.segment_size // self.hop_size] - - # Shape sanity checks - assert ( - audio.shape[1] == mel.shape[2] * self.hop_size and audio.shape[1] == mel_loss.shape[2] * self.hop_size - ), ( - f"Audio length must be mel frame length * hop_size. Got audio shape {audio.shape} mel shape {mel.shape} mel_loss shape {mel_loss.shape}" - ) - - return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze()) - - # If it encounters error during loading the data, skip this sample and load random other sample to the batch - except Exception as e: - if self.fine_tuning: - raise e # Terminate training if it is fine-tuning. The dataset should have been prepared properly. - else: - print(f"[WARNING] Failed to load waveform, skipping! 
filename: {filename} Error: {e}") - return self[random.randrange(len(self))] - - def __len__(self): - return len(self.audio_files) diff --git a/GPT_SoVITS/BigVGAN/nv-modelcard++/.gitkeep b/GPT_SoVITS/BigVGAN/nv-modelcard++/.gitkeep deleted file mode 100644 index 8b137891..00000000 --- a/GPT_SoVITS/BigVGAN/nv-modelcard++/.gitkeep +++ /dev/null @@ -1 +0,0 @@ - diff --git a/GPT_SoVITS/BigVGAN/nv-modelcard++/bias.md b/GPT_SoVITS/BigVGAN/nv-modelcard++/bias.md deleted file mode 100644 index 4b388c28..00000000 --- a/GPT_SoVITS/BigVGAN/nv-modelcard++/bias.md +++ /dev/null @@ -1,4 +0,0 @@ -| Field | Response | -| :--------------------------------------------------------------------------------------------------------- | :--------------------------------------------------- | -| Participation considerations from adversely impacted groups protected classes in model design and testing: | None | -| Measures taken to mitigate against unwanted bias: | No measures taken to mitigate against unwanted bias. | diff --git a/GPT_SoVITS/BigVGAN/nv-modelcard++/explainability.md b/GPT_SoVITS/BigVGAN/nv-modelcard++/explainability.md deleted file mode 100644 index 6f1a1667..00000000 --- a/GPT_SoVITS/BigVGAN/nv-modelcard++/explainability.md +++ /dev/null @@ -1,13 +0,0 @@ -| Field | Response | -| :---------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Intended Application & Domain: | Generating waveform from mel spectrogram. | -| Model Type: | Convolutional Neural Network (CNN) | -| Intended Users: | This model is intended for developers to synthesize and generate waveforms from the AI-generated mel spectrograms. | -| Output: | Audio Waveform | -| Describe how the model works: | Model generates audio waveform corresponding to the input mel spectrogram. | -| Name the adversely impacted groups this has been tested to deliver comparable outcomes regardless of: | Not Applicable | -| Technical Limitations: | This may not perform well on synthetically-generated mel spectrograms that deviate significantly from the profile of mel spectrograms on which this was trained. | -| Verified to have met prescribed NVIDIA quality standards: | Yes | -| Performance Metrics: | Perceptual Evaluation of Speech Quality (PESQ), Virtual Speech Quality Objective Listener (VISQOL), Multi-resolution STFT (MRSTFT), Mel cepstral distortion (MCD), Periodicity RMSE, Voice/Unvoiced F1 Score (V/UV F1) | -| Potential Known Risks: | This model may generate low-quality or distorted soundwaves. | -| Licensing: | https://github.com/NVIDIA/BigVGAN/blob/main/LICENSE | diff --git a/GPT_SoVITS/BigVGAN/nv-modelcard++/overview.md b/GPT_SoVITS/BigVGAN/nv-modelcard++/overview.md deleted file mode 100644 index a39cba0b..00000000 --- a/GPT_SoVITS/BigVGAN/nv-modelcard++/overview.md +++ /dev/null @@ -1,126 +0,0 @@ -# Model Overview - -## Description: - -BigVGAN is a generative AI model specialized in synthesizing audio waveforms using Mel spectrogram as inputs. - -
- -BigVGAN is a fully convolutional architecture with several upsampling blocks using transposed convolution followed by multiple residual dilated convolution layers. - -BigVGAN consists of a novel module, called anti-aliased multi-periodicity composition (AMP), which is specifically designed for generating waveforms. AMP is specialized in synthesizing high-frequency and periodic soundwaves drawing inspiration from audio signal processing principles. - -It applies a periodic activation function, called Snake, which provides an inductive bias to the architecture in generating periodic soundwaves. It also applies anti-aliasing filters to reduce undesired artifacts in the generated waveforms.
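The Snake activation referenced above has a simple closed form, `snake(x) = x + (1/α) · sin²(αx)`, with a learnable `α` per channel. Below is a minimal PyTorch sketch for illustration only; the class name `SnakeSketch` is invented here, and the actual `Snake`/`SnakeBeta` implementations live in `GPT_SoVITS/BigVGAN/activations.py`.

```python
import torch
import torch.nn as nn


class SnakeSketch(nn.Module):
    """Illustrative Snake activation: x + (1/alpha) * sin^2(alpha * x)."""

    def __init__(self, channels: int, alpha: float = 1.0):
        super().__init__()
        # One learnable alpha per channel, broadcast over the time axis at call time.
        self.alpha = nn.Parameter(alpha * torch.ones(channels))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: [batch, channels, time]
        a = self.alpha.unsqueeze(0).unsqueeze(-1)  # [1, channels, 1]
        return x + (1.0 / (a + 1e-9)) * torch.sin(a * x).pow(2)


y = SnakeSketch(channels=80)(torch.randn(1, 80, 100))  # same shape in, same shape out
```

The `snakebeta` variant used by the released checkpoints additionally learns a separate magnitude parameter `β` (i.e. `x + (1/β) · sin²(αx)`), with both parameters stored on a log scale.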
- -This model is ready for commercial use.
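For orientation, a minimal inference sketch is shown below, condensed from the removed `inference.py` and the pretrained checkpoints listed further up; the checkpoint id is taken from that table, while the import paths and the input file name are illustrative and assume the upstream BigVGAN package layout.

```python
import librosa
import torch

from bigvgan import BigVGAN  # upstream package layout assumed
from meldataset import MAX_WAV_VALUE, get_mel_spectrogram

device = "cuda" if torch.cuda.is_available() else "cpu"

# Any checkpoint from the table above follows the same pattern.
model = BigVGAN.from_pretrained("nvidia/bigvgan_v2_24khz_100band_256x", use_cuda_kernel=False)
model.remove_weight_norm()
model = model.eval().to(device)

# Load audio as mono float in [-1, 1] at the model's sampling rate, then compute its mel.
wav, _ = librosa.load("example.wav", sr=model.h.sampling_rate, mono=True)
mel = get_mel_spectrogram(torch.from_numpy(wav).unsqueeze(0).to(device), model.h)  # [1, num_mels, frames]

with torch.no_grad():
    audio = model(mel)  # [1, 1, frames * hop_size]

pcm16 = (audio.squeeze() * MAX_WAV_VALUE).clamp(-32768, 32767).cpu().numpy().astype("int16")
```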
- -## References: - -- [BigVGAN: A Universal Neural Vocoder with Large-Scale Training](https://arxiv.org/abs/2206.04658)
-- [Project Page](https://research.nvidia.com/labs/adlr/projects/bigvgan/)
-- [Audio Demo](https://bigvgan-demo.github.io/)
- -## Model Architecture: - -**Architecture Type:** Convolutional Neural Network (CNN)
-**Network Architecture:** Details of this model are available at https://github.com/NVIDIA/BigVGAN, and the related paper can be found at https://arxiv.org/abs/2206.04658
-**Model Version:** 2.0
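As a rough sketch (not an official usage guide) of how this architecture is instantiated in the tests removed later in this patch, assuming a `config.json` and a generator checkpoint are available locally (paths below are hypothetical):

```python
import json

import torch
from env import AttrDict        # helper module shipped alongside bigvgan.py
from bigvgan import BigVGAN

# Hypothetical paths; the config defines num_mels, sampling_rate, the
# upsampling ratio, and the rest of the generator hyperparameters.
with open("config.json") as f:
    h = AttrDict(json.load(f))

generator = BigVGAN(h).to("cuda")
state = torch.load("g_00000000", map_location="cuda")
generator.load_state_dict(state["generator"])
generator.remove_weight_norm()
generator.eval()

with torch.inference_mode():
    mel = torch.rand(1, h.num_mels, 64, device="cuda")  # [batch, num_mels, frames]
    wav = generator(mel)                                 # [batch, 1, time]
```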
- -## Input: - -**Input Type:** Audio
-**Input Format:** Mel Spectrogram
-**Input Parameters:** None
-**Other Properties Related to Input:** The input mel spectrogram has shape `[batch, channels, frames]`, where `channels` refers to the number of mel bands defined by the model and `frames` refers to the temporal length. The model supports arbitrarily long `frames` that fit into GPU memory. - -## Output: - -**Output Type:** Audio
-**Output Format:** Audio Waveform
-**Output Parameters:** None
-**Other Properties Related to Output:** The output audio waveform has shape `[batch, 1, time]`, where `1` refers to the mono audio channel and `time` refers to the temporal length. `time` is a fixed integer multiple of the input `frames`, determined by the upsampling ratio of the model (`time = upsampling ratio * frames`). The output audio waveform consists of float values in the range `[-1, 1]`. - -## Software Integration: - -**Runtime Engine(s):** PyTorch - -**Supported Hardware Microarchitecture Compatibility:** NVIDIA Ampere, NVIDIA Hopper, NVIDIA Lovelace, NVIDIA Turing, NVIDIA Volta
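A minimal sketch of the input/output contract described above, assuming illustrative values of 100 mel bands and a 256x upsampling ratio (the actual numbers come from the model config):

```python
import torch

batch, channels, frames = 1, 100, 32       # mel input: [batch, channels, frames]
upsampling_ratio = 256                     # model-dependent; 256 is only an example

mel = torch.randn(batch, channels, frames)
# wav = vocoder(mel)                       # assumes a loaded BigVGAN instance
expected_time = upsampling_ratio * frames  # time = upsampling ratio * frames
# assert wav.shape == (batch, 1, expected_time)
# assert wav.abs().max() <= 1.0            # float waveform bounded to [-1, 1]
```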
- -## Preferred/Supported Operating System(s): - -Linux - -## Model Version(s): - -v2.0 - -## Training, Testing, and Evaluation Datasets: - -### Training Dataset: - -The dataset contains diverse audio types, including speech in multiple languages, environmental sounds, and instruments. - -**Links:** - -- [AAM: Artificial Audio Multitracks Dataset](https://zenodo.org/records/5794629) -- [AudioCaps](https://audiocaps.github.io/) -- [AudioSet](https://research.google.com/audioset/index.html) -- [common-accent](https://huggingface.co/datasets/DTU54DL/common-accent) -- [Crowd Sourced Emotional Multimodal Actors Dataset (CREMA-D)](https://ieeexplore.ieee.org/document/6849440) -- [DCASE2017 Challenge, Task 4: Large-scale weakly supervised sound event detection for smart cars](https://dcase.community/challenge2017/task-large-scale-sound-event-detection) -- [FSDnoisy18k](https://zenodo.org/records/2529934) -- [Free Universal Sound Separation Dataset](https://zenodo.org/records/3694384) -- [Greatest Hits dataset](https://andrewowens.com/vis/) -- [GTZAN](https://ieeexplore.ieee.org/document/1021072) -- [JL corpus](https://www.kaggle.com/datasets/tli725/jl-corpus) -- [Medley-solos-DB: a cross-collection dataset for musical instrument recognition](https://zenodo.org/records/3464194) -- [MUSAN: A Music, Speech, and Noise Corpus](https://www.openslr.org/17/) -- [MusicBench](https://huggingface.co/datasets/amaai-lab/MusicBench) -- [MusicCaps](https://www.kaggle.com/datasets/googleai/musiccaps) -- [MusicNet](https://www.kaggle.com/datasets/imsparsh/musicnet-dataset) -- [NSynth](https://magenta.tensorflow.org/datasets/nsynth) -- [OnAir-Music-Dataset](https://github.com/sevagh/OnAir-Music-Dataset) -- [Audio Piano Triads Dataset](https://zenodo.org/records/4740877) -- [Pitch Audio Dataset (Surge synthesizer)](https://zenodo.org/records/4677097) -- [SONYC Urban Sound Tagging (SONYC-UST): a multilabel dataset from an urban acoustic sensor network](https://zenodo.org/records/3966543) -- [VocalSound: A Dataset for Improving Human Vocal Sounds Recognition](https://arxiv.org/abs/2205.03433) -- [WavText5K](https://github.com/microsoft/WavText5K) -- [CSS10: A Collection of Single Speaker Speech Datasets for 10 Languages](https://github.com/Kyubyong/css10) -- [Hi-Fi Multi-Speaker English TTS Dataset (Hi-Fi TTS)](https://www.openslr.org/109/) -- [IIIT-H Indic Speech Databases](http://festvox.org/databases/iiit_voices/) -- [Libri-Light: A Benchmark for ASR with Limited or No Supervision](https://arxiv.org/abs/1912.07875) -- [LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech](https://www.openslr.org/60) -- [LibriTTS-R: A Restored Multi-Speaker Text-to-Speech Corpus](https://www.openslr.org/141/) -- [The SIWIS French Speech Synthesis Database](https://datashare.ed.ac.uk/handle/10283/2353) -- [Crowdsourced high-quality Colombian Spanish speech data set](https://openslr.org/72/) -- [TTS-Portuguese Corpus](https://github.com/Edresson/TTS-Portuguese-Corpus) -- [CSTR VCTK Corpus: English Multi-speaker Corpus for CSTR Voice Cloning Toolkit](https://datashare.ed.ac.uk/handle/10283/3443) - -\*\* Data Collection Method by dataset
- -- Human
- -\*\* Labeling Method by dataset (for those with labels)
- -- Hybrid: Automated, Human, Unknown
- -### Evaluating Dataset: - -Properties: The audio generation quality of BigVGAN is evaluated using the `dev` splits of the [LibriTTS dataset](https://www.openslr.org/60/) and the [Hi-Fi TTS dataset](https://www.openslr.org/109/). The datasets contain English speech with an equal balance of genders. - -\*\* Data Collection Method by dataset
- -- Human
- -\*\* Labeling Method by dataset
- -- Automated
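For illustration, a wideband PESQ score for one reference/generated pair might be computed as follows (file paths are hypothetical; this mirrors the resample-to-16 kHz-then-score pattern used in the training script removed later in this patch):

```python
import librosa
from pesq import pesq

# Hypothetical paths; wideband PESQ expects 16 kHz mono signals.
ref, _ = librosa.load("reference.wav", sr=16000, mono=True)
deg, _ = librosa.load("generated.wav", sr=16000, mono=True)

score = pesq(16000, ref, deg, "wb")  # higher is better, roughly 1.0 to 4.64
print(f"PESQ (wb): {score:.3f}")
```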
- -## Inference: - -**Engine:** PyTorch
-**Test Hardware:** NVIDIA A100 GPU
- -## Ethical Considerations: - -NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse. For more detailed information on ethical considerations for this model, please see the Model Card++ Explainability, Bias, Safety & Security, and Privacy Subcards. Please report security vulnerabilities or NVIDIA AI Concerns [here](https://www.nvidia.com/en-us/support/submit-security-vulnerability/). diff --git a/GPT_SoVITS/BigVGAN/nv-modelcard++/privacy.md b/GPT_SoVITS/BigVGAN/nv-modelcard++/privacy.md deleted file mode 100644 index 73554a99..00000000 --- a/GPT_SoVITS/BigVGAN/nv-modelcard++/privacy.md +++ /dev/null @@ -1,14 +0,0 @@ -| Field | Response | -| :------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------- | -| Generatable or reverse engineerable personal information? | None | -| Protected class data used to create this model? | None | -| Was consent obtained for any personal data used? | Not Applicable (No Personal Data) | -| How often is dataset reviewed? | Before Release | -| Is a mechanism in place to honor data subject right of access or deletion of personal data? | Not Applicable | -| If personal collected for the development of the model, was it collected directly by NVIDIA? | Not Applicable | -| If personal collected for the development of the model by NVIDIA, do you maintain or have access to disclosures made to data subjects? | Not Applicable | -| If personal collected for the development of this AI model, was it minimized to only what was required? | Not Applicable | -| Is data in dataset traceable? | Yes | -| Is there provenance for all datasets used in training? | Yes | -| Does data labeling (annotation, metadata) comply with privacy laws? | Yes | -| Is data compliant with data subject requests for data correction or removal, if such a request was made? | No, not possible with externally-sourced data. | diff --git a/GPT_SoVITS/BigVGAN/nv-modelcard++/safety.md b/GPT_SoVITS/BigVGAN/nv-modelcard++/safety.md deleted file mode 100644 index ed30370d..00000000 --- a/GPT_SoVITS/BigVGAN/nv-modelcard++/safety.md +++ /dev/null @@ -1,6 +0,0 @@ -| Field | Response | -| :---------------------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Model Application(s): | Synethic Audio Generation | -| Describe the life critical impact (if present). | Not Applicable | -| Use Case Restrictions: | None | -| Model and dataset restrictions: | The Principle of least privilege (PoLP) is applied limiting access for dataset generation and model development. Restrictions enforce dataset access during training, and dataset license constraints adhered to. 
| diff --git a/GPT_SoVITS/BigVGAN/requirements.txt b/GPT_SoVITS/BigVGAN/requirements.txt deleted file mode 100644 index 6e61d320..00000000 --- a/GPT_SoVITS/BigVGAN/requirements.txt +++ /dev/null @@ -1,13 +0,0 @@ -torch -numpy -librosa>=0.8.1 -scipy -tensorboard -soundfile -matplotlib -pesq -auraloss -tqdm -nnAudio -ninja -huggingface_hub>=0.23.4 \ No newline at end of file diff --git a/GPT_SoVITS/BigVGAN/tests/test_activation.py b/GPT_SoVITS/BigVGAN/tests/test_activation.py deleted file mode 100644 index 41348835..00000000 --- a/GPT_SoVITS/BigVGAN/tests/test_activation.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2024 NVIDIA CORPORATION. -# Licensed under the MIT license. - -import os -import sys - -# to import modules from parent_dir -parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -sys.path.append(parent_dir) - -import torch -from alias_free_activation.cuda import activation1d -from activations import Snake - - -def test_load_fused_kernels(): - try: - print("[Success] load_fused_kernels") - except ImportError as e: - print("[Fail] load_fused_kernels") - raise e - - -def test_anti_alias_activation(): - data = torch.rand((10, 10, 200), device="cuda") - - # Check activations.Snake cuda vs. torch - fused_anti_alias_activation = activation1d.Activation1d(activation=Snake(10), fused=True).cuda() - fused_activation_output = fused_anti_alias_activation(data) - - torch_anti_alias_activation = activation1d.Activation1d(activation=Snake(10), fused=False).cuda() - torch_activation_output = torch_anti_alias_activation(data) - - test_result = (fused_activation_output - torch_activation_output).abs() - - while test_result.dim() != 1: - test_result = test_result.mean(dim=-1) - - diff = test_result.mean(dim=-1) - - if diff <= 1e-3: - print( - f"\n[Success] test_fused_anti_alias_activation" - f"\n > mean_difference={diff}" - f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}" - f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}" - ) - else: - print( - f"\n[Fail] test_fused_anti_alias_activation" - f"\n > mean_difference={diff}, " - f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}, " - f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}" - ) - - -if __name__ == "__main__": - from alias_free_activation.cuda import load - - load.load() - test_load_fused_kernels() - test_anti_alias_activation() diff --git a/GPT_SoVITS/BigVGAN/tests/test_activation_snake_beta.py b/GPT_SoVITS/BigVGAN/tests/test_activation_snake_beta.py deleted file mode 100644 index 4cc46b98..00000000 --- a/GPT_SoVITS/BigVGAN/tests/test_activation_snake_beta.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2024 NVIDIA CORPORATION. -# Licensed under the MIT license. - -import os -import sys - -# to import modules from parent_dir -parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -sys.path.append(parent_dir) - -import torch -from alias_free_activation.cuda import activation1d -from activations import SnakeBeta - - -def test_load_fused_kernels(): - try: - print("[Success] load_fused_kernels") - except ImportError as e: - print("[Fail] load_fused_kernels") - raise e - - -def test_anti_alias_activation(): - data = torch.rand((10, 10, 200), device="cuda") - - # Check activations, Snake CUDA vs. 
Torch - fused_anti_alias_activation = activation1d.Activation1d(activation=SnakeBeta(10), fused=True).cuda() - fused_activation_output = fused_anti_alias_activation(data) - - torch_anti_alias_activation = activation1d.Activation1d(activation=SnakeBeta(10), fused=False).cuda() - torch_activation_output = torch_anti_alias_activation(data) - - test_result = (fused_activation_output - torch_activation_output).abs() - - while test_result.dim() != 1: - test_result = test_result.mean(dim=-1) - - diff = test_result.mean(dim=-1) - - if diff <= 1e-3: - print( - f"\n[Success] test_fused_anti_alias_activation" - f"\n > mean_difference={diff}" - f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}" - f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}" - ) - else: - print( - f"\n[Fail] test_fused_anti_alias_activation" - f"\n > mean_difference={diff}, " - f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}, " - f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}" - ) - - -if __name__ == "__main__": - from alias_free_activation.cuda import load - - load.load() - test_load_fused_kernels() - test_anti_alias_activation() diff --git a/GPT_SoVITS/BigVGAN/tests/test_cuda_vs_torch_model.py b/GPT_SoVITS/BigVGAN/tests/test_cuda_vs_torch_model.py deleted file mode 100644 index 8ddb29e5..00000000 --- a/GPT_SoVITS/BigVGAN/tests/test_cuda_vs_torch_model.py +++ /dev/null @@ -1,215 +0,0 @@ -# Copyright (c) 2024 NVIDIA CORPORATION. -# Licensed under the MIT license. - -import os -import sys - -# to import modules from parent_dir -parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -sys.path.append(parent_dir) - -import torch -import json -from env import AttrDict -from bigvgan import BigVGAN -from time import time -from tqdm import tqdm -from meldataset import mel_spectrogram, MAX_WAV_VALUE -from scipy.io.wavfile import write -import numpy as np - -import argparse - -torch.backends.cudnn.benchmark = True - -# For easier debugging -torch.set_printoptions(linewidth=200, threshold=10_000) - - -def generate_soundwave(duration=5.0, sr=24000): - t = np.linspace(0, duration, int(sr * duration), False, dtype=np.float32) - - modulation = np.sin(2 * np.pi * t / duration) - - min_freq = 220 - max_freq = 1760 - frequencies = min_freq + (max_freq - min_freq) * (modulation + 1) / 2 - soundwave = np.sin(2 * np.pi * frequencies * t) - - soundwave = soundwave / np.max(np.abs(soundwave)) * 0.95 - - return soundwave, sr - - -def get_mel(x, h): - return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax) - - -def load_checkpoint(filepath, device): - assert os.path.isfile(filepath) - print(f"Loading '{filepath}'") - checkpoint_dict = torch.load(filepath, map_location=device) - print("Complete.") - return checkpoint_dict - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Test script to check CUDA kernel correctness.") - parser.add_argument( - "--checkpoint_file", - type=str, - required=True, - help="Path to the checkpoint file. 
Assumes config.json exists in the directory.", - ) - - args = parser.parse_args() - - config_file = os.path.join(os.path.split(args.checkpoint_file)[0], "config.json") - with open(config_file) as f: - config = f.read() - json_config = json.loads(config) - h = AttrDict({**json_config}) - - print("loading plain Pytorch BigVGAN") - generator_original = BigVGAN(h).to("cuda") - print("loading CUDA kernel BigVGAN with auto-build") - generator_cuda_kernel = BigVGAN(h, use_cuda_kernel=True).to("cuda") - - state_dict_g = load_checkpoint(args.checkpoint_file, "cuda") - generator_original.load_state_dict(state_dict_g["generator"]) - generator_cuda_kernel.load_state_dict(state_dict_g["generator"]) - - generator_original.remove_weight_norm() - generator_original.eval() - generator_cuda_kernel.remove_weight_norm() - generator_cuda_kernel.eval() - - # define number of samples and length of mel frame to benchmark - num_sample = 10 - num_mel_frame = 16384 - - # CUDA kernel correctness check - diff = 0.0 - for i in tqdm(range(num_sample)): - # Random mel - data = torch.rand((1, h.num_mels, num_mel_frame), device="cuda") - - with torch.inference_mode(): - audio_original = generator_original(data) - - with torch.inference_mode(): - audio_cuda_kernel = generator_cuda_kernel(data) - - # Both outputs should be (almost) the same - test_result = (audio_original - audio_cuda_kernel).abs() - diff += test_result.mean(dim=-1).item() - - diff /= num_sample - if diff <= 2e-3: # We can expect a small difference (~1e-3) which does not affect perceptual quality - print( - f"\n[Success] test CUDA fused vs. plain torch BigVGAN inference" - f"\n > mean_difference={diff}" - f"\n > fused_values={audio_cuda_kernel[-1][-1][-30:].tolist()}" - f"\n > torch_values={audio_original[-1][-1][-30:].tolist()}" - ) - else: - print( - f"\n[Fail] test CUDA fused vs. 
plain torch BigVGAN inference" - f"\n > mean_difference={diff}" - f"\n > fused_values={audio_cuda_kernel[-1][-1][-30:].tolist()}, " - f"\n > torch_values={audio_original[-1][-1][-30:].tolist()}" - ) - - del data, audio_original, audio_cuda_kernel - - # Variables for tracking total time and VRAM usage - toc_total_original = 0 - toc_total_cuda_kernel = 0 - vram_used_original_total = 0 - vram_used_cuda_kernel_total = 0 - audio_length_total = 0 - - # Measure Original inference in isolation - for i in tqdm(range(num_sample)): - torch.cuda.reset_peak_memory_stats(device="cuda") - data = torch.rand((1, h.num_mels, num_mel_frame), device="cuda") - torch.cuda.synchronize() - tic = time() - with torch.inference_mode(): - audio_original = generator_original(data) - torch.cuda.synchronize() - toc = time() - tic - toc_total_original += toc - - vram_used_original_total += torch.cuda.max_memory_allocated(device="cuda") - - del data, audio_original - torch.cuda.empty_cache() - - # Measure CUDA kernel inference in isolation - for i in tqdm(range(num_sample)): - torch.cuda.reset_peak_memory_stats(device="cuda") - data = torch.rand((1, h.num_mels, num_mel_frame), device="cuda") - torch.cuda.synchronize() - tic = time() - with torch.inference_mode(): - audio_cuda_kernel = generator_cuda_kernel(data) - torch.cuda.synchronize() - toc = time() - tic - toc_total_cuda_kernel += toc - - audio_length_total += audio_cuda_kernel.shape[-1] - - vram_used_cuda_kernel_total += torch.cuda.max_memory_allocated(device="cuda") - - del data, audio_cuda_kernel - torch.cuda.empty_cache() - - # Calculate metrics - audio_second = audio_length_total / h.sampling_rate - khz_original = audio_length_total / toc_total_original / 1000 - khz_cuda_kernel = audio_length_total / toc_total_cuda_kernel / 1000 - vram_used_original_gb = vram_used_original_total / num_sample / (1024**3) - vram_used_cuda_kernel_gb = vram_used_cuda_kernel_total / num_sample / (1024**3) - - # Print results - print( - f"Original BigVGAN: took {toc_total_original:.2f} seconds to generate {audio_second:.2f} seconds of audio, {khz_original:.1f}kHz, {audio_second / toc_total_original:.1f} faster than realtime, VRAM used {vram_used_original_gb:.1f} GB" - ) - print( - f"CUDA kernel BigVGAN: took {toc_total_cuda_kernel:.2f} seconds to generate {audio_second:.2f} seconds of audio, {khz_cuda_kernel:.1f}kHz, {audio_second / toc_total_cuda_kernel:.1f} faster than realtime, VRAM used {vram_used_cuda_kernel_gb:.1f} GB" - ) - print(f"speedup of CUDA kernel: {khz_cuda_kernel / khz_original}") - print(f"VRAM saving of CUDA kernel: {vram_used_original_gb / vram_used_cuda_kernel_gb}") - - # Use artificial sine waves for inference test - audio_real, sr = generate_soundwave(duration=5.0, sr=h.sampling_rate) - audio_real = torch.tensor(audio_real).to("cuda") - # Compute mel spectrogram from the ground truth audio - x = get_mel(audio_real.unsqueeze(0), h) - - with torch.inference_mode(): - y_g_hat_original = generator_original(x) - y_g_hat_cuda_kernel = generator_cuda_kernel(x) - - audio_real = audio_real.squeeze() - audio_real = audio_real * MAX_WAV_VALUE - audio_real = audio_real.cpu().numpy().astype("int16") - - audio_original = y_g_hat_original.squeeze() - audio_original = audio_original * MAX_WAV_VALUE - audio_original = audio_original.cpu().numpy().astype("int16") - - audio_cuda_kernel = y_g_hat_cuda_kernel.squeeze() - audio_cuda_kernel = audio_cuda_kernel * MAX_WAV_VALUE - audio_cuda_kernel = audio_cuda_kernel.cpu().numpy().astype("int16") - - os.makedirs("tmp", exist_ok=True) - 
output_file_real = os.path.join("tmp", "audio_real.wav") - output_file_original = os.path.join("tmp", "audio_generated_original.wav") - output_file_cuda_kernel = os.path.join("tmp", "audio_generated_cuda_kernel.wav") - write(output_file_real, h.sampling_rate, audio_real) - write(output_file_original, h.sampling_rate, audio_original) - write(output_file_cuda_kernel, h.sampling_rate, audio_cuda_kernel) - print("Example generated audios of original vs. fused CUDA kernel written to tmp!") - print("Done") diff --git a/GPT_SoVITS/BigVGAN/train.py b/GPT_SoVITS/BigVGAN/train.py deleted file mode 100644 index 39718cdb..00000000 --- a/GPT_SoVITS/BigVGAN/train.py +++ /dev/null @@ -1,716 +0,0 @@ -# Copyright (c) 2024 NVIDIA CORPORATION. -# Licensed under the MIT license. - -# Adapted from https://github.com/jik876/hifi-gan under the MIT license. -# LICENSE is in incl_licenses directory. - - -import warnings - -warnings.simplefilter(action="ignore", category=FutureWarning) -import itertools -import os -import time -import argparse -import json -import torch -import torch.nn.functional as F -from torch.utils.tensorboard import SummaryWriter -from torch.utils.data import DistributedSampler, DataLoader -import torch.multiprocessing as mp -from torch.distributed import init_process_group -from torch.nn.parallel import DistributedDataParallel -from env import AttrDict, build_env -from meldataset import MelDataset, mel_spectrogram, get_dataset_filelist, MAX_WAV_VALUE - -from bigvgan import BigVGAN -from discriminators import ( - MultiPeriodDiscriminator, - MultiResolutionDiscriminator, - MultiBandDiscriminator, - MultiScaleSubbandCQTDiscriminator, -) -from loss import ( - feature_loss, - generator_loss, - discriminator_loss, - MultiScaleMelSpectrogramLoss, -) - -from utils import ( - plot_spectrogram, - plot_spectrogram_clipped, - scan_checkpoint, - load_checkpoint, - save_checkpoint, - save_audio, -) -import torchaudio as ta -from pesq import pesq -from tqdm import tqdm -import auraloss - -torch.backends.cudnn.benchmark = False - - -def train(rank, a, h): - if h.num_gpus > 1: - # initialize distributed - init_process_group( - backend=h.dist_config["dist_backend"], - init_method=h.dist_config["dist_url"], - world_size=h.dist_config["world_size"] * h.num_gpus, - rank=rank, - ) - - # Set seed and device - torch.cuda.manual_seed(h.seed) - torch.cuda.set_device(rank) - device = torch.device(f"cuda:{rank:d}") - - # Define BigVGAN generator - generator = BigVGAN(h).to(device) - - # Define discriminators. MPD is used by default - mpd = MultiPeriodDiscriminator(h).to(device) - - # Define additional discriminators. 
BigVGAN-v1 uses UnivNet's MRD as default - # New in BigVGAN-v2: option to switch to new discriminators: MultiBandDiscriminator / MultiScaleSubbandCQTDiscriminator - if h.get("use_mbd_instead_of_mrd", False): # Switch to MBD - print("[INFO] using MultiBandDiscriminator of BigVGAN-v2 instead of MultiResolutionDiscriminator") - # Variable name is kept as "mrd" for backward compatibility & minimal code change - mrd = MultiBandDiscriminator(h).to(device) - elif h.get("use_cqtd_instead_of_mrd", False): # Switch to CQTD - print("[INFO] using MultiScaleSubbandCQTDiscriminator of BigVGAN-v2 instead of MultiResolutionDiscriminator") - mrd = MultiScaleSubbandCQTDiscriminator(h).to(device) - else: # Fallback to original MRD in BigVGAN-v1 - mrd = MultiResolutionDiscriminator(h).to(device) - - # New in BigVGAN-v2: option to switch to multi-scale L1 mel loss - if h.get("use_multiscale_melloss", False): - print("[INFO] using multi-scale Mel l1 loss of BigVGAN-v2 instead of the original single-scale loss") - fn_mel_loss_multiscale = MultiScaleMelSpectrogramLoss( - sampling_rate=h.sampling_rate - ) # NOTE: accepts waveform as input - else: - fn_mel_loss_singlescale = F.l1_loss - - # Print the model & number of parameters, and create or scan the latest checkpoint from checkpoints directory - if rank == 0: - print(generator) - print(mpd) - print(mrd) - print(f"Generator params: {sum(p.numel() for p in generator.parameters())}") - print(f"Discriminator mpd params: {sum(p.numel() for p in mpd.parameters())}") - print(f"Discriminator mrd params: {sum(p.numel() for p in mrd.parameters())}") - os.makedirs(a.checkpoint_path, exist_ok=True) - print(f"Checkpoints directory: {a.checkpoint_path}") - - if os.path.isdir(a.checkpoint_path): - # New in v2.1: If the step prefix pattern-based checkpoints are not found, also check for renamed files in Hugging Face Hub to resume training - cp_g = scan_checkpoint(a.checkpoint_path, prefix="g_", renamed_file="bigvgan_generator.pt") - cp_do = scan_checkpoint( - a.checkpoint_path, - prefix="do_", - renamed_file="bigvgan_discriminator_optimizer.pt", - ) - - # Load the latest checkpoint if exists - steps = 0 - if cp_g is None or cp_do is None: - state_dict_do = None - last_epoch = -1 - else: - state_dict_g = load_checkpoint(cp_g, device) - state_dict_do = load_checkpoint(cp_do, device) - generator.load_state_dict(state_dict_g["generator"]) - mpd.load_state_dict(state_dict_do["mpd"]) - mrd.load_state_dict(state_dict_do["mrd"]) - steps = state_dict_do["steps"] + 1 - last_epoch = state_dict_do["epoch"] - - # Initialize DDP, optimizers, and schedulers - if h.num_gpus > 1: - generator = DistributedDataParallel(generator, device_ids=[rank]).to(device) - mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device) - mrd = DistributedDataParallel(mrd, device_ids=[rank]).to(device) - - optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2]) - optim_d = torch.optim.AdamW( - itertools.chain(mrd.parameters(), mpd.parameters()), - h.learning_rate, - betas=[h.adam_b1, h.adam_b2], - ) - - if state_dict_do is not None: - optim_g.load_state_dict(state_dict_do["optim_g"]) - optim_d.load_state_dict(state_dict_do["optim_d"]) - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch) - - # Define training and validation datasets - - """ - unseen_validation_filelist will contain sample filepaths outside the 
seen training & validation dataset - Example: trained on LibriTTS, validate on VCTK - """ - training_filelist, validation_filelist, list_unseen_validation_filelist = get_dataset_filelist(a) - - trainset = MelDataset( - training_filelist, - h, - h.segment_size, - h.n_fft, - h.num_mels, - h.hop_size, - h.win_size, - h.sampling_rate, - h.fmin, - h.fmax, - shuffle=False if h.num_gpus > 1 else True, - fmax_loss=h.fmax_for_loss, - device=device, - fine_tuning=a.fine_tuning, - base_mels_path=a.input_mels_dir, - is_seen=True, - ) - - train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None - - train_loader = DataLoader( - trainset, - num_workers=h.num_workers, - shuffle=False, - sampler=train_sampler, - batch_size=h.batch_size, - pin_memory=True, - drop_last=True, - ) - - if rank == 0: - validset = MelDataset( - validation_filelist, - h, - h.segment_size, - h.n_fft, - h.num_mels, - h.hop_size, - h.win_size, - h.sampling_rate, - h.fmin, - h.fmax, - False, - False, - fmax_loss=h.fmax_for_loss, - device=device, - fine_tuning=a.fine_tuning, - base_mels_path=a.input_mels_dir, - is_seen=True, - ) - validation_loader = DataLoader( - validset, - num_workers=1, - shuffle=False, - sampler=None, - batch_size=1, - pin_memory=True, - drop_last=True, - ) - - list_unseen_validset = [] - list_unseen_validation_loader = [] - for i in range(len(list_unseen_validation_filelist)): - unseen_validset = MelDataset( - list_unseen_validation_filelist[i], - h, - h.segment_size, - h.n_fft, - h.num_mels, - h.hop_size, - h.win_size, - h.sampling_rate, - h.fmin, - h.fmax, - False, - False, - fmax_loss=h.fmax_for_loss, - device=device, - fine_tuning=a.fine_tuning, - base_mels_path=a.input_mels_dir, - is_seen=False, - ) - unseen_validation_loader = DataLoader( - unseen_validset, - num_workers=1, - shuffle=False, - sampler=None, - batch_size=1, - pin_memory=True, - drop_last=True, - ) - list_unseen_validset.append(unseen_validset) - list_unseen_validation_loader.append(unseen_validation_loader) - - # Tensorboard logger - sw = SummaryWriter(os.path.join(a.checkpoint_path, "logs")) - if a.save_audio: # Also save audio to disk if --save_audio is set to True - os.makedirs(os.path.join(a.checkpoint_path, "samples"), exist_ok=True) - - """ - Validation loop, "mode" parameter is automatically defined as (seen or unseen)_(name of the dataset). 
- If the name of the dataset contains "nonspeech", it skips PESQ calculation to prevent errors - """ - - def validate(rank, a, h, loader, mode="seen"): - assert rank == 0, "validate should only run on rank=0" - generator.eval() - torch.cuda.empty_cache() - - val_err_tot = 0 - val_pesq_tot = 0 - val_mrstft_tot = 0 - - # Modules for evaluation metrics - pesq_resampler = ta.transforms.Resample(h.sampling_rate, 16000).cuda() - loss_mrstft = auraloss.freq.MultiResolutionSTFTLoss(device="cuda") - - if a.save_audio: # Also save audio to disk if --save_audio is set to True - os.makedirs( - os.path.join(a.checkpoint_path, "samples", f"gt_{mode}"), - exist_ok=True, - ) - os.makedirs( - os.path.join(a.checkpoint_path, "samples", f"{mode}_{steps:08d}"), - exist_ok=True, - ) - - with torch.no_grad(): - print(f"step {steps} {mode} speaker validation...") - - # Loop over validation set and compute metrics - for j, batch in enumerate(tqdm(loader)): - x, y, _, y_mel = batch - y = y.to(device) - if hasattr(generator, "module"): - y_g_hat = generator.module(x.to(device)) - else: - y_g_hat = generator(x.to(device)) - y_mel = y_mel.to(device, non_blocking=True) - y_g_hat_mel = mel_spectrogram( - y_g_hat.squeeze(1), - h.n_fft, - h.num_mels, - h.sampling_rate, - h.hop_size, - h.win_size, - h.fmin, - h.fmax_for_loss, - ) - min_t = min(y_mel.size(-1), y_g_hat_mel.size(-1)) - val_err_tot += F.l1_loss(y_mel[..., :min_t], y_g_hat_mel[..., :min_t]).item() - - # PESQ calculation. only evaluate PESQ if it's speech signal (nonspeech PESQ will error out) - if "nonspeech" not in mode: # Skips if the name of dataset (in mode string) contains "nonspeech" - # Resample to 16000 for pesq - y_16k = pesq_resampler(y) - y_g_hat_16k = pesq_resampler(y_g_hat.squeeze(1)) - y_int_16k = (y_16k[0] * MAX_WAV_VALUE).short().cpu().numpy() - y_g_hat_int_16k = (y_g_hat_16k[0] * MAX_WAV_VALUE).short().cpu().numpy() - val_pesq_tot += pesq(16000, y_int_16k, y_g_hat_int_16k, "wb") - - # MRSTFT calculation - min_t = min(y.size(-1), y_g_hat.size(-1)) - val_mrstft_tot += loss_mrstft(y_g_hat[..., :min_t], y[..., :min_t]).item() - - # Log audio and figures to Tensorboard - if j % a.eval_subsample == 0: # Subsample every nth from validation set - if steps >= 0: - sw.add_audio(f"gt_{mode}/y_{j}", y[0], steps, h.sampling_rate) - if a.save_audio: # Also save audio to disk if --save_audio is set to True - save_audio( - y[0], - os.path.join( - a.checkpoint_path, - "samples", - f"gt_{mode}", - f"{j:04d}.wav", - ), - h.sampling_rate, - ) - sw.add_figure( - f"gt_{mode}/y_spec_{j}", - plot_spectrogram(x[0]), - steps, - ) - - sw.add_audio( - f"generated_{mode}/y_hat_{j}", - y_g_hat[0], - steps, - h.sampling_rate, - ) - if a.save_audio: # Also save audio to disk if --save_audio is set to True - save_audio( - y_g_hat[0, 0], - os.path.join( - a.checkpoint_path, - "samples", - f"{mode}_{steps:08d}", - f"{j:04d}.wav", - ), - h.sampling_rate, - ) - # Spectrogram of synthesized audio - y_hat_spec = mel_spectrogram( - y_g_hat.squeeze(1), - h.n_fft, - h.num_mels, - h.sampling_rate, - h.hop_size, - h.win_size, - h.fmin, - h.fmax, - ) - sw.add_figure( - f"generated_{mode}/y_hat_spec_{j}", - plot_spectrogram(y_hat_spec.squeeze(0).cpu().numpy()), - steps, - ) - - """ - Visualization of spectrogram difference between GT and synthesized audio, difference higher than 1 is clipped for better visualization. 
- """ - spec_delta = torch.clamp( - torch.abs(x[0] - y_hat_spec.squeeze(0).cpu()), - min=1e-6, - max=1.0, - ) - sw.add_figure( - f"delta_dclip1_{mode}/spec_{j}", - plot_spectrogram_clipped(spec_delta.numpy(), clip_max=1.0), - steps, - ) - - val_err = val_err_tot / (j + 1) - val_pesq = val_pesq_tot / (j + 1) - val_mrstft = val_mrstft_tot / (j + 1) - # Log evaluation metrics to Tensorboard - sw.add_scalar(f"validation_{mode}/mel_spec_error", val_err, steps) - sw.add_scalar(f"validation_{mode}/pesq", val_pesq, steps) - sw.add_scalar(f"validation_{mode}/mrstft", val_mrstft, steps) - - generator.train() - - # If the checkpoint is loaded, start with validation loop - if steps != 0 and rank == 0 and not a.debug: - if not a.skip_seen: - validate( - rank, - a, - h, - validation_loader, - mode=f"seen_{train_loader.dataset.name}", - ) - for i in range(len(list_unseen_validation_loader)): - validate( - rank, - a, - h, - list_unseen_validation_loader[i], - mode=f"unseen_{list_unseen_validation_loader[i].dataset.name}", - ) - # Exit the script if --evaluate is set to True - if a.evaluate: - exit() - - # Main training loop - generator.train() - mpd.train() - mrd.train() - for epoch in range(max(0, last_epoch), a.training_epochs): - if rank == 0: - start = time.time() - print(f"Epoch: {epoch + 1}") - - if h.num_gpus > 1: - train_sampler.set_epoch(epoch) - - for i, batch in enumerate(train_loader): - if rank == 0: - start_b = time.time() - x, y, _, y_mel = batch - - x = x.to(device, non_blocking=True) - y = y.to(device, non_blocking=True) - y_mel = y_mel.to(device, non_blocking=True) - y = y.unsqueeze(1) - - y_g_hat = generator(x) - y_g_hat_mel = mel_spectrogram( - y_g_hat.squeeze(1), - h.n_fft, - h.num_mels, - h.sampling_rate, - h.hop_size, - h.win_size, - h.fmin, - h.fmax_for_loss, - ) - - optim_d.zero_grad() - - # MPD - y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach()) - loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss(y_df_hat_r, y_df_hat_g) - - # MRD - y_ds_hat_r, y_ds_hat_g, _, _ = mrd(y, y_g_hat.detach()) - loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss(y_ds_hat_r, y_ds_hat_g) - - loss_disc_all = loss_disc_s + loss_disc_f - - # Set clip_grad_norm value - clip_grad_norm = h.get("clip_grad_norm", 1000.0) # Default to 1000 - - # Whether to freeze D for initial training steps - if steps >= a.freeze_step: - loss_disc_all.backward() - grad_norm_mpd = torch.nn.utils.clip_grad_norm_(mpd.parameters(), clip_grad_norm) - grad_norm_mrd = torch.nn.utils.clip_grad_norm_(mrd.parameters(), clip_grad_norm) - optim_d.step() - else: - print(f"[WARNING] skipping D training for the first {a.freeze_step} steps") - grad_norm_mpd = 0.0 - grad_norm_mrd = 0.0 - - # Generator - optim_g.zero_grad() - - # L1 Mel-Spectrogram Loss - lambda_melloss = h.get("lambda_melloss", 45.0) # Defaults to 45 in BigVGAN-v1 if not set - if h.get("use_multiscale_melloss", False): # uses wav for loss - loss_mel = fn_mel_loss_multiscale(y, y_g_hat) * lambda_melloss - else: # Uses mel for loss - loss_mel = fn_mel_loss_singlescale(y_mel, y_g_hat_mel) * lambda_melloss - - # MPD loss - y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat) - loss_fm_f = feature_loss(fmap_f_r, fmap_f_g) - loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g) - - # MRD loss - y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = mrd(y, y_g_hat) - loss_fm_s = feature_loss(fmap_s_r, fmap_s_g) - loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g) - - if steps >= a.freeze_step: - loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + 
loss_fm_f + loss_mel - else: - print(f"[WARNING] using regression loss only for G for the first {a.freeze_step} steps") - loss_gen_all = loss_mel - - loss_gen_all.backward() - grad_norm_g = torch.nn.utils.clip_grad_norm_(generator.parameters(), clip_grad_norm) - optim_g.step() - - if rank == 0: - # STDOUT logging - if steps % a.stdout_interval == 0: - mel_error = loss_mel.item() / lambda_melloss # Log training mel regression loss to stdout - print( - f"Steps: {steps:d}, " - f"Gen Loss Total: {loss_gen_all:4.3f}, " - f"Mel Error: {mel_error:4.3f}, " - f"s/b: {time.time() - start_b:4.3f} " - f"lr: {optim_g.param_groups[0]['lr']:4.7f} " - f"grad_norm_g: {grad_norm_g:4.3f}" - ) - - # Checkpointing - if steps % a.checkpoint_interval == 0 and steps != 0: - checkpoint_path = f"{a.checkpoint_path}/g_{steps:08d}" - save_checkpoint( - checkpoint_path, - {"generator": (generator.module if h.num_gpus > 1 else generator).state_dict()}, - ) - checkpoint_path = f"{a.checkpoint_path}/do_{steps:08d}" - save_checkpoint( - checkpoint_path, - { - "mpd": (mpd.module if h.num_gpus > 1 else mpd).state_dict(), - "mrd": (mrd.module if h.num_gpus > 1 else mrd).state_dict(), - "optim_g": optim_g.state_dict(), - "optim_d": optim_d.state_dict(), - "steps": steps, - "epoch": epoch, - }, - ) - - # Tensorboard summary logging - if steps % a.summary_interval == 0: - mel_error = loss_mel.item() / lambda_melloss # Log training mel regression loss to tensorboard - sw.add_scalar("training/gen_loss_total", loss_gen_all.item(), steps) - sw.add_scalar("training/mel_spec_error", mel_error, steps) - sw.add_scalar("training/fm_loss_mpd", loss_fm_f.item(), steps) - sw.add_scalar("training/gen_loss_mpd", loss_gen_f.item(), steps) - sw.add_scalar("training/disc_loss_mpd", loss_disc_f.item(), steps) - sw.add_scalar("training/grad_norm_mpd", grad_norm_mpd, steps) - sw.add_scalar("training/fm_loss_mrd", loss_fm_s.item(), steps) - sw.add_scalar("training/gen_loss_mrd", loss_gen_s.item(), steps) - sw.add_scalar("training/disc_loss_mrd", loss_disc_s.item(), steps) - sw.add_scalar("training/grad_norm_mrd", grad_norm_mrd, steps) - sw.add_scalar("training/grad_norm_g", grad_norm_g, steps) - sw.add_scalar("training/learning_rate_d", scheduler_d.get_last_lr()[0], steps) - sw.add_scalar("training/learning_rate_g", scheduler_g.get_last_lr()[0], steps) - sw.add_scalar("training/epoch", epoch + 1, steps) - - # Validation - if steps % a.validation_interval == 0: - # Plot training input x so far used - for i_x in range(x.shape[0]): - sw.add_figure( - f"training_input/x_{i_x}", - plot_spectrogram(x[i_x].cpu()), - steps, - ) - sw.add_audio( - f"training_input/y_{i_x}", - y[i_x][0], - steps, - h.sampling_rate, - ) - - # Seen and unseen speakers validation loops - if not a.debug and steps != 0: - validate( - rank, - a, - h, - validation_loader, - mode=f"seen_{train_loader.dataset.name}", - ) - for i in range(len(list_unseen_validation_loader)): - validate( - rank, - a, - h, - list_unseen_validation_loader[i], - mode=f"unseen_{list_unseen_validation_loader[i].dataset.name}", - ) - steps += 1 - - # BigVGAN-v2 learning rate scheduler is changed from epoch-level to step-level - scheduler_g.step() - scheduler_d.step() - - if rank == 0: - print(f"Time taken for epoch {epoch + 1} is {int(time.time() - start)} sec\n") - - -def main(): - print("Initializing Training Process..") - - parser = argparse.ArgumentParser() - - parser.add_argument("--group_name", default=None) - - parser.add_argument("--input_wavs_dir", default="LibriTTS") - 
parser.add_argument("--input_mels_dir", default="ft_dataset") - parser.add_argument("--input_training_file", default="tests/LibriTTS/train-full.txt") - parser.add_argument("--input_validation_file", default="tests/LibriTTS/val-full.txt") - - parser.add_argument( - "--list_input_unseen_wavs_dir", - nargs="+", - default=["tests/LibriTTS", "tests/LibriTTS"], - ) - parser.add_argument( - "--list_input_unseen_validation_file", - nargs="+", - default=["tests/LibriTTS/dev-clean.txt", "tests/LibriTTS/dev-other.txt"], - ) - - parser.add_argument("--checkpoint_path", default="exp/bigvgan") - parser.add_argument("--config", default="") - - parser.add_argument("--training_epochs", default=100000, type=int) - parser.add_argument("--stdout_interval", default=5, type=int) - parser.add_argument("--checkpoint_interval", default=50000, type=int) - parser.add_argument("--summary_interval", default=100, type=int) - parser.add_argument("--validation_interval", default=50000, type=int) - - parser.add_argument( - "--freeze_step", - default=0, - type=int, - help="freeze D for the first specified steps. G only uses regression loss for these steps.", - ) - - parser.add_argument("--fine_tuning", default=False, type=bool) - - parser.add_argument( - "--debug", - default=False, - type=bool, - help="debug mode. skips validation loop throughout training", - ) - parser.add_argument( - "--evaluate", - default=False, - type=bool, - help="only run evaluation from checkpoint and exit", - ) - parser.add_argument( - "--eval_subsample", - default=5, - type=int, - help="subsampling during evaluation loop", - ) - parser.add_argument( - "--skip_seen", - default=False, - type=bool, - help="skip seen dataset. useful for test set inference", - ) - parser.add_argument( - "--save_audio", - default=False, - type=bool, - help="save audio of test set inference to disk", - ) - - a = parser.parse_args() - - with open(a.config) as f: - data = f.read() - - json_config = json.loads(data) - h = AttrDict(json_config) - - build_env(a.config, "config.json", a.checkpoint_path) - - torch.manual_seed(h.seed) - if torch.cuda.is_available(): - torch.cuda.manual_seed(h.seed) - h.num_gpus = torch.cuda.device_count() - h.batch_size = int(h.batch_size / h.num_gpus) - print(f"Batch size per GPU: {h.batch_size}") - else: - pass - - if h.num_gpus > 1: - mp.spawn( - train, - nprocs=h.num_gpus, - args=( - a, - h, - ), - ) - else: - train(0, a, h) - - -if __name__ == "__main__": - main() diff --git a/GPT_SoVITS/BigVGAN/utils0.py b/GPT_SoVITS/BigVGAN/utils0.py index da98a24c..2d66aa17 100644 --- a/GPT_SoVITS/BigVGAN/utils0.py +++ b/GPT_SoVITS/BigVGAN/utils0.py @@ -3,44 +3,10 @@ import glob import os -import matplotlib + import torch from torch.nn.utils import weight_norm -matplotlib.use("Agg") -import matplotlib.pylab as plt -from .meldataset import MAX_WAV_VALUE -from scipy.io.wavfile import write - - -def plot_spectrogram(spectrogram): - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none") - plt.colorbar(im, ax=ax) - - fig.canvas.draw() - plt.close() - - return fig - - -def plot_spectrogram_clipped(spectrogram, clip_max=2.0): - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow( - spectrogram, - aspect="auto", - origin="lower", - interpolation="none", - vmin=1e-6, - vmax=clip_max, - ) - plt.colorbar(im, ax=ax) - - fig.canvas.draw() - plt.close() - - return fig - def init_weights(m, mean=0.0, std=0.01): classname = m.__class__.__name__ @@ -90,10 +56,3 @@ def scan_checkpoint(cp_dir, 
prefix, renamed_file=None): return renamed_path return None - - -def save_audio(audio, path, sr): - # wav: torch with 1d shape - audio = audio * MAX_WAV_VALUE - audio = audio.cpu().numpy().astype("int16") - write(path, sr, audio) diff --git a/GPT_SoVITS/TTS_infer_pack/TTS.py b/GPT_SoVITS/TTS_infer_pack/TTS.py index 0c1d2484..be2a4c64 100644 --- a/GPT_SoVITS/TTS_infer_pack/TTS.py +++ b/GPT_SoVITS/TTS_infer_pack/TTS.py @@ -2,17 +2,9 @@ import gc import math import os import random -import sys import time import traceback from copy import deepcopy - -import torchaudio -from tqdm import tqdm - -now_dir = os.getcwd() -sys.path.append(now_dir) -import os from typing import List, Tuple, Union import ffmpeg @@ -20,21 +12,26 @@ import librosa import numpy as np import torch import torch.nn.functional as F +import torchaudio import yaml -from AR.models.t2s_lightning_module import Text2SemanticLightningModule -from BigVGAN.bigvgan import BigVGAN -from feature_extractor.cnhubert import CNHubert -from module.mel_processing import mel_spectrogram_torch, spectrogram_torch -from module.models import SynthesizerTrn, SynthesizerTrnV3, Generator from peft import LoraConfig, get_peft_model -from process_ckpt import get_sovits_version_from_path_fast, load_sovits_new +from tqdm import tqdm from transformers import AutoModelForMaskedLM, AutoTokenizer +from GPT_SoVITS.AR.models.t2s_lightning_module import Text2SemanticLightningModule +from GPT_SoVITS.BigVGAN.bigvgan import BigVGAN +from GPT_SoVITS.feature_extractor.cnhubert import CNHubert +from GPT_SoVITS.module.mel_processing import mel_spectrogram_torch, spectrogram_torch +from GPT_SoVITS.module.models import Generator, SynthesizerTrn, SynthesizerTrnV3 +from GPT_SoVITS.process_ckpt import get_sovits_version_from_path_fast, load_sovits_new +from GPT_SoVITS.sv import SV +from GPT_SoVITS.TTS_infer_pack.text_segmentation_method import splits +from GPT_SoVITS.TTS_infer_pack.TextPreprocessor import TextPreprocessor from tools.audio_sr import AP_BWE -from tools.i18n.i18n import I18nAuto, scan_language_list -from TTS_infer_pack.text_segmentation_method import splits -from TTS_infer_pack.TextPreprocessor import TextPreprocessor -from sv import SV +from tools.i18n.i18n import I18nAuto +from tools.my_utils import DictToAttrRecursive + +now_dir = os.getcwd() resample_transform_dict = {} @@ -48,7 +45,6 @@ def resample(audio_tensor, sr0, sr1, device): language = os.environ.get("language", "Auto") -language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else language i18n = I18nAuto(language=language) @@ -64,33 +60,32 @@ def denorm_spec(x): return (x + 1) / 2 * (spec_max - spec_min) + spec_min -mel_fn = lambda x: mel_spectrogram_torch( - x, - **{ - "n_fft": 1024, - "win_size": 1024, - "hop_size": 256, - "num_mels": 100, - "sampling_rate": 24000, - "fmin": 0, - "fmax": None, - "center": False, - }, -) +def mel_fn(x): + return mel_spectrogram_torch( + y=x, + n_fft=1024, + num_mels=100, + sampling_rate=24000, + hop_size=256, + win_size=1024, + fmin=0, + fmax=None, + center=False, + ) -mel_fn_v4 = lambda x: mel_spectrogram_torch( - x, - **{ - "n_fft": 1280, - "win_size": 1280, - "hop_size": 320, - "num_mels": 100, - "sampling_rate": 32000, - "fmin": 0, - "fmax": None, - "center": False, - }, -) + +def mel_fn_v4(x): + return mel_spectrogram_torch( + y=x, + n_fft=1280, + num_mels=100, + sampling_rate=32000, + hop_size=320, + win_size=1280, + fmin=0, + fmax=None, + center=False, + ) def speed_change(input_audio: np.ndarray, speed: float, sr: int): @@ -114,34 +109,6 @@ def 
speed_change(input_audio: np.ndarray, speed: float, sr: int): return processed_audio -class DictToAttrRecursive(dict): - def __init__(self, input_dict): - super().__init__(input_dict) - for key, value in input_dict.items(): - if isinstance(value, dict): - value = DictToAttrRecursive(value) - self[key] = value - setattr(self, key, value) - - def __getattr__(self, item): - try: - return self[item] - except KeyError: - raise AttributeError(f"Attribute {item} not found") - - def __setattr__(self, key, value): - if isinstance(value, dict): - value = DictToAttrRecursive(value) - super(DictToAttrRecursive, self).__setitem__(key, value) - super().__setattr__(key, value) - - def __delattr__(self, item): - try: - del self[item] - except KeyError: - raise AttributeError(f"Attribute {item} not found") - - class NO_PROMPT_ERROR(Exception): pass @@ -316,7 +283,7 @@ class TTS_Config: self.is_half = self.configs.get("is_half", False) if str(self.device) == "cpu" and self.is_half: - print(f"Warning: Half precision is not supported on CPU, set is_half to False.") + print("Warning: Half precision is not supported on CPU, set is_half to False.") self.is_half = False version = self.configs.get("version", None) @@ -488,7 +455,7 @@ class TTS: self.init_sv_model() path_sovits = self.configs.default_configs[model_version]["vits_weights_path"] - if if_lora_v3 == True and os.path.exists(path_sovits) == False: + if if_lora_v3 is True and os.path.exists(path_sovits) is False: info = path_sovits + i18n("SoVITS %s 底模缺失,无法加载相应 LoRA 权重" % model_version) raise FileExistsError(info) @@ -549,7 +516,7 @@ class TTS: self.is_v2pro = model_version in {"v2Pro", "v2ProPlus"} - if if_lora_v3 == False: + if if_lora_v3 is False: print( f"Loading VITS weights from {weights_path}. {vits_model.load_state_dict(dict_s2['weight'], strict=False)}" ) @@ -580,8 +547,6 @@ class TTS: self.configs.save_configs() - - def init_t2s_weights(self, weights_path: str): print(f"Loading Text2Semantic weights from {weights_path}") self.configs.t2s_weights_path = weights_path @@ -654,7 +619,7 @@ class TTS: self.vocoder_configs["overlapped_len"] = 12 self.vocoder = self.vocoder.eval() - if self.configs.is_half == True: + if self.configs.is_half is True: self.vocoder = self.vocoder.half().to(self.configs.device) else: self.vocoder = self.vocoder.to(self.configs.device) @@ -756,19 +721,18 @@ class TTS: self.prompt_cache["refer_spec"][0] = spec_audio def _get_ref_spec(self, ref_audio_path): - raw_audio, raw_sr = torchaudio.load(ref_audio_path) - raw_audio = raw_audio.to(self.configs.device).float() + raw_audio, raw_sr = torchaudio.load_with_torchcodec(ref_audio_path) self.prompt_cache["raw_audio"] = raw_audio self.prompt_cache["raw_sr"] = raw_sr if raw_sr != self.configs.sampling_rate: audio = raw_audio.to(self.configs.device) - if audio.shape[0] == 2: + if audio.shape[0] > 1: audio = audio.mean(0).unsqueeze(0) audio = resample(audio, raw_sr, self.configs.sampling_rate, self.configs.device) else: audio = raw_audio.to(self.configs.device) - if audio.shape[0] == 2: + if audio.shape[0] > 1: audio = audio.mean(0).unsqueeze(0) maxx = audio.abs().max() @@ -784,7 +748,7 @@ class TTS: ) if self.configs.is_half: spec = spec.half() - if self.is_v2pro == True: + if self.is_v2pro is True: audio = resample(audio, self.configs.sampling_rate, 16000, self.configs.device) if self.configs.is_half: audio = audio.half() @@ -1235,7 +1199,7 @@ class TTS: spec = spec.to(dtype=self.precision, device=self.configs.device) refer_audio_spec.append(spec) if self.is_v2pro: - 
sv_emb.append(self.sv_model.compute_embedding3(audio_tensor)) + sv_emb.append(self.sv_model.compute_embedding(audio_tensor)) batch_audio_fragment = [] diff --git a/GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py b/GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py index 283e91c3..3f8dc930 100644 --- a/GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py +++ b/GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py @@ -1,22 +1,18 @@ import os +import re import sys import threading - -from tqdm import tqdm - -now_dir = os.getcwd() -sys.path.append(now_dir) - -import re -import torch -from text.LangSegmenter import LangSegmenter -from text import chinese from typing import Dict, List, Tuple -from text.cleaner import clean_text -from text import cleaned_text_to_sequence -from transformers import AutoModelForMaskedLM, AutoTokenizer -from TTS_infer_pack.text_segmentation_method import split_big_text, splits, get_method as get_seg_method +import torch +from tqdm import tqdm +from transformers import AutoModelForMaskedLM, AutoTokenizer + +from GPT_SoVITS.text import cleaned_text_to_sequence +from GPT_SoVITS.text.cleaner import clean_text +from GPT_SoVITS.text.LangSegmenter import LangSegmenter +from GPT_SoVITS.TTS_infer_pack.text_segmentation_method import get_method as get_seg_method +from GPT_SoVITS.TTS_infer_pack.text_segmentation_method import split_big_text, splits from tools.i18n.i18n import I18nAuto, scan_language_list language = os.environ.get("language", "Auto") @@ -121,25 +117,25 @@ class TextPreprocessor: def get_phones_and_bert(self, text: str, language: str, version: str, final: bool = False): with self.bert_lock: - text = re.sub(r' {2,}', ' ', text) + text = re.sub(r" {2,}", " ", text) textlist = [] langlist = [] if language == "all_zh": - for tmp in LangSegmenter.getTexts(text,"zh"): + for tmp in LangSegmenter.getTexts(text, "zh"): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "all_yue": - for tmp in LangSegmenter.getTexts(text,"zh"): + for tmp in LangSegmenter.getTexts(text, "zh"): if tmp["lang"] == "zh": tmp["lang"] = "yue" langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "all_ja": - for tmp in LangSegmenter.getTexts(text,"ja"): + for tmp in LangSegmenter.getTexts(text, "ja"): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "all_ko": - for tmp in LangSegmenter.getTexts(text,"ko"): + for tmp in LangSegmenter.getTexts(text, "ko"): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "en": @@ -158,7 +154,9 @@ class TextPreprocessor: else: for tmp in LangSegmenter.getTexts(text): if langlist: - if (tmp["lang"] == "en" and langlist[-1] == "en") or (tmp["lang"] != "en" and langlist[-1] != "en"): + if (tmp["lang"] == "en" and langlist[-1] == "en") or ( + tmp["lang"] != "en" and langlist[-1] != "en" + ): textlist[-1] += tmp["text"] continue if tmp["lang"] == "en": @@ -189,12 +187,11 @@ class TextPreprocessor: return phones, bert, norm_text def get_bert_feature(self, text: str, word2ph: list) -> torch.Tensor: - with torch.no_grad(): - inputs = self.tokenizer(text, return_tensors="pt") - for i in inputs: - inputs[i] = inputs[i].to(self.device) - res = self.bert_model(**inputs, output_hidden_states=True) - res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1] + inputs = self.tokenizer(text, return_tensors="pt") + for i in inputs: + inputs[i] = inputs[i].to(self.device) + res = self.bert_model(**inputs, output_hidden_states=True) + res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1] assert 
len(word2ph) == len(text) phone_level_feature = [] for i in range(len(word2ph)): @@ -209,6 +206,7 @@ class TextPreprocessor: phones = cleaned_text_to_sequence(phones, version) return phones, word2ph, norm_text + @torch.no_grad() def get_bert_inf(self, phones: list, word2ph: list, norm_text: str, language: str): language = language.replace("all_", "") if language == "zh": @@ -236,4 +234,4 @@ class TextPreprocessor: punctuations = "".join(re.escape(p) for p in punctuation) pattern = f"([{punctuations}])([{punctuations}])+" result = re.sub(pattern, r"\1", text) - return result \ No newline at end of file + return result diff --git a/GPT_SoVITS/TTS_infer_pack/__init__.py b/GPT_SoVITS/TTS_infer_pack/__init__.py index 8579a632..f0854b93 100644 --- a/GPT_SoVITS/TTS_infer_pack/__init__.py +++ b/GPT_SoVITS/TTS_infer_pack/__init__.py @@ -1 +1,3 @@ from . import TTS, text_segmentation_method + +__all__ = ["TTS", "text_segmentation_method"] diff --git a/GPT_SoVITS/TTS_infer_pack/text_segmentation_method.py b/GPT_SoVITS/TTS_infer_pack/text_segmentation_method.py index fda70a49..f7ec5da4 100644 --- a/GPT_SoVITS/TTS_infer_pack/text_segmentation_method.py +++ b/GPT_SoVITS/TTS_infer_pack/text_segmentation_method.py @@ -100,7 +100,7 @@ def cut0(inp): def cut1(inp): inp = inp.strip("\n") inps = split(inp) - split_idx = list(range(0, len(inps), 4)) + split_idx = list(range(0, len(inps) + 1, 4)) split_idx[-1] = None if len(split_idx) > 1: opts = [] diff --git a/GPT_SoVITS/download.py b/GPT_SoVITS/download.py deleted file mode 100644 index fc4ead63..00000000 --- a/GPT_SoVITS/download.py +++ /dev/null @@ -1,13 +0,0 @@ -import os -import sys - -now_dir = os.getcwd() -sys.path.insert(0, now_dir) -from text.g2pw import G2PWPinyin - -g2pw = G2PWPinyin( - model_dir="GPT_SoVITS/text/G2PWModel", - model_source="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large", - v_to_u=False, - neutral_tone_with_five=True, -) diff --git a/GPT_SoVITS/eres2net/ERes2Net.py b/GPT_SoVITS/eres2net/ERes2Net.py deleted file mode 100644 index 1618c813..00000000 --- a/GPT_SoVITS/eres2net/ERes2Net.py +++ /dev/null @@ -1,264 +0,0 @@ -# Copyright 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved. -# Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -""" -Res2Net implementation is adapted from https://github.com/wenet-e2e/wespeaker. -ERes2Net incorporates both local and global feature fusion techniques to improve the performance. -The local feature fusion (LFF) fuses the features within one single residual block to extract the local signal. -The global feature fusion (GFF) takes acoustic features of different scales as input to aggregate global signal. 
-""" - -import torch -import math -import torch.nn as nn -import torch.nn.functional as F -import pooling_layers as pooling_layers -from fusion import AFF - - -class ReLU(nn.Hardtanh): - def __init__(self, inplace=False): - super(ReLU, self).__init__(0, 20, inplace) - - def __repr__(self): - inplace_str = "inplace" if self.inplace else "" - return self.__class__.__name__ + " (" + inplace_str + ")" - - -class BasicBlockERes2Net(nn.Module): - expansion = 2 - - def __init__(self, in_planes, planes, stride=1, baseWidth=32, scale=2): - super(BasicBlockERes2Net, self).__init__() - width = int(math.floor(planes * (baseWidth / 64.0))) - self.conv1 = nn.Conv2d(in_planes, width * scale, kernel_size=1, stride=stride, bias=False) - self.bn1 = nn.BatchNorm2d(width * scale) - self.nums = scale - - convs = [] - bns = [] - for i in range(self.nums): - convs.append(nn.Conv2d(width, width, kernel_size=3, padding=1, bias=False)) - bns.append(nn.BatchNorm2d(width)) - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - self.relu = ReLU(inplace=True) - - self.conv3 = nn.Conv2d(width * scale, planes * self.expansion, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - self.shortcut = nn.Sequential() - if stride != 1 or in_planes != self.expansion * planes: - self.shortcut = nn.Sequential( - nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False), - nn.BatchNorm2d(self.expansion * planes), - ) - self.stride = stride - self.width = width - self.scale = scale - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - spx = torch.split(out, self.width, 1) - for i in range(self.nums): - if i == 0: - sp = spx[i] - else: - sp = sp + spx[i] - sp = self.convs[i](sp) - sp = self.relu(self.bns[i](sp)) - if i == 0: - out = sp - else: - out = torch.cat((out, sp), 1) - - out = self.conv3(out) - out = self.bn3(out) - - residual = self.shortcut(x) - out += residual - out = self.relu(out) - - return out - - -class BasicBlockERes2Net_diff_AFF(nn.Module): - expansion = 2 - - def __init__(self, in_planes, planes, stride=1, baseWidth=32, scale=2): - super(BasicBlockERes2Net_diff_AFF, self).__init__() - width = int(math.floor(planes * (baseWidth / 64.0))) - self.conv1 = nn.Conv2d(in_planes, width * scale, kernel_size=1, stride=stride, bias=False) - self.bn1 = nn.BatchNorm2d(width * scale) - self.nums = scale - - convs = [] - fuse_models = [] - bns = [] - for i in range(self.nums): - convs.append(nn.Conv2d(width, width, kernel_size=3, padding=1, bias=False)) - bns.append(nn.BatchNorm2d(width)) - for j in range(self.nums - 1): - fuse_models.append(AFF(channels=width)) - - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - self.fuse_models = nn.ModuleList(fuse_models) - self.relu = ReLU(inplace=True) - - self.conv3 = nn.Conv2d(width * scale, planes * self.expansion, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - self.shortcut = nn.Sequential() - if stride != 1 or in_planes != self.expansion * planes: - self.shortcut = nn.Sequential( - nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False), - nn.BatchNorm2d(self.expansion * planes), - ) - self.stride = stride - self.width = width - self.scale = scale - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - spx = torch.split(out, self.width, 1) - for i in range(self.nums): - if i == 0: - sp = spx[i] - else: - sp = 
self.fuse_models[i - 1](sp, spx[i]) - - sp = self.convs[i](sp) - sp = self.relu(self.bns[i](sp)) - if i == 0: - out = sp - else: - out = torch.cat((out, sp), 1) - - out = self.conv3(out) - out = self.bn3(out) - - residual = self.shortcut(x) - out += residual - out = self.relu(out) - - return out - - -class ERes2Net(nn.Module): - def __init__( - self, - block=BasicBlockERes2Net, - block_fuse=BasicBlockERes2Net_diff_AFF, - num_blocks=[3, 4, 6, 3], - m_channels=32, - feat_dim=80, - embedding_size=192, - pooling_func="TSTP", - two_emb_layer=False, - ): - super(ERes2Net, self).__init__() - self.in_planes = m_channels - self.feat_dim = feat_dim - self.embedding_size = embedding_size - self.stats_dim = int(feat_dim / 8) * m_channels * 8 - self.two_emb_layer = two_emb_layer - - self.conv1 = nn.Conv2d(1, m_channels, kernel_size=3, stride=1, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(m_channels) - self.layer1 = self._make_layer(block, m_channels, num_blocks[0], stride=1) - self.layer2 = self._make_layer(block, m_channels * 2, num_blocks[1], stride=2) - self.layer3 = self._make_layer(block_fuse, m_channels * 4, num_blocks[2], stride=2) - self.layer4 = self._make_layer(block_fuse, m_channels * 8, num_blocks[3], stride=2) - - # Downsampling module for each layer - self.layer1_downsample = nn.Conv2d( - m_channels * 2, m_channels * 4, kernel_size=3, stride=2, padding=1, bias=False - ) - self.layer2_downsample = nn.Conv2d( - m_channels * 4, m_channels * 8, kernel_size=3, padding=1, stride=2, bias=False - ) - self.layer3_downsample = nn.Conv2d( - m_channels * 8, m_channels * 16, kernel_size=3, padding=1, stride=2, bias=False - ) - - # Bottom-up fusion module - self.fuse_mode12 = AFF(channels=m_channels * 4) - self.fuse_mode123 = AFF(channels=m_channels * 8) - self.fuse_mode1234 = AFF(channels=m_channels * 16) - - self.n_stats = 1 if pooling_func == "TAP" or pooling_func == "TSDP" else 2 - self.pool = getattr(pooling_layers, pooling_func)(in_dim=self.stats_dim * block.expansion) - self.seg_1 = nn.Linear(self.stats_dim * block.expansion * self.n_stats, embedding_size) - if self.two_emb_layer: - self.seg_bn_1 = nn.BatchNorm1d(embedding_size, affine=False) - self.seg_2 = nn.Linear(embedding_size, embedding_size) - else: - self.seg_bn_1 = nn.Identity() - self.seg_2 = nn.Identity() - - def _make_layer(self, block, planes, num_blocks, stride): - strides = [stride] + [1] * (num_blocks - 1) - layers = [] - for stride in strides: - layers.append(block(self.in_planes, planes, stride)) - self.in_planes = planes * block.expansion - return nn.Sequential(*layers) - - def forward(self, x): - x = x.permute(0, 2, 1) # (B,T,F) => (B,F,T) - x = x.unsqueeze_(1) - out = F.relu(self.bn1(self.conv1(x))) - out1 = self.layer1(out) - out2 = self.layer2(out1) - out1_downsample = self.layer1_downsample(out1) - fuse_out12 = self.fuse_mode12(out2, out1_downsample) - out3 = self.layer3(out2) - fuse_out12_downsample = self.layer2_downsample(fuse_out12) - fuse_out123 = self.fuse_mode123(out3, fuse_out12_downsample) - out4 = self.layer4(out3) - fuse_out123_downsample = self.layer3_downsample(fuse_out123) - fuse_out1234 = self.fuse_mode1234(out4, fuse_out123_downsample) - stats = self.pool(fuse_out1234) - - embed_a = self.seg_1(stats) - if self.two_emb_layer: - out = F.relu(embed_a) - out = self.seg_bn_1(out) - embed_b = self.seg_2(out) - return embed_b - else: - return embed_a - - def forward3(self, x): - x = x.permute(0, 2, 1) # (B,T,F) => (B,F,T) - x = x.unsqueeze_(1) - out = F.relu(self.bn1(self.conv1(x))) - out1 = 
self.layer1(out) - out2 = self.layer2(out1) - out1_downsample = self.layer1_downsample(out1) - fuse_out12 = self.fuse_mode12(out2, out1_downsample) - out3 = self.layer3(out2) - fuse_out12_downsample = self.layer2_downsample(fuse_out12) - fuse_out123 = self.fuse_mode123(out3, fuse_out12_downsample) - out4 = self.layer4(out3) - fuse_out123_downsample = self.layer3_downsample(fuse_out123) - fuse_out1234 = self.fuse_mode1234(out4, fuse_out123_downsample).flatten(start_dim=1, end_dim=2).mean(-1) - return fuse_out1234 - - -if __name__ == "__main__": - x = torch.zeros(10, 300, 80) - model = ERes2Net(feat_dim=80, embedding_size=192, pooling_func="TSTP") - model.eval() - out = model(x) - print(out.shape) # torch.Size([10, 192]) - - num_params = sum(param.numel() for param in model.parameters()) - print("{} M".format(num_params / 1e6)) # 6.61M diff --git a/GPT_SoVITS/eres2net/ERes2NetV2.py b/GPT_SoVITS/eres2net/ERes2NetV2.py index 2e152a41..8bc9f4f1 100644 --- a/GPT_SoVITS/eres2net/ERes2NetV2.py +++ b/GPT_SoVITS/eres2net/ERes2NetV2.py @@ -8,12 +8,14 @@ To alleviate this problem, we propose an improved ERes2NetV2 by pruning redundan both the model parameters and its computational cost. """ -import torch import math + +import torch import torch.nn as nn import torch.nn.functional as F -import pooling_layers as pooling_layers -from fusion import AFF + +from . import pooling_layers as pooling_layers +from .fusion import AFF class ReLU(nn.Hardtanh): @@ -247,26 +249,4 @@ class ERes2NetV2(nn.Module): out4 = self.layer4(out3) out3_ds = self.layer3_ds(out3) fuse_out34 = self.fuse34(out4, out3_ds) - # print(111111111,fuse_out34.shape)#111111111 torch.Size([16, 2048, 10, 72]) return fuse_out34.flatten(start_dim=1, end_dim=2).mean(-1) - # stats = self.pool(fuse_out34) - # - # embed_a = self.seg_1(stats) - # if self.two_emb_layer: - # out = F.relu(embed_a) - # out = self.seg_bn_1(out) - # embed_b = self.seg_2(out) - # return embed_b - # else: - # return embed_a - - -if __name__ == "__main__": - x = torch.randn(1, 300, 80) - model = ERes2NetV2(feat_dim=80, embedding_size=192, m_channels=64, baseWidth=26, scale=2, expansion=2) - model.eval() - y = model(x) - print(y.size()) - macs, num_params = profile(model, inputs=(x,)) - print("Params: {} M".format(num_params / 1e6)) # 17.86 M - print("MACs: {} G".format(macs / 1e9)) # 12.69 G diff --git a/GPT_SoVITS/eres2net/ERes2Net_huge.py b/GPT_SoVITS/eres2net/ERes2Net_huge.py deleted file mode 100644 index 0f04236b..00000000 --- a/GPT_SoVITS/eres2net/ERes2Net_huge.py +++ /dev/null @@ -1,289 +0,0 @@ -# Copyright 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved. -# Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -"""Res2Net implementation is adapted from https://github.com/wenet-e2e/wespeaker. -ERes2Net incorporates both local and global feature fusion techniques to improve the performance. -The local feature fusion (LFF) fuses the features within one single residual block to extract the local signal. -The global feature fusion (GFF) takes acoustic features of different scales as input to aggregate global signal. -ERes2Net-huge is an upgraded version of ERes2Net that uses a larger number of parameters to achieve better -recognition performance. Parameters expansion, baseWidth, and scale can be modified to obtain optimal performance. 
-""" - -import torch -import math -import torch.nn as nn -import torch.nn.functional as F -import pooling_layers as pooling_layers -from fusion import AFF - - -class ReLU(nn.Hardtanh): - def __init__(self, inplace=False): - super(ReLU, self).__init__(0, 20, inplace) - - def __repr__(self): - inplace_str = "inplace" if self.inplace else "" - return self.__class__.__name__ + " (" + inplace_str + ")" - - -class BasicBlockERes2Net(nn.Module): - expansion = 4 - - def __init__(self, in_planes, planes, stride=1, baseWidth=24, scale=3): - super(BasicBlockERes2Net, self).__init__() - width = int(math.floor(planes * (baseWidth / 64.0))) - self.conv1 = nn.Conv2d(in_planes, width * scale, kernel_size=1, stride=stride, bias=False) - self.bn1 = nn.BatchNorm2d(width * scale) - self.nums = scale - - convs = [] - bns = [] - for i in range(self.nums): - convs.append(nn.Conv2d(width, width, kernel_size=3, padding=1, bias=False)) - bns.append(nn.BatchNorm2d(width)) - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - self.relu = ReLU(inplace=True) - - self.conv3 = nn.Conv2d(width * scale, planes * self.expansion, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - self.shortcut = nn.Sequential() - if stride != 1 or in_planes != self.expansion * planes: - self.shortcut = nn.Sequential( - nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False), - nn.BatchNorm2d(self.expansion * planes), - ) - self.stride = stride - self.width = width - self.scale = scale - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - spx = torch.split(out, self.width, 1) - for i in range(self.nums): - if i == 0: - sp = spx[i] - else: - sp = sp + spx[i] - sp = self.convs[i](sp) - sp = self.relu(self.bns[i](sp)) - if i == 0: - out = sp - else: - out = torch.cat((out, sp), 1) - - out = self.conv3(out) - out = self.bn3(out) - - residual = self.shortcut(x) - out += residual - out = self.relu(out) - - return out - - -class BasicBlockERes2Net_diff_AFF(nn.Module): - expansion = 4 - - def __init__(self, in_planes, planes, stride=1, baseWidth=24, scale=3): - super(BasicBlockERes2Net_diff_AFF, self).__init__() - width = int(math.floor(planes * (baseWidth / 64.0))) - self.conv1 = nn.Conv2d(in_planes, width * scale, kernel_size=1, stride=stride, bias=False) - self.bn1 = nn.BatchNorm2d(width * scale) - self.nums = scale - - convs = [] - fuse_models = [] - bns = [] - for i in range(self.nums): - convs.append(nn.Conv2d(width, width, kernel_size=3, padding=1, bias=False)) - bns.append(nn.BatchNorm2d(width)) - for j in range(self.nums - 1): - fuse_models.append(AFF(channels=width)) - - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - self.fuse_models = nn.ModuleList(fuse_models) - self.relu = ReLU(inplace=True) - - self.conv3 = nn.Conv2d(width * scale, planes * self.expansion, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - self.shortcut = nn.Sequential() - if stride != 1 or in_planes != self.expansion * planes: - self.shortcut = nn.Sequential( - nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False), - nn.BatchNorm2d(self.expansion * planes), - ) - self.stride = stride - self.width = width - self.scale = scale - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - spx = torch.split(out, self.width, 1) - for i in range(self.nums): - if i == 0: - sp = spx[i] - else: - sp = 
self.fuse_models[i - 1](sp, spx[i]) - - sp = self.convs[i](sp) - sp = self.relu(self.bns[i](sp)) - if i == 0: - out = sp - else: - out = torch.cat((out, sp), 1) - - out = self.conv3(out) - out = self.bn3(out) - - residual = self.shortcut(x) - out += residual - out = self.relu(out) - - return out - - -class ERes2Net(nn.Module): - def __init__( - self, - block=BasicBlockERes2Net, - block_fuse=BasicBlockERes2Net_diff_AFF, - num_blocks=[3, 4, 6, 3], - m_channels=64, - feat_dim=80, - embedding_size=192, - pooling_func="TSTP", - two_emb_layer=False, - ): - super(ERes2Net, self).__init__() - self.in_planes = m_channels - self.feat_dim = feat_dim - self.embedding_size = embedding_size - self.stats_dim = int(feat_dim / 8) * m_channels * 8 - self.two_emb_layer = two_emb_layer - - self.conv1 = nn.Conv2d(1, m_channels, kernel_size=3, stride=1, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(m_channels) - - self.layer1 = self._make_layer(block, m_channels, num_blocks[0], stride=1) - self.layer2 = self._make_layer(block, m_channels * 2, num_blocks[1], stride=2) - self.layer3 = self._make_layer(block_fuse, m_channels * 4, num_blocks[2], stride=2) - self.layer4 = self._make_layer(block_fuse, m_channels * 8, num_blocks[3], stride=2) - - self.layer1_downsample = nn.Conv2d( - m_channels * 4, m_channels * 8, kernel_size=3, padding=1, stride=2, bias=False - ) - self.layer2_downsample = nn.Conv2d( - m_channels * 8, m_channels * 16, kernel_size=3, padding=1, stride=2, bias=False - ) - self.layer3_downsample = nn.Conv2d( - m_channels * 16, m_channels * 32, kernel_size=3, padding=1, stride=2, bias=False - ) - - self.fuse_mode12 = AFF(channels=m_channels * 8) - self.fuse_mode123 = AFF(channels=m_channels * 16) - self.fuse_mode1234 = AFF(channels=m_channels * 32) - - self.n_stats = 1 if pooling_func == "TAP" or pooling_func == "TSDP" else 2 - self.pool = getattr(pooling_layers, pooling_func)(in_dim=self.stats_dim * block.expansion) - self.seg_1 = nn.Linear(self.stats_dim * block.expansion * self.n_stats, embedding_size) - if self.two_emb_layer: - self.seg_bn_1 = nn.BatchNorm1d(embedding_size, affine=False) - self.seg_2 = nn.Linear(embedding_size, embedding_size) - else: - self.seg_bn_1 = nn.Identity() - self.seg_2 = nn.Identity() - - def _make_layer(self, block, planes, num_blocks, stride): - strides = [stride] + [1] * (num_blocks - 1) - layers = [] - for stride in strides: - layers.append(block(self.in_planes, planes, stride)) - self.in_planes = planes * block.expansion - return nn.Sequential(*layers) - - def forward(self, x): - x = x.permute(0, 2, 1) # (B,T,F) => (B,F,T) - - x = x.unsqueeze_(1) - out = F.relu(self.bn1(self.conv1(x))) - out1 = self.layer1(out) - out2 = self.layer2(out1) - out1_downsample = self.layer1_downsample(out1) - fuse_out12 = self.fuse_mode12(out2, out1_downsample) - out3 = self.layer3(out2) - fuse_out12_downsample = self.layer2_downsample(fuse_out12) - fuse_out123 = self.fuse_mode123(out3, fuse_out12_downsample) - out4 = self.layer4(out3) - fuse_out123_downsample = self.layer3_downsample(fuse_out123) - fuse_out1234 = self.fuse_mode1234(out4, fuse_out123_downsample) - stats = self.pool(fuse_out1234) - - embed_a = self.seg_1(stats) - if self.two_emb_layer: - out = F.relu(embed_a) - out = self.seg_bn_1(out) - embed_b = self.seg_2(out) - return embed_b - else: - return embed_a - - def forward2(self, x, if_mean): - x = x.permute(0, 2, 1) # (B,T,F) => (B,F,T) - - x = x.unsqueeze_(1) - out = F.relu(self.bn1(self.conv1(x))) - out1 = self.layer1(out) - out2 = self.layer2(out1) - out1_downsample = 
self.layer1_downsample(out1) - fuse_out12 = self.fuse_mode12(out2, out1_downsample) - out3 = self.layer3(out2) - fuse_out12_downsample = self.layer2_downsample(fuse_out12) - fuse_out123 = self.fuse_mode123(out3, fuse_out12_downsample) - out4 = self.layer4(out3) - fuse_out123_downsample = self.layer3_downsample(fuse_out123) - fuse_out1234 = self.fuse_mode1234(out4, fuse_out123_downsample).flatten(start_dim=1, end_dim=2) # bs,20480,T - if if_mean == False: - mean = fuse_out1234[0].transpose(1, 0) # (T,20480),bs=T - else: - mean = fuse_out1234.mean(2) # bs,20480 - mean_std = torch.cat([mean, torch.zeros_like(mean)], 1) - return self.seg_1(mean_std) # (T,192) - - # stats = self.pool(fuse_out1234) - # if self.two_emb_layer: - # out = F.relu(embed_a) - # out = self.seg_bn_1(out) - # embed_b = self.seg_2(out) - # return embed_b - # else: - # return embed_a - - def forward3(self, x): - x = x.permute(0, 2, 1) # (B,T,F) => (B,F,T) - - x = x.unsqueeze_(1) - out = F.relu(self.bn1(self.conv1(x))) - out1 = self.layer1(out) - out2 = self.layer2(out1) - out1_downsample = self.layer1_downsample(out1) - fuse_out12 = self.fuse_mode12(out2, out1_downsample) - out3 = self.layer3(out2) - fuse_out12_downsample = self.layer2_downsample(fuse_out12) - fuse_out123 = self.fuse_mode123(out3, fuse_out12_downsample) - out4 = self.layer4(out3) - fuse_out123_downsample = self.layer3_downsample(fuse_out123) - fuse_out1234 = self.fuse_mode1234(out4, fuse_out123_downsample).flatten(start_dim=1, end_dim=2).mean(-1) - return fuse_out1234 - # print(fuse_out1234.shape) - # print(fuse_out1234.flatten(start_dim=1,end_dim=2).shape) - # pdb.set_trace() diff --git a/GPT_SoVITS/export_torch_script.py b/GPT_SoVITS/export_torch_script.py index e4406f28..1a40f814 100644 --- a/GPT_SoVITS/export_torch_script.py +++ b/GPT_SoVITS/export_torch_script.py @@ -1,28 +1,26 @@ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py # reference: https://github.com/lifeiteng/vall-e import argparse +import json +import os from io import BytesIO from typing import Optional -from my_utils import load_audio + +import soundfile import torch import torchaudio - from torch import IntTensor, LongTensor, Tensor, nn from torch.nn import functional as F - +from torchaudio.compliance.kaldi import fbank from transformers import AutoModelForMaskedLM, AutoTokenizer -from feature_extractor import cnhubert -from AR.models.t2s_lightning_module import Text2SemanticLightningModule -from module.models_onnx import SynthesizerTrn - -from inference_webui import get_phones_and_bert - -from sv import SV -import kaldi as Kaldi - -import os -import soundfile +import GPT_SoVITS.text as text +from GPT_SoVITS.AR.models.t2s_lightning_module import Text2SemanticLightningModule +from GPT_SoVITS.feature_extractor import cnhubert +from GPT_SoVITS.inference_webui import get_phones_and_bert +from GPT_SoVITS.module.models_onnx import SynthesizerTrn +from GPT_SoVITS.sv import SV +from tools.my_utils import load_audio default_config = { "embedding_dim": 512, @@ -477,7 +475,7 @@ class T2SModel(nn.Module): # avoid dtype inconsistency when exporting bert = bert.to(dtype=self.bert_proj.weight.dtype) - + x = x + self.bert_proj(bert.transpose(1, 2)) x: torch.Tensor = self.ar_text_position(x) @@ -737,7 +735,7 @@ def export_prov2( device="cpu", is_half=True, ): - if sv_cn_model == None: + if sv_cn_model is None: init_sv_cn(device, is_half) if not os.path.exists(output_path): @@ -898,7 +896,7 @@ class ExportERes2NetV2(nn.Module): def forward(self, 
audio_16k): # 这个 fbank 函数有一个 cache, 不过不要紧,它跟 audio_16k 的长度无关 # 只跟 device 和 dtype 有关 - x = Kaldi.fbank(audio_16k, num_mel_bins=80, sample_frequency=16000, dither=0) + x = fbank(audio_16k, num_mel_bins=80, sample_frequency=16000, dither=0) x = torch.stack([x]) x = x.permute(0, 2, 1) # (B,T,F) => (B,F,T) @@ -1041,10 +1039,6 @@ def test(): soundfile.write("out.wav", audio.detach().cpu().numpy(), 32000) -import text -import json - - def export_symbel(version="v2"): if version == "v1": symbols = text._symbol_to_id_v1 diff --git a/GPT_SoVITS/export_torch_script_v3v4.py b/GPT_SoVITS/export_torch_script_v3v4.py index b0e4dba5..d4d03dc6 100644 --- a/GPT_SoVITS/export_torch_script_v3v4.py +++ b/GPT_SoVITS/export_torch_script_v3v4.py @@ -1,27 +1,25 @@ +import logging import os -from export_torch_script import ( + +import librosa +import numpy as np +import soundfile +import torch +import uvicorn +from librosa.filters import mel as librosa_mel_fn + +from GPT_SoVITS.export_torch_script import ( T2SModel, get_raw_t2s_model, resamplex, spectrogram_torch, ) -from f5_tts.model.backbones.dit import DiT -from inference_webui import get_phones_and_bert -import librosa -from module import commons -from module.mel_processing import mel_spectrogram_torch -from module.models_onnx import CFM, Generator, SynthesizerTrnV3 -import numpy as np -import torch._dynamo.config -import torchaudio -import logging -import uvicorn -import torch -import soundfile -from librosa.filters import mel as librosa_mel_fn - - -from inference_webui import get_spepc, norm_spec, resample, ssl_model +from GPT_SoVITS.f5_tts.model.backbones.dit import DiT +from GPT_SoVITS.inference_webui import get_phones_and_bert, get_spepc, norm_spec, ssl_model +from GPT_SoVITS.module import commons +from GPT_SoVITS.module.mel_processing import mel_spectrogram_torch +from GPT_SoVITS.module.models_onnx import CFM, Generator, SynthesizerTrnV3 +from GPT_SoVITS.process_ckpt import inspect_version logging.config.dictConfig(uvicorn.config.LOGGING_CONFIG) logger = logging.getLogger("uvicorn") @@ -176,32 +174,33 @@ class ExportCFM(torch.nn.Module): return cfm_res, fea_ref, mel2 -mel_fn = lambda x: mel_spectrogram_torch( - x, - **{ - "n_fft": 1024, - "win_size": 1024, - "hop_size": 256, - "num_mels": 100, - "sampling_rate": 24000, - "fmin": 0, - "fmax": None, - "center": False, - }, -) -mel_fn_v4 = lambda x: mel_spectrogram_torch( - x, - **{ - "n_fft": 1280, - "win_size": 1280, - "hop_size": 320, - "num_mels": 100, - "sampling_rate": 32000, - "fmin": 0, - "fmax": None, - "center": False, - }, -) +def mel_fn(x): + return mel_spectrogram_torch( + y=x, + n_fft=1024, + num_mels=100, + sampling_rate=24000, + hop_size=256, + win_size=1024, + fmin=0, + fmax=None, + center=False, + ) + + +def mel_fn_v4(x): + return mel_spectrogram_torch( + y=x, + n_fft=1280, + num_mels=100, + sampling_rate=32000, + hop_size=320, + win_size=1280, + fmin=0, + fmax=None, + center=False, + ) + spec_min = -12 spec_max = 2 @@ -511,7 +510,7 @@ def init_bigvgan(): # remove weight norm in the model and set to eval mode bigvgan_model.remove_weight_norm() bigvgan_model = bigvgan_model.eval() - if is_half == True: + if is_half is True: bigvgan_model = bigvgan_model.half().to(device) else: bigvgan_model = bigvgan_model.to(device) @@ -536,7 +535,7 @@ def init_hifigan(): "%s/GPT_SoVITS/pretrained_models/gsv-v4-pretrained/vocoder.pth" % (now_dir,), map_location="cpu" ) print("loading vocoder", hifigan_model.load_state_dict(state_dict_g)) - if is_half == True: + if is_half is True: hifigan_model = 
hifigan_model.half().to(device) else: hifigan_model = hifigan_model.to(device) @@ -578,8 +577,6 @@ class DictToAttrRecursive(dict): raise AttributeError(f"Attribute {item} not found") -from process_ckpt import get_sovits_version_from_path_fast, load_sovits_new - v3v4set = {"v3", "v4"} @@ -587,12 +584,10 @@ def get_sovits_weights(sovits_path): path_sovits_v3 = "GPT_SoVITS/pretrained_models/s2Gv3.pth" is_exist_s2gv3 = os.path.exists(path_sovits_v3) - version, model_version, if_lora_v3 = get_sovits_version_from_path_fast(sovits_path) - if if_lora_v3 == True and is_exist_s2gv3 == False: + model_version, version, if_lora_v3, hps, dict_s2 = inspect_version(sovits_path) + if if_lora_v3 is True and is_exist_s2gv3 is False: logger.info("SoVITS V3 底模缺失,无法加载相应 LoRA 权重") - dict_s2 = load_sovits_new(sovits_path) - hps = dict_s2["config"] hps = DictToAttrRecursive(hps) hps.model.semantic_frame_rate = "25hz" if "enc_p.text_embedding.weight" not in dict_s2["weight"]: @@ -617,7 +612,7 @@ def get_sovits_weights(sovits_path): model_version = hps.model.version logger.info(f"模型版本: {model_version}") - if is_half == True: + if is_half is True: vq_model = vq_model.half().to(device) else: vq_model = vq_model.to(device) @@ -729,11 +724,11 @@ def export_1(ref_wav_path, ref_wav_text, version="v3"): # ref_wav_path = "onnx/ad/ref.wav" speed = 1.0 sample_steps = 8 - dtype = torch.float16 if is_half == True else torch.float32 + dtype = torch.float16 if is_half is True else torch.float32 refer = get_spepc(hps, ref_wav_path).to(device).to(dtype) zero_wav = np.zeros( int(hps.data.sampling_rate * 0.3), - dtype=np.float16 if is_half == True else np.float32, + dtype=np.float16 if is_half is True else np.float32, ) with torch.no_grad(): @@ -741,7 +736,7 @@ def export_1(ref_wav_path, ref_wav_text, version="v3"): wav16k = torch.from_numpy(wav16k) zero_wav_torch = torch.from_numpy(zero_wav) - if is_half == True: + if is_half is True: wav16k = wav16k.half().to(device) zero_wav_torch = zero_wav_torch.half().to(device) else: @@ -828,13 +823,11 @@ def export_1(ref_wav_path, ref_wav_text, version="v3"): gpt_sovits_half = ExportGPTSovitsV4Half(sovits.hps, script_t2s, trace_vq_model) torch.jit.script(gpt_sovits_half).save("onnx/ad/gpt_sovits_v4_half.pt") - ref_audio, sr = torchaudio.load(ref_wav_path) + tgt_sr = 24000 if version == "v3" else 32000 + ref_audio = torch.from_numpy(librosa.load(ref_wav_path, sr=tgt_sr)[0]).unsqueeze(0) ref_audio = ref_audio.to(device).float() if ref_audio.shape[0] == 2: ref_audio = ref_audio.mean(0).unsqueeze(0) - tgt_sr = 24000 if version == "v3" else 32000 - if sr != tgt_sr: - ref_audio = resample(ref_audio, sr, tgt_sr) # mel2 = mel_fn(ref_audio) mel2 = mel_fn(ref_audio) if version == "v3" else mel_fn_v4(ref_audio) mel2 = norm_spec(mel2) @@ -940,11 +933,11 @@ def test_export( speed = 1.0 sample_steps = 8 - dtype = torch.float16 if is_half == True else torch.float32 + dtype = torch.float16 if is_half is True else torch.float32 zero_wav = np.zeros( int(16000 * 0.3), - dtype=np.float16 if is_half == True else np.float32, + dtype=np.float16 if is_half is True else np.float32, ) with torch.no_grad(): @@ -952,7 +945,7 @@ def test_export( wav16k = torch.from_numpy(wav16k) zero_wav_torch = torch.from_numpy(zero_wav) - if is_half == True: + if is_half is True: wav16k = wav16k.half().to(device) zero_wav_torch = zero_wav_torch.half().to(device) else: @@ -1058,11 +1051,11 @@ def test_export( speed = 1.0 sample_steps = torch.LongTensor([16]) - dtype = torch.float16 if is_half == True else torch.float32 + dtype = 
torch.float16 if is_half is True else torch.float32 zero_wav = np.zeros( int(out_sr * 0.3), - dtype=np.float16 if is_half == True else np.float32, + dtype=np.float16 if is_half is True else np.float32, ) with torch.no_grad(): @@ -1070,7 +1063,7 @@ def test_export( wav16k = torch.from_numpy(wav16k) zero_wav_torch = torch.from_numpy(zero_wav) - if is_half == True: + if is_half is True: wav16k = wav16k.half().to(device) zero_wav_torch = zero_wav_torch.half().to(device) else: diff --git a/GPT_SoVITS/f5_tts/model/__init__.py b/GPT_SoVITS/f5_tts/model/__init__.py index 50cff994..88d52cff 100644 --- a/GPT_SoVITS/f5_tts/model/__init__.py +++ b/GPT_SoVITS/f5_tts/model/__init__.py @@ -1,13 +1,3 @@ -# from f5_tts.model.cfm import CFM -# -# from f5_tts.model.backbones.unett import UNetT -from GPT_SoVITS.f5_tts.model.backbones.dit import DiT -# from f5_tts.model.backbones.dit import DiTNoCond -# from f5_tts.model.backbones.dit import DiTNoCondNoT -# from f5_tts.model.backbones.mmdit import MMDiT +from .backbones.dit import DiT -# from f5_tts.model.trainer import Trainer - - -# __all__ = ["CFM", "UNetT", "DiT", "MMDiT", "Trainer"] -# __all__ = ["CFM", "UNetT", "DiTNoCond","DiT", "MMDiT"] +__all__ = ["DiT"] diff --git a/GPT_SoVITS/f5_tts/model/backbones/dit.py b/GPT_SoVITS/f5_tts/model/backbones/dit.py index 4aa3b9ac..75bd4017 100644 --- a/GPT_SoVITS/f5_tts/model/backbones/dit.py +++ b/GPT_SoVITS/f5_tts/model/backbones/dit.py @@ -12,21 +12,20 @@ from __future__ import annotations import torch from torch import nn from torch.utils.checkpoint import checkpoint - from x_transformers.x_transformers import RotaryEmbedding -from GPT_SoVITS.f5_tts.model.modules import ( - TimestepEmbedding, +from GPT_SoVITS.module.commons import sequence_mask + +from ..modules import ( + AdaLayerNormZero_Final, ConvNeXtV2Block, ConvPositionEmbedding, DiTBlock, - AdaLayerNormZero_Final, - precompute_freqs_cis, + TimestepEmbedding, get_pos_embed_indices, + precompute_freqs_cis, ) -from module.commons import sequence_mask - class TextEmbedding(nn.Module): def __init__(self, text_dim, conv_layers=0, conv_mult=2): diff --git a/GPT_SoVITS/f5_tts/model/backbones/mmdit.py b/GPT_SoVITS/f5_tts/model/backbones/mmdit.py index 64c7ef18..15192367 100644 --- a/GPT_SoVITS/f5_tts/model/backbones/mmdit.py +++ b/GPT_SoVITS/f5_tts/model/backbones/mmdit.py @@ -11,19 +11,17 @@ from __future__ import annotations import torch from torch import nn - from x_transformers.x_transformers import RotaryEmbedding -from f5_tts.model.modules import ( - TimestepEmbedding, +from ..modules import ( + AdaLayerNormZero_Final, ConvPositionEmbedding, MMDiTBlock, - AdaLayerNormZero_Final, - precompute_freqs_cis, + TimestepEmbedding, get_pos_embed_indices, + precompute_freqs_cis, ) - # text embedding diff --git a/GPT_SoVITS/f5_tts/model/backbones/unett.py b/GPT_SoVITS/f5_tts/model/backbones/unett.py index acf649a5..7d0a9605 100644 --- a/GPT_SoVITS/f5_tts/model/backbones/unett.py +++ b/GPT_SoVITS/f5_tts/model/backbones/unett.py @@ -8,27 +8,26 @@ d - dimension """ from __future__ import annotations + from typing import Literal import torch -from torch import nn import torch.nn.functional as F - +from torch import nn from x_transformers import RMSNorm from x_transformers.x_transformers import RotaryEmbedding -from f5_tts.model.modules import ( - TimestepEmbedding, - ConvNeXtV2Block, - ConvPositionEmbedding, +from ..modules import ( Attention, AttnProcessor, + ConvNeXtV2Block, + ConvPositionEmbedding, FeedForward, - precompute_freqs_cis, + TimestepEmbedding, 
get_pos_embed_indices, + precompute_freqs_cis, ) - # Text embedding diff --git a/GPT_SoVITS/f5_tts/model/modules.py b/GPT_SoVITS/f5_tts/model/modules.py index 9f030d96..b1cf0735 100644 --- a/GPT_SoVITS/f5_tts/model/modules.py +++ b/GPT_SoVITS/f5_tts/model/modules.py @@ -19,7 +19,6 @@ from librosa.filters import mel as librosa_mel_fn from torch import nn from x_transformers.x_transformers import apply_rotary_pos_emb - # raw wav to mel spec diff --git a/GPT_SoVITS/feature_extractor/cnhubert.py b/GPT_SoVITS/feature_extractor/cnhubert.py index f22b8d09..21451797 100644 --- a/GPT_SoVITS/feature_extractor/cnhubert.py +++ b/GPT_SoVITS/feature_extractor/cnhubert.py @@ -1,28 +1,25 @@ -import torch +import logging import os + +import torch +import torch.nn as nn +from transformers import ( + HubertModel, + Wav2Vec2FeatureExtractor, +) from transformers import logging as tf_logging tf_logging.set_verbosity_error() -import logging - logging.getLogger("numba").setLevel(logging.WARNING) -from transformers import ( - Wav2Vec2FeatureExtractor, - HubertModel, -) - -import utils -import torch.nn as nn - -cnhubert_base_path = None +cnhubert_base_path = "GPT_SoVITS/pretrained_models/chinese-hubert-base" class CNHubert(nn.Module): - def __init__(self, base_path: str = None): + def __init__(self, base_path: str = ""): super().__init__() - if base_path is None: + if not base_path: base_path = cnhubert_base_path if os.path.exists(base_path): ... @@ -37,70 +34,13 @@ class CNHubert(nn.Module): return feats -# class CNHubertLarge(nn.Module): -# def __init__(self): -# super().__init__() -# self.model = HubertModel.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-hubert-large") -# self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-hubert-large") -# def forward(self, x): -# input_values = self.feature_extractor(x, return_tensors="pt", sampling_rate=16000).input_values.to(x.device) -# feats = self.model(input_values)["last_hidden_state"] -# return feats -# -# class CVec(nn.Module): -# def __init__(self): -# super().__init__() -# self.model = HubertModel.from_pretrained("/data/docker/liujing04/vc-webui-big/hubert_base") -# self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("/data/docker/liujing04/vc-webui-big/hubert_base") -# def forward(self, x): -# input_values = self.feature_extractor(x, return_tensors="pt", sampling_rate=16000).input_values.to(x.device) -# feats = self.model(input_values)["last_hidden_state"] -# return feats -# -# class cnw2v2base(nn.Module): -# def __init__(self): -# super().__init__() -# self.model = Wav2Vec2Model.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-wav2vec2-base") -# self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-wav2vec2-base") -# def forward(self, x): -# input_values = self.feature_extractor(x, return_tensors="pt", sampling_rate=16000).input_values.to(x.device) -# feats = self.model(input_values)["last_hidden_state"] -# return feats - - def get_model(): model = CNHubert() model.eval() return model -# def get_large_model(): -# model = CNHubertLarge() -# model.eval() -# return model -# -# def get_model_cvec(): -# model = CVec() -# model.eval() -# return model -# -# def get_model_cnw2v2base(): -# model = cnw2v2base() -# model.eval() -# return model - - def get_content(hmodel, wav_16k_tensor): with torch.no_grad(): feats = hmodel(wav_16k_tensor) return feats.transpose(1, 2) - - -if __name__ == "__main__": - model = get_model() - src_path = 
"/Users/Shared/原音频2.wav" - wav_16k_tensor = utils.load_wav_to_torch_and_resample(src_path, 16000) - model = model - wav_16k_tensor = wav_16k_tensor - feats = get_content(model, wav_16k_tensor) - print(feats.shape) diff --git a/GPT_SoVITS/feature_extractor/whisper_enc.py b/GPT_SoVITS/feature_extractor/whisper_enc.py index 260539bc..d35b548a 100644 --- a/GPT_SoVITS/feature_extractor/whisper_enc.py +++ b/GPT_SoVITS/feature_extractor/whisper_enc.py @@ -1,17 +1,16 @@ import torch +import whisper +from whisper import log_mel_spectrogram, pad_or_trim def get_model(): - import whisper - model = whisper.load_model("small", device="cpu") return model.encoder -def get_content(model=None, wav_16k_tensor=None): - from whisper import log_mel_spectrogram, pad_or_trim - +def get_content(model: whisper.Whisper, wav_16k_tensor: torch.Tensor): + assert model dev = next(model.parameters()).device mel = log_mel_spectrogram(wav_16k_tensor).to(dev)[:, :3000] # if torch.cuda.is_available(): @@ -19,5 +18,5 @@ def get_content(model=None, wav_16k_tensor=None): feature_len = mel.shape[-1] // 2 assert mel.shape[-1] < 3000, "输入音频过长,只允许输入30以内音频" with torch.no_grad(): - feature = model(pad_or_trim(mel, 3000).unsqueeze(0))[:1, :feature_len, :].transpose(1, 2) + feature = model(pad_or_trim(mel, 3000).unsqueeze(0))[:1, :feature_len, :].transpose(1, 2) # type: ignore return feature diff --git a/GPT_SoVITS/inference_cli.py b/GPT_SoVITS/inference_cli.py deleted file mode 100644 index 459a3d36..00000000 --- a/GPT_SoVITS/inference_cli.py +++ /dev/null @@ -1,86 +0,0 @@ -import argparse -import os -import soundfile as sf - -from tools.i18n.i18n import I18nAuto -from GPT_SoVITS.inference_webui import change_gpt_weights, change_sovits_weights, get_tts_wav - -i18n = I18nAuto() - - -def synthesize( - GPT_model_path, - SoVITS_model_path, - ref_audio_path, - ref_text_path, - ref_language, - target_text_path, - target_language, - output_path, -): - # Read reference text - with open(ref_text_path, "r", encoding="utf-8") as file: - ref_text = file.read() - - # Read target text - with open(target_text_path, "r", encoding="utf-8") as file: - target_text = file.read() - - # Change model weights - change_gpt_weights(gpt_path=GPT_model_path) - change_sovits_weights(sovits_path=SoVITS_model_path) - - # Synthesize audio - synthesis_result = get_tts_wav( - ref_wav_path=ref_audio_path, - prompt_text=ref_text, - prompt_language=i18n(ref_language), - text=target_text, - text_language=i18n(target_language), - top_p=1, - temperature=1, - ) - - result_list = list(synthesis_result) - - if result_list: - last_sampling_rate, last_audio_data = result_list[-1] - output_wav_path = os.path.join(output_path, "output.wav") - sf.write(output_wav_path, last_audio_data, last_sampling_rate) - print(f"Audio saved to {output_wav_path}") - - -def main(): - parser = argparse.ArgumentParser(description="GPT-SoVITS Command Line Tool") - parser.add_argument("--gpt_model", required=True, help="Path to the GPT model file") - parser.add_argument("--sovits_model", required=True, help="Path to the SoVITS model file") - parser.add_argument("--ref_audio", required=True, help="Path to the reference audio file") - parser.add_argument("--ref_text", required=True, help="Path to the reference text file") - parser.add_argument( - "--ref_language", required=True, choices=["中文", "英文", "日文"], help="Language of the reference audio" - ) - parser.add_argument("--target_text", required=True, help="Path to the target text file") - parser.add_argument( - "--target_language", - required=True, - 
choices=["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"], - help="Language of the target text", - ) - parser.add_argument("--output_path", required=True, help="Path to the output directory") - - args = parser.parse_args() - - synthesize( - args.gpt_model, - args.sovits_model, - args.ref_audio, - args.ref_text, - args.ref_language, - args.target_text, - args.target_language, - args.output_path, - ) - - -if __name__ == "__main__": - main() diff --git a/GPT_SoVITS/inference_gui.py b/GPT_SoVITS/inference_gui.py deleted file mode 100644 index 379f7fa8..00000000 --- a/GPT_SoVITS/inference_gui.py +++ /dev/null @@ -1,316 +0,0 @@ -import os -import sys -from PyQt5.QtCore import QEvent -from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QLineEdit, QPushButton, QTextEdit -from PyQt5.QtWidgets import QGridLayout, QVBoxLayout, QWidget, QFileDialog, QStatusBar, QComboBox -import soundfile as sf - -from tools.i18n.i18n import I18nAuto - -i18n = I18nAuto() - -from inference_webui import gpt_path, sovits_path, change_gpt_weights, change_sovits_weights, get_tts_wav - - -class GPTSoVITSGUI(QMainWindow): - GPT_Path = gpt_path - SoVITS_Path = sovits_path - - def __init__(self): - super().__init__() - - self.setWindowTitle("GPT-SoVITS GUI") - self.setGeometry(800, 450, 950, 850) - - self.setStyleSheet(""" - QWidget { - background-color: #a3d3b1; - } - - QTabWidget::pane { - background-color: #a3d3b1; - } - - QTabWidget::tab-bar { - alignment: left; - } - - QTabBar::tab { - background: #8da4bf; - color: #ffffff; - padding: 8px; - } - - QTabBar::tab:selected { - background: #2a3f54; - } - - QLabel { - color: #000000; - } - - QPushButton { - background-color: #4CAF50; - color: white; - padding: 8px; - border: 1px solid #4CAF50; - border-radius: 4px; - } - - QPushButton:hover { - background-color: #45a049; - border: 1px solid #45a049; - box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.1); - } - """) - - license_text = ( - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. " - "如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE." 
- ) - license_label = QLabel(license_text) - license_label.setWordWrap(True) - - self.GPT_model_label = QLabel("选择GPT模型:") - self.GPT_model_input = QLineEdit() - self.GPT_model_input.setPlaceholderText("拖拽或选择文件") - self.GPT_model_input.setText(self.GPT_Path) - self.GPT_model_input.setReadOnly(True) - self.GPT_model_button = QPushButton("选择GPT模型文件") - self.GPT_model_button.clicked.connect(self.select_GPT_model) - - self.SoVITS_model_label = QLabel("选择SoVITS模型:") - self.SoVITS_model_input = QLineEdit() - self.SoVITS_model_input.setPlaceholderText("拖拽或选择文件") - self.SoVITS_model_input.setText(self.SoVITS_Path) - self.SoVITS_model_input.setReadOnly(True) - self.SoVITS_model_button = QPushButton("选择SoVITS模型文件") - self.SoVITS_model_button.clicked.connect(self.select_SoVITS_model) - - self.ref_audio_label = QLabel("上传参考音频:") - self.ref_audio_input = QLineEdit() - self.ref_audio_input.setPlaceholderText("拖拽或选择文件") - self.ref_audio_input.setReadOnly(True) - self.ref_audio_button = QPushButton("选择音频文件") - self.ref_audio_button.clicked.connect(self.select_ref_audio) - - self.ref_text_label = QLabel("参考音频文本:") - self.ref_text_input = QLineEdit() - self.ref_text_input.setPlaceholderText("直接输入文字或上传文本") - self.ref_text_button = QPushButton("上传文本") - self.ref_text_button.clicked.connect(self.upload_ref_text) - - self.ref_language_label = QLabel("参考音频语言:") - self.ref_language_combobox = QComboBox() - self.ref_language_combobox.addItems(["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"]) - self.ref_language_combobox.setCurrentText("多语种混合") - - self.target_text_label = QLabel("合成目标文本:") - self.target_text_input = QLineEdit() - self.target_text_input.setPlaceholderText("直接输入文字或上传文本") - self.target_text_button = QPushButton("上传文本") - self.target_text_button.clicked.connect(self.upload_target_text) - - self.target_language_label = QLabel("合成音频语言:") - self.target_language_combobox = QComboBox() - self.target_language_combobox.addItems(["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"]) - self.target_language_combobox.setCurrentText("多语种混合") - - self.output_label = QLabel("输出音频路径:") - self.output_input = QLineEdit() - self.output_input.setPlaceholderText("拖拽或选择文件") - self.output_input.setReadOnly(True) - self.output_button = QPushButton("选择文件夹") - self.output_button.clicked.connect(self.select_output_path) - - self.output_text = QTextEdit() - self.output_text.setReadOnly(True) - - self.add_drag_drop_events( - [ - self.GPT_model_input, - self.SoVITS_model_input, - self.ref_audio_input, - self.ref_text_input, - self.target_text_input, - self.output_input, - ] - ) - - self.synthesize_button = QPushButton("合成") - self.synthesize_button.clicked.connect(self.synthesize) - - self.clear_output_button = QPushButton("清空输出") - self.clear_output_button.clicked.connect(self.clear_output) - - self.status_bar = QStatusBar() - - main_layout = QVBoxLayout() - - input_layout = QGridLayout(self) - input_layout.setSpacing(10) - - input_layout.addWidget(license_label, 0, 0, 1, 3) - - input_layout.addWidget(self.GPT_model_label, 1, 0) - input_layout.addWidget(self.GPT_model_input, 2, 0, 1, 2) - input_layout.addWidget(self.GPT_model_button, 2, 2) - - input_layout.addWidget(self.SoVITS_model_label, 3, 0) - input_layout.addWidget(self.SoVITS_model_input, 4, 0, 1, 2) - input_layout.addWidget(self.SoVITS_model_button, 4, 2) - - input_layout.addWidget(self.ref_audio_label, 5, 0) - input_layout.addWidget(self.ref_audio_input, 6, 0, 1, 2) - input_layout.addWidget(self.ref_audio_button, 6, 2) - - input_layout.addWidget(self.ref_language_label, 7, 0) - 
input_layout.addWidget(self.ref_language_combobox, 8, 0, 1, 1) - input_layout.addWidget(self.ref_text_label, 9, 0) - input_layout.addWidget(self.ref_text_input, 10, 0, 1, 2) - input_layout.addWidget(self.ref_text_button, 10, 2) - - input_layout.addWidget(self.target_language_label, 11, 0) - input_layout.addWidget(self.target_language_combobox, 12, 0, 1, 1) - input_layout.addWidget(self.target_text_label, 13, 0) - input_layout.addWidget(self.target_text_input, 14, 0, 1, 2) - input_layout.addWidget(self.target_text_button, 14, 2) - - input_layout.addWidget(self.output_label, 15, 0) - input_layout.addWidget(self.output_input, 16, 0, 1, 2) - input_layout.addWidget(self.output_button, 16, 2) - - main_layout.addLayout(input_layout) - - output_layout = QVBoxLayout() - output_layout.addWidget(self.output_text) - main_layout.addLayout(output_layout) - - main_layout.addWidget(self.synthesize_button) - - main_layout.addWidget(self.clear_output_button) - - main_layout.addWidget(self.status_bar) - - self.central_widget = QWidget() - self.central_widget.setLayout(main_layout) - self.setCentralWidget(self.central_widget) - - def dragEnterEvent(self, event): - if event.mimeData().hasUrls(): - event.acceptProposedAction() - - def dropEvent(self, event): - if event.mimeData().hasUrls(): - file_paths = [url.toLocalFile() for url in event.mimeData().urls()] - if len(file_paths) == 1: - self.update_ref_audio(file_paths[0]) - else: - self.update_ref_audio(", ".join(file_paths)) - - def add_drag_drop_events(self, widgets): - for widget in widgets: - widget.setAcceptDrops(True) - widget.installEventFilter(self) - - def eventFilter(self, obj, event): - if event.type() in (QEvent.DragEnter, QEvent.Drop): - mime_data = event.mimeData() - if mime_data.hasUrls(): - event.acceptProposedAction() - - return super().eventFilter(obj, event) - - def select_GPT_model(self): - file_path, _ = QFileDialog.getOpenFileName(self, "选择GPT模型文件", "", "GPT Files (*.ckpt)") - if file_path: - self.GPT_model_input.setText(file_path) - - def select_SoVITS_model(self): - file_path, _ = QFileDialog.getOpenFileName(self, "选择SoVITS模型文件", "", "SoVITS Files (*.pth)") - if file_path: - self.SoVITS_model_input.setText(file_path) - - def select_ref_audio(self): - file_path, _ = QFileDialog.getOpenFileName(self, "选择参考音频文件", "", "Audio Files (*.wav *.mp3)") - if file_path: - self.update_ref_audio(file_path) - - def upload_ref_text(self): - file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)") - if file_path: - with open(file_path, "r", encoding="utf-8") as file: - content = file.read() - self.ref_text_input.setText(content) - - def upload_target_text(self): - file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)") - if file_path: - with open(file_path, "r", encoding="utf-8") as file: - content = file.read() - self.target_text_input.setText(content) - - def select_output_path(self): - options = QFileDialog.Options() - options |= QFileDialog.DontUseNativeDialog - options |= QFileDialog.ShowDirsOnly - - folder_dialog = QFileDialog() - folder_dialog.setOptions(options) - folder_dialog.setFileMode(QFileDialog.Directory) - - if folder_dialog.exec_(): - folder_path = folder_dialog.selectedFiles()[0] - self.output_input.setText(folder_path) - - def update_ref_audio(self, file_path): - self.ref_audio_input.setText(file_path) - - def clear_output(self): - self.output_text.clear() - - def synthesize(self): - GPT_model_path = self.GPT_model_input.text() - SoVITS_model_path = 
self.SoVITS_model_input.text() - ref_audio_path = self.ref_audio_input.text() - language_combobox = self.ref_language_combobox.currentText() - language_combobox = i18n(language_combobox) - ref_text = self.ref_text_input.text() - target_language_combobox = self.target_language_combobox.currentText() - target_language_combobox = i18n(target_language_combobox) - target_text = self.target_text_input.text() - output_path = self.output_input.text() - - if GPT_model_path != self.GPT_Path: - change_gpt_weights(gpt_path=GPT_model_path) - self.GPT_Path = GPT_model_path - if SoVITS_model_path != self.SoVITS_Path: - change_sovits_weights(sovits_path=SoVITS_model_path) - self.SoVITS_Path = SoVITS_model_path - - synthesis_result = get_tts_wav( - ref_wav_path=ref_audio_path, - prompt_text=ref_text, - prompt_language=language_combobox, - text=target_text, - text_language=target_language_combobox, - ) - - result_list = list(synthesis_result) - - if result_list: - last_sampling_rate, last_audio_data = result_list[-1] - output_wav_path = os.path.join(output_path, "output.wav") - sf.write(output_wav_path, last_audio_data, last_sampling_rate) - - result = "Audio saved to " + output_wav_path - - self.status_bar.showMessage("合成完成!输出路径:" + output_wav_path, 5000) - self.output_text.append("处理结果:\n" + result) - - -if __name__ == "__main__": - app = QApplication(sys.argv) - mainWin = GPTSoVITSGUI() - mainWin.show() - sys.exit(app.exec_()) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index a361ed58..e3e44c24 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -1,36 +1,54 @@ -""" -按中英混合识别 -按日英混合识别 -多语种启动切分识别语种 -全部按中文识别 -全部按英文识别 -全部按日文识别 -""" -import psutil -import os - -def set_high_priority(): - """把当前 Python 进程设为 HIGH_PRIORITY_CLASS""" - if os.name != "nt": - return # 仅 Windows 有效 - p = psutil.Process(os.getpid()) - try: - p.nice(psutil.HIGH_PRIORITY_CLASS) - print("已将进程优先级设为 High") - except psutil.AccessDenied: - print("权限不足,无法修改优先级(请用管理员运行)") -set_high_priority() -import json +import argparse +import contextlib import logging import os import re -import sys import traceback import warnings +from functools import partial +from pathlib import Path +from time import time as ttime +from typing import Any +import gradio as gr +import librosa +import numpy as np +import psutil import torch import torchaudio -from text.LangSegmenter import LangSegmenter +from peft import LoraConfig, get_peft_model +from transformers import AutoModelForMaskedLM, AutoTokenizer + +from config import ( + change_choices, + get_dtype, + get_weights_names, + pretrained_sovits_name, +) +from config import ( + infer_device as default_device, +) +from GPT_SoVITS.Accelerate import MLX, PyTorch, T2SEngineProtocol, T2SRequest, backends +from GPT_SoVITS.Accelerate.logger import console +from GPT_SoVITS.feature_extractor import cnhubert +from GPT_SoVITS.module.mel_processing import mel_spectrogram_torch, spectrogram_torch +from GPT_SoVITS.module.models import Generator, SynthesizerTrn, SynthesizerTrnV3 +from GPT_SoVITS.process_ckpt import inspect_version +from GPT_SoVITS.sv import SV +from GPT_SoVITS.text import cleaned_text_to_sequence +from GPT_SoVITS.text.cleaner import clean_text +from GPT_SoVITS.text.LangSegmenter import LangSegmenter +from tools.assets import css, js, top_html +from tools.i18n.i18n import I18nAuto, scan_language_list +from tools.my_utils import DictToAttrRecursive + +with contextlib.suppress(ImportError): + import mlx.utils as mxutils + +warnings.filterwarnings( + 
"ignore", message="MPS: The constant padding of more than 3 dimensions is not currently supported natively." +) +warnings.filterwarnings("ignore", message=".*ComplexHalf support is experimental.*") logging.getLogger("markdown_it").setLevel(logging.ERROR) logging.getLogger("urllib3").setLevel(logging.ERROR) @@ -40,219 +58,254 @@ logging.getLogger("asyncio").setLevel(logging.ERROR) logging.getLogger("charset_normalizer").setLevel(logging.ERROR) logging.getLogger("torchaudio._extension").setLevel(logging.ERROR) logging.getLogger("multipart.multipart").setLevel(logging.ERROR) -warnings.simplefilter(action="ignore", category=FutureWarning) -version = model_version = os.environ.get("version", "v2") +os.environ["TOKENIZERS_PARALLELISM"] = "false" -from config import change_choices, get_weights_names, name2gpt_path, name2sovits_path -SoVITS_names, GPT_names = get_weights_names() -from config import pretrained_sovits_name +def set_high_priority(): + if os.name != "nt": + return + p = psutil.Process(os.getpid()) + with contextlib.suppress(psutil.AccessDenied): + p.nice(psutil.HIGH_PRIORITY_CLASS) + print("已将进程优先级设为 High") + +_LANG_RE = re.compile(r"^[a-z]{2}[_-][A-Z]{2}$") + + +def lang_type(text: str) -> str: + if text == "Auto": + return text + if not _LANG_RE.match(text): + raise argparse.ArgumentTypeError(f"Unspported Format: {text}, Expected ll_CC/ll-CC") + ll, cc = re.split(r"[_-]", text) + language = f"{ll}_{cc}" + if language in scan_language_list(): + return language + else: + return "Auto" + + +def build_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser( + prog="inference_webui", + description=f"PYTHONPATH=. python -s GPT_SoVITS/inference_webui.py zh_CN -b {backends[-1]}", + ) + p.add_argument( + "language", + nargs="?", + default="Auto", + type=lang_type, + help="Language Code, Such as zh_CN, en-US", + ) + p.add_argument( + "--backends", + "-b", + choices=backends, + default=backends[-1], + help="AR Inference Backend", + required=False, + ) + p.add_argument( + "--device", + "-d", + default=str(default_device), + help="Inference Device", + required=False, + ) + p.add_argument( + "--port", + "-p", + default=9872, + type=int, + help="WebUI Binding Port", + required=False, + ) + p.add_argument( + "--share", + "-s", + default=False, + action="store_true", + help="Gradio Share Link", + required=False, + ) + p.add_argument( + "--cnhubert", + default="GPT_SoVITS/pretrained_models/chinese-hubert-base", + help="CNHuBERT Pretrain", + required=False, + ) + p.add_argument( + "--bert", + default="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large", + help="BERT Pretrain", + required=False, + ) + p.add_argument( + "--gpt", + default="", + help="GPT Model", + required=False, + ) + p.add_argument( + "--sovits", + default="", + help="SoVITS Model", + required=False, + ) + + return p + + +args = build_parser().parse_args() + +hps: Any = None +vq_model: SynthesizerTrn | SynthesizerTrnV3 | None = None +t2s_engine: T2SEngineProtocol | None = None + +version = model_version = "v2" path_sovits_v3 = pretrained_sovits_name["v3"] path_sovits_v4 = pretrained_sovits_name["v4"] is_exist_s2gv3 = os.path.exists(path_sovits_v3) is_exist_s2gv4 = os.path.exists(path_sovits_v4) -if os.path.exists("./weight.json"): - pass -else: - with open("./weight.json", "w", encoding="utf-8") as file: - json.dump({"GPT": {}, "SoVITS": {}}, file) - -with open("./weight.json", "r", encoding="utf-8") as file: - weight_data = file.read() - weight_data = json.loads(weight_data) - gpt_path = os.environ.get("gpt_path", 
weight_data.get("GPT", {}).get(version, GPT_names[-1])) - sovits_path = os.environ.get("sovits_path", weight_data.get("SoVITS", {}).get(version, SoVITS_names[0])) - if isinstance(gpt_path, list): - gpt_path = gpt_path[0] - if isinstance(sovits_path, list): - sovits_path = sovits_path[0] - -# print(2333333) -# print(os.environ["gpt_path"]) -# print(gpt_path) -# print(GPT_names) -# print(weight_data) -# print(weight_data.get("GPT", {})) -# print(version)###GPT version里没有s2的v2pro -# print(weight_data.get("GPT", {}).get(version, GPT_names[-1])) - -cnhubert_base_path = os.environ.get("cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base") -bert_path = os.environ.get("bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large") -infer_ttswebui = os.environ.get("infer_ttswebui", 9872) -infer_ttswebui = int(infer_ttswebui) -is_share = os.environ.get("is_share", "False") -is_share = eval(is_share) -if "_CUDA_VISIBLE_DEVICES" in os.environ: - os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"] -is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available() -# is_half=False -punctuation = set(["!", "?", "…", ",", ".", "-", " "]) -import gradio as gr -import librosa -import numpy as np -from feature_extractor import cnhubert -from transformers import AutoModelForMaskedLM, AutoTokenizer - -cnhubert.cnhubert_base_path = cnhubert_base_path - -import random - -from GPT_SoVITS.module.models import Generator, SynthesizerTrn, SynthesizerTrnV3 +cnhubert_base_path = str(args.cnhubert) +bert_path = str(args.bert) +infer_ttswebui = int(args.port) +is_share = bool(args.share) -def set_seed(seed): - if seed == -1: - seed = random.randint(0, 1000000) - seed = int(seed) - random.seed(seed) - os.environ["PYTHONHASHSEED"] = str(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) +i18n = I18nAuto(language=args.language) +ar_backend: str = args.backends +change_choices_i18n = partial(change_choices, i18n=i18n) +SoVITS_names, GPT_names = get_weights_names(i18n) -# set_seed(42) - -from time import time as ttime - -from AR.models.t2s_lightning_module import Text2SemanticLightningModule -from peft import LoraConfig, get_peft_model -from text import cleaned_text_to_sequence -from text.cleaner import clean_text - -from tools.assets import css, js, top_html -from tools.i18n.i18n import I18nAuto, scan_language_list - -language = os.environ.get("language", "Auto") -language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else language -i18n = I18nAuto(language=language) - -# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 确保直接启动推理UI时也能够设置。 - -if torch.cuda.is_available(): - device = "cuda" -else: - device = "cpu" dict_language_v1 = { i18n("中文"): "all_zh", # 全部按中文识别 - i18n("英文"): "en", # 全部按英文识别#######不变 + i18n("英文"): "en", # 全部按英文识别 i18n("日文"): "all_ja", # 全部按日文识别 - i18n("中英混合"): "zh", # 按中英混合识别####不变 - i18n("日英混合"): "ja", # 按日英混合识别####不变 + i18n("中英混合"): "zh", # 按中英混合识别 + i18n("日英混合"): "ja", # 按日英混合识别 i18n("多语种混合"): "auto", # 多语种启动切分识别语种 } dict_language_v2 = { i18n("中文"): "all_zh", # 全部按中文识别 - i18n("英文"): "en", # 全部按英文识别#######不变 + i18n("英文"): "en", # 全部按英文识别 i18n("日文"): "all_ja", # 全部按日文识别 - i18n("粤语"): "all_yue", # 全部按中文识别 + i18n("粤语"): "all_yue", # 全部按粤语识别 i18n("韩文"): "all_ko", # 全部按韩文识别 - i18n("中英混合"): "zh", # 按中英混合识别####不变 - i18n("日英混合"): "ja", # 按日英混合识别####不变 - i18n("粤英混合"): "yue", # 按粤英混合识别####不变 - i18n("韩英混合"): "ko", # 按韩英混合识别####不变 + i18n("中英混合"): "zh", + i18n("日英混合"): "ja", + i18n("粤英混合"): "yue", + 
i18n("韩英混合"): "ko", i18n("多语种混合"): "auto", # 多语种启动切分识别语种 i18n("多语种混合(粤语)"): "auto_yue", # 多语种启动切分识别语种 } dict_language = dict_language_v1 if version == "v1" else dict_language_v2 +punctuation = set(["!", "?", "…", ",", ".", "-", " "]) +splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…"} +v3v4set = {"v3", "v4"} + +infer_device = torch.device(args.device) +device = infer_device if infer_device.type == "cuda" else torch.device("cpu") + +dtype = get_dtype(device.index) +is_half = dtype == torch.float16 + tokenizer = AutoTokenizer.from_pretrained(bert_path) -bert_model = AutoModelForMaskedLM.from_pretrained(bert_path) -if is_half == True: - bert_model = bert_model.half().to(device) -else: - bert_model = bert_model.to(device) +bert_model = AutoModelForMaskedLM.from_pretrained(bert_path).to(infer_device, dtype) + +cnhubert.cnhubert_base_path = cnhubert_base_path +ssl_model = cnhubert.get_model().to(infer_device, dtype) + +spec_min = -12 +spec_max = 2 + + +def norm_spec(x): + return (x - spec_min) / (spec_max - spec_min) * 2 - 1 + + +def denorm_spec(x): + return (x + 1) / 2 * (spec_max - spec_min) + spec_min + + +def mel_fn(x): + return mel_spectrogram_torch( + y=x, + n_fft=1024, + num_mels=100, + sampling_rate=24000, + hop_size=256, + win_size=1024, + fmin=0, + fmax=None, + center=False, + ) + + +def mel_fn_v4(x): + return mel_spectrogram_torch( + y=x, + n_fft=1280, + num_mels=100, + sampling_rate=32000, + hop_size=320, + win_size=1280, + fmin=0, + fmax=None, + center=False, + ) + + +gpt_path = str(args.gpt) or GPT_names[0][-1] +sovits_path = str(args.sovits) or SoVITS_names[0][-1] def get_bert_feature(text, word2ph): - with torch.no_grad(): - inputs = tokenizer(text, return_tensors="pt") - for i in inputs: - inputs[i] = inputs[i].to(device) - res = bert_model(**inputs, output_hidden_states=True) - res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1] + inputs = tokenizer(text, return_tensors="pt") + for i in inputs: + inputs[i] = inputs[i].to(infer_device) + res = bert_model(**inputs, output_hidden_states=True) + res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1] + assert len(word2ph) == len(text) phone_level_feature = [] for i in range(len(word2ph)): repeat_feature = res[i].repeat(word2ph[i], 1) phone_level_feature.append(repeat_feature) - phone_level_feature = torch.cat(phone_level_feature, dim=0) - return phone_level_feature.T - - -class DictToAttrRecursive(dict): - def __init__(self, input_dict): - super().__init__(input_dict) - for key, value in input_dict.items(): - if isinstance(value, dict): - value = DictToAttrRecursive(value) - self[key] = value - setattr(self, key, value) - - def __getattr__(self, item): - try: - return self[item] - except KeyError: - raise AttributeError(f"Attribute {item} not found") - - def __setattr__(self, key, value): - if isinstance(value, dict): - value = DictToAttrRecursive(value) - super(DictToAttrRecursive, self).__setitem__(key, value) - super().__setattr__(key, value) - - def __delattr__(self, item): - try: - del self[item] - except KeyError: - raise AttributeError(f"Attribute {item} not found") - - -ssl_model = cnhubert.get_model() -if is_half == True: - ssl_model = ssl_model.half().to(device) -else: - ssl_model = ssl_model.to(device) - - -###todo:put them to process_ckpt and modify my_save func (save sovits weights), gpt save weights use my_save in process_ckpt -# symbol_version-model_version-if_lora_v3 -from process_ckpt import get_sovits_version_from_path_fast, load_sovits_new - -v3v4set = {"v3", "v4"} + 
phone_level_feature_t = torch.cat(phone_level_feature, dim=0) + return phone_level_feature_t.T def change_sovits_weights(sovits_path, prompt_language=None, text_language=None): - if "!" in sovits_path or "!" in sovits_path: - sovits_path = name2sovits_path[sovits_path] - global vq_model, hps, version, model_version, dict_language, if_lora_v3 - version, model_version, if_lora_v3 = get_sovits_version_from_path_fast(sovits_path) - print(sovits_path, version, model_version, if_lora_v3) + global vq_model, hps, version, model_version, dict_language + model_version, version, is_lora, hps, dict_s2 = inspect_version(sovits_path) + print(sovits_path, version, model_version, is_lora) is_exist = is_exist_s2gv3 if model_version == "v3" else is_exist_s2gv4 path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4 - if if_lora_v3 == True and is_exist == False: - info = path_sovits + "SoVITS %s" % model_version + i18n("底模缺失,无法加载相应 LoRA 权重") + if is_lora is True and is_exist is False: + info = f"{path_sovits} SoVITS {model_version} {i18n('底模缺失,无法加载相应 LoRA 权重')}" gr.Warning(info) - raise FileExistsError(info) + raise FileNotFoundError(info) dict_language = dict_language_v1 if version == "v1" else dict_language_v2 + visible_sample_steps = visible_inp_refs = None if prompt_language is not None and text_language is not None: if prompt_language in list(dict_language.keys()): - prompt_text_update, prompt_language_update = ( - {"__type__": "update"}, - {"__type__": "update", "value": prompt_language}, - ) + prompt_text_update, prompt_language_update = gr.skip(), gr.update(choices=list(dict_language.keys())) else: - prompt_text_update = {"__type__": "update", "value": ""} - prompt_language_update = {"__type__": "update", "value": i18n("中文")} + prompt_text_update = gr.update(value="") + prompt_language_update = gr.update(value=i18n("中文"), choices=list(dict_language.keys())) if text_language in list(dict_language.keys()): - text_update, text_language_update = {"__type__": "update"}, {"__type__": "update", "value": text_language} + text_update, text_language_update = gr.skip(), gr.skip() else: - text_update = {"__type__": "update", "value": ""} - text_language_update = {"__type__": "update", "value": i18n("中文")} + text_update = gr.update(value="") + text_language_update = gr.update(value=i18n("中文"), choices=list(dict_language.keys())) + if model_version in v3v4set: visible_sample_steps = True visible_inp_refs = False @@ -260,41 +313,25 @@ def change_sovits_weights(sovits_path, prompt_language=None, text_language=None) visible_sample_steps = False visible_inp_refs = True yield ( - {"__type__": "update", "choices": list(dict_language.keys())}, - {"__type__": "update", "choices": list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update, - { - "__type__": "update", - "visible": visible_sample_steps, - "value": 32 if model_version == "v3" else 8, - "choices": [4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32], - }, - {"__type__": "update", "visible": visible_inp_refs}, - {"__type__": "update", "value": False, "interactive": True if model_version not in v3v4set else False}, - {"__type__": "update", "visible": True if model_version == "v3" else False}, - {"__type__": "update", "value": i18n("模型加载中,请等待"), "interactive": False}, + gr.update( + visible=visible_sample_steps, + value=32 if model_version == "v3" else 8, + choices=[4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32], + ), + gr.update(visible=visible_inp_refs), + 
gr.update(value=False, interactive=True if model_version not in v3v4set else False), + gr.update(visible=True if model_version == "v3" else False), + gr.update(value=i18n("模型加载中,请等待"), interactive=False), ) - dict_s2 = load_sovits_new(sovits_path) - hps = dict_s2["config"] hps = DictToAttrRecursive(hps) hps.model.semantic_frame_rate = "25hz" - if "enc_p.text_embedding.weight" not in dict_s2["weight"]: - hps.model.version = "v2" # v3model,v2sybomls - elif dict_s2["weight"]["enc_p.text_embedding.weight"].shape[0] == 322: - hps.model.version = "v1" - else: - hps.model.version = "v2" - version = hps.model.version - # print("sovits版本:",hps.model.version) + hps.model.version = model_version if model_version not in v3v4set: - if "Pro" not in model_version: - model_version = version - else: - hps.model.version = model_version vq_model = SynthesizerTrn( hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, @@ -302,31 +339,26 @@ def change_sovits_weights(sovits_path, prompt_language=None, text_language=None) **hps.model, ) else: - hps.model.version = model_version vq_model = SynthesizerTrnV3( hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **hps.model, - ) + ).eval() + if "pretrained" not in sovits_path: - try: + if hasattr(vq_model, "enc_q"): del vq_model.enc_q - except: - pass - if is_half == True: - vq_model = vq_model.half().to(device) - else: - vq_model = vq_model.to(device) - vq_model.eval() - if if_lora_v3 == False: - print("loading sovits_%s" % model_version, vq_model.load_state_dict(dict_s2["weight"], strict=False)) + + if is_lora is False: + console.print(f">> loading sovits_{model_version}", vq_model.load_state_dict(dict_s2["weight"], strict=False)) else: path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4 - print( - "loading sovits_%spretrained_G" % model_version, - vq_model.load_state_dict(load_sovits_new(path_sovits)["weight"], strict=False), - ) + console.print(f">> loading sovits_{model_version}spretrained_G") + dict_pretrain = torch.load(path_sovits)["weight"] + console.print(f">> loading sovits_{model_version}_lora{model_version}") + dict_pretrain.update(dict_s2["weight"]) + state_dict = dict_pretrain lora_rank = dict_s2["lora_rank"] lora_config = LoraConfig( target_modules=["to_k", "to_q", "to_v", "to_out.0"], @@ -334,107 +366,83 @@ def change_sovits_weights(sovits_path, prompt_language=None, text_language=None) lora_alpha=lora_rank, init_lora_weights=True, ) - vq_model.cfm = get_peft_model(vq_model.cfm, lora_config) - print("loading sovits_%s_lora%s" % (model_version, lora_rank)) - vq_model.load_state_dict(dict_s2["weight"], strict=False) - vq_model.cfm = vq_model.cfm.merge_and_unload() - # torch.save(vq_model.state_dict(),"merge_win.pth") + vq_model.cfm = get_peft_model(vq_model.cfm, lora_config) # type: ignore + vq_model.load_state_dict(state_dict, strict=False) + vq_model.cfm = vq_model.cfm.merge_and_unload() # pyright: ignore[reportAttributeAccessIssue, reportCallIssue] vq_model.eval() + vq_model = vq_model.to(infer_device, dtype) + yield ( - {"__type__": "update", "choices": list(dict_language.keys())}, - {"__type__": "update", "choices": list(dict_language.keys())}, - prompt_text_update, - prompt_language_update, - text_update, - text_language_update, - { - "__type__": "update", - "visible": visible_sample_steps, - "value": 32 if model_version == "v3" else 8, - "choices": [4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32], - }, - {"__type__": 
"update", "visible": visible_inp_refs}, - {"__type__": "update", "value": False, "interactive": True if model_version not in v3v4set else False}, - {"__type__": "update", "visible": True if model_version == "v3" else False}, - {"__type__": "update", "value": i18n("合成语音"), "interactive": True}, + gr.skip(), + gr.skip(), + gr.skip(), + gr.skip(), + gr.skip(), + gr.skip(), + gr.skip(), + gr.skip(), + gr.update(value=i18n("合成语音"), interactive=True), ) - with open("./weight.json") as f: - data = f.read() - data = json.loads(data) - data["SoVITS"][version] = sovits_path - with open("./weight.json", "w") as f: - f.write(json.dumps(data)) -try: +with contextlib.suppress(UnboundLocalError): next(change_sovits_weights(sovits_path)) -except: - pass def change_gpt_weights(gpt_path): - if "!" in gpt_path or "!" in gpt_path: - gpt_path = name2gpt_path[gpt_path] - global hz, max_sec, t2s_model, config - hz = 50 - dict_s1 = torch.load(gpt_path, map_location="cpu", weights_only=False) - config = dict_s1["config"] - max_sec = config["data"]["max_sec"] - t2s_model = Text2SemanticLightningModule(config, "****", is_train=False) - t2s_model.load_state_dict(dict_s1["weight"]) - if is_half == True: - t2s_model = t2s_model.half() - t2s_model = t2s_model.to(device) - t2s_model.eval() - # total = sum([param.nelement() for param in t2s_model.parameters()]) - # print("Number of parameter: %.2fM" % (total / 1e6)) - with open("./weight.json") as f: - data = f.read() - data = json.loads(data) - data["GPT"][version] = gpt_path - with open("./weight.json", "w") as f: - f.write(json.dumps(data)) + global t2s_engine, config + + if "mlx" in ar_backend.lower(): + t2s_engine = MLX.T2SEngineMLX( + MLX.T2SEngineMLX.load_decoder(Path(gpt_path), backend=ar_backend), + "mx.gpu" if infer_device.type != "cpu" else "mx.cpu", + dtype=dtype, + ) + # t2s_engine.decoder_model.compile() + total = sum((p[-1].size for p in mxutils.tree_flatten(t2s_engine.decoder_model.parameters()))) # type: ignore + else: + t2s_engine = PyTorch.T2SEngineTorch( + PyTorch.T2SEngineTorch.load_decoder(Path(gpt_path), backend=ar_backend), + device, + dtype=dtype, + ) + # t2s_engine.decoder_model.compile() + total = sum(p.numel() for p in t2s_engine.decoder_model.parameters()) + console.print(">> Number of parameter: %.2fM" % (total / 1e6)) change_gpt_weights(gpt_path) -os.environ["HF_ENDPOINT"] = "https://hf-mirror.com" -import torch - -now_dir = os.getcwd() def clean_hifigan_model(): global hifigan_model if hifigan_model: hifigan_model = hifigan_model.cpu() - hifigan_model = None - try: + del hifigan_model + if torch.cuda.is_available(): torch.cuda.empty_cache() - except: - pass + hifigan_model = None def clean_bigvgan_model(): global bigvgan_model if bigvgan_model: bigvgan_model = bigvgan_model.cpu() - bigvgan_model = None - try: + del bigvgan_model + if torch.cuda.is_available(): torch.cuda.empty_cache() - except: - pass + bigvgan_model = None def clean_sv_cn_model(): global sv_cn_model if sv_cn_model: sv_cn_model.embedding_model = sv_cn_model.embedding_model.cpu() - sv_cn_model = None - try: + del sv_cn_model + if torch.cuda.is_available(): torch.cuda.empty_cache() - except: - pass + sv_cn_model = None def init_bigvgan(): @@ -442,18 +450,14 @@ def init_bigvgan(): from BigVGAN import bigvgan bigvgan_model = bigvgan.BigVGAN.from_pretrained( - "%s/GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x" % (now_dir,), + "./GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x", use_cuda_kernel=False, ) # if True, 
RuntimeError: Ninja is required to load C++ extensions # remove weight norm in the model and set to eval mode bigvgan_model.remove_weight_norm() - bigvgan_model = bigvgan_model.eval() + bigvgan_model = bigvgan_model.to(infer_device, dtype).eval() clean_hifigan_model() clean_sv_cn_model() - if is_half == True: - bigvgan_model = bigvgan_model.half().to(device) - else: - bigvgan_model = bigvgan_model.to(device) def init_hifigan(): @@ -472,25 +476,20 @@ def init_hifigan(): hifigan_model.eval() hifigan_model.remove_weight_norm() state_dict_g = torch.load( - "%s/GPT_SoVITS/pretrained_models/gsv-v4-pretrained/vocoder.pth" % (now_dir,), + "./GPT_SoVITS/pretrained_models/gsv-v4-pretrained/vocoder.pth", map_location="cpu", weights_only=False, ) - print("loading vocoder", hifigan_model.load_state_dict(state_dict_g)) + console.print(">> loading vocoder", hifigan_model.load_state_dict(state_dict_g)) clean_bigvgan_model() clean_sv_cn_model() - if is_half == True: - hifigan_model = hifigan_model.half().to(device) - else: - hifigan_model = hifigan_model.to(device) - -from sv import SV + hifigan_model = hifigan_model.to(infer_device, dtype) def init_sv_cn(): global hifigan_model, bigvgan_model, sv_cn_model - sv_cn_model = SV(device, is_half) + sv_cn_model = SV(infer_device, is_half) clean_bigvgan_model() clean_hifigan_model() @@ -508,31 +507,23 @@ resample_transform_dict = {} def resample(audio_tensor, sr0, sr1, device): global resample_transform_dict - key = "%s-%s-%s" % (sr0, sr1, str(device)) + key = f"{sr0}-{sr1}-{device}" if key not in resample_transform_dict: resample_transform_dict[key] = torchaudio.transforms.Resample(sr0, sr1).to(device) return resample_transform_dict[key](audio_tensor) def get_spepc(hps, filename, dtype, device, is_v2pro=False): - # audio = load_audio(filename, int(hps.data.sampling_rate)) - - # audio, sampling_rate = librosa.load(filename, sr=int(hps.data.sampling_rate)) - # audio = torch.FloatTensor(audio) - sr1 = int(hps.data.sampling_rate) - audio, sr0 = torchaudio.load(filename) - if sr0 != sr1: - audio = audio.to(device) - if audio.shape[0] == 2: - audio = audio.mean(0).unsqueeze(0) - audio = resample(audio, sr0, sr1, device) - else: - audio = audio.to(device) - if audio.shape[0] == 2: - audio = audio.mean(0).unsqueeze(0) + audio, sr0 = torchaudio.load_with_torchcodec(filename) + audio = audio.to(device) - maxx = audio.abs().max() + if sr0 != sr1: + audio = resample(audio, sr0, sr1, device) + if audio.shape[0] > 1: + audio = audio.mean(0).unsqueeze(0) + + maxx = float(audio.abs().max()) if maxx > 1: audio /= min(2, maxx) spec = spectrogram_torch( @@ -544,7 +535,7 @@ def get_spepc(hps, filename, dtype, device, is_v2pro=False): center=False, ) spec = spec.to(dtype) - if is_v2pro == True: + if is_v2pro is True: audio = resample(audio, sr1, 16000, device).to(dtype) return spec, audio @@ -556,9 +547,6 @@ def clean_text_inf(text, language, version): return phones, word2ph, norm_text -dtype = torch.float16 if is_half == True else torch.float32 - - def get_bert_inf(phones, word2ph, norm_text, language): language = language.replace("all_", "") if language == "zh": @@ -566,58 +554,38 @@ def get_bert_inf(phones, word2ph, norm_text, language): else: bert = torch.zeros( (1024, len(phones)), - dtype=torch.float16 if is_half == True else torch.float32, + dtype=torch.float16 if is_half is True else torch.float32, ).to(device) return bert -splits = { - ",", - "。", - "?", - "!", - ",", - ".", - "?", - "!", - "~", - ":", - ":", - "—", - "…", -} - - def get_first(text): pattern = "[" + 
"".join(re.escape(sep) for sep in splits) + "]" text = re.split(pattern, text)[0].strip() return text -from text import chinese - - def get_phones_and_bert(text, language, version, final=False): - text = re.sub(r' {2,}', ' ', text) + text = re.sub(r" {2,}", " ", text) textlist = [] langlist = [] if language == "all_zh": - for tmp in LangSegmenter.getTexts(text,"zh"): + for tmp in LangSegmenter.getTexts(text, "zh"): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "all_yue": - for tmp in LangSegmenter.getTexts(text,"zh"): + for tmp in LangSegmenter.getTexts(text, "zh"): if tmp["lang"] == "zh": tmp["lang"] = "yue" langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "all_ja": - for tmp in LangSegmenter.getTexts(text,"ja"): + for tmp in LangSegmenter.getTexts(text, "ja"): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "all_ko": - for tmp in LangSegmenter.getTexts(text,"ko"): + for tmp in LangSegmenter.getTexts(text, "ko"): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "en": @@ -667,48 +635,6 @@ def get_phones_and_bert(text, language, version, final=False): return phones, bert.to(dtype), norm_text -from module.mel_processing import mel_spectrogram_torch, spectrogram_torch - -spec_min = -12 -spec_max = 2 - - -def norm_spec(x): - return (x - spec_min) / (spec_max - spec_min) * 2 - 1 - - -def denorm_spec(x): - return (x + 1) / 2 * (spec_max - spec_min) + spec_min - - -mel_fn = lambda x: mel_spectrogram_torch( - x, - **{ - "n_fft": 1024, - "win_size": 1024, - "hop_size": 256, - "num_mels": 100, - "sampling_rate": 24000, - "fmin": 0, - "fmax": None, - "center": False, - }, -) -mel_fn_v4 = lambda x: mel_spectrogram_torch( - x, - **{ - "n_fft": 1280, - "win_size": 1280, - "hop_size": 320, - "num_mels": 100, - "sampling_rate": 32000, - "fmin": 0, - "fmax": None, - "center": False, - }, -) - - def merge_short_text_in_array(texts, threshold): if (len(texts)) < 2: return texts @@ -732,20 +658,18 @@ sr_model = None def audio_sr(audio, sr): global sr_model - if sr_model == None: + if sr_model is None: from tools.audio_sr import AP_BWE try: - sr_model = AP_BWE(device, DictToAttrRecursive) + sr_model = AP_BWE(infer_device, DictToAttrRecursive) except FileNotFoundError: gr.Warning(i18n("你没有下载超分模型的参数,因此不进行超分。如想超分请先参照教程把文件下载好")) - return audio.cpu().detach().numpy(), sr + return audio.cpu().numpy(), sr return sr_model(audio, sr) -##ref_wav_path+prompt_text+prompt_language+text(单个)+text_language+top_k+top_p+temperature -# cache_tokens={}#暂未实现清理机制 -cache = {} +cache: dict[int, Any] = {} def get_tts_wav( @@ -766,7 +690,7 @@ def get_tts_wav( if_sr=False, pause_second=0.3, ): - global cache + torch.set_grad_enabled(False) if ref_wav_path: pass else: @@ -794,36 +718,38 @@ def get_tts_wav( prompt_text = prompt_text.strip("\n") if prompt_text[-1] not in splits: prompt_text += "。" if prompt_language != "en" else "." - print(i18n("实际输入的参考文本:"), prompt_text) + print(">>", i18n("实际输入的参考文本:"), prompt_text) text = text.strip("\n") # if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." 
+ text - print(i18n("实际输入的目标文本:"), text) + print(">>", i18n("实际输入的目标文本:"), text) zero_wav = np.zeros( int(hps.data.sampling_rate * pause_second), - dtype=np.float16 if is_half == True else np.float32, + dtype=np.float16 if is_half is True else np.float32, ) zero_wav_torch = torch.from_numpy(zero_wav) - if is_half == True: - zero_wav_torch = zero_wav_torch.half().to(device) + if is_half is True: + zero_wav_torch = zero_wav_torch.half().to(infer_device) else: - zero_wav_torch = zero_wav_torch.to(device) + zero_wav_torch = zero_wav_torch.to(infer_device) if not ref_free: - with torch.no_grad(): - wav16k, sr = librosa.load(ref_wav_path, sr=16000) - if wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000: - gr.Warning(i18n("参考音频在3~10秒范围外,请更换!")) - raise OSError(i18n("参考音频在3~10秒范围外,请更换!")) - wav16k = torch.from_numpy(wav16k) - if is_half == True: - wav16k = wav16k.half().to(device) - else: - wav16k = wav16k.to(device) - wav16k = torch.cat([wav16k, zero_wav_torch]) - ssl_content = ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2) # .float() - codes = vq_model.extract_latent(ssl_content) - prompt_semantic = codes[0, 0] - prompt = prompt_semantic.unsqueeze(0).to(device) + assert vq_model + wav16k, sr = librosa.load(ref_wav_path, sr=16000) + if wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000: + gr.Warning(i18n("参考音频在3~10秒范围外,请更换!")) + raise OSError(i18n("参考音频在3~10秒范围外,请更换!")) + wav16k_t = torch.from_numpy(wav16k) + if is_half is True: + wav16k_t = wav16k_t.half().to(infer_device) + else: + wav16k_t = wav16k_t.to(infer_device) + wav16k_t = torch.cat([wav16k_t, zero_wav_torch]) + ssl_content = ssl_model.model(wav16k_t.unsqueeze(0))["last_hidden_state"].transpose(1, 2) # .float() + codes = vq_model.extract_latent(ssl_content) + prompt_semantic = codes[0, 0] + prompt = prompt_semantic.unsqueeze(0).to(device) + else: + prompt = torch.zeros((1, 0)).to(device, torch.int32) t1 = ttime() t.append(t1 - t0) @@ -840,14 +766,19 @@ def get_tts_wav( text = cut5(text) while "\n\n" in text: text = text.replace("\n\n", "\n") - print(i18n("实际输入的目标文本(切句后):"), text) texts = text.split("\n") texts = process_text(texts) texts = merge_short_text_in_array(texts, 5) audio_opt = [] - ###s2v3暂不支持ref_free + # s2v3暂不支持ref_free if not ref_free: - phones1, bert1, norm_text1 = get_phones_and_bert(prompt_text, prompt_language, version) + phones1, bert1, _ = get_phones_and_bert(prompt_text, prompt_language, version) + else: + phones1, bert1 = [], torch.zeros(1024, 0).to(device, dtype) + + infer_len: list[int] = [] + infer_time: list[float] = [] + assert vq_model for i_text, text in enumerate(texts): # 解决输入目标文本的空行导致报错的问题 @@ -855,84 +786,97 @@ def get_tts_wav( continue if text[-1] not in splits: text += "。" if text_language != "en" else "." 
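The reference-prompt path in the patched get_tts_wav boils down to: load the reference audio at 16 kHz, enforce the 3~10 s limit, run CNHuBERT to get SSL features, and quantize them with the SoVITS VQ module to obtain the semantic prompt tokens handed to the T2S engine (ref_free mode simply passes an empty prompt). A minimal sketch of that flow, assuming the module-level ssl_model, vq_model, infer_device and dtype set up earlier in this file; the trailing-silence padding done by the real code is omitted here:

```python
import librosa
import torch


def extract_prompt_tokens(ref_wav_path: str) -> torch.Tensor:
    # Reference audio is always resampled to 16 kHz for CNHuBERT.
    wav16k, _ = librosa.load(ref_wav_path, sr=16000)
    if wav16k.shape[0] < 48000 or wav16k.shape[0] > 160000:  # 3~10 s at 16 kHz
        raise OSError("参考音频在3~10秒范围外,请更换!")
    wav = torch.from_numpy(wav16k).to(infer_device, dtype)
    # CNHuBERT SSL features, shaped (B, C, T) after the transpose.
    ssl = ssl_model.model(wav.unsqueeze(0))["last_hidden_state"].transpose(1, 2)
    # Quantize to semantic codes; codes[0, 0] is the prompt token sequence.
    codes = vq_model.extract_latent(ssl)
    return codes[0, 0].unsqueeze(0)  # (1, T) prompt for T2SRequest
```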
- print(i18n("实际输入的目标文本(每句):"), text) + print(">>", i18n("实际输入的目标文本(每句):"), text) phones2, bert2, norm_text2 = get_phones_and_bert(text, text_language, version) - print(i18n("前端处理后的文本(每句):"), norm_text2) - if not ref_free: - bert = torch.cat([bert1, bert2], 1) - all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0) - else: - bert = bert2 - all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0) + print(">>", i18n("前端处理后的文本(每句):"), norm_text2) + + bert = torch.cat([bert1, bert2], 1) + all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0) bert = bert.to(device).unsqueeze(0) all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device) t2 = ttime() - # cache_key="%s-%s-%s-%s-%s-%s-%s-%s"%(ref_wav_path,prompt_text,prompt_language,text,text_language,top_k,top_p,temperature) - # print(cache.keys(),if_freeze) - if i_text in cache and if_freeze == True: + if i_text in cache and if_freeze is True: pred_semantic = cache[i_text] else: - with torch.no_grad(): - pred_semantic, idx = t2s_model.model.infer_panel( - all_phoneme_ids, - all_phoneme_len, - None if ref_free else prompt, - bert, - # prompt_phone_len=ph_offset, - top_k=top_k, - top_p=top_p, - temperature=temperature, - early_stop_num=hz * max_sec, - ) - pred_semantic = pred_semantic[:, -idx:].unsqueeze(0) - cache[i_text] = pred_semantic + t2s_request = T2SRequest( + [all_phoneme_ids.squeeze(0)], + all_phoneme_len, + prompt, + [bert.squeeze(0)], + valid_length=1, + top_k=top_k, + top_p=top_p, + temperature=temperature, + early_stop_num=1500, + use_cuda_graph=torch.cuda.is_available(), + # debug=True, + ) + assert t2s_engine + t2s_result = t2s_engine.generate(t2s_request) + if t2s_result.exception is not None: + console.print(t2s_result.traceback) + raise RuntimeError() + pred_semantic_list = t2s_result.result + assert pred_semantic_list, t2s_result.traceback + pred_semantic = pred_semantic_list[0].unsqueeze(0).to(infer_device) + infer_len.append(pred_semantic.shape[-1]) + infer_time.append(t2s_result.infer_speed[-1]) + + cache[i_text] = pred_semantic t3 = ttime() is_v2pro = model_version in {"v2Pro", "v2ProPlus"} - # print(23333,is_v2pro,model_version) - ###v3不存在以下逻辑和inp_refs + + sv_emb: list[torch.Tensor] = [] if model_version not in v3v4set: refers = [] - if is_v2pro: - sv_emb = [] - if sv_cn_model == None: - init_sv_cn() + if is_v2pro and sv_cn_model is None: + init_sv_cn() if inp_refs: for path in inp_refs: - try: #####这里加上提取sv的逻辑,要么一堆sv一堆refer,要么单个sv单个refer - refer, audio_tensor = get_spepc(hps, path.name, dtype, device, is_v2pro) + try: # 这里加上提取sv的逻辑,要么一堆sv一堆refer,要么单个sv单个refer + refer, audio_tensor = get_spepc(hps, path.name, dtype, infer_device, is_v2pro) refers.append(refer) if is_v2pro: - sv_emb.append(sv_cn_model.compute_embedding3(audio_tensor)) - except: + assert sv_cn_model + sv_emb.append(sv_cn_model.compute_embedding(audio_tensor)) + except Exception as e: + print(e) traceback.print_exc() if len(refers) == 0: - refers, audio_tensor = get_spepc(hps, ref_wav_path, dtype, device, is_v2pro) + refers, audio_tensor = get_spepc(hps, ref_wav_path, dtype, infer_device, is_v2pro) refers = [refers] if is_v2pro: - sv_emb = [sv_cn_model.compute_embedding3(audio_tensor)] + assert sv_cn_model + sv_emb = [sv_cn_model.compute_embedding(audio_tensor)] if is_v2pro: audio = vq_model.decode( - pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers, speed=speed, sv_emb=sv_emb - )[0][0] + pred_semantic, + torch.LongTensor(phones2).to(infer_device).unsqueeze(0), + 
refers, + speed=speed, + sv_emb=sv_emb, + )[0][0] # type: ignore else: audio = vq_model.decode( - pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers, speed=speed - )[0][0] + pred_semantic, + torch.LongTensor(phones2).to(infer_device).unsqueeze(0), + refers, + speed=speed, + )[0][0] # type: ignore else: - refer, audio_tensor = get_spepc(hps, ref_wav_path, dtype, device) - phoneme_ids0 = torch.LongTensor(phones1).to(device).unsqueeze(0) - phoneme_ids1 = torch.LongTensor(phones2).to(device).unsqueeze(0) - fea_ref, ge = vq_model.decode_encp(prompt.unsqueeze(0), phoneme_ids0, refer) - ref_audio, sr = torchaudio.load(ref_wav_path) - ref_audio = ref_audio.to(device).float() - if ref_audio.shape[0] == 2: - ref_audio = ref_audio.mean(0).unsqueeze(0) + refer, audio_tensor = get_spepc(hps, ref_wav_path, dtype, infer_device) + phoneme_ids0 = torch.LongTensor(phones1).to(infer_device).unsqueeze(0) + phoneme_ids1 = torch.LongTensor(phones2).to(infer_device).unsqueeze(0) + fea_ref, ge = vq_model.decode_encp(prompt.unsqueeze(0), phoneme_ids0, refer) # type: ignore tgt_sr = 24000 if model_version == "v3" else 32000 + ref_audio, sr = torchaudio.load_with_torchcodec(ref_wav_path) + ref_audio = ref_audio.to(infer_device) if sr != tgt_sr: - ref_audio = resample(ref_audio, sr, tgt_sr, device) - # print("ref_audio",ref_audio.abs().mean()) + ref_audio = resample(ref_audio, sr, tgt_sr, infer_device) + if ref_audio.shape[0] > 1: + ref_audio = ref_audio.mean(0).unsqueeze(0) mel2 = mel_fn(ref_audio) if model_version == "v3" else mel_fn_v4(ref_audio) mel2 = norm_spec(mel2) T_min = min(mel2.shape[2], fea_ref.shape[2]) @@ -946,7 +890,7 @@ def get_tts_wav( T_min = Tref chunk_len = Tchunk - T_min mel2 = mel2.to(dtype) - fea_todo, ge = vq_model.decode_encp(pred_semantic, phoneme_ids1, refer, ge, speed) + fea_todo, ge = vq_model.decode_encp(pred_semantic, phoneme_ids1, refer, ge, speed) # type: ignore cfm_resss = [] idx = 0 while 1: @@ -955,7 +899,7 @@ def get_tts_wav( break idx += chunk_len fea = torch.cat([fea_ref, fea_todo_chunk], 2).transpose(2, 1) - cfm_res = vq_model.cfm.inference( + cfm_res = vq_model.cfm.inference( # type: ignore fea, torch.LongTensor([fea.size(1)]).to(fea.device), mel2, sample_steps, inference_cfg_rate=0 ) cfm_res = cfm_res[:, :, mel2.shape[2] :] @@ -965,15 +909,15 @@ def get_tts_wav( cfm_res = torch.cat(cfm_resss, 2) cfm_res = denorm_spec(cfm_res) if model_version == "v3": - if bigvgan_model == None: + if bigvgan_model is None: init_bigvgan() else: # v4 - if hifigan_model == None: + if hifigan_model is None: init_hifigan() vocoder_model = bigvgan_model if model_version == "v3" else hifigan_model with torch.inference_mode(): - wav_gen = vocoder_model(cfm_res) - audio = wav_gen[0][0] # .cpu().detach().numpy() + wav_gen = vocoder_model(cfm_res) # type: ignore + audio = wav_gen[0][0] max_audio = torch.abs(audio).max() # 简单防止16bit爆音 if max_audio > 1: audio = audio / max_audio @@ -982,23 +926,42 @@ def get_tts_wav( t4 = ttime() t.extend([t2 - t1, t3 - t2, t4 - t3]) t1 = ttime() - print("%.3f\t%.3f\t%.3f\t%.3f" % (t[0], sum(t[1::3]), sum(t[2::3]), sum(t[3::3]))) - audio_opt = torch.cat(audio_opt, 0) # np.concatenate + + audio_opt_t = torch.cat(audio_opt, 0) # np.concatenate if model_version in {"v1", "v2", "v2Pro", "v2ProPlus"}: opt_sr = 32000 elif model_version == "v3": opt_sr = 24000 else: opt_sr = 48000 # v4 - if if_sr == True and opt_sr == 24000: - print(i18n("音频超分中")) - audio_opt, opt_sr = audio_sr(audio_opt.unsqueeze(0), opt_sr) - max_audio = np.abs(audio_opt).max() + if if_sr 
is True and opt_sr == 24000: + print(">>", i18n("音频超分中")) + audio_opt_n, opt_sr = audio_sr(audio_opt_t.unsqueeze(0), opt_sr) + max_audio = np.abs(audio_opt_n).max() if max_audio > 1: - audio_opt /= max_audio + audio_opt_n /= max_audio else: - audio_opt = audio_opt.cpu().detach().numpy() - yield opt_sr, (audio_opt * 32767).astype(np.int16) + audio_opt_n = audio_opt_t.cpu().numpy() + + t0 = t[0] + t1 = sum(t[1::3]) + t2 = sum(t[2::3]) + t3 = sum(t[3::3]) + + infer_speed_avg = sum(infer_len) / sum(infer_time) + rtf_value = sum(t) / (audio_opt_n.__len__() / opt_sr) + + console.print(f">> Time Stamps: {t0:.3f}\t{t1:.3f}\t{t2:.3f}\t{t3:.3f}") + console.print(f">> Infer Speed: {infer_speed_avg:.2f} Token/s") + console.print(f">> RTF: {rtf_value:.2f}") + + gr.Info(f"{infer_speed_avg:.2f} Token/s", title="Infer Speed") + gr.Info(f"{rtf_value:.2f}", title="RTF") + + if torch.cuda.is_available(): + torch.cuda.empty_cache() + + yield opt_sr, (audio_opt_n * 32767).astype(np.int16) def split(todo_text): @@ -1023,7 +986,7 @@ def split(todo_text): def cut1(inp): inp = inp.strip("\n") inps = split(inp) - split_idx = list(range(0, len(inps), 4)) + split_idx: list[int | None] = list(range(0, len(inps) + 1, 4)) split_idx[-1] = None if len(split_idx) > 1: opts = [] @@ -1052,8 +1015,7 @@ def cut2(inp): tmp_str = "" if tmp_str != "": opts.append(tmp_str) - # print(opts) - if len(opts) > 1 and len(opts[-1]) < 50: ##如果最后一个太短了,和前一个合一起 + if len(opts) > 1 and len(opts[-1]) < 50: # 如果最后一个太短了,和前一个合一起 opts[-2] = opts[-2] + opts[-1] opts = opts[:-1] opts = [item for item in opts if not set(item).issubset(punctuation)] @@ -1062,7 +1024,7 @@ def cut2(inp): def cut3(inp): inp = inp.strip("\n") - opts = ["%s" % item for item in inp.strip("。").split("。")] + opts = inp.strip("。").split("。") opts = [item for item in opts if not set(item).issubset(punctuation)] return "\n".join(opts) @@ -1099,14 +1061,6 @@ def cut5(inp): return "\n".join(opt) -def custom_sort_key(s): - # 使用正则表达式提取字符串中的数字部分和非数字部分 - parts = re.split("(\d+)", s) - # 将数字部分转换为整数,非数字部分保持不变 - parts = [int(part) if part.isdigit() else part for part in parts] - return parts - - def process_text(texts): _text = [] if all(text in [None, " ", "\n", ""] for text in texts): @@ -1139,215 +1093,207 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css ), elem_classes="markdown", ) - with gr.Group(): - gr.Markdown(html_center(i18n("模型切换"), "h3")) - with gr.Row(): - GPT_dropdown = gr.Dropdown( - label=i18n("GPT模型列表"), - choices=sorted(GPT_names, key=custom_sort_key), - value=gpt_path, - interactive=True, - scale=14, - ) - SoVITS_dropdown = gr.Dropdown( - label=i18n("SoVITS模型列表"), - choices=sorted(SoVITS_names, key=custom_sort_key), - value=sovits_path, - interactive=True, - scale=14, - ) + gr.Markdown(html_center(i18n("模型切换"), "h3")) + with gr.Row(equal_height=True): + with gr.Column(scale=2): + with gr.Row(equal_height=True): + GPT_dropdown = gr.Dropdown( + label=i18n("GPT模型列表"), + choices=GPT_names, + value=gpt_path, + interactive=True, + ) + SoVITS_dropdown = gr.Dropdown( + label=i18n("SoVITS模型列表"), + choices=SoVITS_names, + value=sovits_path, + interactive=True, + ) + with gr.Column(scale=1): refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary", scale=14) - refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown]) - gr.Markdown(html_center(i18n("*请上传并填写参考信息"), "h3")) - with gr.Row(): - inp_ref = gr.Audio(label=i18n("请上传3~10秒内参考音频,超过会报错!"), type="filepath", scale=13) - with gr.Column(scale=13): - 
ref_text_free = gr.Checkbox( - label=i18n("开启无参考文本模式。不填参考文本亦相当于开启。") - + i18n("v3暂不支持该模式,使用了会报错。"), - value=False, - interactive=True if model_version not in v3v4set else False, - show_label=True, - scale=1, - ) - gr.Markdown( - html_left( - i18n("使用无参考文本模式时建议使用微调的GPT") - + "
" - + i18n("听不清参考音频说的啥(不晓得写啥)可以开。开启后无视填写的参考文本。") + refresh_button.click(fn=change_choices_i18n, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown]) + gr.Markdown(html_center(i18n("*请上传并填写参考信息"), "h3")) + with gr.Row(equal_height=True): + with gr.Column(scale=2): + with gr.Row(equal_height=True): + with gr.Column(scale=1): + inp_ref = gr.Audio( + label=i18n("请上传3~10秒内参考音频,超过会报错!"), + type="filepath", + sources="upload", + scale=13, + editable=False, + waveform_options={"show_recording_waveform": False}, ) - ) - prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="", lines=5, max_lines=5, scale=1) - with gr.Column(scale=14): - prompt_language = gr.Dropdown( - label=i18n("参考音频的语种"), - choices=list(dict_language.keys()), - value=i18n("中文"), - ) - inp_refs = ( - gr.File( - label=i18n( - "可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。" - ), - file_count="multiple", + with gr.Column(scale=1): + gr.Markdown( + html_center( + i18n("使用无参考文本模式时建议使用微调的GPT") + + "
" + + i18n("听不清参考音频说的啥(不晓得写啥)可以开。开启后无视填写的参考文本。") + ) ) - if model_version not in v3v4set - else gr.File( - label=i18n( - "可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。" - ), - file_count="multiple", - visible=False, - ) - ) - sample_steps = ( - gr.Radio( - label=i18n("采样步数,如果觉得电,提高试试,如果觉得慢,降低试试"), - value=32 if model_version == "v3" else 8, - choices=[4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32], - visible=True, - ) - if model_version in v3v4set - else gr.Radio( - label=i18n("采样步数,如果觉得电,提高试试,如果觉得慢,降低试试"), - choices=[4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32], - visible=False, - value=32 if model_version == "v3" else 8, - ) - ) - if_sr_Checkbox = gr.Checkbox( - label=i18n("v3输出如果觉得闷可以试试开超分"), - value=False, - interactive=True, - show_label=True, - visible=False if model_version != "v3" else True, - ) - gr.Markdown(html_center(i18n("*请填写需要合成的目标文本和语种模式"), "h3")) - with gr.Row(): - with gr.Column(scale=13): - text = gr.Textbox(label=i18n("需要合成的文本"), value="", lines=26, max_lines=26) - with gr.Column(scale=7): - text_language = gr.Dropdown( - label=i18n("需要合成的语种") + i18n(".限制范围越小判别效果越好。"), - choices=list(dict_language.keys()), - value=i18n("中文"), - scale=1, - ) - how_to_cut = gr.Dropdown( - label=i18n("怎么切"), - choices=[ - i18n("不切"), - i18n("凑四句一切"), - i18n("凑50字一切"), - i18n("按中文句号。切"), - i18n("按英文句号.切"), - i18n("按标点符号切"), - ], - value=i18n("凑四句一切"), - interactive=True, - scale=1, - ) - gr.Markdown(value=html_center(i18n("语速调整,高为更快"))) - if_freeze = gr.Checkbox( - label=i18n("是否直接对上次合成结果调整语速和音色。防止随机性。"), - value=False, - interactive=True, - show_label=True, - scale=1, - ) - with gr.Row(): - speed = gr.Slider( - minimum=0.6, maximum=1.65, step=0.05, label=i18n("语速"), value=1, interactive=True, scale=1 - ) - pause_second_slider = gr.Slider( - minimum=0.1, - maximum=0.5, - step=0.01, - label=i18n("句间停顿秒数"), - value=0.3, - interactive=True, + ref_text_free = gr.Checkbox( + label=i18n("开启无参考文本模式"), + info=i18n("不填参考文本亦相当于开启") + ", " + i18n("v3暂不支持该模式,使用了会报错。"), + value=False, + interactive=True if model_version not in v3v4set else False, + show_label=True, scale=1, ) - gr.Markdown(html_center(i18n("GPT采样参数(无参考文本时不要太低。不懂就用默认):"))) - top_k = gr.Slider( - minimum=1, maximum=100, step=1, label=i18n("top_k"), value=15, interactive=True, scale=1 - ) - top_p = gr.Slider( - minimum=0, maximum=1, step=0.05, label=i18n("top_p"), value=1, interactive=True, scale=1 - ) - temperature = gr.Slider( - minimum=0, maximum=1, step=0.05, label=i18n("temperature"), value=1, interactive=True, scale=1 - ) - # with gr.Column(): - # gr.Markdown(value=i18n("手工调整音素。当音素框不为空时使用手工音素输入推理,无视目标文本框。")) - # phoneme=gr.Textbox(label=i18n("音素框"), value="") - # get_phoneme_button = gr.Button(i18n("目标文本转音素"), variant="primary") - with gr.Row(): - inference_button = gr.Button(value=i18n("合成语音"), variant="primary", size="lg", scale=25) - output = gr.Audio(label=i18n("输出的语音"), scale=14) + prompt_language = gr.Dropdown( + label="", + info=i18n("参考音频的语种"), + choices=list(dict_language.keys()), + value=i18n("中文"), + ) + prompt_text = gr.Textbox(label="", info=i18n("参考音频的文本"), value="", lines=3, max_lines=3) - inference_button.click( - get_tts_wav, - [ - inp_ref, - prompt_text, - prompt_language, - text, - text_language, - how_to_cut, - top_k, - top_p, - temperature, - ref_text_free, - speed, - if_freeze, - inp_refs, - sample_steps, - if_sr_Checkbox, - pause_second_slider, - ], - [output], - ) - SoVITS_dropdown.change( - change_sovits_weights, - 
[SoVITS_dropdown, prompt_language, text_language], - [ - prompt_language, - text_language, - prompt_text, - prompt_language, - text, - text_language, - sample_steps, - inp_refs, - ref_text_free, - if_sr_Checkbox, - inference_button, - ], - ) - GPT_dropdown.change(change_gpt_weights, [GPT_dropdown], []) + with gr.Column(scale=1): + inp_refs = ( + gr.File( + label=i18n( + "可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。" + ), + file_count="multiple", + ) + if model_version not in v3v4set + else gr.File( + label=i18n( + "可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。" + ), + file_count="multiple", + visible=False, + ) + ) + sample_steps = ( + gr.Radio( + label=i18n("采样步数,如果觉得电,提高试试,如果觉得慢,降低试试"), + value=32 if model_version == "v3" else 8, + choices=[4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32], + visible=True, + ) + if model_version in v3v4set + else gr.Radio( + label=i18n("采样步数,如果觉得电,提高试试,如果觉得慢,降低试试"), + choices=[4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32], + visible=False, + value=32 if model_version == "v3" else 8, + ) + ) + if_sr_Checkbox = gr.Checkbox( + label=i18n("v3输出如果觉得闷可以试试开超分"), + value=False, + interactive=True, + show_label=True, + visible=False if model_version != "v3" else True, + ) + gr.Markdown(html_center(i18n("*请填写需要合成的目标文本和语种模式"), "h3")) + with gr.Row(equal_height=True): + with gr.Column(scale=2): + text = gr.Textbox(label=i18n("需要合成的文本"), value="", lines=30, max_lines=40) + with gr.Column(scale=1): + text_language = gr.Dropdown( + label=i18n("需要合成的语种") + i18n(".限制范围越小判别效果越好。"), + choices=list(dict_language.keys()), + value=i18n("中文"), + scale=1, + ) + how_to_cut = gr.Dropdown( + label=i18n("怎么切"), + choices=[ + i18n("不切"), + i18n("凑四句一切"), + i18n("凑50字一切"), + i18n("按中文句号。切"), + i18n("按英文句号.切"), + i18n("按标点符号切"), + ], + value=i18n("凑四句一切"), + interactive=True, + scale=1, + ) + if_freeze = gr.Checkbox( + label=i18n("是否直接对上次合成结果调整语速和音色"), + value=False, + interactive=True, + show_label=True, + scale=1, + ) + with gr.Row(equal_height=True): + speed = gr.Slider( + minimum=0.6, maximum=1.65, step=0.05, label=i18n("语速"), value=1, interactive=True, scale=1 + ) + pause_second_slider = gr.Slider( + minimum=0.1, + maximum=0.5, + step=0.01, + label=i18n("句间停顿秒数"), + value=0.3, + interactive=True, + scale=1, + ) + gr.Markdown(html_center(i18n("GPT采样参数(不懂就用默认):"))) + top_k = gr.Slider(minimum=1, maximum=100, step=1, label=i18n("top_k"), value=15, interactive=True, scale=1) + top_p = gr.Slider(minimum=0, maximum=1, step=0.05, label=i18n("top_p"), value=1, interactive=True, scale=1) + temperature = gr.Slider( + minimum=0, maximum=1, step=0.05, label=i18n("temperature"), value=1, interactive=True, scale=1 + ) + with gr.Row(equal_height=True): + with gr.Column(scale=2): + inference_button = gr.Button(value=i18n("合成语音"), variant="primary", size="lg") + with gr.Column(scale=1): + output = gr.Audio( + label=i18n("输出的语音"), + waveform_options={"show_recording_waveform": False}, + editable=False, + ) + + inference_button.click( + get_tts_wav, + [ + inp_ref, + prompt_text, + prompt_language, + text, + text_language, + how_to_cut, + top_k, + top_p, + temperature, + ref_text_free, + speed, + if_freeze, + inp_refs, + sample_steps, + if_sr_Checkbox, + pause_second_slider, + ], + [output], + ) + SoVITS_dropdown.change( + change_sovits_weights, + [SoVITS_dropdown, prompt_language, text_language], + [ + prompt_text, + prompt_language, + text, + text_language, + 
sample_steps, + inp_refs, + ref_text_free, + if_sr_Checkbox, + inference_button, + ], + ) + GPT_dropdown.change(change_gpt_weights, [GPT_dropdown], []) - # gr.Markdown(value=i18n("文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。")) - # with gr.Row(): - # text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="") - # button1 = gr.Button(i18n("凑四句一切"), variant="primary") - # button2 = gr.Button(i18n("凑50字一切"), variant="primary") - # button3 = gr.Button(i18n("按中文句号。切"), variant="primary") - # button4 = gr.Button(i18n("按英文句号.切"), variant="primary") - # button5 = gr.Button(i18n("按标点符号切"), variant="primary") - # text_opt = gr.Textbox(label=i18n("切分后文本"), value="") - # button1.click(cut1, [text_inp], [text_opt]) - # button2.click(cut2, [text_inp], [text_opt]) - # button3.click(cut3, [text_inp], [text_opt]) - # button4.click(cut4, [text_inp], [text_opt]) - # button5.click(cut5, [text_inp], [text_opt]) - # gr.Markdown(html_center(i18n("后续将支持转音素、手工修改音素、语音合成分步执行。"))) if __name__ == "__main__": - app.queue().launch( # concurrency_count=511, max_size=1022 + set_high_priority() + app.queue(api_open=False, default_concurrency_limit=512, max_size=1024).launch( server_name="0.0.0.0", inbrowser=True, share=is_share, server_port=infer_ttswebui, - # quiet=True, ) diff --git a/GPT_SoVITS/inference_webui_fast.py b/GPT_SoVITS/inference_webui_fast.py index 51a120f1..bbf0227f 100644 --- a/GPT_SoVITS/inference_webui_fast.py +++ b/GPT_SoVITS/inference_webui_fast.py @@ -6,32 +6,26 @@ 全部按英文识别 全部按日文识别 """ -import psutil -import os -def set_high_priority(): - """把当前 Python 进程设为 HIGH_PRIORITY_CLASS""" - if os.name != "nt": - return # 仅 Windows 有效 - p = psutil.Process(os.getpid()) - try: - p.nice(psutil.HIGH_PRIORITY_CLASS) - print("已将进程优先级设为 High") - except psutil.AccessDenied: - print("权限不足,无法修改优先级(请用管理员运行)") -set_high_priority() -import json +import argparse +import contextlib import logging import os import random import re -import sys +from functools import partial +import gradio as gr +import psutil import torch -now_dir = os.getcwd() -sys.path.append(now_dir) -sys.path.append("%s/GPT_SoVITS" % (now_dir)) +from config import change_choices, custom_sort_key, get_dtype, get_weights_names, pretrained_sovits_name +from config import infer_device as default_device +from GPT_SoVITS.process_ckpt import inspect_version +from GPT_SoVITS.TTS_infer_pack.text_segmentation_method import get_method +from GPT_SoVITS.TTS_infer_pack.TTS import NO_PROMPT_ERROR, TTS, TTS_Config +from tools.assets import css, js, top_html +from tools.i18n.i18n import I18nAuto, scan_language_list logging.getLogger("markdown_it").setLevel(logging.ERROR) logging.getLogger("urllib3").setLevel(logging.ERROR) @@ -41,44 +35,128 @@ logging.getLogger("asyncio").setLevel(logging.ERROR) logging.getLogger("charset_normalizer").setLevel(logging.ERROR) logging.getLogger("torchaudio._extension").setLevel(logging.ERROR) - -infer_ttswebui = os.environ.get("infer_ttswebui", 9872) -infer_ttswebui = int(infer_ttswebui) -is_share = os.environ.get("is_share", "False") -is_share = eval(is_share) -if "_CUDA_VISIBLE_DEVICES" in os.environ: - os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"] - -is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available() -gpt_path = os.environ.get("gpt_path", None) -sovits_path = os.environ.get("sovits_path", None) -cnhubert_base_path = os.environ.get("cnhubert_base_path", None) -bert_path = os.environ.get("bert_path", None) -version = model_version = os.environ.get("version", "v2") - -import gradio 
as gr -from TTS_infer_pack.text_segmentation_method import get_method -from TTS_infer_pack.TTS import NO_PROMPT_ERROR, TTS, TTS_Config - -from tools.assets import css, js, top_html -from tools.i18n.i18n import I18nAuto, scan_language_list - -language = os.environ.get("language", "Auto") -language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else language -i18n = I18nAuto(language=language) +os.environ["TOKENIZERS_PARALLELISM"] = "false" -# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 确保直接启动推理UI时也能够设置。 +def set_high_priority(): + if os.name != "nt": + return + p = psutil.Process(os.getpid()) + with contextlib.suppress(psutil.AccessDenied): + p.nice(psutil.HIGH_PRIORITY_CLASS) + print("已将进程优先级设为 High") -if torch.cuda.is_available(): - device = "cuda" -# elif torch.backends.mps.is_available(): -# device = "mps" -else: - device = "cpu" -# is_half = False -# device = "cpu" +set_high_priority() + + +_LANG_RE = re.compile(r"^[a-z]{2}[_-][A-Z]{2}$") + + +def lang_type(text: str) -> str: + if text == "Auto": + return text + if not _LANG_RE.match(text): + raise argparse.ArgumentTypeError(f"Unspported Format: {text}, Expected ll_CC/ll-CC") + ll, cc = re.split(r"[_-]", text) + language = f"{ll}_{cc}" + if language in scan_language_list(): + return language + else: + return "Auto" + + +def build_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser( + prog="inference_webui", + description="python -s inference_webui.py zh_CN -i naive", + ) + p.add_argument( + "language", + nargs="?", + default="Auto", + type=lang_type, + help="Language Code, Such as zh_CN, en-US", + ) + p.add_argument( + "--device", + "-d", + default=str(default_device), + help="Inference Device", + required=False, + ) + p.add_argument( + "--port", + "-p", + default=9872, + type=int, + help="WebUI Binding Port", + required=False, + ) + p.add_argument( + "--share", + "-s", + default=False, + action="store_true", + help="Gradio Share Link", + required=False, + ) + p.add_argument( + "--cnhubert", + default="GPT_SoVITS/pretrained_models/chinese-hubert-base", + help="CNHuBERT Pretrain", + required=False, + ) + p.add_argument( + "--bert", + default="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large", + help="BERT Pretrain", + required=False, + ) + p.add_argument( + "--gpt", + default="", + help="GPT Model", + required=False, + ) + p.add_argument( + "--sovits", + default="", + help="SoVITS Model", + required=False, + ) + + return p + + +args = build_parser().parse_args() + + +infer_ttswebui = int(args.port) +is_share = args.share + +infer_device = torch.device(args.device) +device = infer_device + +if torch.mps.is_available(): + device = torch.device("cpu") + +dtype = get_dtype(device.index) +is_half = dtype == torch.float16 + +i18n = I18nAuto(language=args.language) +change_choices_gradio = partial(change_choices, i18n=i18n) + +SoVITS_names, GPT_names = get_weights_names(i18n) + +gpt_path = str(args.gpt) or GPT_names[0][-1] +sovits_path = str(args.sovits) or SoVITS_names[0][-1] + +cnhubert_base_path = str(args.cuhubert) +bert_path = str(args.bert) + +version = model_version = "v2" + dict_language_v1 = { i18n("中文"): "all_zh", # 全部按中文识别 @@ -112,10 +190,6 @@ cut_method = { i18n("按标点符号切"): "cut5", } -from config import change_choices, get_weights_names, name2gpt_path, name2sovits_path - -SoVITS_names, GPT_names = get_weights_names() -from config import pretrained_sovits_name path_sovits_v3 = pretrained_sovits_name["v3"] path_sovits_v4 = pretrained_sovits_name["v4"] @@ -128,12 +202,8 @@ tts_config.is_half = 
is_half # tts_config.version = version tts_config.update_version(version) if gpt_path is not None: - if "!" in gpt_path or "!" in gpt_path: - gpt_path = name2gpt_path[gpt_path] tts_config.t2s_weights_path = gpt_path if sovits_path is not None: - if "!" in sovits_path or "!" in sovits_path: - sovits_path = name2sovits_path[sovits_path] tts_config.vits_weights_path = sovits_path if cnhubert_base_path is not None: tts_config.cnhuhbert_base_path = cnhubert_base_path @@ -201,62 +271,31 @@ def inference( gr.Warning(i18n("V3不支持无参考文本模式,请填写参考文本!")) -def custom_sort_key(s): - # 使用正则表达式提取字符串中的数字部分和非数字部分 - parts = re.split("(\d+)", s) - # 将数字部分转换为整数,非数字部分保持不变 - parts = [int(part) if part.isdigit() else part for part in parts] - return parts - - -if os.path.exists("./weight.json"): - pass -else: - with open("./weight.json", "w", encoding="utf-8") as file: - json.dump({"GPT": {}, "SoVITS": {}}, file) - -with open("./weight.json", "r", encoding="utf-8") as file: - weight_data = file.read() - weight_data = json.loads(weight_data) - gpt_path = os.environ.get("gpt_path", weight_data.get("GPT", {}).get(version, GPT_names[-1])) - sovits_path = os.environ.get("sovits_path", weight_data.get("SoVITS", {}).get(version, SoVITS_names[0])) - if isinstance(gpt_path, list): - gpt_path = gpt_path[0] - if isinstance(sovits_path, list): - sovits_path = sovits_path[0] - -from process_ckpt import get_sovits_version_from_path_fast - v3v4set = {"v3", "v4"} def change_sovits_weights(sovits_path, prompt_language=None, text_language=None): - if "!" in sovits_path or "!" in sovits_path: - sovits_path = name2sovits_path[sovits_path] - global version, model_version, dict_language, if_lora_v3 - version, model_version, if_lora_v3 = get_sovits_version_from_path_fast(sovits_path) - # print(sovits_path,version, model_version, if_lora_v3) + global version, model_version, dict_language, is_lora + model_version, version, is_lora, _, __ = inspect_version(sovits_path) + # print(sovits_path,version, model_version, is_lora) is_exist = is_exist_s2gv3 if model_version == "v3" else is_exist_s2gv4 path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4 - if if_lora_v3 == True and is_exist == False: - info = path_sovits + "SoVITS %s" % model_version + i18n("底模缺失,无法加载相应 LoRA 权重") + if is_lora is True and is_exist is False: + info = path_sovits + f"SoVITS {model_version}" + i18n("底模缺失,无法加载相应 LoRA 权重") gr.Warning(info) raise FileExistsError(info) dict_language = dict_language_v1 if version == "v1" else dict_language_v2 if prompt_language is not None and text_language is not None: if prompt_language in list(dict_language.keys()): - prompt_text_update, prompt_language_update = ( - {"__type__": "update"}, - {"__type__": "update", "value": prompt_language}, - ) + prompt_text_update, prompt_language_update = gr.skip(), gr.skip() else: - prompt_text_update = {"__type__": "update", "value": ""} - prompt_language_update = {"__type__": "update", "value": i18n("中文")} + prompt_text_update = gr.update(value="") + prompt_language_update = gr.update(value=i18n("中文")) if text_language in list(dict_language.keys()): - text_update, text_language_update = {"__type__": "update"}, {"__type__": "update", "value": text_language} + text_update, text_language_update = gr.skip(), gr.skip() else: - text_update = {"__type__": "update", "value": ""} - text_language_update = {"__type__": "update", "value": i18n("中文")} + text_update = gr.update(value="") + text_language_update = gr.update(value=i18n("中文")) if model_version in v3v4set: visible_sample_steps = True 
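Both WebUIs now derive their version-dependent UI state from the tuple returned by inspect_version instead of get_sovits_version_from_path_fast. A condensed sketch of that mapping, with the (model_version, version, is_lora, hps, dict_s2) ordering taken from the call sites in this patch:

```python
from GPT_SoVITS.process_ckpt import inspect_version


def ui_state_for(sovits_path: str) -> dict:
    # Tuple ordering follows the call sites in this patch.
    model_version, version, is_lora, hps, dict_s2 = inspect_version(sovits_path)
    is_v3v4 = model_version in {"v3", "v4"}
    return {
        "sample_steps_visible": is_v3v4,  # 采样步数 only applies to v3/v4
        "sample_steps_default": 32 if model_version == "v3" else 8,
        "sample_steps_choices": [4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32],
        "inp_refs_visible": not is_v3v4,  # 辅参考音频 is hidden for v3/v4
        "ref_text_free_interactive": not is_v3v4,  # v3/v4 暂不支持无参考文本模式
        "needs_base_model": is_lora,  # LoRA 权重需要对应底模才能加载
    }
```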
visible_inp_refs = False @@ -264,42 +303,42 @@ def change_sovits_weights(sovits_path, prompt_language=None, text_language=None) visible_sample_steps = False visible_inp_refs = True yield ( - {"__type__": "update", "choices": list(dict_language.keys())}, - {"__type__": "update", "choices": list(dict_language.keys())}, + gr.update(choices=list(dict_language.keys())), + gr.update(choices=list(dict_language.keys())), prompt_text_update, prompt_language_update, text_update, text_language_update, - {"__type__": "update", "interactive": visible_sample_steps, "value": 32}, - {"__type__": "update", "visible": visible_inp_refs}, - {"__type__": "update", "interactive": True if model_version not in v3v4set else False}, - {"__type__": "update", "value": i18n("模型加载中,请等待"), "interactive": False}, + gr.update( + visible=visible_sample_steps, + value=32 if model_version == "v3" else 8, + choices=[4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32], + ), + gr.update(visible=visible_inp_refs), + gr.update(interactive=True if model_version not in v3v4set else False), + gr.update(value=i18n("模型加载中,请等待"), interactive=False), ) tts_pipeline.init_vits_weights(sovits_path) yield ( - {"__type__": "update", "choices": list(dict_language.keys())}, - {"__type__": "update", "choices": list(dict_language.keys())}, + gr.update(choices=list(dict_language.keys())), + gr.update(choices=list(dict_language.keys())), prompt_text_update, prompt_language_update, text_update, text_language_update, - {"__type__": "update", "interactive": visible_sample_steps, "value": 32}, - {"__type__": "update", "visible": visible_inp_refs}, - {"__type__": "update", "interactive": True if model_version not in v3v4set else False}, - {"__type__": "update", "value": i18n("合成语音"), "interactive": True}, + gr.update( + visible=visible_sample_steps, + value=32 if model_version == "v3" else 8, + choices=[4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32], + ), + gr.update(visible=visible_inp_refs), + gr.update(interactive=True if model_version not in v3v4set else False), + gr.update(value=i18n("合成语音"), interactive=True), ) - with open("./weight.json") as f: - data = f.read() - data = json.loads(data) - data["SoVITS"][version] = sovits_path - with open("./weight.json", "w") as f: - f.write(json.dumps(data)) def change_gpt_weights(gpt_path): - if "!" in gpt_path or "!" 
in gpt_path: - gpt_path = name2gpt_path[gpt_path] tts_pipeline.init_t2s_weights(gpt_path) @@ -315,7 +354,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css with gr.Column(): # with gr.Group(): gr.Markdown(value=i18n("模型切换")) - with gr.Row(): + with gr.Row(equal_height=True): GPT_dropdown = gr.Dropdown( label=i18n("GPT模型列表"), choices=sorted(GPT_names, key=custom_sort_key), @@ -329,20 +368,24 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css interactive=True, ) refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary") - refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown]) + refresh_button.click(fn=change_choices_gradio, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown]) - with gr.Row(): + with gr.Row(equal_height=True): with gr.Column(): gr.Markdown(value=i18n("*请上传并填写参考信息")) - with gr.Row(): - inp_ref = gr.Audio(label=i18n("主参考音频(请上传3~10秒内参考音频,超过会报错!)"), type="filepath") + with gr.Row(equal_height=True): + inp_ref = gr.Audio( + label=i18n("主参考音频(请上传3~10秒内参考音频,超过会报错!)"), + type="filepath", + waveform_options={"show_recording_waveform": False}, + ) inp_refs = gr.File( label=i18n("辅参考音频(可选多个,或不选)"), file_count="multiple", visible=True if model_version != "v3" else False, ) prompt_text = gr.Textbox(label=i18n("主参考音频的文本"), value="", lines=2) - with gr.Row(): + with gr.Row(equal_height=True): prompt_language = gr.Dropdown( label=i18n("主参考音频的语种"), choices=list(dict_language.keys()), value=i18n("中文") ) @@ -368,26 +411,26 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css with gr.Group(): gr.Markdown(value=i18n("推理设置")) - with gr.Row(): + with gr.Row(equal_height=True): with gr.Column(): - with gr.Row(): + with gr.Row(equal_height=True): batch_size = gr.Slider( minimum=1, maximum=200, step=1, label=i18n("batch_size"), value=20, interactive=True ) sample_steps = gr.Radio( label=i18n("采样步数(仅对V3/4生效)"), value=32, choices=[4, 8, 16, 32, 64, 128], visible=True ) - with gr.Row(): + with gr.Row(equal_height=True): fragment_interval = gr.Slider( minimum=0.01, maximum=1, step=0.01, label=i18n("分段间隔(秒)"), value=0.3, interactive=True ) speed_factor = gr.Slider( minimum=0.6, maximum=1.65, step=0.05, label="语速", value=1.0, interactive=True ) - with gr.Row(): + with gr.Row(equal_height=True): top_k = gr.Slider(minimum=1, maximum=100, step=1, label=i18n("top_k"), value=5, interactive=True) top_p = gr.Slider(minimum=0, maximum=1, step=0.05, label=i18n("top_p"), value=1, interactive=True) - with gr.Row(): + with gr.Row(equal_height=True): temperature = gr.Slider( minimum=0, maximum=1, step=0.05, label=i18n("temperature"), value=1, interactive=True ) @@ -396,7 +439,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css ) with gr.Column(): - with gr.Row(): + with gr.Row(equal_height=True): how_to_cut = gr.Dropdown( label=i18n("怎么切"), choices=[ @@ -415,7 +458,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css label=i18n("音频超采样(仅对V3生效))"), value=False, interactive=True, show_label=True ) - with gr.Row(): + with gr.Row(equal_height=True): parallel_infer = gr.Checkbox(label=i18n("并行推理"), value=True, interactive=True, show_label=True) split_bucket = gr.Checkbox( label=i18n("数据分桶(并行推理时会降低一点计算量)"), @@ -424,12 +467,15 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css show_label=True, ) - with gr.Row(): + with gr.Row(equal_height=True): seed = gr.Number(label=i18n("随机种子"), value=-1) 
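The 数据分桶 option above pays off because grouping segments of similar length before parallel inference keeps per-batch padding to a minimum. The sketch below only illustrates the idea and is not the pipeline's actual batching code:

```python
def bucket_by_length(texts: list[str], batch_size: int) -> list[list[str]]:
    # Sort so that segments of similar length land in the same bucket,
    # then slice into batches of at most batch_size items.
    ordered = sorted(texts, key=len)
    return [ordered[i : i + batch_size] for i in range(0, len(ordered), batch_size)]


# Example: with the default batch_size of 20, 45 text segments become three
# buckets whose members need far less padding than a random grouping would.
```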
keep_random = gr.Checkbox(label=i18n("保持随机"), value=True, interactive=True, show_label=True) - output = gr.Audio(label=i18n("输出的语音")) - with gr.Row(): + output = gr.Audio( + label=i18n("输出的语音"), + waveform_options={"show_recording_waveform": False}, + ) + with gr.Row(equal_height=True): inference_button = gr.Button(i18n("合成语音"), variant="primary") stop_infer = gr.Button(i18n("终止合成"), variant="primary") @@ -485,7 +531,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。" ) ) - with gr.Row(): + with gr.Row(equal_height=True): text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="", lines=4) with gr.Column(): _how_to_cut = gr.Radio( diff --git a/GPT_SoVITS/module/__init__.py b/GPT_SoVITS/module/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/GPT_SoVITS/module/attentions.py b/GPT_SoVITS/module/attentions.py index 341de4ac..3772875d 100644 --- a/GPT_SoVITS/module/attentions.py +++ b/GPT_SoVITS/module/attentions.py @@ -1,10 +1,13 @@ import math -import torch -from torch import nn -from torch.nn import functional as F -from module import commons -from module.modules import LayerNorm +import torch +import torch.nn as nn +from torch.nn import functional as F +from torch.nn.utils import remove_weight_norm +from torch.nn.utils.parametrizations import weight_norm + +from . import commons +from .modules import LayerNorm class Encoder(nn.Module): @@ -392,10 +395,6 @@ class FFN(nn.Module): return x -import torch.nn as nn -from torch.nn.utils import remove_weight_norm, weight_norm - - class Depthwise_Separable_Conv1D(nn.Module): def __init__( self, diff --git a/GPT_SoVITS/module/attentions_onnx.py b/GPT_SoVITS/module/attentions_onnx.py index 9961f987..d9746db7 100644 --- a/GPT_SoVITS/module/attentions_onnx.py +++ b/GPT_SoVITS/module/attentions_onnx.py @@ -1,11 +1,11 @@ import math +from typing import Optional + import torch from torch import nn from torch.nn import functional as F -from module import commons - -from typing import Optional +from . 
import commons class LayerNorm(nn.Module): diff --git a/GPT_SoVITS/module/data_utils.py b/GPT_SoVITS/module/data_utils.py index 46eff5fb..c256e686 100644 --- a/GPT_SoVITS/module/data_utils.py +++ b/GPT_SoVITS/module/data_utils.py @@ -1,15 +1,17 @@ import os import random import traceback + import torch +import torch.nn.functional as F import torch.utils.data from tqdm import tqdm -from module.mel_processing import spectrogram_torch, spec_to_mel_torch -from text import cleaned_text_to_sequence -import torch.nn.functional as F +from GPT_SoVITS.text import cleaned_text_to_sequence from tools.my_utils import load_audio +from .mel_processing import spec_to_mel_torch, spectrogram_torch + version = os.environ.get("version", None) diff --git a/GPT_SoVITS/module/losses.py b/GPT_SoVITS/module/losses.py index 2b642db1..79a3e3eb 100644 --- a/GPT_SoVITS/module/losses.py +++ b/GPT_SoVITS/module/losses.py @@ -4,18 +4,18 @@ import torch def feature_loss(fmap_r, fmap_g): - loss = 0 + loss = torch.tensor(0).to(fmap_r[0][0].device) for dr, dg in zip(fmap_r, fmap_g): for rl, gl in zip(dr, dg): rl = rl.float().detach() gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) + loss = torch.mean(torch.abs(rl - gl)) + loss return loss * 2 def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 + loss = torch.tensor(0).to(disc_real_outputs[0].device) r_losses = [] g_losses = [] for dr, dg in zip(disc_real_outputs, disc_generated_outputs): @@ -23,7 +23,7 @@ def discriminator_loss(disc_real_outputs, disc_generated_outputs): dg = dg.float() r_loss = torch.mean((1 - dr) ** 2) g_loss = torch.mean(dg**2) - loss += r_loss + g_loss + loss = r_loss + g_loss + loss r_losses.append(r_loss.item()) g_losses.append(g_loss.item()) @@ -31,13 +31,13 @@ def discriminator_loss(disc_real_outputs, disc_generated_outputs): def generator_loss(disc_outputs): - loss = 0 + loss = torch.tensor(0).to(disc_outputs[0].device) gen_losses = [] for dg in disc_outputs: dg = dg.float() l = torch.mean((1 - dg) ** 2) gen_losses.append(l) - loss += l + loss = l + loss return loss, gen_losses diff --git a/GPT_SoVITS/module/mel_processing.py b/GPT_SoVITS/module/mel_processing.py index 62c7b40e..7020f94a 100644 --- a/GPT_SoVITS/module/mel_processing.py +++ b/GPT_SoVITS/module/mel_processing.py @@ -1,5 +1,4 @@ import torch -import torch.utils.data from librosa.filters import mel as librosa_mel_fn MAX_WAV_VALUE = 32768.0 @@ -67,10 +66,10 @@ def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False) pad_mode="reflect", normalized=False, onesided=True, - return_complex=False, + return_complex=True, ) - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-8) + spec = spec.abs().pow_(2).add_(1e-8).sqrt_() return spec @@ -132,10 +131,10 @@ def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, pad_mode="reflect", normalized=False, onesided=True, - return_complex=False, + return_complex=True, ) - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-8) + spec = spec.abs().pow_(2).add_(1e-8).sqrt_() spec = torch.matmul(mel_basis[fmax_dtype_device], spec) spec = spectral_normalize_torch(spec) diff --git a/GPT_SoVITS/module/models.py b/GPT_SoVITS/module/models.py index 1c8e662f..b752d06c 100644 --- a/GPT_SoVITS/module/models.py +++ b/GPT_SoVITS/module/models.py @@ -1,28 +1,32 @@ -import warnings - -warnings.filterwarnings("ignore") +import contextlib import math +import random import torch from torch import nn -from torch.nn import functional as F - -from module import commons -from module import modules -from 
module import attentions -from f5_tts.model import DiT -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from module.commons import init_weights, get_padding -from module.mrte_model import MRTE -from module.quantize import ResidualVectorQuantizer - -# from text import symbols -from text import symbols as symbols_v1 -from text import symbols2 as symbols_v2 from torch.cuda.amp import autocast -import contextlib -import random +from torch.nn import Conv1d, Conv2d, ConvTranspose1d +from torch.nn import functional as F +from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm + +from GPT_SoVITS.f5_tts.model import DiT +from GPT_SoVITS.text import symbols as symbols_v1 +from GPT_SoVITS.text import symbols2 as symbols_v2 +from GPT_SoVITS.utils import HParams +from tools.my_utils import _open_file + +from . import attentions, commons, modules +from .commons import get_padding, init_weights +from .mrte_model import MRTE +from .quantize import ResidualVectorQuantizer + + +def set_serialization(): + torch.serialization.add_safe_globals([(HParams, "utils.HParams")]) + torch.serialization._open_file = _open_file + + +set_serialization() class StochasticDurationPredictor(nn.Module): @@ -230,25 +234,6 @@ class TextEncoder(nn.Module): m, logs = torch.split(stats, self.out_channels, dim=1) return y, m, logs, y_mask - def extract_latent(self, x): - x = self.ssl_proj(x) - quantized, codes, commit_loss, quantized_list = self.quantizer(x) - return codes.transpose(0, 1) - - def decode_latent(self, codes, y_mask, refer, refer_mask, ge): - quantized = self.quantizer.decode(codes) - - y = self.vq_proj(quantized) * y_mask - y = self.encoder_ssl(y * y_mask, y_mask) - - y = self.mrte(y, y_mask, refer, refer_mask, ge) - - y = self.encoder2(y * y_mask, y_mask) - - stats = self.proj(y) * y_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - return y, m, logs, y_mask, quantized - class ResidualCouplingBlock(nn.Module): def __init__( @@ -483,7 +468,7 @@ class DiscriminatorP(torch.nn.Module): super(DiscriminatorP, self).__init__() self.period = period self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm + norm_f = weight_norm if use_spectral_norm is False else spectral_norm self.convs = nn.ModuleList( [ norm_f( @@ -560,7 +545,7 @@ class DiscriminatorP(torch.nn.Module): class DiscriminatorS(torch.nn.Module): def __init__(self, use_spectral_norm=False): super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm + norm_f = weight_norm if use_spectral_norm is False else spectral_norm self.convs = nn.ModuleList( [ norm_f(Conv1d(1, 16, 15, 1, padding=7)), @@ -957,7 +942,6 @@ class SynthesizerTrn(nn.Module): o = self.dec((z * y_mask)[:, :, :], g=ge) return o, y_mask, (z, z_p, m_p, logs_p) - @torch.no_grad() def decode(self, codes, text, refer, noise_scale=0.5, speed=1, sv_emb=None): def get_ge(refer, sv_emb): ge = None @@ -1004,7 +988,7 @@ class SynthesizerTrn(nn.Module): o = self.dec((z * y_mask)[:, :, :], g=ge) return o - def extract_latent(self, x): + def extract_latent(self, x) -> torch.Tensor: ssl = self.ssl_proj(x) quantized, codes, commit_loss, quantized_list = self.quantizer(ssl) return codes.transpose(0, 1) @@ -1180,13 +1164,7 @@ class SynthesizerTrnV3(nn.Module): self.enc_p = TextEncoder( inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout ) - # self.ref_enc = 
modules.MelStyleEncoder(spec_channels, style_vector_dim=gin_channels)###Rollback - self.ref_enc = modules.MelStyleEncoder(704, style_vector_dim=gin_channels) ###Rollback - # self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - # upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - # self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, - # gin_channels=gin_channels) - # self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) + self.ref_enc = modules.MelStyleEncoder(704, style_vector_dim=gin_channels) ssl_dim = 768 assert semantic_frame_rate in ["25hz", "50hz"] @@ -1206,7 +1184,7 @@ class SynthesizerTrnV3(nn.Module): 100, DiT(**dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=inter_channels2, conv_layers=4)), ) # text_dim is condition feature dim - if self.freeze_quantizer == True: + if self.freeze_quantizer is True: set_no_grad(self.ssl_proj) set_no_grad(self.quantizer) set_no_grad(self.enc_p) @@ -1245,7 +1223,7 @@ class SynthesizerTrnV3(nn.Module): def decode_encp(self, codes, text, refer, ge=None, speed=1): # print(2333333,refer.shape) # ge=None - if ge == None: + if ge is None: refer_lengths = torch.LongTensor([refer.size(2)]).to(refer.device) refer_mask = torch.unsqueeze(commons.sequence_mask(refer_lengths, refer.size(2)), 1).to(refer.dtype) ge = self.ref_enc(refer[:, :704] * refer_mask, refer_mask) @@ -1409,7 +1387,7 @@ class SynthesizerTrnV3b(nn.Module): def decode_encp(self, codes, text, refer, ge=None): # print(2333333,refer.shape) # ge=None - if ge == None: + if ge is None: refer_lengths = torch.LongTensor([refer.size(2)]).to(refer.device) refer_mask = torch.unsqueeze(commons.sequence_mask(refer_lengths, refer.size(2)), 1).to(refer.dtype) ge = self.ref_enc(refer[:, :704] * refer_mask, refer_mask) @@ -1427,7 +1405,7 @@ class SynthesizerTrnV3b(nn.Module): fea, y_mask_ = self.wns1(fea, y_lengths1, ge) return fea, ge - def extract_latent(self, x): + def extract_latent(self, x) -> torch.Tensor: ssl = self.ssl_proj(x) quantized, codes, commit_loss, quantized_list = self.quantizer(ssl) return codes.transpose(0, 1) diff --git a/GPT_SoVITS/module/models_onnx.py b/GPT_SoVITS/module/models_onnx.py index b62b8b71..912539c3 100644 --- a/GPT_SoVITS/module/models_onnx.py +++ b/GPT_SoVITS/module/models_onnx.py @@ -1,23 +1,21 @@ import math from typing import Optional + import torch from torch import nn +from torch.nn import Conv1d, Conv2d, ConvTranspose1d from torch.nn import functional as F +from torch.nn.utils import remove_weight_norm, spectral_norm +from torch.nn.utils import weight_norm -from module import commons -from module import modules -from module import attentions_onnx as attentions +from GPT_SoVITS.f5_tts.model import DiT +from GPT_SoVITS.text import symbols as symbols_v1 +from GPT_SoVITS.text import symbols2 as symbols_v2 -from f5_tts.model import DiT - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from module.commons import init_weights, get_padding -from module.quantize import ResidualVectorQuantizer - -# from text import symbols -from text import symbols as symbols_v1 -from text import symbols2 as symbols_v2 +from . import attentions_onnx as attentions +from . 
import commons, modules +from .commons import get_padding, init_weights +from .quantize import ResidualVectorQuantizer class StochasticDurationPredictor(nn.Module): @@ -459,7 +457,7 @@ class DiscriminatorP(torch.nn.Module): super(DiscriminatorP, self).__init__() self.period = period self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm + norm_f = weight_norm if use_spectral_norm is False else spectral_norm self.convs = nn.ModuleList( [ norm_f( @@ -536,7 +534,7 @@ class DiscriminatorP(torch.nn.Module): class DiscriminatorS(torch.nn.Module): def __init__(self, use_spectral_norm=False): super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm + norm_f = weight_norm if use_spectral_norm is False else spectral_norm self.convs = nn.ModuleList( [ norm_f(Conv1d(1, 16, 15, 1, padding=7)), @@ -1057,7 +1055,7 @@ class SynthesizerTrnV3(nn.Module): 100, DiT(**dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=inter_channels2, conv_layers=4)), ) # text_dim is condition feature dim - if freeze_quantizer == True: + if freeze_quantizer is True: set_no_grad(self.ssl_proj) set_no_grad(self.quantizer) set_no_grad(self.enc_p) diff --git a/GPT_SoVITS/module/modules.py b/GPT_SoVITS/module/modules.py index 6fa84a43..4bfc29ec 100644 --- a/GPT_SoVITS/module/modules.py +++ b/GPT_SoVITS/module/modules.py @@ -1,18 +1,19 @@ import math +import warnings import numpy as np import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d -from torch.nn.utils import weight_norm, remove_weight_norm - -from module import commons -from module.commons import init_weights, get_padding -from module.transforms import piecewise_rational_quadratic_transform import torch.distributions as D +from torch import nn +from torch.nn import Conv1d +from torch.nn import functional as F +from torch.nn.utils import remove_weight_norm, weight_norm +from . 
import commons +from .commons import get_padding, init_weights +from .transforms import piecewise_rational_quadratic_transform + +warnings.filterwarnings("ignore", category=FutureWarning, module="torch.nn.utils.weight_norm") LRELU_SLOPE = 0.1 @@ -154,7 +155,7 @@ class WN(torch.nn.Module): if gin_channels != 0: cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") + self.cond_layer = torch.nn.utils.parametrizations.weight_norm(cond_layer, name="weight") for i in range(n_layers): dilation = dilation_rate**i @@ -166,7 +167,7 @@ class WN(torch.nn.Module): dilation=dilation, padding=padding, ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") + in_layer = torch.nn.utils.parametrizations.weight_norm(in_layer, name="weight") self.in_layers.append(in_layer) # last one is not necessary @@ -176,7 +177,7 @@ class WN(torch.nn.Module): res_skip_channels = hidden_channels res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") + res_skip_layer = torch.nn.utils.parametrizations.weight_norm(res_skip_layer, name="weight") self.res_skip_layers.append(res_skip_layer) def forward(self, x, x_mask, g=None, **kwargs): diff --git a/GPT_SoVITS/module/mrte_model.py b/GPT_SoVITS/module/mrte_model.py index e889b7e9..bf25cb07 100644 --- a/GPT_SoVITS/module/mrte_model.py +++ b/GPT_SoVITS/module/mrte_model.py @@ -2,8 +2,10 @@ import torch from torch import nn -from torch.nn.utils import remove_weight_norm, weight_norm -from module.attentions import MultiHeadAttention +from torch.nn.utils import remove_weight_norm +from torch.nn.utils import weight_norm + +from .attentions import MultiHeadAttention class MRTE(nn.Module): @@ -23,7 +25,7 @@ class MRTE(nn.Module): self.c_post = nn.Conv1d(hidden_size, out_channels, 1) def forward(self, ssl_enc, ssl_mask, text, text_mask, ge, test=None): - if ge == None: + if ge is None: ge = 0 attn_mask = text_mask.unsqueeze(2) * ssl_mask.unsqueeze(-1) diff --git a/GPT_SoVITS/module/quantize.py b/GPT_SoVITS/module/quantize.py index 0afed835..537af3c2 100644 --- a/GPT_SoVITS/module/quantize.py +++ b/GPT_SoVITS/module/quantize.py @@ -6,13 +6,13 @@ """Residual vector quantizer implementation.""" -from dataclasses import dataclass, field import typing as tp +from dataclasses import dataclass, field import torch from torch import nn -from module.core_vq import ResidualVectorQuantization +from .core_vq import ResidualVectorQuantization @dataclass diff --git a/GPT_SoVITS/onnx_export.py b/GPT_SoVITS/onnx_export.py index fd680135..57c30c9c 100644 --- a/GPT_SoVITS/onnx_export.py +++ b/GPT_SoVITS/onnx_export.py @@ -1,18 +1,20 @@ +import json +import os + +import onnxruntime +import soundfile import torch import torchaudio -from AR.models.t2s_lightning_module_onnx import Text2SemanticLightningModule -from feature_extractor import cnhubert -from module.models_onnx import SynthesizerTrn, symbols_v1, symbols_v2 from torch import nn +from GPT_SoVITS.AR.models.t2s_lightning_module_onnx import Text2SemanticLightningModule +from GPT_SoVITS.feature_extractor import cnhubert +from GPT_SoVITS.module.models_onnx import SynthesizerTrn, symbols_v1, symbols_v2 +from GPT_SoVITS.text import cleaned_text_to_sequence + cnhubert_base_path = "GPT_SoVITS/pretrained_models/chinese-hubert-base" cnhubert.cnhubert_base_path = cnhubert_base_path ssl_model = cnhubert.get_model() -import json -import os - -import soundfile -from 
text import cleaned_text_to_sequence def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): @@ -232,8 +234,6 @@ class GptSoVits(nn.Module): pred_semantic = self.t2s(ref_seq, text_seq, ref_bert, text_bert, ssl_content) audio = self.vits(text_seq, pred_semantic, ref_audio) if debug: - import onnxruntime - sess = onnxruntime.InferenceSession("onnx/koharu/koharu_vits.onnx", providers=["CPU"]) audio1 = sess.run( None, @@ -343,7 +343,7 @@ def export(vits_path, gpt_path, project_name, vits_model="v2"): try: os.mkdir(f"onnx/{project_name}") - except: + except Exception: pass ssl_content = ssl(ref_audio_16k).float() @@ -387,7 +387,7 @@ def export(vits_path, gpt_path, project_name, vits_model="v2"): if __name__ == "__main__": try: os.mkdir("onnx") - except: + except Exception: pass gpt_path = "GPT_weights/nahida-e25.ckpt" diff --git a/GPT_SoVITS/prepare_datasets/1-get-text.py b/GPT_SoVITS/prepare_datasets/1-get-text.py index 8d83e79a..9d4ac605 100644 --- a/GPT_SoVITS/prepare_datasets/1-get-text.py +++ b/GPT_SoVITS/prepare_datasets/1-get-text.py @@ -1,113 +1,46 @@ -# -*- coding: utf-8 -*- - +import enum import os +import os.path as osp +import platform +import queue +import sys +import time +import warnings +from pathlib import Path +from typing import List -inp_text = os.environ.get("inp_text") -inp_wav_dir = os.environ.get("inp_wav_dir") -exp_name = os.environ.get("exp_name") -i_part = os.environ.get("i_part") -all_parts = os.environ.get("all_parts") -if "_CUDA_VISIBLE_DEVICES" in os.environ: - os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"] -opt_dir = os.environ.get("opt_dir") -bert_pretrained_dir = os.environ.get("bert_pretrained_dir") import torch +import torch.multiprocessing as tmp +import typer +from rich.progress import BarColumn, Progress, TextColumn, TimeRemainingColumn +from torch.multiprocessing.spawn import spawn +from transformers import BertForMaskedLM, BertTokenizerFast -is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available() -version = os.environ.get("version", None) -import traceback -import os.path -from text.cleaner import clean_text -from transformers import AutoModelForMaskedLM, AutoTokenizer +from GPT_SoVITS.Accelerate.logger import console, logger, SpeedColumnIteration +from GPT_SoVITS.text.cleaner import clean_text from tools.my_utils import clean_path -# inp_text=sys.argv[1] -# inp_wav_dir=sys.argv[2] -# exp_name=sys.argv[3] -# i_part=sys.argv[4] -# all_parts=sys.argv[5] -# os.environ["CUDA_VISIBLE_DEVICES"]=sys.argv[6]#i_gpu -# opt_dir="/data/docker/liujing04/gpt-vits/fine_tune_dataset/%s"%exp_name -# bert_pretrained_dir="/data/docker/liujing04/bert-vits2/Bert-VITS2-master20231106/bert/chinese-roberta-wwm-ext-large" +torch.set_grad_enabled(False) -from time import time as ttime -import shutil +tmp.set_start_method("spawn", force=True) + +warnings.filterwarnings("ignore", category=UserWarning, module="jieba_fast._compat") -def my_save(fea, path): #####fix issue: torch.save doesn't support chinese path - dir = os.path.dirname(path) - name = os.path.basename(path) - # tmp_path="%s/%s%s.pth"%(dir,ttime(),i_part) - tmp_path = "%s%s.pth" % (ttime(), i_part) - torch.save(fea, tmp_path) - shutil.move(tmp_path, "%s/%s" % (dir, name)) +class Device(str, enum.Enum): + cpu = "cpu" + cuda = "cuda" + mps = "mps" -txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part) -if os.path.exists(txt_path) == False: - bert_dir = "%s/3-bert" % (opt_dir) - os.makedirs(opt_dir, exist_ok=True) - os.makedirs(bert_dir, 
exist_ok=True) - if torch.cuda.is_available(): - device = "cuda:0" - # elif torch.backends.mps.is_available(): - # device = "mps" - else: - device = "cpu" - if os.path.exists(bert_pretrained_dir): - ... - else: - raise FileNotFoundError(bert_pretrained_dir) - tokenizer = AutoTokenizer.from_pretrained(bert_pretrained_dir) - bert_model = AutoModelForMaskedLM.from_pretrained(bert_pretrained_dir) - if is_half == True: - bert_model = bert_model.half().to(device) - else: - bert_model = bert_model.to(device) +app = typer.Typer( + context_settings={"help_option_names": ["-h", "--help"]}, + add_completion=False, +) - def get_bert_feature(text, word2ph): - with torch.no_grad(): - inputs = tokenizer(text, return_tensors="pt") - for i in inputs: - inputs[i] = inputs[i].to(device) - res = bert_model(**inputs, output_hidden_states=True) - res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1] - assert len(word2ph) == len(text) - phone_level_feature = [] - for i in range(len(word2ph)): - repeat_feature = res[i].repeat(word2ph[i], 1) - phone_level_feature.append(repeat_feature) - - phone_level_feature = torch.cat(phone_level_feature, dim=0) - - return phone_level_feature.T - - def process(data, res): - for name, text, lan in data: - try: - name = clean_path(name) - name = os.path.basename(name) - print(name) - phones, word2ph, norm_text = clean_text(text.replace("%", "-").replace("¥", ","), lan, version) - path_bert = "%s/%s.pt" % (bert_dir, name) - if os.path.exists(path_bert) == False and lan == "zh": - bert_feature = get_bert_feature(norm_text, word2ph) - assert bert_feature.shape[-1] == len(phones) - # torch.save(bert_feature, path_bert) - my_save(bert_feature, path_bert) - phones = " ".join(phones) - # res.append([name,phones]) - res.append([name, phones, word2ph, norm_text]) - except: - print(name, text, traceback.format_exc()) - - todo = [] - res = [] - with open(inp_text, "r", encoding="utf8") as f: - lines = f.read().strip("\n").split("\n") - - language_v1_to_language_v2 = { +def lang_map(lang: str) -> str: + m = { "ZH": "zh", "zh": "zh", "JP": "ja", @@ -124,20 +57,264 @@ if os.path.exists(txt_path) == False: "YUE": "yue", "Yue": "yue", } - for line in lines[int(i_part) :: int(all_parts)]: - try: - wav_name, spk_name, language, text = line.split("|") - # todo.append([name,text,"zh"]) - if language in language_v1_to_language_v2.keys(): - todo.append([wav_name, text, language_v1_to_language_v2.get(language, language)]) - else: - print(f"\033[33m[Waring] The {language = } of {wav_name} is not supported for training.\033[0m") - except: - print(line, traceback.format_exc()) + return m.get(lang, "") - process(todo, res) - opt = [] - for name, phones, word2ph, norm_text in res: - opt.append("%s\t%s\t%s\t%s" % (name, phones, word2ph, norm_text)) - with open(txt_path, "w", encoding="utf8") as f: - f.write("\n".join(opt) + "\n") + +def parse_inp_text_line(line: str) -> tuple[str, str, str]: + wav_name, _, language, text = line.split("|", 3) + return wav_name, language, text + + +def build_device_strings(device_type: str, device_ids: list[int], procs_per_device: int) -> list[str]: + devices: list[str] = [] + for device_id in device_ids: + dstr = f"{device_type}:{device_id}" + devices.extend([dstr] * procs_per_device) + return devices + + +def worker_entry( + rank: int, + device_strs: List[str], + tasks_q: "tmp.Queue[tuple[int, str, str, str] | None]", + results_q: "tmp.Queue[tuple[int, tuple[str, str, list[int] | None, str]]]", + bert_pretrained_dir: str, + opt_dir: str, + fp16: bool, + version: 
str | None,
+):
+    device_str = device_strs[rank]
+    device = torch.device(device_str)
+
+    if device.type == "cuda":
+        assert torch.cuda.is_available()
+        torch.cuda.set_device(device.index)
+    elif device.type == "mps":
+        assert torch.mps.is_available()
+    elif device.type == "xpu":
+        assert torch.xpu.is_available()
+
+    bert_dir = osp.join(opt_dir, "3-bert")
+    os.makedirs(bert_dir, exist_ok=True)
+
+    if not osp.exists(bert_pretrained_dir):
+        raise FileNotFoundError(bert_pretrained_dir)
+
+    tokenizer = BertTokenizerFast.from_pretrained(bert_pretrained_dir)
+    bert_model = BertForMaskedLM.from_pretrained(bert_pretrained_dir, device_map=device)
+
+    if fp16:
+        bert_model = bert_model.half()
+
+    def get_bert_feature(text: str, word2ph: list[int]) -> torch.Tensor:
+        inputs = tokenizer(text, return_tensors="pt")
+        for k in inputs:
+            inputs[k] = inputs[k].to(device)
+        out: torch.Tensor = bert_model(**inputs, output_hidden_states=True).hidden_states  # type: ignore
+        layer = out[-3][0].cpu()[1:-1]  # [seq-2, hid]
+        assert len(word2ph) == len(text)
+        phone_level_feature = []
+        for i in range(len(word2ph)):
+            phone_level_feature.append(layer[i].repeat(word2ph[i], 1))
+        feats = torch.cat(phone_level_feature, dim=0)  # [phones, hid]
+        return feats.T  # [hid, phones]
+
+    i = 0
+    while True:
+        item = tasks_q.get()
+        if item is None:
+            break
+
+        idx, wav_name, language, text = item
+
+        i += 1
+        if i % 10 == 0:
+            match device.type:
+                case "cuda":
+                    torch.cuda.empty_cache()
+                case "mps":
+                    torch.mps.empty_cache()
+                case "xpu":
+                    torch.xpu.empty_cache()
+
+        try:
+            name = clean_path(osp.basename(wav_name))
+            mapped_lang = lang_map(language)
+            if not mapped_lang:
+                logger.warning(f"[W{rank}] Unsupported language: {language} of {wav_name}")
+                results_q.put((idx, ("", "", [], "")))
+                continue
+
+            phones, word2ph, norm_text = clean_text(
+                text.replace("%", "-").replace("¥", ","),
+                mapped_lang,
+                version,
+            )
+
+            if mapped_lang == "zh":
+                path_bert = osp.join(bert_dir, f"{name}.pt")
+                if not osp.exists(path_bert):
+                    assert word2ph
+                    bert_feature = get_bert_feature(norm_text, word2ph)
+                    assert bert_feature.shape[-1] == len(phones)
+                    torch.save(bert_feature, path_bert)
+
+            phones_str = " ".join(phones)
+            results_q.put((idx, (name, phones_str, word2ph, norm_text)))
+        except Exception as e:
+            del (
+                device_str,
+                tokenizer,
+                bert_model,
+                bert_dir,
+                bert_pretrained_dir,
+                tasks_q,
+                results_q,
+                opt_dir,
+                item,
+                idx,
+                i,
+            )
+            logger.exception(f"[W{rank}] Failed: {wav_name} | {text}")
+            raise e
+
+    sys.exit(0)
+
+
+@app.command()
+def main(
+    inp_list: Path = typer.Option(
+        ...,
+        "--inp-list",
+        file_okay=True,
+        dir_okay=False,
+        exists=True,
+        readable=True,
+        show_default=False,
+        help="list File: wav|spk|lang|text",
+    ),
+    opt: Path = typer.Option(
+        ..., "--opt", file_okay=False, dir_okay=True, writable=True, show_default=False, help="Output Directory"
+    ),
+    bert: Path = typer.Option(
+        ..., "--bert", exists=True, readable=True, show_default=False, help="Path to Bert Pretrained Models"
+    ),
+    version: str = typer.Option("v2", "--version", help="SoVITS Language Version"),
+    device: Device = typer.Option(Device.cpu, "--device", help="Compute device"),
+    device_id: str = typer.Option("0", "--device-id", help="CUDA_VISIBLE_DEVICES, such as '0,1,2'"),
+    nproc: int = typer.Option(1, "--nproc", min=1, help="Number of processes per GPU"),
+    fp16: bool = typer.Option(False, is_flag=True, flag_value=True, help="Use FP16"),
+):
+    device_ids = [int(x) for x in device_id.split(",") if x.strip() != ""]
+    if device in {"cpu", 
"mps"} and device_ids != [0]: + raise ValueError(f"Invalid Device ID {device_ids}") + if nproc < 1: + raise ValueError(f"Invalid Num Process {nproc}") + + os.makedirs(opt, exist_ok=True) + merged_path = osp.join(opt, "2-name2text.txt") + + with open(inp_list, "r", encoding="utf8") as f: + lines = [ln for ln in f.read().splitlines() if ln.strip()] + + tasks_all: list[tuple[int, str, str, str]] = [] + for idx, line in enumerate(lines): + try: + wav_name, language, text = parse_inp_text_line(line) + tasks_all.append((idx, wav_name, language, text)) + except Exception: + logger.exception(f"Skip line {idx}: {line}") + + n_tasks = len(tasks_all) + if n_tasks == 0: + logger.warning("Empty list") + with open(merged_path, "w", encoding="utf8") as fout: + pass + return + + device_strs = build_device_strings(device, device_ids, nproc) + world_size = len(device_strs) + + tasks_q: "tmp.Queue[tuple[int, str, str, str] | None]" = tmp.Queue() + results_q: "tmp.Queue[tuple[int, tuple[str, str, list[int] | None, str]]]" = tmp.Queue() + + for task in tasks_all: + tasks_q.put(task) + for _ in range(world_size): + tasks_q.put(None) + + ordered: list[tuple[str, str, list[int] | None, str]] = [("", "", [], "")] * n_tasks + completed = 0 + + with Progress( + TextColumn("[cyan]{task.description}"), + BarColumn(), + TextColumn("{task.completed}/{task.total}"), + SpeedColumnIteration(show_speed=True), + TimeRemainingColumn(elapsed_when_finished=True), + console=console, + redirect_stderr=False, + redirect_stdout=False, + ) as progress: + progress_task = progress.add_task("G2P & Extract Bert", total=n_tasks) + + ctx = spawn( + worker_entry, + args=(device_strs, tasks_q, results_q, bert, opt, fp16, version), + nprocs=world_size, + join=False, + daemon=False, + ) + assert ctx + + while completed < n_tasks: + try: + idx, tup = results_q.get(timeout=0.01) + ordered[idx] = tup + completed += 1 + progress.update(progress_task, advance=1) + except queue.Empty: + pass + + for p in ctx.processes: + assert p + if (p.exitcode is not None and p.exitcode != 0) or (not p.is_alive()): + progress.live.stop() + try: + ctx.join() + except Exception as e: + console.print(e) + finally: + logger.critical(f"Worker PID {p.pid} crashed with exit code {p.exitcode}.") + sys.exit(1) + ctx.join() + + with open(merged_path, "w", encoding="utf8") as fout: + for name, phones_str, word2ph, norm_text in ordered: + if name: + fout.write(f"{name}\t{phones_str}\t{word2ph}\t{norm_text}\n") + + logger.info(f"Done: {merged_path}") + + +def is_powershell_env(env: dict) -> bool: + return any(k in env for k in ("PSHOME", "POWERSHELL_DISTRIBUTION_CHANNEL", "PSModulePath")) + + +def get_prog_name() -> str: + system = platform.system() + env = os.environ.copy() + script_rel = osp.join("GPT_SoVITS", "prepare_datasets", osp.basename(__file__)) + if system == "Windows": + if is_powershell_env(env): + return rf"$env:PYTHONPATH='.'; python -s {script_rel}" + else: + return rf"set PYTHONPATH=. && python -s {script_rel}" + else: + return f"PYTHONPATH=. 
python -s {script_rel}" + + +if __name__ == "__main__": + t = time.perf_counter() + app(prog_name=get_prog_name()) + logger.info(f"Exec Time: {time.perf_counter() - t:.2f} secs") diff --git a/GPT_SoVITS/prepare_datasets/2-get-hubert-sv-wav32k.py b/GPT_SoVITS/prepare_datasets/2-get-hubert-sv-wav32k.py new file mode 100644 index 00000000..4cd4f5ef --- /dev/null +++ b/GPT_SoVITS/prepare_datasets/2-get-hubert-sv-wav32k.py @@ -0,0 +1,423 @@ +import enum +import os +import os.path as osp +import platform +import queue +import sys +import time +import warnings +from pathlib import Path +from typing import List, Optional + +import numpy as np +import torch +import torch.multiprocessing as tmp +import torchaudio +import typer +from rich.progress import BarColumn, Progress, TextColumn, TimeRemainingColumn +from scipy.io import wavfile +from torch.multiprocessing.spawn import spawn + +from GPT_SoVITS.Accelerate.logger import SpeedColumnIteration, console, logger +from GPT_SoVITS.eres2net.ERes2NetV2 import ERes2NetV2 +from GPT_SoVITS.feature_extractor import cnhubert as cnhubert_mod +from tools.my_utils import clean_path, load_audio + +warnings.filterwarnings("ignore", message=".*ComplexHalf support is experimental.*") + +torch.set_grad_enabled(False) + +tmp.set_start_method("spawn", force=True) + +MAXX = 0.95 +ALPHA = 0.5 + + +class Device(str, enum.Enum): + cpu = "cpu" + cuda = "cuda" + mps = "mps" + + +app = typer.Typer( + context_settings={"help_option_names": ["-h", "--help"]}, + add_completion=False, +) + + +class SV: + def __init__(self, device: torch.device, fp16: bool, sv_path: str): + pretrained_state = torch.load(sv_path, map_location="cpu") + embedding_model = ERes2NetV2(baseWidth=24, scale=4, expansion=4) + embedding_model.load_state_dict(pretrained_state) + embedding_model.eval() + self.embedding_model = embedding_model + self.dtype = torch.float16 if fp16 else torch.float32 + if fp16 is False: + self.embedding_model = self.embedding_model.to(device) + else: + self.embedding_model = self.embedding_model.half().to(device) + + def compute_embedding(self, wav: torch.Tensor): + if not torch.cuda.is_available(): + wav = wav.float() + feat = torch.stack( + [ + torchaudio.compliance.kaldi.fbank(wav0.unsqueeze(0), num_mel_bins=80, sample_frequency=16000, dither=0) + for wav0 in wav + ] + ).to(self.dtype) + sv_emb: torch.Tensor = self.embedding_model.forward3(feat) + return sv_emb + + +def parse_inp_text_line(line: str) -> str: + wav_name, _, __, ___ = line.split("|", 3) + return wav_name + + +def build_device_strings(device_type: str, device_ids: List[int], procs_per_device: int) -> List[str]: + devices: List[str] = [] + for device_id in device_ids: + dstr = f"{device_type}:{device_id}" + devices.extend([dstr] * procs_per_device) + return devices + + +def worker_entry( + rank: int, + device_strs: List[str], + tasks_q: "tmp.Queue[tuple[int, str] | None]", + results_q: "tmp.Queue[int]", + cnhubert_base_dir: str, + sv: Optional[str], + opt_dir: str, + fp16: bool, +): + device_str = device_strs[rank] + device = torch.device(device_str) + + if device.type == "cuda": + assert torch.cuda.is_available() + torch.cuda.set_device(device.index) + elif device.type == "mps": + assert torch.mps.is_available() + elif device.type == "xpu": + assert torch.xpu.is_available() + + hubert_dir = osp.join(opt_dir, "4-cnhubert") + wav32dir = osp.join(opt_dir, "5-wav32k") + os.makedirs(hubert_dir, exist_ok=True) + os.makedirs(wav32dir, exist_ok=True) + + if not osp.exists(cnhubert_base_dir): + raise 
FileNotFoundError(f"CNHuBERT Base Dir not found: {cnhubert_base_dir}") + cnhubert_mod.cnhubert_base_path = cnhubert_base_dir + + model = cnhubert_mod.get_model() + resample = torchaudio.transforms.Resample(32000, 16000) + + if fp16: + model = model.half().to(device) + resample = resample.half().to(device) + else: + model = model.to(device) + resample = resample.to(device) + + sv_model: SV | None = None + + sv_cn_dir = osp.join(opt_dir, "7-sv_cn") + if sv: + os.makedirs(sv_cn_dir, exist_ok=True) + extract_sv = True + sv_model = SV(device, fp16, sv) + else: + extract_sv = False + + def process_one_item( + wav_name: str, + wav_path: str, + model_: cnhubert_mod.CNHubert, + resample_: torchaudio.transforms.Resample, + use_fp16: bool = False, + extract_sv: bool = False, + ) -> bool: + hubert_path = osp.join(hubert_dir, f"{wav_name}.pt") + if osp.exists(hubert_path): + return False + + tmp_audio = load_audio(wav_path, 32000) + tmp_max = float(np.abs(tmp_audio).max()) if tmp_audio.size > 0 else 0.0 + if tmp_max <= 0: + logger.warning(f"[W{rank}] Filtered: Empty or silent audio: {wav_path}") + return False + if tmp_max > 2.2: + logger.warning(f"[W{rank}] Filtered: peak={tmp_max:.3f}") + return False + + tmp_audio32 = (tmp_audio / tmp_max * (MAXX * ALPHA * 32768.0)) + ((1.0 - ALPHA) * 32768.0) * tmp_audio + + if extract_sv: + assert sv_cn_dir + assert sv_model + sv_path = osp.join(sv_cn_dir, f"{wav_name}.pt") + if not osp.exists(sv_path): + tmp_audio32_sv = (tmp_audio / tmp_max * (MAXX * ALPHA)) + (1.0 - ALPHA) * tmp_audio + tensor_wav32_sv = torch.from_numpy(tmp_audio32_sv).to(device) + if use_fp16: + tensor_wav32_sv = tensor_wav32_sv.half() + tensor_wav16_sv: torch.Tensor = resample_(tensor_wav32_sv) + out_sv = sv_model.compute_embedding(tensor_wav16_sv.unsqueeze(0)).cpu() + torch.save(out_sv, sv_path) + + tensor_wav32 = torch.from_numpy(tmp_audio32).to(device) + + if use_fp16: + tensor_wav32 = tensor_wav32.half() + + tensor_wav16 = resample_(tensor_wav32) + + out: torch.Tensor = model_.model(tensor_wav16.unsqueeze(0))["last_hidden_state"] # [1, T, 768] + ssl = out.transpose(1, 2).contiguous().cpu() # [1, 768, T] + + if torch.isnan(ssl).any(): + return True + + wavfile.write( + osp.join(wav32dir, f"{osp.splitext(wav_name)[0]}.wav"), + 32000, + tmp_audio32.astype(np.int16), + ) + + torch.save(ssl, hubert_path) + return False + + i = 0 + while True: + item = tasks_q.get() + if item is None: + break + + idx, wav_path = item + + i += 1 + if i % 10 == 0: + match device.index: + case "cuda": + torch.cuda.empty_cache() + case "mps": + torch.mps.empty_cache() + case "xpu": + torch.xpu.empty_cache() + + try: + name = clean_path(osp.basename(wav_path)) + + is_nan = process_one_item( + wav_name=name, + wav_path=wav_path, + model_=model, + resample_=resample, + use_fp16=fp16, + extract_sv=extract_sv, + ) + + if is_nan and fp16: + model = model.float() + resample = resample.float() + is_nan = process_one_item( + wav_name=name, + wav_path=wav_path, + model_=model, + resample_=resample, + use_fp16=False, + extract_sv=False, + ) + if is_nan: + logger.error(f"[W{rank}] Failed: NaN Audio {name}") + + model = model.half() + resample = resample.half() + + except Exception as e: + del ( + device_str, + hubert_dir, + wav32dir, + cnhubert_base_dir, + tasks_q, + results_q, + opt_dir, + model, + resample, + sv_cn_dir, + sv_model, + device_strs, + idx, + sv, + i, + ) + logger.exception(f"[W{rank}] Failed: {wav_path}") + raise e + + results_q.put(idx) + + sys.exit(0) + + +@app.command() +def main( + inp_list: Path = 
typer.Option( + ..., + "--inp-list", + file_okay=True, + dir_okay=False, + exists=True, + readable=True, + show_default=False, + help="list File: wav|spk|lang|text", + ), + wav_dir: Optional[Path] = typer.Option( + None, "--wav-dir", file_okay=False, dir_okay=True, readable=True, show_default=False, help="Wav Audio Dir" + ), + opt: Path = typer.Option( + ..., "--opt", file_okay=False, dir_okay=True, writable=True, show_default=False, help="Output Directory" + ), + cnhubert_dir: Path = typer.Option( + ..., + "--cnhubert", + exists=True, + file_okay=False, + dir_okay=True, + readable=True, + show_default=False, + help="Path to CNHuBERT Pretrained Models", + ), + sv: Optional[Path] = typer.Option( + None, + "--sv", + exists=True, + file_okay=True, + dir_okay=False, + readable=True, + show_default=False, + help="(optional) SV Model Path, If Set, Extract SV Embeddings", + ), + device: Device = typer.Option(Device.cpu, "--device", help="Compute device"), + device_id: str = typer.Option("0", "--device-id", help="CUDA_VISIBLE_DEVICE, Such as '0,1,2'"), + nproc: int = typer.Option(1, "--nproc", min=1, help="Number of processes per GPU"), + fp16: bool = typer.Option(False, is_flag=True, flag_value=True, help="Use FP16"), +): + device_ids = [int(x) for x in device_id.split(",") if x.strip() != ""] + if device in {"cpu", "mps"} and device_ids != [0]: + raise ValueError(f"Invalid Device IDs for {device=}: {device_ids}") + if nproc < 1: + raise ValueError(f"Invalid nproc: {nproc}") + + os.makedirs(opt, exist_ok=True) + + with open(inp_list, "r", encoding="utf8") as f: + lines = [ln for ln in f.read().splitlines() if ln.strip()] + + tasks_all: list[tuple[int, str]] = [] + for idx, line in enumerate(lines): + try: + wav_name = parse_inp_text_line(line) + if wav_dir: + wav_name = clean_path(osp.basename(wav_name)) + wav_path = osp.join(str(wav_dir), wav_name) + else: + wav_path = wav_name + tasks_all.append((idx, wav_path)) + except Exception: + logger.exception(f"Skip line {idx}: {line}") + + n_tasks = len(tasks_all) + if n_tasks == 0: + logger.warning("Empty list. 
Nothing to do.") + return + + device_strs = build_device_strings(device, device_ids, nproc) + world_size = len(device_strs) + + tasks_q: "tmp.Queue[tuple[int, str] | None]" = tmp.Queue() + results_q: "tmp.Queue[int]" = tmp.Queue() + + for task in tasks_all: + tasks_q.put(task) + for _ in range(world_size): + tasks_q.put(None) + + completed = 0 + + with Progress( + TextColumn("[cyan]{task.description}"), + BarColumn(), + TextColumn("{task.completed}/{task.total}"), + SpeedColumnIteration(show_speed=True), + TimeRemainingColumn(elapsed_when_finished=True), + console=console, + redirect_stderr=False, + redirect_stdout=False, + ) as progress: + if sv: + progress_task = progress.add_task("Extract CNHuBERT/SV & Save Wav 32k", total=n_tasks) + else: + progress_task = progress.add_task("Extract CNHuBERT & Save Wav 32k", total=n_tasks) + + ctx = spawn( + worker_entry, + args=(device_strs, tasks_q, results_q, cnhubert_dir, sv, opt, fp16), + nprocs=world_size, + join=False, + daemon=False, + ) + assert ctx is not None + + while completed < n_tasks: + try: + _ = results_q.get(timeout=0.01) + completed += 1 + progress.update(progress_task, advance=1) + except queue.Empty: + pass + + for p in ctx.processes: + if p is None: + continue + if (p.exitcode is not None and p.exitcode != 0) or (not p.is_alive()): + progress.live.stop() + try: + ctx.join() + except Exception as e: + console.print(e) + finally: + logger.critical(f"Worker PID {p.pid} crashed with exit code {p.exitcode}.") + sys.exit(1) + + ctx.join() + + logger.info(f"Done. Output dir: {opt}") + + +def is_powershell_env(env: dict) -> bool: + return any(k in env for k in ("PSHOME", "POWERSHELL_DISTRIBUTION_CHANNEL", "PSModulePath")) + + +def get_prog_name() -> str: + system = platform.system() + env = os.environ.copy() + script_rel = os.path.join("GPT_SoVITS", "prepare_datasets", os.path.basename(__file__)) + if system == "Windows": + if is_powershell_env(env): + return rf"$env:PYTHONPATH='.'; python -s {script_rel}" + else: + return rf"set PYTHONPATH=. && python -s {script_rel}" + else: + return f"PYTHONPATH=. 
python -s {script_rel}" + + +if __name__ == "__main__": + t = time.perf_counter() + app(prog_name=get_prog_name()) + logger.info(f"Exec Time: {time.perf_counter() - t:.2f} secs") diff --git a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py deleted file mode 100644 index 3a84c014..00000000 --- a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py +++ /dev/null @@ -1,134 +0,0 @@ -# -*- coding: utf-8 -*- - -import sys -import os - -inp_text = os.environ.get("inp_text") -inp_wav_dir = os.environ.get("inp_wav_dir") -exp_name = os.environ.get("exp_name") -i_part = os.environ.get("i_part") -all_parts = os.environ.get("all_parts") -if "_CUDA_VISIBLE_DEVICES" in os.environ: - os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"] -from feature_extractor import cnhubert - -opt_dir = os.environ.get("opt_dir") -cnhubert.cnhubert_base_path = os.environ.get("cnhubert_base_dir") -import torch - -is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available() - -import traceback -import numpy as np -from scipy.io import wavfile -import librosa - -now_dir = os.getcwd() -sys.path.append(now_dir) -from tools.my_utils import load_audio, clean_path - -# from config import cnhubert_base_path -# cnhubert.cnhubert_base_path=cnhubert_base_path -# inp_text=sys.argv[1] -# inp_wav_dir=sys.argv[2] -# exp_name=sys.argv[3] -# i_part=sys.argv[4] -# all_parts=sys.argv[5] -# os.environ["CUDA_VISIBLE_DEVICES"]=sys.argv[6] -# cnhubert.cnhubert_base_path=sys.argv[7] -# opt_dir="/data/docker/liujing04/gpt-vits/fine_tune_dataset/%s"%exp_name - -from time import time as ttime -import shutil - - -def my_save(fea, path): #####fix issue: torch.save doesn't support chinese path - dir = os.path.dirname(path) - name = os.path.basename(path) - # tmp_path="%s/%s%s.pth"%(dir,ttime(),i_part) - tmp_path = "%s%s.pth" % (ttime(), i_part) - torch.save(fea, tmp_path) - shutil.move(tmp_path, "%s/%s" % (dir, name)) - - -hubert_dir = "%s/4-cnhubert" % (opt_dir) -wav32dir = "%s/5-wav32k" % (opt_dir) -os.makedirs(opt_dir, exist_ok=True) -os.makedirs(hubert_dir, exist_ok=True) -os.makedirs(wav32dir, exist_ok=True) - -maxx = 0.95 -alpha = 0.5 -if torch.cuda.is_available(): - device = "cuda:0" -# elif torch.backends.mps.is_available(): -# device = "mps" -else: - device = "cpu" -model = cnhubert.get_model() -# is_half=False -if is_half == True: - model = model.half().to(device) -else: - model = model.to(device) - -nan_fails = [] - - -def name2go(wav_name, wav_path): - hubert_path = "%s/%s.pt" % (hubert_dir, wav_name) - if os.path.exists(hubert_path): - return - tmp_audio = load_audio(wav_path, 32000) - tmp_max = np.abs(tmp_audio).max() - if tmp_max > 2.2: - print("%s-filtered,%s" % (wav_name, tmp_max)) - return - tmp_audio32 = (tmp_audio / tmp_max * (maxx * alpha * 32768)) + ((1 - alpha) * 32768) * tmp_audio - tmp_audio32b = (tmp_audio / tmp_max * (maxx * alpha * 1145.14)) + ((1 - alpha) * 1145.14) * tmp_audio - tmp_audio = librosa.resample(tmp_audio32b, orig_sr=32000, target_sr=16000) # 不是重采样问题 - tensor_wav16 = torch.from_numpy(tmp_audio) - if is_half == True: - tensor_wav16 = tensor_wav16.half().to(device) - else: - tensor_wav16 = tensor_wav16.to(device) - ssl = model.model(tensor_wav16.unsqueeze(0))["last_hidden_state"].transpose(1, 2).cpu() # torch.Size([1, 768, 215]) - if np.isnan(ssl.detach().numpy()).sum() != 0: - nan_fails.append((wav_name, wav_path)) - print("nan filtered:%s" % wav_name) - return - wavfile.write( - "%s/%s" % (wav32dir, wav_name), - 32000, 
- tmp_audio32.astype("int16"), - ) - my_save(ssl, hubert_path) - - -with open(inp_text, "r", encoding="utf8") as f: - lines = f.read().strip("\n").split("\n") - -for line in lines[int(i_part) :: int(all_parts)]: - try: - # wav_name,text=line.split("\t") - wav_name, spk_name, language, text = line.split("|") - wav_name = clean_path(wav_name) - if inp_wav_dir != "" and inp_wav_dir != None: - wav_name = os.path.basename(wav_name) - wav_path = "%s/%s" % (inp_wav_dir, wav_name) - - else: - wav_path = wav_name - wav_name = os.path.basename(wav_name) - name2go(wav_name, wav_path) - except: - print(line, traceback.format_exc()) - -if len(nan_fails) > 0 and is_half == True: - is_half = False - model = model.float() - for wav in nan_fails: - try: - name2go(wav[0], wav[1]) - except: - print(wav_name, traceback.format_exc()) diff --git a/GPT_SoVITS/prepare_datasets/2-get-sv.py b/GPT_SoVITS/prepare_datasets/2-get-sv.py deleted file mode 100644 index 80b0ad69..00000000 --- a/GPT_SoVITS/prepare_datasets/2-get-sv.py +++ /dev/null @@ -1,115 +0,0 @@ -# -*- coding: utf-8 -*- - -import sys -import os - -inp_text = os.environ.get("inp_text") -inp_wav_dir = os.environ.get("inp_wav_dir") -exp_name = os.environ.get("exp_name") -i_part = os.environ.get("i_part") -all_parts = os.environ.get("all_parts") -if "_CUDA_VISIBLE_DEVICES" in os.environ: - os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"] - -opt_dir = os.environ.get("opt_dir") -sv_path = os.environ.get("sv_path") -import torch - -is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available() - -import traceback -import torchaudio - -now_dir = os.getcwd() -sys.path.append(now_dir) -sys.path.append(f"{now_dir}/GPT_SoVITS/eres2net") -from tools.my_utils import clean_path -from time import time as ttime -import shutil -from ERes2NetV2 import ERes2NetV2 -import kaldi as Kaldi - - -def my_save(fea, path): #####fix issue: torch.save doesn't support chinese path - dir = os.path.dirname(path) - name = os.path.basename(path) - # tmp_path="%s/%s%s.pth"%(dir,ttime(),i_part) - tmp_path = "%s%s.pth" % (ttime(), i_part) - torch.save(fea, tmp_path) - shutil.move(tmp_path, "%s/%s" % (dir, name)) - - -sv_cn_dir = "%s/7-sv_cn" % (opt_dir) -wav32dir = "%s/5-wav32k" % (opt_dir) -os.makedirs(opt_dir, exist_ok=True) -os.makedirs(sv_cn_dir, exist_ok=True) -os.makedirs(wav32dir, exist_ok=True) - -maxx = 0.95 -alpha = 0.5 -if torch.cuda.is_available(): - device = "cuda:0" -# elif torch.backends.mps.is_available(): -# device = "mps" -else: - device = "cpu" - - -class SV: - def __init__(self, device, is_half): - pretrained_state = torch.load(sv_path, map_location="cpu") - embedding_model = ERes2NetV2(baseWidth=24, scale=4, expansion=4) - embedding_model.load_state_dict(pretrained_state) - embedding_model.eval() - self.embedding_model = embedding_model - self.res = torchaudio.transforms.Resample(32000, 16000).to(device) - if is_half == False: - self.embedding_model = self.embedding_model.to(device) - else: - self.embedding_model = self.embedding_model.half().to(device) - self.is_half = is_half - - def compute_embedding3(self, wav): # (1,x)#-1~1 - with torch.no_grad(): - wav = self.res(wav) - if self.is_half == True: - wav = wav.half() - feat = torch.stack( - [Kaldi.fbank(wav0.unsqueeze(0), num_mel_bins=80, sample_frequency=16000, dither=0) for wav0 in wav] - ) - sv_emb = self.embedding_model.forward3(feat) - return sv_emb - - -sv = SV(device, is_half) - - -def name2go(wav_name, wav_path): - sv_cn_path = "%s/%s.pt" % (sv_cn_dir, wav_name) - if 
os.path.exists(sv_cn_path): - return - wav_path = "%s/%s" % (wav32dir, wav_name) - wav32k, sr0 = torchaudio.load(wav_path) - assert sr0 == 32000 - wav32k = wav32k.to(device) - emb = sv.compute_embedding3(wav32k).cpu() # torch.Size([1, 20480]) - my_save(emb, sv_cn_path) - - -with open(inp_text, "r", encoding="utf8") as f: - lines = f.read().strip("\n").split("\n") - -for line in lines[int(i_part) :: int(all_parts)]: - try: - wav_name, spk_name, language, text = line.split("|") - wav_name = clean_path(wav_name) - if inp_wav_dir != "" and inp_wav_dir != None: - wav_name = os.path.basename(wav_name) - wav_path = "%s/%s" % (inp_wav_dir, wav_name) - - else: - wav_path = wav_name - wav_name = os.path.basename(wav_name) - name2go(wav_name, wav_path) - except: - print(line, traceback.format_exc()) diff --git a/GPT_SoVITS/prepare_datasets/3-get-semantic.py b/GPT_SoVITS/prepare_datasets/3-get-semantic.py index ddb0607c..27ca5f6d 100644 --- a/GPT_SoVITS/prepare_datasets/3-get-semantic.py +++ b/GPT_SoVITS/prepare_datasets/3-get-semantic.py @@ -1,118 +1,313 @@ +import enum +import gc import os - -inp_text = os.environ.get("inp_text") -exp_name = os.environ.get("exp_name") -i_part = os.environ.get("i_part") -all_parts = os.environ.get("all_parts") -if "_CUDA_VISIBLE_DEVICES" in os.environ: - os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"] -opt_dir = os.environ.get("opt_dir") -pretrained_s2G = os.environ.get("pretrained_s2G") -s2config_path = os.environ.get("s2config_path") - -if os.path.exists(pretrained_s2G): - ... -else: - raise FileNotFoundError(pretrained_s2G) -# version=os.environ.get("version","v2") -size = os.path.getsize(pretrained_s2G) -if size < 82978 * 1024: - version = "v1" -elif size < 100 * 1024 * 1024: - version = "v2" -elif size < 103520 * 1024: - version = "v1" -elif size < 700 * 1024 * 1024: - version = "v2" -else: - version = "v3" -import torch - -is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available() -import traceback +import os.path as osp +import platform +import queue import sys +import time +from pathlib import Path +from typing import List, Tuple -now_dir = os.getcwd() -sys.path.append(now_dir) -import logging -import utils +import torch +import torch.multiprocessing as tmp +import typer +from rich.progress import BarColumn, Progress, TimeRemainingColumn, TextColumn +from torch.multiprocessing.spawn import spawn -if version != "v3": - from module.models import SynthesizerTrn -else: - from module.models import SynthesizerTrnV3 as SynthesizerTrn -from tools.my_utils import clean_path +from GPT_SoVITS.Accelerate.logger import console, logger, SpeedColumnIteration +from GPT_SoVITS.module.models import SynthesizerTrn, SynthesizerTrnV3 +from GPT_SoVITS.process_ckpt import inspect_version +from tools.my_utils import DictToAttrRecursive, clean_path -logging.getLogger("numba").setLevel(logging.WARNING) -# from config import pretrained_s2G +torch.set_grad_enabled(False) -# inp_text=sys.argv[1] -# exp_name=sys.argv[2] -# i_part=sys.argv[3] -# all_parts=sys.argv[4] -# os.environ["CUDA_VISIBLE_DEVICES"]=sys.argv[5] -# opt_dir="/data/docker/liujing04/gpt-vits/fine_tune_dataset/%s"%exp_name +tmp.set_start_method("spawn", force=True) -hubert_dir = "%s/4-cnhubert" % (opt_dir) -semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part) -if os.path.exists(semantic_path) == False: +class Device(str, enum.Enum): + cpu = "cpu" + cuda = "cuda" + mps = "mps" + + +app = typer.Typer( + context_settings={"help_option_names": ["-h", "--help"]}, + 
add_completion=False, +) + + +def parse_inp_text_line(line: str) -> str: + wav_name, _, __, ___ = line.split("|", 3) + return wav_name + + +def build_device_strings(device_type: str, device_ids: List[int], procs_per_device: int) -> List[str]: + devices: List[str] = [] + for device_id in device_ids: + dstr = f"{device_type}:{device_id}" if device_type in {"cuda", "mps"} else "cpu" + devices.extend([dstr] * procs_per_device) + return devices + + +def worker_entry( + rank: int, + device_strs: List[str], + tasks_q: "tmp.Queue[Tuple[int, str] | None]", + results_q: "tmp.Queue[Tuple[int, str]]", + pretrained_s2g: str, + opt_dir: str, + fp16: bool, +): + device_str = device_strs[rank] + device = torch.device(device_str) + + if device.type == "cuda": + assert torch.cuda.is_available() + torch.cuda.set_device(device.index) + elif device.type == "mps": + assert torch.mps.is_available() + elif device.type == "xpu": + assert torch.xpu.is_available() + + hubert_dir = osp.join(opt_dir, "4-cnhubert") os.makedirs(opt_dir, exist_ok=True) - if torch.cuda.is_available(): - device = "cuda" - # elif torch.backends.mps.is_available(): - # device = "mps" - else: - device = "cpu" - hps = utils.get_hparams_from_file(s2config_path) - vq_model = SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - version=version, - **hps.model, - ) - if is_half == True: - vq_model = vq_model.half().to(device) - else: - vq_model = vq_model.to(device) - vq_model.eval() - # utils.load_checkpoint(utils.latest_checkpoint_path(hps.s2_ckpt_dir, "G_*.pth"), vq_model, None, True) - # utils.load_checkpoint(pretrained_s2G, vq_model, None, True) - print( - vq_model.load_state_dict( - torch.load(pretrained_s2G, map_location="cpu", weights_only=False)["weight"], strict=False - ) - ) + if not osp.exists(hubert_dir): + raise FileNotFoundError(hubert_dir) - def name2go(wav_name, lines): - hubert_path = "%s/%s.pt" % (hubert_dir, wav_name) - if os.path.exists(hubert_path) == False: - return - ssl_content = torch.load(hubert_path, map_location="cpu") - if is_half == True: + version, _, _, hps_dict, dict_s2 = inspect_version(pretrained_s2g) + hps = DictToAttrRecursive(hps_dict) + + if version in {"v3", "v4"}: + vq_model: SynthesizerTrn | SynthesizerTrnV3 = SynthesizerTrnV3( + hps.data.filter_length // 2 + 1, + hps.train.segment_size // hps.data.hop_length, + n_speakers=hps.data.n_speakers, + version=version, + **hps.model, + ) + else: + vq_model = SynthesizerTrn( + hps.data.filter_length // 2 + 1, + hps.train.segment_size // hps.data.hop_length, + n_speakers=hps.data.n_speakers, + version=version, + **hps.model, + ) + + load_result = vq_model.load_state_dict(dict_s2["weight"]) + if rank == 0: + console.print("") + console.print(load_result) + + for name in list(vq_model._modules.keys()): + if name not in ["quantizer", "ssl_proj"]: + del vq_model._modules[name] + del dict_s2 + + if fp16: + vq_model = vq_model.to(device).half() + else: + vq_model.to(device) + + match device.index: + case "cuda": + torch.cuda.empty_cache() + case "mps": + torch.mps.empty_cache() + case "xpu": + torch.xpu.empty_cache() + gc.collect() + + def extract_semantic_from_hubert_pt(wav_basename: str) -> str | None: + hubert_path = osp.join(hubert_dir, f"{wav_basename}.pt") + if not osp.exists(hubert_path): + return None + + ssl_content: torch.Tensor = torch.load(hubert_path, map_location="cpu") + if fp16: ssl_content = ssl_content.half().to(device) else: ssl_content = ssl_content.to(device) + codes = 
vq_model.extract_latent(ssl_content) - semantic = " ".join([str(i) for i in codes[0, 0, :].tolist()]) - lines.append("%s\t%s" % (wav_name, semantic)) + vec = codes[0, 0, :].tolist() - with open(inp_text, "r", encoding="utf8") as f: - lines = f.read().strip("\n").split("\n") + return " ".join(str(i) for i in vec) + + i = 0 + while True: + item = tasks_q.get() + if item is None: + break + + idx, wav_name = item + + i += 1 + if i % 10 == 0: + match device.index: + case "cuda": + torch.cuda.empty_cache() + case "mps": + torch.mps.empty_cache() + case "xpu": + torch.xpu.empty_cache() - lines1 = [] - for line in lines[int(i_part) :: int(all_parts)]: - # print(line) try: - # wav_name,text=line.split("\t") - wav_name, spk_name, language, text = line.split("|") - wav_name = clean_path(wav_name) - wav_name = os.path.basename(wav_name) - # name2go(name,lines1) - name2go(wav_name, lines1) - except: - print(line, traceback.format_exc()) - with open(semantic_path, "w", encoding="utf8") as f: - f.write("\n".join(lines1)) + name = clean_path(osp.basename(wav_name)) + semantic = extract_semantic_from_hubert_pt(name) + if semantic is None: + results_q.put((idx, "")) + else: + results_q.put((idx, f"{name}\t{semantic}")) + except Exception as e: + del device_str, vq_model, hubert_dir, _, version, hps_dict, hps, item, idx, i, load_result + logger.exception(f"[W{rank}] Failed on: {wav_name}") + raise e + + sys.exit(0) + + +@app.command() +def main( + inp_list: Path = typer.Option( + ..., + "--inp-list", + file_okay=True, + dir_okay=False, + exists=True, + readable=True, + show_default=False, + help="list file: wav|spk|lang|text", + ), + opt: Path = typer.Option( + ..., "--opt", file_okay=False, dir_okay=True, writable=True, show_default=False, help="Output Directory" + ), + pretrained_s2g: Path = typer.Option( + ..., + "--pretrained-s2g", + file_okay=True, + dir_okay=False, + exists=True, + readable=True, + show_default=False, + help="Path to pretrained s2G checkpoint", + ), + device: Device = typer.Option(Device.cpu, "--device", help="Compute device"), + device_id: str = typer.Option("0", "--device-id", help="CUDA_VISIBLE_DEVICES style, e.g. 
'0,1'"), + nproc: int = typer.Option(1, "--nproc", min=1, help="Processes per device"), + fp16: bool = typer.Option(True, is_flag=True, flag_value=True, help="Use FP16 on CUDA"), +): + device_ids = [int(x) for x in device_id.split(",") if x.strip() != ""] + if device in {"cpu", "mps"} and device_ids != [0]: + raise ValueError(f"Invalid Device ID {device_ids}") + if nproc < 1: + raise ValueError(f"Invalid Num Process {nproc}") + + os.makedirs(opt, exist_ok=True) + merged_path = osp.join(opt, "6-name2semantic.tsv") + + with open(inp_list, "r", encoding="utf8") as f: + raw_lines = [ln for ln in f.read().splitlines() if ln.strip()] + + tasks_all: List[Tuple[int, str]] = [] + for idx, line in enumerate(raw_lines): + try: + wav_name = parse_inp_text_line(line) + tasks_all.append((idx, wav_name)) + except Exception: + logger.exception(f"Skip line {idx}: {line}") + + n_tasks = len(tasks_all) + if n_tasks == 0: + logger.warning("Empty list") + with open(merged_path, "w", encoding="utf8") as fout: + pass + return + + device_strs = build_device_strings(device, device_ids, nproc) + world_size = len(device_strs) + + tasks_q: "tmp.Queue[Tuple[int, str] | None]" = tmp.Queue() + results_q: "tmp.Queue[Tuple[int, str]]" = tmp.Queue() + + for task in tasks_all: + tasks_q.put(task) + for _ in range(world_size): + tasks_q.put(None) + + ordered: List[str] = [""] * n_tasks + completed = 0 + + with Progress( + TextColumn("[cyan]{task.description}"), + BarColumn(), + TextColumn("{task.completed}/{task.total}"), + SpeedColumnIteration(show_speed=True), + TimeRemainingColumn(elapsed_when_finished=True), + console=console, + ) as progress: + progress_task = progress.add_task("Extract Semantic Codes", total=n_tasks) + + ctx = spawn( + worker_entry, + args=(device_strs, tasks_q, results_q, pretrained_s2g, opt, fp16), + nprocs=world_size, + join=False, + daemon=False, + ) + assert ctx + + while completed < n_tasks: + try: + idx, line = results_q.get(timeout=0.05) + if line: + ordered[idx] = line + completed += 1 + progress.update(progress_task, advance=1) + except queue.Empty: + pass + + for p in ctx.processes: + assert p + if (p.exitcode is not None and p.exitcode != 0) or (not p.is_alive()): + progress.live.stop() + try: + ctx.join() + except Exception as e: + console.print(e) + finally: + logger.critical(f"Worker PID {p.pid} crashed with exit code {p.exitcode}.") + sys.exit(1) + ctx.join() + + with open(merged_path, "w", encoding="utf8") as fout: + for line in ordered: + if line: + fout.write(line + "\n") + + logger.info(f"Done: {merged_path}") + + +def is_powershell_env(env: dict) -> bool: + return any(k in env for k in ("PSHOME", "POWERSHELL_DISTRIBUTION_CHANNEL", "PSModulePath")) + + +def get_prog_name() -> str: + system = platform.system() + env = os.environ.copy() + script_rel = osp.join("GPT_SoVITS", "prepare_datasets", osp.basename(__file__)) + if system == "Windows": + if is_powershell_env(env): + return rf"$env:PYTHONPATH='.'; python -s {script_rel}" + else: + return rf"set PYTHONPATH=. && python -s {script_rel}" + else: + return f"PYTHONPATH=. 
python -s {script_rel}" + + +if __name__ == "__main__": + t = time.perf_counter() + app(prog_name=get_prog_name()) + logger.info(f"Exec Time: {time.perf_counter() - t:.2f} secs") diff --git a/GPT_SoVITS/process_ckpt.py b/GPT_SoVITS/process_ckpt.py index 20db9b19..3a563a0b 100644 --- a/GPT_SoVITS/process_ckpt.py +++ b/GPT_SoVITS/process_ckpt.py @@ -1,44 +1,28 @@ +import os +import shutil import traceback from collections import OrderedDict from time import time as ttime -import shutil -import os +from typing import Any + import torch + +from GPT_SoVITS.module.models import set_serialization from tools.i18n.i18n import I18nAuto i18n = I18nAuto() +set_serialization() -def my_save(fea, path): #####fix issue: torch.save doesn't support chinese path +def save(fea, path): # fix issue: torch.save doesn't support chinese path dir = os.path.dirname(path) name = os.path.basename(path) - tmp_path = "%s.pth" % (ttime()) + tmp_path = f"{ttime()}.pth" torch.save(fea, tmp_path) - shutil.move(tmp_path, "%s/%s" % (dir, name)) + shutil.move(tmp_path, f"{dir}/{name}") -from io import BytesIO - -model_version2byte = { - "v3": b"03", - "v4": b"04", - "v2Pro": b"05", - "v2ProPlus": b"06", -} - - -def my_save2(fea, path, model_version): - bio = BytesIO() - torch.save(fea, bio) - bio.seek(0) - data = bio.getvalue() - byte = model_version2byte[model_version] - data = byte + data[2:] - with open(path, "wb") as f: - f.write(data) - - -def savee(ckpt, name, epoch, steps, hps, model_version=None, lora_rank=None): +def save_ckpt(ckpt, name, epoch, steps, hps, lora_rank=None): try: opt = OrderedDict() opt["weight"] = {} @@ -46,93 +30,57 @@ def savee(ckpt, name, epoch, steps, hps, model_version=None, lora_rank=None): if "enc_q" in key: continue opt["weight"][key] = ckpt[key].half() - opt["config"] = hps - opt["info"] = "%sepoch_%siteration" % (epoch, steps) + opt["config"] = hps.to_dict() + opt["info"] = f"{epoch}epoch_{steps}iteration" if lora_rank: opt["lora_rank"] = lora_rank - my_save2(opt, "%s/%s.pth" % (hps.save_weight_dir, name), model_version) - elif model_version != None and "Pro" in model_version: - my_save2(opt, "%s/%s.pth" % (hps.save_weight_dir, name), model_version) - else: - my_save(opt, "%s/%s.pth" % (hps.save_weight_dir, name)) + save(opt, f"{hps.save_weight_dir}/{name}.pth") return "Success." 
-    except:
+    except Exception:
         return traceback.format_exc()
 
 
-"""
-00:v1
-01:v2
-02:v3
-03:v3lora
-04:v4lora
-05:v2Pro
-06:v2ProPlus
-"""
-head2version = {
-    b"00": ["v1", "v1", False],
-    b"01": ["v2", "v2", False],
-    b"02": ["v2", "v3", False],
-    b"03": ["v2", "v3", True],
-    b"04": ["v2", "v4", True],
-    b"05": ["v2", "v2Pro", False],
-    b"06": ["v2", "v2ProPlus", False],
-}
-hash_pretrained_dict = {
-    "dc3c97e17592963677a4a1681f30c653": ["v2", "v2", False],  # s2G488k.pth#sovits_v1_pretrained
-    "43797be674a37c1c83ee81081941ed0f": ["v2", "v3", False],  # s2Gv3.pth#sovits_v3_pretrained
-    "6642b37f3dbb1f76882b69937c95a5f3": ["v2", "v2", False],  # s2G2333K.pth#sovits_v2_pretrained
-    "4f26b9476d0c5033e04162c486074374": ["v2", "v4", False],  # s2Gv4.pth#sovits_v4_pretrained
-    "c7e9fce2223f3db685cdfa1e6368728a": ["v2", "v2Pro", False],  # s2Gv2Pro.pth#sovits_v2Pro_pretrained
-    "66b313e39455b57ab1b0bc0b239c9d0a": ["v2", "v2ProPlus", False],  # s2Gv2ProPlus.pth#sovits_v2ProPlus_pretrained
-}
-import hashlib
-
-
-def get_hash_from_file(sovits_path):
-    with open(sovits_path, "rb") as f:
-        data = f.read(8192)
-        hash_md5 = hashlib.md5()
-        hash_md5.update(data)
-        return hash_md5.hexdigest()
-
-
-def get_sovits_version_from_path_fast(sovits_path):
-    ###1-if it is pretrained sovits models, by hash
-    hash = get_hash_from_file(sovits_path)
-    if hash in hash_pretrained_dict:
-        return hash_pretrained_dict[hash]
-    ###2-new weights, by head
-    with open(sovits_path, "rb") as f:
-        version = f.read(2)
-    if version != b"PK":
-        return head2version[version]
-    ###3-old weights, by file size
-    if_lora_v3 = False
-    size = os.path.getsize(sovits_path)
+def inspect_version(
+    f: str,
+) -> tuple[str, str, bool, Any, dict]:
     """
-    v1weights:about 82942KB
-       half thr:82978KB
-    v2weights:about 83014KB
-    v3weights:about 750MB
+
+    Returns:
+        tuple[model_version, lang_version, is_lora, hps, state_dict]
     """
-    if size < 82978 * 1024:
-        model_version = version = "v1"
-    elif size < 700 * 1024 * 1024:
-        model_version = version = "v2"
+    dict_s2 = torch.load(f, map_location="cpu", mmap=True)
+    hps = dict_s2["config"]
+    version: str | None = None
+    if "version" in hps.keys():
+        version = hps["version"]
+    is_lora = "lora_rank" in dict_s2.keys()
+
+    if version is not None:
+        # V3 V4 Lora & Finetuned V2 Pro
+        lang_version = "v2"
+        model_version = version
+        return model_version, lang_version, is_lora, hps, dict_s2
     else:
-        version = "v2"
-        model_version = "v3"
-    return version, model_version, if_lora_v3
+        # V2 Pro Pretrain
+        if hps["model"]["gin_channels"] == 1024:
+            if hps["model"]["upsample_initial_channel"] == 768:
+                lang_version = "v2"
+                model_version = "v2ProPlus"
+            else:
+                lang_version = "v2"
+                model_version = "v2Pro"
+            return model_version, lang_version, is_lora, hps, dict_s2
 
-def load_sovits_new(sovits_path):
-    f = open(sovits_path, "rb")
-    meta = f.read(2)
-    if meta != b"PK":
-        data = b"PK" + f.read()
-        bio = BytesIO()
-        bio.write(data)
-        bio.seek(0)
-        return torch.load(bio, map_location="cpu", weights_only=False)
-    return torch.load(sovits_path, map_location="cpu", weights_only=False)
+    # Old V1/V2
+    if "dec.conv_pre.weight" in dict_s2["weight"].keys():
+        if dict_s2["weight"]["enc_p.text_embedding.weight"].shape[0] == 322:
+            lang_version = model_version = "v1"
+        else:
+            lang_version = model_version = "v2"
+    else:  # Old Finetuned V3 & V3/V4 Pretrain
+        lang_version = "v2"
+        model_version = "v3"
+        if dict_s2["info"] == "pretrained_s2G_v4":
+            model_version = "v4"
+
+    return model_version, lang_version, is_lora, hps, dict_s2
diff --git a/GPT_SoVITS/s1_train.py b/GPT_SoVITS/s1_train.py
index 
1176f0bc..c28c7cfc 100644 --- a/GPT_SoVITS/s1_train.py +++ b/GPT_SoVITS/s1_train.py @@ -1,32 +1,38 @@ # modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/train_t2s.py -import os - -if "_CUDA_VISIBLE_DEVICES" in os.environ: - os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"] import argparse import logging +import os import platform +from collections import OrderedDict from pathlib import Path +from typing import Any import torch -from AR.data.data_module import Text2SemanticDataModule -from AR.models.t2s_lightning_module import Text2SemanticLightningModule -from AR.utils.io import load_yaml_config from pytorch_lightning import Trainer, seed_everything from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.loggers import TensorBoardLogger # WandbLogger -from pytorch_lightning.strategies import DDPStrategy +from pytorch_lightning.strategies import DDPStrategy, SingleDeviceStrategy +from pytorch_lightning.strategies.strategy import Strategy + +from GPT_SoVITS.AR.data.data_module import Text2SemanticDataModule +from GPT_SoVITS.AR.models.t2s_lightning_module import Text2SemanticLightningModule +from GPT_SoVITS.AR.utils import get_newest_ckpt +from GPT_SoVITS.AR.utils.io import load_yaml_config +from GPT_SoVITS.process_ckpt import save logging.getLogger("numba").setLevel(logging.WARNING) logging.getLogger("matplotlib").setLevel(logging.WARNING) torch.set_float32_matmul_precision("high") -from collections import OrderedDict - -from AR.utils import get_newest_ckpt -from process_ckpt import my_save -class my_model_ckpt(ModelCheckpoint): +os.environ["MASTER_ADDR"] = "localhost" +if platform.system() == "Windows": + os.environ["USE_LIBUV"] = "0" + +torch.set_grad_enabled(True) + + +class ARModelCheckpoint(ModelCheckpoint): def __init__( self, config, @@ -44,40 +50,31 @@ class my_model_ckpt(ModelCheckpoint): self.config = config def on_train_epoch_end(self, trainer, pl_module): - # if not self._should_skip_saving_checkpoint(trainer) and self._should_save_on_train_epoch_end(trainer): if self._should_save_on_train_epoch_end(trainer): monitor_candidates = self._monitor_candidates(trainer) + self._save_topk_checkpoint(trainer, monitor_candidates) + if self.if_save_latest is True: # 如果设置只保存最后一个ckpt,在保存下一个ckpt后要清理掉之前的所有ckpt + to_clean = list(os.listdir(self.dirpath)) + for name in to_clean: + try: + os.remove(f"{self.dirpath}/{name}") + except Exception as _: + pass if self._every_n_epochs >= 1 and (trainer.current_epoch + 1) % self._every_n_epochs == 0: - if ( - self.if_save_latest == True - ): ####如果设置只保存最后一个ckpt,在保存下一个ckpt后要清理掉之前的所有ckpt - to_clean = list(os.listdir(self.dirpath)) - self._save_topk_checkpoint(trainer, monitor_candidates) - if self.if_save_latest == True: - for name in to_clean: - try: - os.remove("%s/%s" % (self.dirpath, name)) - except: - pass - if self.if_save_every_weights == True: - to_save_od = OrderedDict() + if self.if_save_every_weights is True: + to_save_od: OrderedDict[str, Any] = OrderedDict() to_save_od["weight"] = OrderedDict() dictt = trainer.strategy._lightning_module.state_dict() for key in dictt: to_save_od["weight"][key] = dictt[key].half() to_save_od["config"] = self.config - to_save_od["info"] = "GPT-e%s" % (trainer.current_epoch + 1) + to_save_od["info"] = f"GPT-e{trainer.current_epoch + 1}" # torch.save( # print(os.environ) if os.environ.get("LOCAL_RANK", "0") == "0": - my_save( + save( to_save_od, - "%s/%s-e%s.ckpt" - % ( - self.half_weights_save_dir, - self.exp_name, - trainer.current_epoch + 
1, - ), + f"{self.half_weights_save_dir}/{self.exp_name}-e{trainer.current_epoch + 1}.ckpt", ) self._save_last_checkpoint(trainer, monitor_candidates) @@ -91,8 +88,19 @@ def main(args): ckpt_dir = output_dir / "ckpt" ckpt_dir.mkdir(parents=True, exist_ok=True) + if torch.cuda.is_available(): + if torch.cuda.device_count() > 1: + strategy: Strategy = DDPStrategy( + process_group_backend="nccl" if platform.system() != "Windows" else "gloo", find_unused_parameters=False + ) + else: + strategy = SingleDeviceStrategy("cuda:0") + else: + strategy = SingleDeviceStrategy("cpu") + seed_everything(config["train"]["seed"], workers=True) - ckpt_callback: ModelCheckpoint = my_model_ckpt( + + ckpt_callback: ModelCheckpoint = ARModelCheckpoint( config=config, if_save_latest=config["train"]["if_save_latest"], if_save_every_weights=config["train"]["if_save_every_weights"], @@ -106,20 +114,15 @@ def main(args): dirpath=ckpt_dir, ) logger = TensorBoardLogger(name=output_dir.stem, save_dir=output_dir) - os.environ["MASTER_ADDR"] = "localhost" - os.environ["USE_LIBUV"] = "0" + trainer: Trainer = Trainer( max_epochs=config["train"]["epochs"], accelerator="gpu" if torch.cuda.is_available() else "cpu", - # val_check_interval=9999999999999999999999,###不要验证 - # check_val_every_n_epoch=None, limit_val_batches=0, devices=-1 if torch.cuda.is_available() else 1, benchmark=False, fast_dev_run=False, - strategy=DDPStrategy(process_group_backend="nccl" if platform.system() != "Windows" else "gloo") - if torch.cuda.is_available() - else "auto", + strategy=strategy, precision=config["train"]["precision"], logger=logger, num_sanity_val_steps=0, @@ -133,8 +136,6 @@ def main(args): config, train_semantic_path=config["train_semantic_path"], train_phoneme_path=config["train_phoneme_path"], - # dev_semantic_path=args.dev_semantic_path, - # dev_phoneme_path=args.dev_phoneme_path ) try: diff --git a/GPT_SoVITS/s2_train.py b/GPT_SoVITS/s2_train.py index 4b9f6488..b9cfc9f1 100644 --- a/GPT_SoVITS/s2_train.py +++ b/GPT_SoVITS/s2_train.py @@ -1,53 +1,63 @@ -import warnings - -warnings.filterwarnings("ignore") -import os - -import utils - -hps = utils.get_hparams(stage=2) -os.environ["CUDA_VISIBLE_DEVICES"] = hps.train.gpu_numbers.replace("-", ",") import logging +import os +import platform +import sys +import warnings +from contextlib import nullcontext +from random import randint import torch import torch.distributed as dist -import torch.multiprocessing as mp -from torch.cuda.amp import GradScaler, autocast +from rich.progress import BarColumn, Progress, TaskID, TextColumn, TimeElapsedColumn, TimeRemainingColumn +from torch.amp.autocast_mode import autocast +from torch.amp.grad_scaler import GradScaler +from torch.multiprocessing.spawn import spawn from torch.nn import functional as F from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm -logging.getLogger("matplotlib").setLevel(logging.INFO) -logging.getLogger("h5py").setLevel(logging.INFO) -logging.getLogger("numba").setLevel(logging.INFO) -from random import randint - -from module import commons -from module.data_utils import ( +import GPT_SoVITS.utils as utils +from GPT_SoVITS.Accelerate import console, logger +from GPT_SoVITS.Accelerate.logger import SpeedColumnIteration +from GPT_SoVITS.module import commons +from GPT_SoVITS.module.data_utils import ( DistributedBucketSampler, TextAudioSpeakerCollate, TextAudioSpeakerLoader, ) -from module.losses import 
discriminator_loss, feature_loss, generator_loss, kl_loss -from module.mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from module.models import ( +from GPT_SoVITS.module.losses import discriminator_loss, feature_loss, generator_loss, kl_loss +from GPT_SoVITS.module.mel_processing import mel_spectrogram_torch, spec_to_mel_torch +from GPT_SoVITS.module.models import ( MultiPeriodDiscriminator, SynthesizerTrn, ) -from process_ckpt import savee +from GPT_SoVITS.process_ckpt import save_ckpt +logging.getLogger("matplotlib").setLevel(logging.INFO) +logging.getLogger("h5py").setLevel(logging.INFO) +logging.getLogger("numba").setLevel(logging.INFO) + +hps = utils.get_hparams(stage=2) + +warnings.filterwarnings("ignore") torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = False -###反正A100fp32更快,那试试tf32吧 -torch.backends.cuda.matmul.allow_tf32 = True +torch.backends.cuda.matmul.allow_tf32 = True # 反正A100fp32更快,那试试tf32吧 torch.backends.cudnn.allow_tf32 = True torch.set_float32_matmul_precision("medium") # 最低精度但最快(也就快一丁点),对于结果造成不了影响 -# from config import pretrained_s2G,pretrained_s2D +torch.set_grad_enabled(True) + global_step = 0 -device = "cpu" # cuda以外的设备,等mps优化后加入 +if torch.cuda.is_available(): + device_str = "cuda" +elif torch.mps.is_available(): + device_str = "mps" +else: + device_str = "cpu" + +multigpu = torch.cuda.device_count() > 1 if torch.cuda.is_available() else False def main(): @@ -57,8 +67,10 @@ def main(): n_gpus = 1 os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = str(randint(20000, 55555)) + if platform.system() == "Windows": + os.environ["USE_LIBUV"] = "0" - mp.spawn( + spawn( run, nprocs=n_gpus, args=( @@ -70,19 +82,31 @@ def main(): def run(rank, n_gpus, hps): global global_step - if rank == 0: - logger = utils.get_logger(hps.data.exp_dir) - logger.info(hps) - # utils.check_git_hash(hps.s2_ckpt_dir) - writer = SummaryWriter(log_dir=hps.s2_ckpt_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval")) + device = torch.device(f"{device_str}:{rank}") + + if rank == 0: + logger.add( + os.path.join(hps.data.exp_dir, "train.log"), + level="INFO", + enqueue=True, + backtrace=True, + diagnose=True, + format="{time:YY-MM-DD HH:mm:ss}\t{name}\t{level}\t{message}", + ) + console.print(hps.to_dict()) + writer: SummaryWriter | None = SummaryWriter(log_dir=hps.s2_ckpt_dir) + writer_eval: SummaryWriter | None = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval")) + else: + writer = writer_eval = None + + if multigpu: + dist.init_process_group( + backend="gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl", + init_method="env://", + world_size=n_gpus, + rank=rank, + ) - dist.init_process_group( - backend="gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl", - init_method="env://?use_libuv=False", - world_size=n_gpus, - rank=rank, - ) torch.manual_seed(hps.train.seed) if torch.cuda.is_available(): torch.cuda.set_device(rank) @@ -126,36 +150,22 @@ def run(rank, n_gpus, hps): persistent_workers=True, prefetch_factor=4, ) - # if rank == 0: - # eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data, val=True) - # eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, - # batch_size=1, pin_memory=True, - # drop_last=False, collate_fn=collate_fn) - net_g = ( - SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model, - ).cuda(rank) - if 
torch.cuda.is_available() - else SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model, - ).to(device) - ) + net_g: SynthesizerTrn = SynthesizerTrn( + hps.data.filter_length // 2 + 1, + hps.train.segment_size // hps.data.hop_length, + n_speakers=hps.data.n_speakers, + **hps.model, + ).to(device) + + net_d = MultiPeriodDiscriminator( + hps.model.use_spectral_norm, + version=hps.model.version, + ).to(device) - net_d = ( - MultiPeriodDiscriminator(hps.model.use_spectral_norm, version=hps.model.version).cuda(rank) - if torch.cuda.is_available() - else MultiPeriodDiscriminator(hps.model.use_spectral_norm, version=hps.model.version).to(device) - ) for name, param in net_g.named_parameters(): if not param.requires_grad: - print(name, "not requires_grad") + console.print(name, "not requires_grad") te_p = list(map(id, net_g.enc_p.text_embedding.parameters())) et_p = list(map(id, net_g.enc_p.encoder_text.parameters())) @@ -165,10 +175,6 @@ def run(rank, n_gpus, hps): net_g.parameters(), ) - # te_p=net_g.enc_p.text_embedding.parameters() - # et_p=net_g.enc_p.encoder_text.parameters() - # mrte_p=net_g.enc_p.mrte.parameters() - optim_g = torch.optim.AdamW( # filter(lambda p: p.requires_grad, net_g.parameters()),###默认所有层lr一致 [ @@ -196,69 +202,64 @@ def run(rank, n_gpus, hps): betas=hps.train.betas, eps=hps.train.eps, ) - if torch.cuda.is_available(): - net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) + if multigpu: + net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) # type: ignore + net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) # type: ignore else: - net_g = net_g.to(device) - net_d = net_d.to(device) + pass try: # 如果能加载自动resume - _, _, _, epoch_str = utils.load_checkpoint( - utils.latest_checkpoint_path("%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "D_*.pth"), + epoch_str = utils.load_checkpoint( + utils.latest_checkpoint_path(f"{hps.data.exp_dir}/logs_s2_{hps.model.version}", "D_*.pth"), net_d, optim_d, - ) # D多半加载没事 + )[-1] # D多半加载没事 if rank == 0: logger.info("loaded D") - # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g,load_opt=0) - _, _, _, epoch_str = utils.load_checkpoint( - utils.latest_checkpoint_path("%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "G_*.pth"), + epoch_str = utils.load_checkpoint( + utils.latest_checkpoint_path(f"{hps.data.exp_dir}/logs_s2_{hps.model.version}", "G_*.pth"), net_g, optim_g, - ) + )[-1] epoch_str += 1 global_step = (epoch_str - 1) * len(train_loader) - # epoch_str = 1 - # global_step = 0 - except: # 如果首次不能加载,加载pretrain - # traceback.print_exc() + except Exception: epoch_str = 1 global_step = 0 if ( hps.train.pretrained_s2G != "" - and hps.train.pretrained_s2G != None + and hps.train.pretrained_s2G is not None and os.path.exists(hps.train.pretrained_s2G) ): if rank == 0: - logger.info("loaded pretrained %s" % hps.train.pretrained_s2G) - print( - "loaded pretrained %s" % hps.train.pretrained_s2G, + logger.info(f"loaded pretrained {hps.train.pretrained_s2G}") + console.print( + f"loaded pretrained {hps.train.pretrained_s2G}", net_g.module.load_state_dict( - torch.load(hps.train.pretrained_s2G, map_location="cpu", weights_only=False)["weight"], + torch.load(hps.train.pretrained_s2G, map_location="cpu")["weight"], strict=False, ) - if torch.cuda.is_available() + if 
multigpu else net_g.load_state_dict( - torch.load(hps.train.pretrained_s2G, map_location="cpu", weights_only=False)["weight"], + torch.load(hps.train.pretrained_s2G, map_location="cpu")["weight"], strict=False, ), ) ##测试不加载优化器 if ( hps.train.pretrained_s2D != "" - and hps.train.pretrained_s2D != None + and hps.train.pretrained_s2D is not None and os.path.exists(hps.train.pretrained_s2D) ): if rank == 0: - logger.info("loaded pretrained %s" % hps.train.pretrained_s2D) - print( - "loaded pretrained %s" % hps.train.pretrained_s2D, + logger.info(f"loaded pretrained {hps.train.pretrained_s2D}") + console.print( + f"loaded pretrained {hps.train.pretrained_s2D}", net_d.module.load_state_dict( - torch.load(hps.train.pretrained_s2D, map_location="cpu", weights_only=False)["weight"], strict=False + torch.load(hps.train.pretrained_s2D, map_location="cpu")["weight"], strict=False ) - if torch.cuda.is_available() + if multigpu else net_d.load_state_dict( - torch.load(hps.train.pretrained_s2D, map_location="cpu", weights_only=False)["weight"], + torch.load(hps.train.pretrained_s2D, map_location="cpu")["weight"], ), ) @@ -279,311 +280,339 @@ def run(rank, n_gpus, hps): scheduler_g.step() scheduler_d.step() - scaler = GradScaler(enabled=hps.train.fp16_run) + scaler = GradScaler(device=device.type, enabled=hps.train.fp16_run) - print("start training from epoch %s" % epoch_str) - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate( - rank, - epoch, - hps, - [net_g, net_d], - [optim_g, optim_d], - [scheduler_g, scheduler_d], - scaler, - # [train_loader, eval_loader], logger, [writer, writer_eval]) - [train_loader, None], - logger, - [writer, writer_eval], + if rank == 0: + logger.info(f"start training from epoch {epoch_str}") + + with ( + Progress( + TextColumn("[cyan]{task.description}"), + BarColumn(), + TextColumn("{task.completed}/{task.total}"), + TimeElapsedColumn(), + console=console, + redirect_stderr=True, + redirect_stdout=True, + ) + if rank == 0 + else nullcontext() as progress + ): + if isinstance(progress, Progress): + epoch_task: TaskID | None = progress.add_task( + "Epoch", + total=int(hps.train.epochs), + completed=int(epoch_str) - 1, ) else: - train_and_evaluate( - rank, - epoch, - hps, - [net_g, net_d], - [optim_g, optim_d], - [scheduler_g, scheduler_d], - scaler, - [train_loader, None], - None, - None, - ) - scheduler_g.step() - scheduler_d.step() - print("training done") + epoch_task = step_task = None + + for epoch in range(epoch_str, hps.train.epochs + 1): + if rank == 0: + assert epoch_task is not None + assert progress is not None + progress.advance(epoch_task, 1) + train_and_evaluate( + device, + epoch, + hps, + (net_g, net_d), + (optim_g, optim_d), + (scheduler_g, scheduler_d), + scaler, + # [train_loader, eval_loader], logger, [writer, writer_eval]) + (train_loader, None), + logger, + (writer, writer_eval), + ) + else: + train_and_evaluate( + device, + epoch, + hps, + (net_g, net_d), + (optim_g, optim_d), + (scheduler_g, scheduler_d), + scaler, + (train_loader, None), + None, + (None, None), + ) + scheduler_g.step() + scheduler_d.step() + if rank == 0: + assert progress + progress.stop() + logger.info("Training Done") + sys.exit(0) -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): +def train_and_evaluate( + device: torch.device, + epoch, + hps, + nets, + optims, + schedulers, + scaler, + loaders, + logger, + writers, +): net_g, net_d = nets optim_g, optim_d = optims # scheduler_g, 
scheduler_d = schedulers train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers + writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() - for batch_idx, data in enumerate(tqdm(train_loader)): - if hps.model.version in {"v2Pro", "v2ProPlus"}: - ssl, ssl_lengths, spec, spec_lengths, y, y_lengths, text, text_lengths, sv_emb = data + + with ( + Progress( + TextColumn("[cyan]{task.description}"), + BarColumn(), + TextColumn("{task.completed}/{task.total}"), + SpeedColumnIteration(show_speed=True), + TimeRemainingColumn(elapsed_when_finished=True), + console=console, + redirect_stderr=True, + redirect_stdout=True, + transient=not (int(epoch) == int(hps.train.epochs)), + ) + if device.index == 0 + else nullcontext() as progress + ): + if isinstance(progress, Progress): + step_task: TaskID | None = progress.add_task("Steps", total=len(train_loader)) else: - ssl, ssl_lengths, spec, spec_lengths, y, y_lengths, text, text_lengths = data - if torch.cuda.is_available(): - spec, spec_lengths = ( - spec.cuda( - rank, - non_blocking=True, - ), - spec_lengths.cuda( - rank, - non_blocking=True, - ), - ) - y, y_lengths = ( - y.cuda( - rank, - non_blocking=True, - ), - y_lengths.cuda( - rank, - non_blocking=True, - ), - ) - ssl = ssl.cuda(rank, non_blocking=True) - ssl.requires_grad = False - # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True) - text, text_lengths = ( - text.cuda( - rank, - non_blocking=True, - ), - text_lengths.cuda( - rank, - non_blocking=True, - ), - ) + step_task = None + + for batch_idx, data in enumerate(train_loader): if hps.model.version in {"v2Pro", "v2ProPlus"}: - sv_emb = sv_emb.cuda(rank, non_blocking=True) - else: - spec, spec_lengths = spec.to(device), spec_lengths.to(device) - y, y_lengths = y.to(device), y_lengths.to(device) - ssl = ssl.to(device) - ssl.requires_grad = False - # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True) - text, text_lengths = text.to(device), text_lengths.to(device) - if hps.model.version in {"v2Pro", "v2ProPlus"}: - sv_emb = sv_emb.to(device) - with autocast(enabled=hps.train.fp16_run): - if hps.model.version in {"v2Pro", "v2ProPlus"}: - (y_hat, kl_ssl, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), stats_ssl) = net_g( - ssl, spec, spec_lengths, text, text_lengths, sv_emb + ssl, ssl_lengths, spec, spec_lengths, y, y_lengths, text, text_lengths, sv_emb = data + ssl, ssl_lengths, spec, spec_lengths, y, y_lengths, text, text_lengths, sv_emb = map( + lambda x: x.to(device, non_blocking=True), + (ssl, ssl_lengths, spec, spec_lengths, y, y_lengths, text, text_lengths, sv_emb), ) else: - ( - y_hat, - kl_ssl, - ids_slice, - x_mask, - z_mask, - (z, z_p, m_p, logs_p, m_q, logs_q), - stats_ssl, - ) = net_g(ssl, spec, spec_lengths, text, text_lengths) - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax, - ) - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax, - ) - - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - with autocast(enabled=False): 
- loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( - y_d_hat_r, - y_d_hat_g, + ssl, ssl_lengths, spec, spec_lengths, y, y_lengths, text, text_lengths = data + ssl, ssl_lengths, spec, spec_lengths, y, y_lengths, text, text_lengths = map( + lambda x: x.to(device, non_blocking=True), + (ssl, ssl_lengths, spec, spec_lengths, y, y_lengths, text, text_lengths), ) - loss_disc_all = loss_disc - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) + sv_emb = None + ssl.requires_grad = False - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - with autocast(enabled=False): - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + kl_ssl * 1 + loss_kl - - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]["lr"] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, kl_ssl, loss_kl] - logger.info( - "Train Epoch: {} [{:.0f}%]".format( - epoch, - 100.0 * batch_idx / len(train_loader), - ) - ) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = { - "loss/g/total": loss_gen_all, - "loss/d/total": loss_disc_all, - "learning_rate": lr, - "grad_norm_d": grad_norm_d, - "grad_norm_g": grad_norm_g, - } - scalar_dict.update( - { - "loss/g/fm": loss_fm, - "loss/g/mel": loss_mel, - "loss/g/kl_ssl": kl_ssl, - "loss/g/kl": loss_kl, - } - ) - - # scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - # scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - # scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - image_dict = None - try: ###Some people installed the wrong version of matplotlib. 
- image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy( - y_mel[0].data.cpu().numpy(), - ), - "slice/mel_gen": utils.plot_spectrogram_to_numpy( - y_hat_mel[0].data.cpu().numpy(), - ), - "all/mel": utils.plot_spectrogram_to_numpy( - mel[0].data.cpu().numpy(), - ), - "all/stats_ssl": utils.plot_spectrogram_to_numpy( - stats_ssl[0].data.cpu().numpy(), - ), - } - except: - pass - if image_dict: - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict, + with autocast(device_type=device.type, dtype=torch.float16, enabled=hps.train.fp16_run): + if hps.model.version in {"v2Pro", "v2ProPlus"}: + (y_hat, kl_ssl, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), stats_ssl) = net_g( + ssl, spec, spec_lengths, text, text_lengths, sv_emb ) else: - utils.summarize( - writer=writer, - global_step=global_step, - scalars=scalar_dict, + ( + y_hat, + kl_ssl, + ids_slice, + x_mask, + z_mask, + (z, z_p, m_p, logs_p, m_q, logs_q), + stats_ssl, + ) = net_g(ssl, spec, spec_lengths, text, text_lengths) + + mel = spec_to_mel_torch( + spec, + hps.data.filter_length, + hps.data.n_mel_channels, + hps.data.sampling_rate, + hps.data.mel_fmin, + hps.data.mel_fmax, + ) + y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) + y_hat_mel = mel_spectrogram_torch( + y_hat.squeeze(1), + hps.data.filter_length, + hps.data.n_mel_channels, + hps.data.sampling_rate, + hps.data.hop_length, + hps.data.win_length, + hps.data.mel_fmin, + hps.data.mel_fmax, + ) + + y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice + + # Discriminator + y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) + with autocast(device_type=device.type, enabled=False): + loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( + y_d_hat_r, + y_d_hat_g, ) - global_step += 1 - if epoch % hps.train.save_every_epoch == 0 and rank == 0: - if hps.train.if_save_latest == 0: - utils.save_checkpoint( - net_g, - optim_g, - hps.train.learning_rate, - epoch, - os.path.join( - "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), - "G_{}.pth".format(global_step), - ), - ) - utils.save_checkpoint( - net_d, - optim_d, - hps.train.learning_rate, - epoch, - os.path.join( - "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), - "D_{}.pth".format(global_step), - ), - ) - else: - utils.save_checkpoint( - net_g, - optim_g, - hps.train.learning_rate, - epoch, - os.path.join( - "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), - "G_{}.pth".format(233333333333), - ), - ) - utils.save_checkpoint( - net_d, - optim_d, - hps.train.learning_rate, - epoch, - os.path.join( - "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), - "D_{}.pth".format(233333333333), - ), - ) - if rank == 0 and hps.train.if_save_every_weights == True: + loss_disc_all = loss_disc + + optim_d.zero_grad() + scaler.scale(loss_disc_all).backward() + scaler.unscale_(optim_d) + grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) + scaler.step(optim_d) + + with autocast(device_type=device.type, dtype=torch.float16, enabled=hps.train.fp16_run): + # Generator + y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) + with autocast(device_type=device.type, enabled=False): + loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel + loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl + + loss_fm = feature_loss(fmap_r, fmap_g) + loss_gen, losses_gen = generator_loss(y_d_hat_g) + loss_gen_all = loss_gen + loss_fm + 
loss_mel + kl_ssl * 1 + loss_kl + + optim_g.zero_grad() + scaler.scale(loss_gen_all).backward() + scaler.unscale_(optim_g) + grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) + scaler.step(optim_g) + scaler.update() + + if device.index == 0 and progress is not None and step_task is not None: + progress.advance(step_task, 1) + + if device.index == 0: + if global_step % hps.train.log_interval == 0: + lr = optim_g.param_groups[0]["lr"] + losses = [loss_disc, loss_gen, loss_fm, loss_mel, kl_ssl, loss_kl] + logger.info( + "Train Epoch: {} [{:.0f}%]".format( + epoch, + 100.0 * batch_idx / len(train_loader), + ) + ) + logger.info([x.item() for x in losses] + [global_step, lr]) + + scalar_dict = { + "loss/g/total": loss_gen_all, + "loss/d/total": loss_disc_all, + "learning_rate": lr, + "grad_norm_d": grad_norm_d, + "grad_norm_g": grad_norm_g, + } + scalar_dict.update( + { + "loss/g/fm": loss_fm, + "loss/g/mel": loss_mel, + "loss/g/kl_ssl": kl_ssl, + "loss/g/kl": loss_kl, + } + ) + + # scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) + # scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) + # scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) + image_dict = None + try: # Some people installed the wrong version of matplotlib. + image_dict = { + "slice/mel_org": utils.plot_spectrogram_to_numpy( + y_mel[0].data.cpu().numpy(), + ), + "slice/mel_gen": utils.plot_spectrogram_to_numpy( + y_hat_mel[0].data.cpu().numpy(), + ), + "all/mel": utils.plot_spectrogram_to_numpy( + mel[0].data.cpu().numpy(), + ), + "all/stats_ssl": utils.plot_spectrogram_to_numpy( + stats_ssl[0].data.cpu().numpy(), + ), + } + except Exception as _: + pass + if image_dict: + utils.summarize( + writer=writer, + global_step=global_step, + images=image_dict, + scalars=scalar_dict, + ) + else: + utils.summarize( + writer=writer, + global_step=global_step, + scalars=scalar_dict, + ) + global_step += 1 + + if hps.train.if_save_latest == 0: + utils.save_checkpoint( + net_g, + optim_g, + hps.train.learning_rate, + epoch, + os.path.join( + f"{hps.data.exp_dir}/logs_s2_{hps.model.version}", + f"G_{global_step}.pth", + ), + logger, + ) + utils.save_checkpoint( + net_d, + optim_d, + hps.train.learning_rate, + epoch, + os.path.join( + "{hps.data.exp_dir}/logs_s2_{hps.model.version}", + "D_{global_step}.pth", + ), + logger, + ) + else: + utils.save_checkpoint( + net_g, + optim_g, + hps.train.learning_rate, + epoch, + os.path.join( + f"{hps.data.exp_dir}/logs_s2_{hps.model.version}", + "G_233333333333.pth", + ), + logger, + ) + utils.save_checkpoint( + net_d, + optim_d, + hps.train.learning_rate, + epoch, + os.path.join( + f"{hps.data.exp_dir}/logs_s2_{hps.model.version}", + "D_233333333333.pth", + ), + logger, + ) + + if epoch % hps.train.save_every_epoch == 0 and device.index == 0: + if hps.train.if_save_every_weights is True: if hasattr(net_g, "module"): ckpt = net_g.module.state_dict() else: ckpt = net_g.state_dict() - logger.info( - "saving ckpt %s_e%s:%s" - % ( - hps.name, - epoch, - savee( - ckpt, - hps.name + "_e%s_s%s" % (epoch, global_step), - epoch, - global_step, - hps, - model_version=None if hps.model.version not in {"v2Pro", "v2ProPlus"} else hps.model.version, - ), - ) + save_info = save_ckpt( + ckpt, + hps.name + f"_e{epoch}_s{global_step}", + epoch, + global_step, + hps, ) - - if rank == 0: - logger.info("====> Epoch: {}".format(epoch)) + logger.info(f"saving ckpt {hps.name}_e{epoch}:{save_info}") -def evaluate(hps, 
generator, eval_loader, writer_eval): +def evaluate(hps, generator, eval_loader, writer_eval, device): generator.eval() image_dict = {} audio_dict = {} - print("Evaluating ...") + logger.info("Evaluating ...") with torch.no_grad(): for batch_idx, ( ssl, @@ -595,17 +624,10 @@ def evaluate(hps, generator, eval_loader, writer_eval): text, text_lengths, ) in enumerate(eval_loader): - print(111) - if torch.cuda.is_available(): - spec, spec_lengths = spec.cuda(), spec_lengths.cuda() - y, y_lengths = y.cuda(), y_lengths.cuda() - ssl = ssl.cuda() - text, text_lengths = text.cuda(), text_lengths.cuda() - else: - spec, spec_lengths = spec.to(device), spec_lengths.to(device) - y, y_lengths = y.to(device), y_lengths.to(device) - ssl = ssl.to(device) - text, text_lengths = text.to(device), text_lengths.to(device) + spec, spec_lengths = spec.to(device), spec_lengths.to(device) + y, y_lengths = y.to(device), y_lengths.to(device) + ssl = ssl.to(device) + text, text_lengths = text.to(device), text_lengths.to(device) for test in [0, 1]: y_hat, mask, *_ = ( generator.module.infer( @@ -665,11 +687,6 @@ def evaluate(hps, generator, eval_loader, writer_eval): ) audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, : y_lengths[0]]}) - # y_hat, mask, *_ = generator.module.infer(ssl, spec_lengths, speakers, y=None) - # audio_dict.update({ - # f"gen/audio_{batch_idx}_style_pred": y_hat[0, :, :] - # }) - utils.summarize( writer=writer_eval, global_step=global_step, diff --git a/GPT_SoVITS/s2_train_v3.py b/GPT_SoVITS/s2_train_v3.py index aa8dae7f..414bdd00 100644 --- a/GPT_SoVITS/s2_train_v3.py +++ b/GPT_SoVITS/s2_train_v3.py @@ -1,53 +1,58 @@ -import warnings - -warnings.filterwarnings("ignore") -import os - -import utils - -hps = utils.get_hparams(stage=2) -os.environ["CUDA_VISIBLE_DEVICES"] = hps.train.gpu_numbers.replace("-", ",") import logging +import os +import platform +import sys +import warnings +from contextlib import nullcontext +from random import randint import torch import torch.distributed as dist -import torch.multiprocessing as mp -from torch.cuda.amp import GradScaler, autocast +from rich.progress import BarColumn, Progress, TaskID, TextColumn, TimeElapsedColumn, TimeRemainingColumn +from torch.amp.autocast_mode import autocast +from torch.amp.grad_scaler import GradScaler +from torch.multiprocessing.spawn import spawn from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm +import GPT_SoVITS.utils as utils +from GPT_SoVITS.Accelerate import console, logger +from GPT_SoVITS.Accelerate.logger import SpeedColumnIteration +from GPT_SoVITS.module import commons +from GPT_SoVITS.module.data_utils import ( + DistributedBucketSampler, + TextAudioSpeakerCollateV3, + TextAudioSpeakerLoaderV3, +) +from GPT_SoVITS.module.models import SynthesizerTrnV3 +from GPT_SoVITS.process_ckpt import save_ckpt + +hps = utils.get_hparams(stage=2) + +warnings.filterwarnings("ignore") logging.getLogger("matplotlib").setLevel(logging.INFO) logging.getLogger("h5py").setLevel(logging.INFO) logging.getLogger("numba").setLevel(logging.INFO) -from random import randint - -from module import commons -from module.data_utils import ( - DistributedBucketSampler, -) -from module.data_utils import ( - TextAudioSpeakerCollateV3 as TextAudioSpeakerCollate, -) -from module.data_utils import ( - TextAudioSpeakerLoaderV3 as TextAudioSpeakerLoader, -) -from module.models import ( - SynthesizerTrnV3 as SynthesizerTrn, -) -from 
process_ckpt import savee torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = False -###反正A100fp32更快,那试试tf32吧 -torch.backends.cuda.matmul.allow_tf32 = True +torch.backends.cuda.matmul.allow_tf32 = True ###反正A100fp32更快,那试试tf32吧 torch.backends.cudnn.allow_tf32 = True +torch.set_grad_enabled(True) torch.set_float32_matmul_precision("medium") # 最低精度但最快(也就快一丁点),对于结果造成不了影响 -# from config import pretrained_s2G,pretrained_s2D + global_step = 0 -device = "cpu" # cuda以外的设备,等mps优化后加入 +if torch.cuda.is_available(): + device_str = "cuda" +elif torch.mps.is_available(): + device_str = "mps" +else: + device_str = "cpu" + + +multigpu = torch.cuda.device_count() > 1 if torch.cuda.is_available() else False def main(): @@ -57,8 +62,10 @@ def main(): n_gpus = 1 os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = str(randint(20000, 55555)) + if platform.system() == "Windows": + os.environ["USE_LIBUV"] = "0" - mp.spawn( + spawn( run, nprocs=n_gpus, args=( @@ -70,24 +77,36 @@ def main(): def run(rank, n_gpus, hps): global global_step - if rank == 0: - logger = utils.get_logger(hps.data.exp_dir) - logger.info(hps) - # utils.check_git_hash(hps.s2_ckpt_dir) - writer = SummaryWriter(log_dir=hps.s2_ckpt_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval")) + device = torch.device(f"{device_str}:{rank}") + + if rank == 0: + logger.add( + os.path.join(hps.data.exp_dir, "train.log"), + level="INFO", + enqueue=True, + backtrace=True, + diagnose=True, + format="{time:YY-MM-DD HH:mm:ss}\t{name}\t{level}\t{message}", + ) + console.print(hps.to_dict()) + writer: SummaryWriter | None = SummaryWriter(log_dir=hps.s2_ckpt_dir) + writer_eval: SummaryWriter | None = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval")) + else: + writer = writer_eval = None + + if multigpu: + dist.init_process_group( + backend="gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl", + init_method="env://", + world_size=n_gpus, + rank=rank, + ) - dist.init_process_group( - backend="gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl", - init_method="env://?use_libuv=False", - world_size=n_gpus, - rank=rank, - ) torch.manual_seed(hps.train.seed) if torch.cuda.is_available(): torch.cuda.set_device(rank) - train_dataset = TextAudioSpeakerLoader(hps.data) ######## + train_dataset = TextAudioSpeakerLoaderV3(hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, @@ -101,21 +120,12 @@ def run(rank, n_gpus, hps): 800, 900, 1000, - # 1100, - # 1200, - # 1300, - # 1400, - # 1500, - # 1600, - # 1700, - # 1800, - # 1900, ], num_replicas=n_gpus, rank=rank, shuffle=True, ) - collate_fn = TextAudioSpeakerCollate() + collate_fn = TextAudioSpeakerCollateV3() train_loader = DataLoader( train_dataset, num_workers=6, @@ -127,153 +137,135 @@ def run(rank, n_gpus, hps): prefetch_factor=4, ) # if rank == 0: - # eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data, val=True) + # eval_dataset = TextAudioSpeakerLoaderV3(hps.data.validation_files, hps.data, val=True) # eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, # batch_size=1, pin_memory=True, # drop_last=False, collate_fn=collate_fn) - net_g = ( - SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model, - ).cuda(rank) - if torch.cuda.is_available() - else SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // 
hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model, - ).to(device) - ) - - # net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) if torch.cuda.is_available() else MultiPeriodDiscriminator(hps.model.use_spectral_norm).to(device) - # for name, param in net_g.named_parameters(): - # if not param.requires_grad: - # print(name, "not requires_grad") + net_g = SynthesizerTrnV3( + hps.data.filter_length // 2 + 1, + hps.train.segment_size // hps.data.hop_length, + n_speakers=hps.data.n_speakers, + **hps.model, + ).to(device) optim_g = torch.optim.AdamW( - filter(lambda p: p.requires_grad, net_g.parameters()), ###默认所有层lr一致 + filter(lambda p: p.requires_grad, net_g.parameters()), # 默认所有层lr一致 hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) - # optim_d = torch.optim.AdamW( - # net_d.parameters(), - # hps.train.learning_rate, - # betas=hps.train.betas, - # eps=hps.train.eps, - # ) - if torch.cuda.is_available(): - net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) + + if multigpu: + net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) # type: ignore else: - net_g = net_g.to(device) - # net_d = net_d.to(device) + pass try: # 如果能加载自动resume - # _, _, _, epoch_str = utils.load_checkpoint( - # utils.latest_checkpoint_path("%s/logs_s2_%s" % (hps.data.exp_dir,hps.model.version), "D_*.pth"), - # net_d, - # optim_d, - # ) # D多半加载没事 - # if rank == 0: - # logger.info("loaded D") - # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g,load_opt=0) _, _, _, epoch_str = utils.load_checkpoint( - utils.latest_checkpoint_path("%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "G_*.pth"), + utils.latest_checkpoint_path(f"{hps.data.exp_dir}/logs_s2_{hps.model.version}", "G_*.pth"), net_g, optim_g, ) epoch_str += 1 global_step = (epoch_str - 1) * len(train_loader) - # epoch_str = 1 - # global_step = 0 - except: # 如果首次不能加载,加载pretrain - # traceback.print_exc() + except Exception: # 如果首次不能加载,加载pretrain epoch_str = 1 global_step = 0 if ( hps.train.pretrained_s2G != "" - and hps.train.pretrained_s2G != None + and hps.train.pretrained_s2G is not None and os.path.exists(hps.train.pretrained_s2G) ): if rank == 0: - logger.info("loaded pretrained %s" % hps.train.pretrained_s2G) - print( - "loaded pretrained %s" % hps.train.pretrained_s2G, - net_g.module.load_state_dict( + logger.info(f"loaded pretrained {hps.train.pretrained_s2G}") + console.print( + f"loaded pretrained %{hps.train.pretrained_s2G}", + net_g.module.load_state_dict( # type: ignore torch.load(hps.train.pretrained_s2G, map_location="cpu", weights_only=False)["weight"], strict=False, ) - if torch.cuda.is_available() + if multigpu else net_g.load_state_dict( torch.load(hps.train.pretrained_s2G, map_location="cpu", weights_only=False)["weight"], strict=False, ), - ) ##测试不加载优化器 - # if hps.train.pretrained_s2D != ""and hps.train.pretrained_s2D != None and os.path.exists(hps.train.pretrained_s2D): - # if rank == 0: - # logger.info("loaded pretrained %s" % hps.train.pretrained_s2D) - # print( - # net_d.module.load_state_dict( - # torch.load(hps.train.pretrained_s2D, map_location="cpu")["weight"] - # ) if torch.cuda.is_available() else net_d.load_state_dict( - # torch.load(hps.train.pretrained_s2D, map_location="cpu")["weight"] - # ) - # ) - - # scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str 
- 2) - # scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) + ) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=-1) - # scheduler_d = torch.optim.lr_scheduler.ExponentialLR( - # optim_d, gamma=hps.train.lr_decay, last_epoch=-1 - # ) + for _ in range(epoch_str): scheduler_g.step() - # scheduler_d.step() - scaler = GradScaler(enabled=hps.train.fp16_run) + scaler = GradScaler(device=device.type, enabled=hps.train.fp16_run) net_d = optim_d = scheduler_d = None - print("start training from epoch %s" % epoch_str) - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate( - rank, - epoch, - hps, - [net_g, net_d], - [optim_g, optim_d], - [scheduler_g, scheduler_d], - scaler, - # [train_loader, eval_loader], logger, [writer, writer_eval]) - [train_loader, None], - logger, - [writer, writer_eval], + if rank == 0: + logger.info(f"start training from epoch {epoch_str}") + + with ( + Progress( + TextColumn("[cyan]{task.description}"), + BarColumn(), + TextColumn("{task.completed}/{task.total}"), + TimeElapsedColumn(), + console=console, + redirect_stderr=True, + redirect_stdout=True, + ) + if rank == 0 + else nullcontext() as progress + ): + if isinstance(progress, Progress): + epoch_task: TaskID | None = progress.add_task( + "Epoch", + total=int(hps.train.epochs), + completed=int(epoch_str) - 1, ) else: - train_and_evaluate( - rank, - epoch, - hps, - [net_g, net_d], - [optim_g, optim_d], - [scheduler_g, scheduler_d], - scaler, - [train_loader, None], - None, - None, - ) - scheduler_g.step() - # scheduler_d.step() - print("training done") + epoch_task = step_task = None + + for epoch in range(epoch_str, hps.train.epochs + 1): + if rank == 0: + assert epoch_task is not None + assert progress is not None + progress.advance(epoch_task, 1) + train_and_evaluate( + device, + epoch, + hps, + (net_g, net_d), + (optim_g, optim_d), + (scheduler_g, scheduler_d), + scaler, + (train_loader, None), + logger, + (writer, writer_eval), + ) + progress.advance(epoch_task, 1) + else: + train_and_evaluate( + device, + epoch, + hps, + (net_g, net_d), + (optim_g, optim_d), + (scheduler_g, scheduler_d), + scaler, + (train_loader, None), + None, + (None, None), + ) + scheduler_g.step() + if rank == 0: + assert progress + progress.stop() + logger.info("Training Done") + sys.exit(0) def train_and_evaluate( - rank, + device: torch.device, epoch, hps, nets, @@ -288,179 +280,129 @@ def train_and_evaluate( optim_g, optim_d = optims # scheduler_g, scheduler_d = schedulers train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers + writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() - # net_d.train() - # for batch_idx, ( - # ssl, - # ssl_lengths, - # spec, - # spec_lengths, - # y, - # y_lengths, - # text, - # text_lengths, - # ) in enumerate(tqdm(train_loader)): - for batch_idx, (ssl, spec, mel, ssl_lengths, spec_lengths, text, text_lengths, mel_lengths) in enumerate( - tqdm(train_loader) + + with ( + Progress( + TextColumn("[cyan]{task.description}"), + BarColumn(), + TextColumn("{task.completed}/{task.total}"), + SpeedColumnIteration(show_speed=True), + TimeRemainingColumn(elapsed_when_finished=True), + console=console, + redirect_stderr=True, + redirect_stdout=True, + transient=not (int(epoch) == int(hps.train.epochs)), + ) + if device.index == 0 + else nullcontext() as progress ): - if 
torch.cuda.is_available(): - spec, spec_lengths = ( - spec.cuda( - rank, - non_blocking=True, - ), - spec_lengths.cuda( - rank, - non_blocking=True, - ), - ) - mel, mel_lengths = mel.cuda(rank, non_blocking=True), mel_lengths.cuda(rank, non_blocking=True) - ssl = ssl.cuda(rank, non_blocking=True) - ssl.requires_grad = False - # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True) - text, text_lengths = ( - text.cuda( - rank, - non_blocking=True, - ), - text_lengths.cuda( - rank, - non_blocking=True, - ), - ) + if isinstance(progress, Progress): + step_task: TaskID | None = progress.add_task("Steps", total=len(train_loader)) else: - spec, spec_lengths = spec.to(device), spec_lengths.to(device) - mel, mel_lengths = mel.to(device), mel_lengths.to(device) - ssl = ssl.to(device) + step_task = None + + for batch_idx, (ssl, spec, mel, ssl_lengths, spec_lengths, text, text_lengths, mel_lengths) in enumerate( + train_loader + ): + spec, spec_lengths = spec.to(device, non_blocking=True), spec_lengths.to(device, non_blocking=True) + mel, mel_lengths = mel.to(device, non_blocking=True), mel_lengths.to(device, non_blocking=True) + ssl = ssl.to(device, non_blocking=True) ssl.requires_grad = False # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True) - text, text_lengths = text.to(device), text_lengths.to(device) + text, text_lengths = text.to(device, non_blocking=True), text_lengths.to(device, non_blocking=True) - with autocast(enabled=hps.train.fp16_run): - cfm_loss = net_g( - ssl, - spec, - mel, - ssl_lengths, - spec_lengths, - text, - text_lengths, - mel_lengths, - use_grad_ckpt=hps.train.grad_ckpt, - ) - loss_gen_all = cfm_loss - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() + with autocast(device_type=device.type, dtype=torch.float16, enabled=hps.train.fp16_run): + cfm_loss = net_g( + ssl, + spec, + mel, + ssl_lengths, + spec_lengths, + text, + text_lengths, + mel_lengths, + use_grad_ckpt=hps.train.grad_ckpt, + ) + loss_gen_all = cfm_loss + optim_g.zero_grad() + scaler.scale(loss_gen_all).backward() + scaler.unscale_(optim_g) + grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) + scaler.step(optim_g) + scaler.update() - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]["lr"] - # losses = [commit_loss,cfm_loss,mel_loss,loss_disc, loss_gen, loss_fm, loss_mel, loss_kl] - losses = [cfm_loss] - logger.info( - "Train Epoch: {} [{:.0f}%]".format( - epoch, - 100.0 * batch_idx / len(train_loader), + if device.index == 0 and progress is not None and step_task is not None: + progress.advance(step_task, 1) + + if device.index == 0: + if global_step % hps.train.log_interval == 0: + lr = optim_g.param_groups[0]["lr"] + # losses = [commit_loss,cfm_loss,mel_loss,loss_disc, loss_gen, loss_fm, loss_mel, loss_kl] + losses = [cfm_loss] + logger.info( + "Train Epoch: {} [{:.0f}%]".format( + epoch, + 100.0 * batch_idx / len(train_loader), + ) ) - ) - logger.info([x.item() for x in losses] + [global_step, lr]) + logger.info([x.item() for x in losses] + [global_step, lr]) - scalar_dict = {"loss/g/total": loss_gen_all, "learning_rate": lr, "grad_norm_g": grad_norm_g} - # image_dict = { - # "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - # "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - # "all/mel": 
utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - # "all/stats_ssl": utils.plot_spectrogram_to_numpy(stats_ssl[0].data.cpu().numpy()), - # } - utils.summarize( - writer=writer, - global_step=global_step, - # images=image_dict, - scalars=scalar_dict, - ) + scalar_dict = {"loss/g/total": loss_gen_all, "learning_rate": lr, "grad_norm_g": grad_norm_g} + utils.summarize( + writer=writer, + global_step=global_step, + # images=image_dict, + scalars=scalar_dict, + ) - # if global_step % hps.train.eval_interval == 0: - # # evaluate(hps, net_g, eval_loader, writer_eval) - # utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch,os.path.join(hps.s2_ckpt_dir, "G_{}.pth".format(global_step)),scaler) - # # utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch,os.path.join(hps.s2_ckpt_dir, "D_{}.pth".format(global_step)),scaler) - # # keep_ckpts = getattr(hps.train, 'keep_ckpts', 3) - # # if keep_ckpts > 0: - # # utils.clean_checkpoints(path_to_models=hps.s2_ckpt_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True) + global_step += 1 - global_step += 1 - if epoch % hps.train.save_every_epoch == 0 and rank == 0: - if hps.train.if_save_latest == 0: - utils.save_checkpoint( - net_g, - optim_g, - hps.train.learning_rate, - epoch, - os.path.join( - "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), - "G_{}.pth".format(global_step), - ), - ) - # utils.save_checkpoint( - # net_d, - # optim_d, - # hps.train.learning_rate, - # epoch, - # os.path.join( - # "%s/logs_s2_%s" % (hps.data.exp_dir,hps.model.version), "D_{}.pth".format(global_step) - # ), - # ) - else: - utils.save_checkpoint( - net_g, - optim_g, - hps.train.learning_rate, - epoch, - os.path.join( - "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), - "G_{}.pth".format(233333333333), - ), - ) - # utils.save_checkpoint( - # net_d, - # optim_d, - # hps.train.learning_rate, - # epoch, - # os.path.join( - # "%s/logs_s2_%s" % (hps.data.exp_dir,hps.model.version), "D_{}.pth".format(233333333333) - # ), - # ) - if rank == 0 and hps.train.if_save_every_weights == True: + if hps.train.if_save_latest == 0: + utils.save_checkpoint( + net_g, + optim_g, + hps.train.learning_rate, + epoch, + os.path.join( + f"{hps.data.exp_dir}/logs_s2_{hps.model.version}", + f"G_{global_step}.pth", + ), + logger, + ) + + else: + utils.save_checkpoint( + net_g, + optim_g, + hps.train.learning_rate, + epoch, + os.path.join( + f"{hps.data.exp_dir}/logs_s2_{hps.model.version}", + "G_233333333333.pth", + ), + logger, + ) + + if epoch % hps.train.save_every_epoch == 0 and device.index == 0: + if hps.train.if_save_every_weights is True: if hasattr(net_g, "module"): ckpt = net_g.module.state_dict() else: ckpt = net_g.state_dict() - logger.info( - "saving ckpt %s_e%s:%s" - % ( - hps.name, - epoch, - savee( - ckpt, - hps.name + "_e%s_s%s" % (epoch, global_step), - epoch, - global_step, - hps, - ), - ) + save_info = save_ckpt( + ckpt, + hps.name + f"_e{epoch}_s{global_step}", + epoch, + global_step, + hps, ) - - if rank == 0: - logger.info("====> Epoch: {}".format(epoch)) + logger.info(f"saving ckpt {hps.name}_e{epoch}:{save_info}") if __name__ == "__main__": diff --git a/GPT_SoVITS/s2_train_v3_lora.py b/GPT_SoVITS/s2_train_v3_lora.py index ba9e4ed4..2186a467 100644 --- a/GPT_SoVITS/s2_train_v3_lora.py +++ b/GPT_SoVITS/s2_train_v3_lora.py @@ -1,53 +1,67 @@ -import warnings - -warnings.filterwarnings("ignore") -import os - -import utils - -hps = utils.get_hparams(stage=2) -os.environ["CUDA_VISIBLE_DEVICES"] = hps.train.gpu_numbers.replace("-", 
",") import logging +import os +import platform +import sys +import warnings +from collections import OrderedDict as od +from contextlib import nullcontext +from random import randint +from typing import Any import torch import torch.distributed as dist -import torch.multiprocessing as mp -from torch.cuda.amp import GradScaler, autocast +from peft import LoraConfig, get_peft_model +from rich import print +from rich.progress import BarColumn, Progress, TaskID, TextColumn, TimeElapsedColumn, TimeRemainingColumn +from torch.amp.autocast_mode import autocast +from torch.amp.grad_scaler import GradScaler +from torch.multiprocessing.spawn import spawn from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm +import GPT_SoVITS.utils as utils +from GPT_SoVITS.Accelerate import console, logger +from GPT_SoVITS.Accelerate.logger import SpeedColumnIteration +from GPT_SoVITS.module import commons +from GPT_SoVITS.module.data_utils import ( + DistributedBucketSampler, + TextAudioSpeakerCollateV3, + TextAudioSpeakerCollateV4, + TextAudioSpeakerLoaderV3, + TextAudioSpeakerLoaderV4, +) +from GPT_SoVITS.module.models import SynthesizerTrnV3 +from GPT_SoVITS.process_ckpt import save_ckpt + +hps = utils.get_hparams(stage=2) + +warnings.filterwarnings("ignore") logging.getLogger("matplotlib").setLevel(logging.INFO) logging.getLogger("h5py").setLevel(logging.INFO) logging.getLogger("numba").setLevel(logging.INFO) -from collections import OrderedDict as od -from random import randint - -from module import commons -from module.data_utils import ( - DistributedBucketSampler, - TextAudioSpeakerCollateV3, - TextAudioSpeakerLoaderV3, - TextAudioSpeakerCollateV4, - TextAudioSpeakerLoaderV4, -) -from module.models import ( - SynthesizerTrnV3 as SynthesizerTrn, -) -from peft import LoraConfig, get_peft_model -from process_ckpt import savee torch.backends.cudnn.benchmark = False -torch.backends.cudnn.deterministic = False -###反正A100fp32更快,那试试tf32吧 +torch.backends.cudnn.deterministic = False # 反正A100fp32更快,那试试tf32吧 torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True torch.set_float32_matmul_precision("medium") # 最低精度但最快(也就快一丁点),对于结果造成不了影响 -# from config import pretrained_s2G,pretrained_s2D -global_step = 0 +torch.set_grad_enabled(True) -device = "cpu" # cuda以外的设备,等mps优化后加入 +global_step = 0 +save_root = "" +no_grad_names: set[Any] = set() +lora_rank = 0 + +if torch.cuda.is_available(): + device_str = "cuda" +elif torch.mps.is_available(): + device_str = "mps" +else: + device_str = "cpu" + + +multigpu = torch.cuda.device_count() > 1 if torch.cuda.is_available() else False def main(): @@ -57,8 +71,10 @@ def main(): n_gpus = 1 os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = str(randint(20000, 55555)) + if platform.system() == "Windows": + os.environ["USE_LIBUV"] = "0" - mp.spawn( + spawn( run, nprocs=n_gpus, args=( @@ -69,27 +85,39 @@ def main(): def run(rank, n_gpus, hps): - global global_step, no_grad_names, save_root, lora_rank - if rank == 0: - logger = utils.get_logger(hps.data.exp_dir) - logger.info(hps) - # utils.check_git_hash(hps.s2_ckpt_dir) - writer = SummaryWriter(log_dir=hps.s2_ckpt_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval")) + global global_step, save_root, no_grad_names, lora_rank + device = torch.device(f"{device_str}:{rank}") + + if rank == 0: + logger.add( + os.path.join(hps.data.exp_dir, "train.log"), + 
level="INFO", + enqueue=True, + backtrace=True, + diagnose=True, + format="{time:YY-MM-DD HH:mm:ss}\t{name}\t{level}\t{message}", + ) + console.print(hps.to_dict()) + writer: SummaryWriter | None = SummaryWriter(log_dir=hps.s2_ckpt_dir) + writer_eval: SummaryWriter | None = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval")) + else: + writer = writer_eval = None + + if multigpu: + dist.init_process_group( + backend="gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl", + init_method="env://", + world_size=n_gpus, + rank=rank, + ) - dist.init_process_group( - backend="gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl", - init_method="env://?use_libuv=False", - world_size=n_gpus, - rank=rank, - ) torch.manual_seed(hps.train.seed) if torch.cuda.is_available(): torch.cuda.set_device(rank) TextAudioSpeakerLoader = TextAudioSpeakerLoaderV3 if hps.model.version == "v3" else TextAudioSpeakerLoaderV4 TextAudioSpeakerCollate = TextAudioSpeakerCollateV3 if hps.model.version == "v3" else TextAudioSpeakerCollateV4 - train_dataset = TextAudioSpeakerLoader(hps.data) ######## + train_dataset = TextAudioSpeakerLoader(hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, @@ -103,15 +131,6 @@ def run(rank, n_gpus, hps): 800, 900, 1000, - # 1100, - # 1200, - # 1300, - # 1400, - # 1500, - # 1600, - # 1700, - # 1800, - # 1900, ], num_replicas=n_gpus, rank=rank, @@ -128,7 +147,7 @@ def run(rank, n_gpus, hps): persistent_workers=True, prefetch_factor=4, ) - save_root = "%s/logs_s2_%s_lora_%s" % (hps.data.exp_dir, hps.model.version, hps.train.lora_rank) + save_root = f"{hps.data.exp_dir}/logs_s2_{hps.model.version}_lora_{hps.train.lora_rank}" os.makedirs(save_root, exist_ok=True) lora_rank = int(hps.train.lora_rank) lora_config = LoraConfig( @@ -139,12 +158,12 @@ def run(rank, n_gpus, hps): ) def get_model(hps): - return SynthesizerTrn( + return SynthesizerTrnV3( hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **hps.model, - ) + ).to(device) def get_optim(net_g): return torch.optim.AdamW( @@ -154,19 +173,18 @@ def run(rank, n_gpus, hps): eps=hps.train.eps, ) - def model2cuda(net_g, rank): - if torch.cuda.is_available(): - net_g = DDP(net_g.cuda(rank), device_ids=[rank], find_unused_parameters=True) + def model2DDP(net_g, rank): + if multigpu: + net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) else: - net_g = net_g.to(device) + pass return net_g try: # 如果能加载自动resume net_g = get_model(hps) net_g.cfm = get_peft_model(net_g.cfm, lora_config) - net_g = model2cuda(net_g, rank) + net_g = model2DDP(net_g, rank) optim_g = get_optim(net_g) - # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g,load_opt=0) _, _, _, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(save_root, "G_*.pth"), net_g, @@ -174,205 +192,221 @@ def run(rank, n_gpus, hps): ) epoch_str += 1 global_step = (epoch_str - 1) * len(train_loader) - except: # 如果首次不能加载,加载pretrain - # traceback.print_exc() + except Exception: # 如果首次不能加载,加载pretrain epoch_str = 1 global_step = 0 net_g = get_model(hps) if ( hps.train.pretrained_s2G != "" - and hps.train.pretrained_s2G != None + and hps.train.pretrained_s2G is not None and os.path.exists(hps.train.pretrained_s2G) ): if rank == 0: - logger.info("loaded pretrained %s" % hps.train.pretrained_s2G) - print( - "loaded pretrained %s" % hps.train.pretrained_s2G, + logger.info(f"loaded 
pretrained {hps.train.pretrained_s2G}") + console.print( + f"loaded pretrained {hps.train.pretrained_s2G}", net_g.load_state_dict( torch.load(hps.train.pretrained_s2G, map_location="cpu", weights_only=False)["weight"], strict=False, ), ) net_g.cfm = get_peft_model(net_g.cfm, lora_config) - net_g = model2cuda(net_g, rank) + net_g = model2DDP(net_g, rank) optim_g = get_optim(net_g) no_grad_names = set() for name, param in net_g.named_parameters(): if not param.requires_grad: no_grad_names.add(name.replace("module.", "")) - # print(name, "not requires_grad") - # print(no_grad_names) - # os._exit(233333) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=-1) for _ in range(epoch_str): scheduler_g.step() - scaler = GradScaler(enabled=hps.train.fp16_run) + scaler = GradScaler(device=device.type, enabled=hps.train.fp16_run) net_d = optim_d = scheduler_d = None - print("start training from epoch %s" % epoch_str) - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate( - rank, - epoch, - hps, - [net_g, net_d], - [optim_g, optim_d], - [scheduler_g, scheduler_d], - scaler, - # [train_loader, eval_loader], logger, [writer, writer_eval]) - [train_loader, None], - logger, - [writer, writer_eval], + print(f"start training from epoch {epoch_str}") + + with ( + Progress( + TextColumn("[cyan]{task.description}"), + BarColumn(), + TextColumn("{task.completed}/{task.total}"), + TimeElapsedColumn(), + console=console, + redirect_stderr=True, + redirect_stdout=True, + ) + if rank == 0 + else nullcontext() as progress + ): + if isinstance(progress, Progress): + epoch_task: TaskID | None = progress.add_task( + "Epoch", + total=int(hps.train.epochs), + completed=int(epoch_str) - 1, ) else: - train_and_evaluate( - rank, - epoch, - hps, - [net_g, net_d], - [optim_g, optim_d], - [scheduler_g, scheduler_d], - scaler, - [train_loader, None], - None, - None, - ) - scheduler_g.step() - print("training done") + epoch_task = step_task = None + + for epoch in range(epoch_str, hps.train.epochs + 1): + if rank == 0: + assert epoch_task is not None + assert progress is not None + progress.advance(epoch_task, 1) + train_and_evaluate( + device, + epoch, + hps, + (net_g, net_d), + (optim_g, optim_d), + (scheduler_g, scheduler_d), + scaler, + (train_loader, None), + logger, + (writer, writer_eval), + ) + else: + train_and_evaluate( + device, + epoch, + hps, + (net_g, net_d), + (optim_g, optim_d), + (scheduler_g, scheduler_d), + scaler, + (train_loader, None), + None, + (None, None), + ) + scheduler_g.step() + if rank == 0: + assert progress + progress.stop() + logger.info("Training Done") + sys.exit(0) -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): +def train_and_evaluate( + device: torch.device, + epoch, + hps, + nets, + optims, + schedulers, + scaler, + loaders, + logger, + writers, +): net_g, net_d = nets optim_g, optim_d = optims # scheduler_g, scheduler_d = schedulers train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers + writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() - for batch_idx, (ssl, spec, mel, ssl_lengths, spec_lengths, text, text_lengths, mel_lengths) in enumerate( - tqdm(train_loader) + + with ( + Progress( + TextColumn("[cyan]{task.description}"), + BarColumn(), + TextColumn("{task.completed}/{task.total}"), + SpeedColumnIteration(show_speed=True), + 
TimeRemainingColumn(elapsed_when_finished=True), + console=console, + redirect_stderr=True, + redirect_stdout=True, + transient=not (int(epoch) == int(hps.train.epochs)), + ) + if device.index == 0 + else nullcontext() as progress ): - if torch.cuda.is_available(): - spec, spec_lengths = ( - spec.cuda( - rank, - non_blocking=True, - ), - spec_lengths.cuda( - rank, - non_blocking=True, - ), - ) - mel, mel_lengths = mel.cuda(rank, non_blocking=True), mel_lengths.cuda(rank, non_blocking=True) - ssl = ssl.cuda(rank, non_blocking=True) - ssl.requires_grad = False - text, text_lengths = ( - text.cuda( - rank, - non_blocking=True, - ), - text_lengths.cuda( - rank, - non_blocking=True, - ), - ) + if isinstance(progress, Progress): + step_task: TaskID | None = progress.add_task("Steps", total=len(train_loader)) else: - spec, spec_lengths = spec.to(device), spec_lengths.to(device) - mel, mel_lengths = mel.to(device), mel_lengths.to(device) - ssl = ssl.to(device) + step_task = None + + for batch_idx, (ssl, spec, mel, ssl_lengths, spec_lengths, text, text_lengths, mel_lengths) in enumerate( + train_loader + ): + spec, spec_lengths = spec.to(device, non_blocking=True), spec_lengths.to(device, non_blocking=True) + mel, mel_lengths = mel.to(device, non_blocking=True), mel_lengths.to(device, non_blocking=True) + ssl = ssl.to(device, non_blocking=True) ssl.requires_grad = False - text, text_lengths = text.to(device), text_lengths.to(device) + text, text_lengths = text.to(device, non_blocking=True), text_lengths.to(device, non_blocking=True) - with autocast(enabled=hps.train.fp16_run): - cfm_loss = net_g( - ssl, - spec, - mel, - ssl_lengths, - spec_lengths, - text, - text_lengths, - mel_lengths, - use_grad_ckpt=hps.train.grad_ckpt, - ) - loss_gen_all = cfm_loss - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]["lr"] - losses = [cfm_loss] - logger.info("Train Epoch: {} [{:.0f}%]".format(epoch, 100.0 * batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "learning_rate": lr, "grad_norm_g": grad_norm_g} - utils.summarize( - writer=writer, - global_step=global_step, - scalars=scalar_dict, + with autocast(device_type=device.type, dtype=torch.float16, enabled=hps.train.fp16_run): + cfm_loss = net_g( + ssl, + spec, + mel, + ssl_lengths, + spec_lengths, + text, + text_lengths, + mel_lengths, + use_grad_ckpt=hps.train.grad_ckpt, ) + loss_gen_all = cfm_loss + optim_g.zero_grad() + scaler.scale(loss_gen_all).backward() + scaler.unscale_(optim_g) + grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) + scaler.step(optim_g) + scaler.update() - global_step += 1 - if epoch % hps.train.save_every_epoch == 0 and rank == 0: - if hps.train.if_save_latest == 0: - utils.save_checkpoint( - net_g, - optim_g, - hps.train.learning_rate, - epoch, - os.path.join(save_root, "G_{}.pth".format(global_step)), - ) - else: - utils.save_checkpoint( - net_g, - optim_g, - hps.train.learning_rate, - epoch, - os.path.join(save_root, "G_{}.pth".format(233333333333)), - ) - if rank == 0 and hps.train.if_save_every_weights == True: + if device.index == 0 and progress is not None and step_task is not None: + progress.advance(step_task, 1) + + if device.index == 0: + if global_step % hps.train.log_interval == 
0: + lr = optim_g.param_groups[0]["lr"] + losses = [cfm_loss] + logger.info("Train Epoch: {} [{:.0f}%]".format(epoch, 100.0 * batch_idx / len(train_loader))) + logger.info([x.item() for x in losses] + [global_step, lr]) + + scalar_dict = {"loss/g/total": loss_gen_all, "learning_rate": lr, "grad_norm_g": grad_norm_g} + utils.summarize( + writer=writer, + global_step=global_step, + scalars=scalar_dict, + ) + + global_step += 1 + + if hps.train.if_save_latest == 0: + utils.save_checkpoint( + net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(save_root, f"G_{global_step}.pth"), logger + ) + else: + utils.save_checkpoint( + net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(save_root, "G_233333333333.pth"), logger + ) + + if epoch % hps.train.save_every_epoch == 0 and device.index == 0: + if hps.train.if_save_every_weights is True: if hasattr(net_g, "module"): ckpt = net_g.module.state_dict() else: ckpt = net_g.state_dict() sim_ckpt = od() for key in ckpt: - # if "cfm"not in key: - # print(key) if key not in no_grad_names: sim_ckpt[key] = ckpt[key].half().cpu() - logger.info( - "saving ckpt %s_e%s:%s" - % ( - hps.name, - epoch, - savee( - sim_ckpt, - hps.name + "_e%s_s%s_l%s" % (epoch, global_step, lora_rank), - epoch, - global_step, - hps, - model_version=hps.model.version, - lora_rank=lora_rank, - ), - ) + save_info = save_ckpt( + sim_ckpt, + hps.name + f"_e{epoch}_s{global_step}_l{lora_rank}", + epoch, + global_step, + hps, + lora_rank=lora_rank, ) - - if rank == 0: - logger.info("====> Epoch: {}".format(epoch)) + logger.info(f"saving ckpt {hps.name}_e{epoch}:{save_info}") if __name__ == "__main__": diff --git a/GPT_SoVITS/sv.py b/GPT_SoVITS/sv.py index 22e70369..0c084c1d 100644 --- a/GPT_SoVITS/sv.py +++ b/GPT_SoVITS/sv.py @@ -1,32 +1,29 @@ -import sys -import os import torch -sys.path.append(f"{os.getcwd()}/GPT_SoVITS/eres2net") +from GPT_SoVITS.eres2net import kaldi +from GPT_SoVITS.eres2net.ERes2NetV2 import ERes2NetV2 + sv_path = "GPT_SoVITS/pretrained_models/sv/pretrained_eres2netv2w24s4ep4.ckpt" -from ERes2NetV2 import ERes2NetV2 -import kaldi as Kaldi class SV: def __init__(self, device, is_half): - pretrained_state = torch.load(sv_path, map_location="cpu", weights_only=False) + pretrained_state = torch.load(sv_path, map_location="cpu") embedding_model = ERes2NetV2(baseWidth=24, scale=4, expansion=4) embedding_model.load_state_dict(pretrained_state) embedding_model.eval() self.embedding_model = embedding_model - if is_half == False: + if is_half is False: self.embedding_model = self.embedding_model.to(device) else: self.embedding_model = self.embedding_model.half().to(device) self.is_half = is_half - def compute_embedding3(self, wav): - with torch.no_grad(): - if self.is_half == True: - wav = wav.half() - feat = torch.stack( - [Kaldi.fbank(wav0.unsqueeze(0), num_mel_bins=80, sample_frequency=16000, dither=0) for wav0 in wav] - ) - sv_emb = self.embedding_model.forward3(feat) + def compute_embedding(self, wav): + if self.is_half is True: + wav = wav.half() + feat = torch.stack( + [kaldi.fbank(wav0.unsqueeze(0), num_mel_bins=80, sample_frequency=16000, dither=0) for wav0 in wav] + ) + sv_emb = self.embedding_model.forward3(feat) return sv_emb diff --git a/GPT_SoVITS/text/LangSegmenter/__init__.py b/GPT_SoVITS/text/LangSegmenter/__init__.py index 0a764905..6ff277a7 100644 --- a/GPT_SoVITS/text/LangSegmenter/__init__.py +++ b/GPT_SoVITS/text/LangSegmenter/__init__.py @@ -1 +1,3 @@ from .langsegmenter import LangSegmenter + +__all__ = ["LangSegmenter"] diff --git 
a/GPT_SoVITS/text/LangSegmenter/langsegmenter.py b/GPT_SoVITS/text/LangSegmenter/langsegmenter.py index 99b3a422..db1512dc 100644 --- a/GPT_SoVITS/text/LangSegmenter/langsegmenter.py +++ b/GPT_SoVITS/text/LangSegmenter/langsegmenter.py @@ -1,40 +1,42 @@ import logging import re +from pathlib import Path -# jieba静音 +import fast_langdetect import jieba +from split_lang import LangSplitter + jieba.setLogLevel(logging.CRITICAL) -# 更改fast_langdetect大模型位置 -from pathlib import Path -import fast_langdetect -fast_langdetect.infer._default_detector = fast_langdetect.infer.LangDetector(fast_langdetect.infer.LangDetectConfig(cache_dir=Path(__file__).parent.parent.parent / "pretrained_models" / "fast_langdetect")) - -from split_lang import LangSplitter +fast_langdetect.infer._default_detector = fast_langdetect.infer.LangDetector( + fast_langdetect.infer.LangDetectConfig( + cache_dir=str(Path(__file__).parent.parent.parent / "pretrained_models" / "fast_langdetect") + ) +) def full_en(text): - pattern = r'^(?=.*[A-Za-z])[A-Za-z0-9\s\u0020-\u007E\u2000-\u206F\u3000-\u303F\uFF00-\uFFEF]+$' + pattern = r"^(?=.*[A-Za-z])[A-Za-z0-9\s\u0020-\u007E\u2000-\u206F\u3000-\u303F\uFF00-\uFFEF]+$" return bool(re.match(pattern, text)) def full_cjk(text): # 来自wiki cjk_ranges = [ - (0x4E00, 0x9FFF), # CJK Unified Ideographs - (0x3400, 0x4DB5), # CJK Extension A - (0x20000, 0x2A6DD), # CJK Extension B - (0x2A700, 0x2B73F), # CJK Extension C - (0x2B740, 0x2B81F), # CJK Extension D - (0x2B820, 0x2CEAF), # CJK Extension E - (0x2CEB0, 0x2EBEF), # CJK Extension F - (0x30000, 0x3134A), # CJK Extension G - (0x31350, 0x323AF), # CJK Extension H - (0x2EBF0, 0x2EE5D), # CJK Extension H + (0x4E00, 0x9FFF), # CJK Unified Ideographs + (0x3400, 0x4DB5), # CJK Extension A + (0x20000, 0x2A6DD), # CJK Extension B + (0x2A700, 0x2B73F), # CJK Extension C + (0x2B740, 0x2B81F), # CJK Extension D + (0x2B820, 0x2CEAF), # CJK Extension E + (0x2CEB0, 0x2EBEF), # CJK Extension F + (0x30000, 0x3134A), # CJK Extension G + (0x31350, 0x323AF), # CJK Extension H + (0x2EBF0, 0x2EE5D), # CJK Extension H ] - pattern = r'[0-9、-〜。!?.!?… /]+$' + pattern = r"[0-9、-〜。!?.!?… /]+$" cjk_text = "" for char in text: @@ -45,7 +47,7 @@ def full_cjk(text): return cjk_text -def split_jako(tag_lang,item): +def split_jako(tag_lang, item): if tag_lang == "ja": pattern = r"([\u3041-\u3096\u3099\u309A\u30A1-\u30FA\u30FC]+(?:[0-9、-〜。!?.!?… ]+[\u3041-\u3096\u3099\u309A\u30A1-\u30FA\u30FC]*)*)" else: @@ -53,41 +55,42 @@ def split_jako(tag_lang,item): lang_list: list[dict] = [] tag = 0 - for match in re.finditer(pattern, item['text']): + for match in re.finditer(pattern, item["text"]): if match.start() > tag: - lang_list.append({'lang':item['lang'],'text':item['text'][tag:match.start()]}) + lang_list.append({"lang": item["lang"], "text": item["text"][tag : match.start()]}) tag = match.end() - lang_list.append({'lang':tag_lang,'text':item['text'][match.start():match.end()]}) + lang_list.append({"lang": tag_lang, "text": item["text"][match.start() : match.end()]}) - if tag < len(item['text']): - lang_list.append({'lang':item['lang'],'text':item['text'][tag:len(item['text'])]}) + if tag < len(item["text"]): + lang_list.append({"lang": item["lang"], "text": item["text"][tag : len(item["text"])]}) return lang_list def merge_lang(lang_list, item): - if lang_list and item['lang'] == lang_list[-1]['lang']: - lang_list[-1]['text'] += item['text'] + if lang_list and item["lang"] == lang_list[-1]["lang"]: + lang_list[-1]["text"] += item["text"] else: lang_list.append(item) return 
lang_list -class LangSegmenter(): +class LangSegmenter: # 默认过滤器, 基于gsv目前四种语言 DEFAULT_LANG_MAP = { "zh": "zh", "yue": "zh", # 粤语 "wuu": "zh", # 吴语 "zh-cn": "zh", - "zh-tw": "x", # 繁体设置为x + "zh-tw": "x", # 繁体设置为x "ko": "ko", "ja": "ja", "en": "en", } - def getTexts(text,default_lang = ""): + @staticmethod + def getTexts(text, default_lang=""): lang_splitter = LangSplitter(lang_map=LangSegmenter.DEFAULT_LANG_MAP) lang_splitter.merge_across_digit = False substr = lang_splitter.split_by_lang(text=text) @@ -97,31 +100,31 @@ class LangSegmenter(): have_num = False for _, item in enumerate(substr): - dict_item = {'lang':item.lang,'text':item.text} + dict_item = {"lang": item.lang, "text": item.text} - if dict_item['lang'] == 'digit': + if dict_item["lang"] == "digit": if default_lang != "": - dict_item['lang'] = default_lang + dict_item["lang"] = default_lang else: have_num = True - lang_list = merge_lang(lang_list,dict_item) + lang_list = merge_lang(lang_list, dict_item) continue # 处理短英文被识别为其他语言的问题 - if full_en(dict_item['text']): - dict_item['lang'] = 'en' - lang_list = merge_lang(lang_list,dict_item) + if full_en(dict_item["text"]): + dict_item["lang"] = "en" + lang_list = merge_lang(lang_list, dict_item) continue if default_lang != "": - dict_item['lang'] = default_lang - lang_list = merge_lang(lang_list,dict_item) + dict_item["lang"] = default_lang + lang_list = merge_lang(lang_list, dict_item) continue else: # 处理非日语夹日文的问题(不包含CJK) ja_list: list[dict] = [] - if dict_item['lang'] != 'ja': - ja_list = split_jako('ja',dict_item) + if dict_item["lang"] != "ja": + ja_list = split_jako("ja", dict_item) if not ja_list: ja_list.append(dict_item) @@ -130,8 +133,8 @@ class LangSegmenter(): ko_list: list[dict] = [] temp_list: list[dict] = [] for _, ko_item in enumerate(ja_list): - if ko_item["lang"] != 'ko': - ko_list = split_jako('ko',ko_item) + if ko_item["lang"] != "ko": + ko_list = split_jako("ko", ko_item) if ko_list: temp_list.extend(ko_list) @@ -141,77 +144,76 @@ class LangSegmenter(): # 未存在非日韩文夹日韩文 if len(temp_list) == 1: # 未知语言检查是否为CJK - if dict_item['lang'] == 'x': - cjk_text = full_cjk(dict_item['text']) + if dict_item["lang"] == "x": + cjk_text = full_cjk(dict_item["text"]) if cjk_text: - dict_item = {'lang':'zh','text':cjk_text} - lang_list = merge_lang(lang_list,dict_item) + dict_item = {"lang": "zh", "text": cjk_text} + lang_list = merge_lang(lang_list, dict_item) else: - lang_list = merge_lang(lang_list,dict_item) + lang_list = merge_lang(lang_list, dict_item) continue else: - lang_list = merge_lang(lang_list,dict_item) + lang_list = merge_lang(lang_list, dict_item) continue # 存在非日韩文夹日韩文 for _, temp_item in enumerate(temp_list): # 未知语言检查是否为CJK - if temp_item['lang'] == 'x': - cjk_text = full_cjk(temp_item['text']) + if temp_item["lang"] == "x": + cjk_text = full_cjk(temp_item["text"]) if cjk_text: - lang_list = merge_lang(lang_list,{'lang':'zh','text':cjk_text}) + lang_list = merge_lang(lang_list, {"lang": "zh", "text": cjk_text}) else: - lang_list = merge_lang(lang_list,temp_item) + lang_list = merge_lang(lang_list, temp_item) else: - lang_list = merge_lang(lang_list,temp_item) + lang_list = merge_lang(lang_list, temp_item) # 有数字 if have_num: temp_list = lang_list lang_list = [] for i, temp_item in enumerate(temp_list): - if temp_item['lang'] == 'digit': + if temp_item["lang"] == "digit": if default_lang: - temp_item['lang'] = default_lang + temp_item["lang"] = default_lang elif lang_list and i == len(temp_list) - 1: - temp_item['lang'] = lang_list[-1]['lang'] + temp_item["lang"] = 
lang_list[-1]["lang"] elif not lang_list and i < len(temp_list) - 1: - temp_item['lang'] = temp_list[1]['lang'] + temp_item["lang"] = temp_list[1]["lang"] elif lang_list and i < len(temp_list) - 1: - if lang_list[-1]['lang'] == temp_list[i + 1]['lang']: - temp_item['lang'] = lang_list[-1]['lang'] - elif lang_list[-1]['text'][-1] in [",",".","!","?",",","。","!","?"]: - temp_item['lang'] = temp_list[i + 1]['lang'] - elif temp_list[i + 1]['text'][0] in [",",".","!","?",",","。","!","?"]: - temp_item['lang'] = lang_list[-1]['lang'] - elif temp_item['text'][-1] in ["。","."]: - temp_item['lang'] = lang_list[-1]['lang'] - elif len(lang_list[-1]['text']) >= len(temp_list[i + 1]['text']): - temp_item['lang'] = lang_list[-1]['lang'] + if lang_list[-1]["lang"] == temp_list[i + 1]["lang"]: + temp_item["lang"] = lang_list[-1]["lang"] + elif lang_list[-1]["text"][-1] in [",", ".", "!", "?", ",", "。", "!", "?"]: + temp_item["lang"] = temp_list[i + 1]["lang"] + elif temp_list[i + 1]["text"][0] in [",", ".", "!", "?", ",", "。", "!", "?"]: + temp_item["lang"] = lang_list[-1]["lang"] + elif temp_item["text"][-1] in ["。", "."]: + temp_item["lang"] = lang_list[-1]["lang"] + elif len(lang_list[-1]["text"]) >= len(temp_list[i + 1]["text"]): + temp_item["lang"] = lang_list[-1]["lang"] else: - temp_item['lang'] = temp_list[i + 1]['lang'] + temp_item["lang"] = temp_list[i + 1]["lang"] else: - temp_item['lang'] = 'zh' - - lang_list = merge_lang(lang_list,temp_item) + temp_item["lang"] = "zh" + lang_list = merge_lang(lang_list, temp_item) # 筛X temp_list = lang_list lang_list = [] for _, temp_item in enumerate(temp_list): - if temp_item['lang'] == 'x': + if temp_item["lang"] == "x": if lang_list: - temp_item['lang'] = lang_list[-1]['lang'] + temp_item["lang"] = lang_list[-1]["lang"] elif len(temp_list) > 1: - temp_item['lang'] = temp_list[1]['lang'] + temp_item["lang"] = temp_list[1]["lang"] else: - temp_item['lang'] = 'zh' + temp_item["lang"] = "zh" - lang_list = merge_lang(lang_list,temp_item) + lang_list = merge_lang(lang_list, temp_item) return lang_list - + if __name__ == "__main__": text = "MyGO?,你也喜欢まいご吗?" @@ -221,5 +223,5 @@ if __name__ == "__main__": print(LangSegmenter.getTexts(text)) text = "当时ThinkPad T60刚刚发布,一同推出的还有一款名为Advanced Dock的扩展坞配件。这款扩展坞通过连接T60底部的插槽,扩展出包括PCIe在内的一大堆接口,并且自带电源,让T60可以安装桌面显卡来提升性能。" - print(LangSegmenter.getTexts(text,"zh")) - print(LangSegmenter.getTexts(text)) \ No newline at end of file + print(LangSegmenter.getTexts(text, "zh")) + print(LangSegmenter.getTexts(text)) diff --git a/GPT_SoVITS/text/__init__.py b/GPT_SoVITS/text/__init__.py index 82df1fbb..e12e98ba 100644 --- a/GPT_SoVITS/text/__init__.py +++ b/GPT_SoVITS/text/__init__.py @@ -1,11 +1,11 @@ import os -# if os.environ.get("version","v1")=="v1": -# from text.symbols import symbols -# else: -# from text.symbols2 import symbols +import warnings -from text import symbols as symbols_v1 -from text import symbols2 as symbols_v2 +from . import symbols as symbols_v1 +from . 
import symbols2 as symbols_v2 + +warnings.filterwarnings("ignore", category=UserWarning, module="jieba_fast._compat") +warnings.filterwarnings("ignore", category=UserWarning, module="jieba._compat") _symbol_to_id_v1 = {s: i for i, s in enumerate(symbols_v1.symbols)} _symbol_to_id_v2 = {s: i for i, s in enumerate(symbols_v2.symbols)} diff --git a/GPT_SoVITS/text/cantonese.py b/GPT_SoVITS/text/cantonese.py index 1f07c414..6e4dfc39 100644 --- a/GPT_SoVITS/text/cantonese.py +++ b/GPT_SoVITS/text/cantonese.py @@ -1,13 +1,17 @@ # reference: https://huggingface.co/spaces/Naozumi0512/Bert-VITS2-Cantonese-Yue/blob/main/text/chinese.py import re + import cn2an import ToJyutping -from text.symbols import punctuation -from text.zh_normalization.text_normlization import TextNormalizer +from .symbols import punctuation +from .zh_normalization.text_normlization import TextNormalizer + + +def normalizer(x): + return cn2an.transform(x, "an2cn") -normalizer = lambda x: cn2an.transform(x, "an2cn") INITIALS = [ "aa", @@ -194,12 +198,6 @@ def get_jyutping(text): return jyutping_array -def get_bert_feature(text, word2ph): - from text import chinese_bert - - return chinese_bert.get_bert_feature(text, word2ph) - - def g2p(text): # word2ph = [] jyuping = get_jyutping(text) diff --git a/GPT_SoVITS/text/chinese.py b/GPT_SoVITS/text/chinese.py index 944c9cb7..2c134f1a 100644 --- a/GPT_SoVITS/text/chinese.py +++ b/GPT_SoVITS/text/chinese.py @@ -1,14 +1,22 @@ +import logging import os import re import cn2an -from pypinyin import lazy_pinyin, Style +import jieba_fast +import jieba_fast.posseg as psg +from pypinyin import Style, lazy_pinyin -from text.symbols import punctuation -from text.tone_sandhi import ToneSandhi -from text.zh_normalization.text_normlization import TextNormalizer +from .symbols import punctuation +from .tone_sandhi import ToneSandhi +from .zh_normalization.text_normlization import TextNormalizer + +jieba_fast.setLogLevel(logging.CRITICAL) + + +def normalizer(x): + return cn2an.transform(x, "an2cn") -normalizer = lambda x: cn2an.transform(x, "an2cn") current_file_path = os.path.dirname(__file__) pinyin_to_symbol_map = { @@ -16,12 +24,6 @@ pinyin_to_symbol_map = { for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines() } -import jieba_fast -import logging - -jieba_fast.setLogLevel(logging.CRITICAL) -import jieba_fast.posseg as psg - rep_map = { ":": ",", diff --git a/GPT_SoVITS/text/chinese2.py b/GPT_SoVITS/text/chinese2.py index dcce0d96..3730cae5 100644 --- a/GPT_SoVITS/text/chinese2.py +++ b/GPT_SoVITS/text/chinese2.py @@ -1,15 +1,24 @@ +import logging import os import re import cn2an -from pypinyin import lazy_pinyin, Style +import jieba_fast +import jieba_fast.posseg as psg +from pypinyin import Style, lazy_pinyin from pypinyin.contrib.tone_convert import to_finals_tone3, to_initials -from text.symbols import punctuation -from text.tone_sandhi import ToneSandhi -from text.zh_normalization.text_normlization import TextNormalizer +from .g2pw import G2PWPinyin, correct_pronunciation +from .symbols import punctuation +from .tone_sandhi import ToneSandhi +from .zh_normalization.text_normlization import TextNormalizer + +jieba_fast.setLogLevel(logging.CRITICAL) + + +def normalizer(x): + return cn2an.transform(x, "an2cn") -normalizer = lambda x: cn2an.transform(x, "an2cn") current_file_path = os.path.dirname(__file__) pinyin_to_symbol_map = { @@ -17,20 +26,11 @@ pinyin_to_symbol_map = { for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines() } 
-import jieba_fast -import logging -jieba_fast.setLogLevel(logging.CRITICAL) -import jieba_fast.posseg as psg +parent_directory = os.path.dirname(current_file_path) -# is_g2pw_str = os.environ.get("is_g2pw", "True")##默认开启 -# is_g2pw = False#True if is_g2pw_str.lower() == 'true' else False -is_g2pw = True # True if is_g2pw_str.lower() == 'true' else False +is_g2pw = True if is_g2pw: - # print("当前使用g2pw进行拼音推理") - from text.g2pw import G2PWPinyin, correct_pronunciation - - parent_directory = os.path.dirname(current_file_path) g2pw = G2PWPinyin( model_dir="GPT_SoVITS/text/G2PWModel", model_source=os.environ.get("bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"), @@ -139,7 +139,7 @@ not_erhua = { } -def _merge_erhua(initials: list[str], finals: list[str], word: str, pos: str) -> list[list[str]]: +def _merge_erhua(initials: list[str], finals: list[str], word: str, pos: str) -> tuple[list[str], list[str]]: """ Do erhub. """ diff --git a/GPT_SoVITS/text/cleaner.py b/GPT_SoVITS/text/cleaner.py index 7ba8f376..fa47938b 100644 --- a/GPT_SoVITS/text/cleaner.py +++ b/GPT_SoVITS/text/cleaner.py @@ -1,14 +1,11 @@ -from text import cleaned_text_to_sequence import os -# if os.environ.get("version","v1")=="v1": -# from text import chinese -# from text.symbols import symbols -# else: -# from text import chinese2 as chinese -# from text.symbols2 import symbols +import warnings -from text import symbols as symbols_v1 -from text import symbols2 as symbols_v2 +from . import cleaned_text_to_sequence +from . import symbols as symbols_v1 +from . import symbols2 as symbols_v2 + +warnings.filterwarnings("ignore", category=UserWarning, module="jieba_fast._compat") special = [ # ("%", "zh", "SP"), @@ -18,7 +15,7 @@ special = [ ] -def clean_text(text, language, version=None): +def clean_text(text, language, version=None) -> tuple[list[str], list[int] | None, str]: if version is None: version = os.environ.get("version", "v2") if version == "v1": @@ -34,7 +31,9 @@ def clean_text(text, language, version=None): for special_s, special_l, target_symbol in special: if special_s in text and language == special_l: return clean_special(text, language, special_s, target_symbol, version) - language_module = __import__("text." + language_module_map[language], fromlist=[language_module_map[language]]) + language_module = __import__( + "GPT_SoVITS.text." + language_module_map[language], fromlist=[language_module_map[language]] + ) if hasattr(language_module, "text_normalize"): norm_text = language_module.text_normalize(text) else: @@ -69,7 +68,9 @@ def clean_special(text, language, special_s, target_symbol, version=None): 特殊静音段sp符号处理 """ text = text.replace(special_s, ",") - language_module = __import__("text." + language_module_map[language], fromlist=[language_module_map[language]]) + language_module = __import__( + "GPT_SoVITS.text." 
+ language_module_map[language], fromlist=[language_module_map[language]] + ) norm_text = language_module.text_normalize(text) phones = language_module.g2p(norm_text) new_ph = [] diff --git a/GPT_SoVITS/text/en_normalization/expend.py b/GPT_SoVITS/text/en_normalization/expend.py index bbd607cd..cae91a87 100644 --- a/GPT_SoVITS/text/en_normalization/expend.py +++ b/GPT_SoVITS/text/en_normalization/expend.py @@ -1,11 +1,10 @@ # by https://github.com/Cosmo-klara -from __future__ import print_function - import re -import inflect import unicodedata +import inflect + # 后缀计量单位替换表 measurement_map = { "m": ["meter", "meters"], @@ -109,7 +108,7 @@ def _expand_measurement(m): num = int(m.group(1).replace(sign, "").replace(".", "")) decimal_part = m.group(2) # 上面判断的漏洞,比如 0.1 的情况,在这里排除了 - if decimal_part == None and num == 1: + if decimal_part is None and num == 1: ptr = 0 return m.group(1).replace(sign, " " + measurement_map[sign][ptr]) diff --git a/GPT_SoVITS/text/english.py b/GPT_SoVITS/text/english.py index f6c69449..449f4d46 100644 --- a/GPT_SoVITS/text/english.py +++ b/GPT_SoVITS/text/english.py @@ -1,19 +1,18 @@ -import pickle import os +import pickle import re +from builtins import str as unicode + import wordsegment from g2p_en import G2p - -from text.symbols import punctuation - -from text.symbols2 import symbols - -from builtins import str as unicode -from text.en_normalization.expend import normalize +from nltk import pos_tag from nltk.tokenize import TweetTokenizer +from .en_normalization.expend import normalize +from .symbols import punctuation +from .symbols2 import symbols + word_tokenize = TweetTokenizer().tokenize -from nltk import pos_tag current_file_path = os.path.dirname(__file__) CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep") diff --git a/GPT_SoVITS/text/g2pw/__init__.py b/GPT_SoVITS/text/g2pw/__init__.py index 5ab811d9..385cda97 100644 --- a/GPT_SoVITS/text/g2pw/__init__.py +++ b/GPT_SoVITS/text/g2pw/__init__.py @@ -1 +1 @@ -from text.g2pw.g2pw import * +from .g2pw import * diff --git a/GPT_SoVITS/text/g2pw/onnx_api.py b/GPT_SoVITS/text/g2pw/onnx_api.py index 1d5e4231..a1fb9a36 100644 --- a/GPT_SoVITS/text/g2pw/onnx_api.py +++ b/GPT_SoVITS/text/g2pw/onnx_api.py @@ -3,7 +3,6 @@ import json import os -import warnings import zipfile from typing import Any, Dict, List, Tuple @@ -22,10 +21,9 @@ from .utils import load_config onnxruntime.set_default_logger_severity(3) try: onnxruntime.preload_dlls() -except: +except Exception: pass # traceback.print_exc() -warnings.filterwarnings("ignore") model_version = "1.1" @@ -97,7 +95,13 @@ class G2PWOnnxConverter: self.session_g2pW = onnxruntime.InferenceSession( os.path.join(uncompress_path, "g2pW.onnx"), sess_options=sess_options, - providers=["CUDAExecutionProvider", "CPUExecutionProvider"], + providers=[ + ( + "CUDAExecutionProvider", + {"device_id": torch.cuda.current_device()}, + ), + ("CPUExecutionProvider", {}), + ], ) else: self.session_g2pW = onnxruntime.InferenceSession( diff --git a/GPT_SoVITS/text/japanese.py b/GPT_SoVITS/text/japanese.py index a54d0cf0..4ffd62db 100644 --- a/GPT_SoVITS/text/japanese.py +++ b/GPT_SoVITS/text/japanese.py @@ -1,7 +1,9 @@ # modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py -import re -import os import hashlib +import os +import re + +from .symbols import punctuation try: import pyopenjtalk @@ -77,8 +79,6 @@ except Exception: pass -from text.symbols import punctuation - # Regular expression matching Japanese without punctuation marks: _japanese_characters = 
re.compile( r"[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]" diff --git a/GPT_SoVITS/text/korean.py b/GPT_SoVITS/text/korean.py index 254b05cf..e041712a 100644 --- a/GPT_SoVITS/text/korean.py +++ b/GPT_SoVITS/text/korean.py @@ -1,12 +1,16 @@ # reference: https://github.com/ORI-Muchim/MB-iSTFT-VITS-Korean/blob/main/text/korean.py -import re -from jamo import h2j, j2hcj -import ko_pron -from g2pk2 import G2p - import importlib import os +import re +import shutil +import sys + +import ko_pron +from g2pk2 import G2p +from jamo import h2j, j2hcj + +from .symbols2 import symbols # 防止win下无法读取模型 if os.name == "nt": @@ -21,14 +25,11 @@ if os.name == "nt": else: installpath = spam_spec.submodule_search_locations[0] if not (re.match(r"^[A-Za-z0-9_/\\:.\-]*$", installpath)): - import sys from eunjeon import Mecab as _Mecab class Mecab(_Mecab): def get_dicpath(installpath): if not (re.match(r"^[A-Za-z0-9_/\\:.\-]*$", installpath)): - import shutil - python_dir = os.getcwd() if installpath[: len(python_dir)].upper() == python_dir.upper(): dicpath = os.path.join(os.path.relpath(installpath, python_dir), "data", "mecabrc") @@ -56,8 +57,6 @@ if os.name == "nt": G2p = win_G2p -from text.symbols2 import symbols - # This is a list of Korean classifiers preceded by pure Korean numerals. _korean_classifiers = ( "군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통" diff --git a/GPT_SoVITS/text/zh_normalization/__init__.py b/GPT_SoVITS/text/zh_normalization/__init__.py index 46b367a6..6c1721a4 100644 --- a/GPT_SoVITS/text/zh_normalization/__init__.py +++ b/GPT_SoVITS/text/zh_normalization/__init__.py @@ -11,4 +11,4 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from text.zh_normalization.text_normlization import * +from .text_normlization import * diff --git a/GPT_SoVITS/text/zh_normalization/num.py b/GPT_SoVITS/text/zh_normalization/num.py index 14d602b0..fbc3684b 100644 --- a/GPT_SoVITS/text/zh_normalization/num.py +++ b/GPT_SoVITS/text/zh_normalization/num.py @@ -257,6 +257,8 @@ def replace_to_range(match) -> str: RE_VERSION_NUM = re.compile(r"((\d+)(\.\d+)(\.\d+)?(\.\d+)+)") + + def replace_vrsion_num(match) -> str: """ Args: @@ -273,7 +275,6 @@ def replace_vrsion_num(match) -> str: return result - def _get_value(value_string: str, use_zero: bool = True) -> List[str]: stripped = value_string.lstrip("0") if len(stripped) == 0: diff --git a/GPT_SoVITS/utils.py b/GPT_SoVITS/utils.py index 08e18384..a58430ad 100644 --- a/GPT_SoVITS/utils.py +++ b/GPT_SoVITS/utils.py @@ -3,9 +3,11 @@ import glob import json import logging import os +import shutil import subprocess import sys import traceback +from time import time as ttime import librosa import numpy as np @@ -42,9 +44,9 @@ def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False saved_state_dict[k].shape, v.shape, ) - except: + except AssertionError: traceback.print_exc() - print("error, %s is not in the checkpoint" % k) # shape不对也会,比如text_embedding当cleaner修改时 + print(f"error, {k} is not in the checkpoint") # shape不对也会,比如text_embedding当cleaner修改时 new_state_dict[k] = v if hasattr(model, "module"): model.module.load_state_dict(new_state_dict) @@ -60,26 +62,22 @@ def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False return model, optimizer, learning_rate, iteration -import shutil -from time import time as ttime - - -def my_save(fea, path): #####fix issue: torch.save doesn't support chinese path +def save(fea, path): #####fix issue: torch.save doesn't support chinese path dir = os.path.dirname(path) name = os.path.basename(path) - tmp_path = "%s.pth" % (ttime()) + tmp_path = f"{ttime()}.pth" torch.save(fea, tmp_path) shutil.move(tmp_path, "%s/%s" % (dir, name)) -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): +def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path, logger): logger.info("Saving model and optimizer state at iteration {} to {}".format(iteration, checkpoint_path)) if hasattr(model, "module"): state_dict = model.module.state_dict() else: state_dict = model.state_dict() # torch.save( - my_save( + save( { "model": state_dict, "iteration": iteration, @@ -136,8 +134,7 @@ def plot_spectrogram_to_numpy(spectrogram): plt.tight_layout() fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) + data = np.asarray(fig.canvas.renderer.buffer_rgba(), dtype=np.uint8)[:, :, :3] plt.close() return data @@ -169,8 +166,7 @@ def plot_alignment_to_numpy(alignment, info=None): plt.tight_layout() fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) + data = np.asarray(fig.canvas.renderer.buffer_rgba(), dtype=np.uint8)[:, :, :3] plt.close() return data @@ -245,18 +241,31 @@ def clean_checkpoints(path_to_models="logs/44k/", n_ckpts_to_keep=2, sort_by_tim import re ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))] - name_key = lambda _f: int(re.compile("._(\d+)\.pth").match(_f).group(1)) - time_key = lambda _f: 
os.path.getmtime(os.path.join(path_to_models, _f)) + + def name_key(_f): + return int(re.compile("._(\d+)\.pth").match(_f).group(1)) + + def time_key(_f): + return os.path.getmtime(os.path.join(path_to_models, _f)) + sort_key = time_key if sort_by_time else name_key - x_sorted = lambda _x: sorted( - [f for f in ckpts_files if f.startswith(_x) and not f.endswith("_0.pth")], - key=sort_key, - ) + + def x_sorted(_x): + return sorted( + [f for f in ckpts_files if f.startswith(_x) and not f.endswith("_0.pth")], + key=sort_key, + ) + to_del = [ os.path.join(path_to_models, fn) for fn in (x_sorted("G")[:-n_ckpts_to_keep] + x_sorted("D")[:-n_ckpts_to_keep]) ] - del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}") - del_routine = lambda x: [os.remove(x), del_info(x)] + + def del_info(fn): + return logger.info(f".. Free up space by deleting ckpt {fn}") + + def del_routine(x): + return [os.remove(x), del_info(x)] + rs = [del_routine(fn) for fn in to_del] @@ -324,7 +333,7 @@ def get_logger(model_dir, filename="train.log"): class HParams: def __init__(self, **kwargs): for k, v in kwargs.items(): - if type(v) == dict: + if isinstance(v, dict): v = HParams(**v) self[k] = v @@ -352,10 +361,12 @@ class HParams: def __repr__(self): return self.__dict__.__repr__() - -if __name__ == "__main__": - print( - load_wav_to_torch( - "/home/fish/wenetspeech/dataset_vq/Y0000022499_wHFSeHEx9CM/S00261.flac", - ) - ) + def to_dict(self): + """Convert HParams to a plain dictionary recursively""" + result = {} + for k, v in self.__dict__.items(): + if isinstance(v, HParams): + result[k] = v.to_dict() + else: + result[k] = v + return result diff --git a/README.md b/README.md index 86d50ea2..f85556b2 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ A Powerful Few-shot Voice Conversion and Text-to-Speech WebUI.

--- -## Features: +## Features 1. **Zero-shot TTS:** Input a 5-second vocal sample and experience instant text-to-speech conversion. @@ -41,12 +41,16 @@ A Powerful Few-shot Voice Conversion and Text-to-Speech WebUI.

Unseen speakers few-shot fine-tuning demo: -https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb + -**RTF(inference speed) of GPT-SoVITS v2 ProPlus**: -0.028 tested in 4060Ti, 0.014 tested in 4090 (1400words~=4min, inference time is 3.36s), 0.526 in M4 CPU. You can test our [huggingface demo](https://lj1995-gpt-sovits-proplus.hf.space/) (half H200) to experience high-speed inference . +## Infer Speed -请不要尬黑GPT-SoVITS推理速度慢,谢谢! +| Device | RTF | Batch Size | Backend | +| ----------- | ----- | ---------- | --------------------------- | +| RTX 5090 | 0.05 | 1 | Flash Attn Varlen CUDAGraph | +| Apple M4 | 0.21 | 1 | MLX Quantized Affined | +| RTX 4090 | 0.014 | 24 | Flash Attn Varlen CUDAGraph | +| RTX 4060 Ti | 0.028 | 28 | Flash Attn Varlen CUDAGraph | **User guide: [简体中文](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e) | [English](https://rentry.co/GPT-SoVITS-guide#/)** @@ -56,15 +60,13 @@ For users in China, you can [click here](https://www.codewithgpu.com/i/RVC-Boss/ ### Tested Environments -| Python Version | PyTorch Version | Device | -| -------------- | ---------------- | ------------- | -| Python 3.10 | PyTorch 2.5.1 | CUDA 12.4 | -| Python 3.11 | PyTorch 2.5.1 | CUDA 12.4 | -| Python 3.11 | PyTorch 2.7.0 | CUDA 12.8 | -| Python 3.9 | PyTorch 2.8.0dev | CUDA 12.8 | -| Python 3.9 | PyTorch 2.5.1 | Apple silicon | -| Python 3.11 | PyTorch 2.7.0 | Apple silicon | -| Python 3.9 | PyTorch 2.2.2 | CPU | +| Python Version | PyTorch Version | Device | +| -------------- | --------------- | ------------- | +| Python 3.10 | PyTorch 2.5.1 | CUDA 12.4 | +| Python 3.11 | PyTorch 2.5.1 | CUDA 12.4 | +| Python 3.11 | PyTorch 2.7.0 | CUDA 12.8 | +| Python 3.11 | PyTorch 2.8.0 | Apple Silicon | +| Python 3.10 | PyTorch 2.8.0 | CPU | ### Windows @@ -97,7 +99,7 @@ Install the program by running the following commands: ```bash conda create -n GPTSoVits python=3.10 conda activate GPTSoVits -bash install.sh --device --source [--download-uvr5] +bash install.sh --device --source [--download-uvr5] ``` ### Install Manually @@ -118,13 +120,13 @@ pip install -r requirements.txt ```bash conda activate GPTSoVits -conda install ffmpeg +conda install ffmpeg=7 -c conda-forge ``` ##### Ubuntu/Debian Users ```bash -sudo apt install ffmpeg +sudo apt install ffmpeg=7 sudo apt install libsox-dev ``` @@ -214,7 +216,7 @@ docker exec -it ``` -if you want to switch to V1,then - -```bash -python webui.py v1 -``` - -Or maunally switch version in WebUI - ### Finetune #### Path Auto-filling is now supported @@ -279,13 +273,13 @@ Double-click `go-webui-v2.bat` or use `go-webui-v2.ps1` ,then open the inference #### Others ```bash -python GPT_SoVITS/inference_webui.py +PYTHONPATH=. python GPT_SoVITS/inference_webui.py -b -p ``` OR ```bash -python webui.py +PYTHONPATH=. python webui.py ``` then open the inference webui at `1-GPT-SoVITS-TTS/1C-inference` @@ -332,7 +326,7 @@ Use v3 from v2 environment: 3. Download v3 pretrained models (s1v3.ckpt, s2Gv3.pth and models--nvidia--bigvgan_v2_24khz_100band_256x folder) from [huggingface](https://huggingface.co/lj1995/GPT-SoVITS/tree/main) and put them into `GPT_SoVITS/pretrained_models`. 
- additional: for Audio Super Resolution model, you can read [how to download](./tools/AP_BWE_main/24kto48k/readme.txt) + additional: for Audio Super Resolution model, you can read [how to download](./tools/AP_BWE/24kto48k/readme.txt) ## V4 Release Notes diff --git a/api.py b/api.py index cc0896a2..3f409331 100644 --- a/api.py +++ b/api.py @@ -141,37 +141,40 @@ RESP: 无 """ import argparse +import logging import os import re -import sys - -now_dir = os.getcwd() -sys.path.append(now_dir) -sys.path.append("%s/GPT_SoVITS" % (now_dir)) - import signal -from text.LangSegmenter import LangSegmenter +import subprocess +import sys +import threading +from io import BytesIO from time import time as ttime + +import librosa +import numpy as np +import soundfile as sf import torch import torchaudio -import librosa -import soundfile as sf -from fastapi import FastAPI, Request, Query -from fastapi.responses import StreamingResponse, JSONResponse import uvicorn -from transformers import AutoModelForMaskedLM, AutoTokenizer -import numpy as np -from feature_extractor import cnhubert -from io import BytesIO -from module.models import Generator, SynthesizerTrn, SynthesizerTrnV3 +from fastapi import FastAPI, Query, Request +from fastapi.responses import JSONResponse, StreamingResponse from peft import LoraConfig, get_peft_model -from AR.models.t2s_lightning_module import Text2SemanticLightningModule -from text import cleaned_text_to_sequence -from text.cleaner import clean_text -from module.mel_processing import spectrogram_torch +from transformers import AutoModelForMaskedLM, AutoTokenizer + import config as global_config -import logging -import subprocess +from config import pretrained_sovits_name +from GPT_SoVITS.AR.models.t2s_lightning_module import Text2SemanticLightningModule +from GPT_SoVITS.BigVGAN import bigvgan +from GPT_SoVITS.feature_extractor import cnhubert +from GPT_SoVITS.module.mel_processing import mel_spectrogram_torch, spectrogram_torch +from GPT_SoVITS.module.models import Generator, SynthesizerTrn, SynthesizerTrnV3 +from GPT_SoVITS.process_ckpt import get_sovits_version_from_path_fast, load_sovits_new +from GPT_SoVITS.sv import SV +from GPT_SoVITS.text import cleaned_text_to_sequence +from GPT_SoVITS.text.cleaner import clean_text +from GPT_SoVITS.text.LangSegmenter import LangSegmenter +from tools.audio_sr import AP_BWE class DefaultRefer: @@ -206,10 +209,8 @@ def clean_hifigan_model(): if hifigan_model: hifigan_model = hifigan_model.cpu() hifigan_model = None - try: + if torch.cuda.is_available(): torch.cuda.empty_cache() - except: - pass def clean_bigvgan_model(): @@ -217,10 +218,8 @@ def clean_bigvgan_model(): if bigvgan_model: bigvgan_model = bigvgan_model.cpu() bigvgan_model = None - try: + if torch.cuda.is_available(): torch.cuda.empty_cache() - except: - pass def clean_sv_cn_model(): @@ -228,25 +227,22 @@ def clean_sv_cn_model(): if sv_cn_model: sv_cn_model.embedding_model = sv_cn_model.embedding_model.cpu() sv_cn_model = None - try: + if torch.cuda.is_available(): torch.cuda.empty_cache() - except: - pass def init_bigvgan(): global bigvgan_model, hifigan_model, sv_cn_model - from BigVGAN import bigvgan bigvgan_model = bigvgan.BigVGAN.from_pretrained( - "%s/GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x" % (now_dir,), + "GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x", use_cuda_kernel=False, ) # if True, RuntimeError: Ninja is required to load C++ extensions # remove weight norm in the model and set to eval mode 
bigvgan_model.remove_weight_norm() bigvgan_model = bigvgan_model.eval() - if is_half == True: + if is_half is True: bigvgan_model = bigvgan_model.half().to(device) else: bigvgan_model = bigvgan_model.to(device) @@ -268,20 +264,17 @@ def init_hifigan(): hifigan_model.eval() hifigan_model.remove_weight_norm() state_dict_g = torch.load( - "%s/GPT_SoVITS/pretrained_models/gsv-v4-pretrained/vocoder.pth" % (now_dir,), + "GPT_SoVITS/pretrained_models/gsv-v4-pretrained/vocoder.pth", map_location="cpu", weights_only=False, ) print("loading vocoder", hifigan_model.load_state_dict(state_dict_g)) - if is_half == True: + if is_half is True: hifigan_model = hifigan_model.half().to(device) else: hifigan_model = hifigan_model.to(device) -from sv import SV - - def init_sv_cn(): global hifigan_model, bigvgan_model, sv_cn_model sv_cn_model = SV(device, is_half) @@ -292,14 +285,12 @@ resample_transform_dict = {} def resample(audio_tensor, sr0, sr1, device): global resample_transform_dict - key = "%s-%s-%s" % (sr0, sr1, str(device)) + key = f"{sr0}-{sr1}-{device}" if key not in resample_transform_dict: resample_transform_dict[key] = torchaudio.transforms.Resample(sr0, sr1).to(device) return resample_transform_dict[key](audio_tensor) -from module.mel_processing import mel_spectrogram_torch - spec_min = -12 spec_max = 2 @@ -312,32 +303,32 @@ def denorm_spec(x): return (x + 1) / 2 * (spec_max - spec_min) + spec_min -mel_fn = lambda x: mel_spectrogram_torch( - x, - **{ - "n_fft": 1024, - "win_size": 1024, - "hop_size": 256, - "num_mels": 100, - "sampling_rate": 24000, - "fmin": 0, - "fmax": None, - "center": False, - }, -) -mel_fn_v4 = lambda x: mel_spectrogram_torch( - x, - **{ - "n_fft": 1280, - "win_size": 1280, - "hop_size": 320, - "num_mels": 100, - "sampling_rate": 32000, - "fmin": 0, - "fmax": None, - "center": False, - }, -) +def mel_fn(x): + return mel_spectrogram_torch( + y=x, + n_fft=1024, + num_mels=100, + sampling_rate=24000, + hop_size=256, + win_size=1024, + fmin=0, + fmax=None, + center=False, + ) + + +def mel_fn_v4(x): + return mel_spectrogram_torch( + y=x, + n_fft=1280, + num_mels=100, + sampling_rate=32000, + hop_size=320, + win_size=1280, + fmin=0, + fmax=None, + center=False, + ) sr_model = None @@ -345,9 +336,7 @@ sr_model = None def audio_sr(audio, sr): global sr_model - if sr_model == None: - from tools.audio_sr import AP_BWE - + if sr_model is None: try: sr_model = AP_BWE(device, DictToAttrRecursive) except FileNotFoundError: @@ -375,12 +364,7 @@ class Sovits: self.hps = hps -from process_ckpt import get_sovits_version_from_path_fast, load_sovits_new - - def get_sovits_weights(sovits_path): - from config import pretrained_sovits_name - path_sovits_v3 = pretrained_sovits_name["v3"] path_sovits_v4 = pretrained_sovits_name["v4"] is_exist_s2gv3 = os.path.exists(path_sovits_v3) @@ -390,8 +374,8 @@ def get_sovits_weights(sovits_path): is_exist = is_exist_s2gv3 if model_version == "v3" else is_exist_s2gv4 path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4 - if if_lora_v3 == True and is_exist == False: - logger.info("SoVITS %s 底模缺失,无法加载相应 LoRA 权重" % model_version) + if if_lora_v3 is True and is_exist is False: + logger.info(f"SoVITS {model_version} 底模缺失,无法加载相应 LoRA 权重") dict_s2 = load_sovits_new(sovits_path) hps = dict_s2["config"] @@ -408,10 +392,10 @@ def get_sovits_weights(sovits_path): if model_version not in {"v3", "v4"}: if "Pro" in model_version: hps.model.version = model_version - if sv_cn_model == None: + if sv_cn_model is None: init_sv_cn() - vq_model = 
SynthesizerTrn( + vq_model: SynthesizerTrn | SynthesizerTrnV3 = SynthesizerTrn( hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, @@ -433,16 +417,14 @@ def get_sovits_weights(sovits_path): model_version = hps.model.version logger.info(f"模型版本: {model_version}") if "pretrained" not in sovits_path: - try: + if hasattr(vq_model, "enc_q"): del vq_model.enc_q - except: - pass - if is_half == True: + if is_half is True: vq_model = vq_model.half().to(device) else: vq_model = vq_model.to(device) vq_model.eval() - if if_lora_v3 == False: + if if_lora_v3 is False: vq_model.load_state_dict(dict_s2["weight"], strict=False) else: path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4 @@ -457,7 +439,6 @@ def get_sovits_weights(sovits_path): vq_model.cfm = get_peft_model(vq_model.cfm, lora_config) vq_model.load_state_dict(dict_s2["weight"], strict=False) vq_model.cfm = vq_model.cfm.merge_and_unload() - # torch.save(vq_model.state_dict(),"merge_win.pth") vq_model.eval() sovits = Sovits(vq_model, hps) @@ -480,7 +461,7 @@ def get_gpt_weights(gpt_path): max_sec = config["data"]["max_sec"] t2s_model = Text2SemanticLightningModule(config, "****", is_train=False) t2s_model.load_state_dict(dict_s1["weight"]) - if is_half == True: + if is_half is True: t2s_model = t2s_model.half() t2s_model = t2s_model.to(device) t2s_model.eval() @@ -506,7 +487,7 @@ def get_bert_feature(text, word2ph): with torch.no_grad(): inputs = tokenizer(text, return_tensors="pt") for i in inputs: - inputs[i] = inputs[i].to(device) #####输入是long不用管精度问题,精度随bert_model + inputs[i] = inputs[i].to(device) res = bert_model(**inputs, output_hidden_states=True) res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1] assert len(word2ph) == len(text) @@ -515,7 +496,6 @@ def get_bert_feature(text, word2ph): repeat_feature = res[i].repeat(word2ph[i], 1) phone_level_feature.append(repeat_feature) phone_level_feature = torch.cat(phone_level_feature, dim=0) - # if(is_half==True):phone_level_feature=phone_level_feature.half() return phone_level_feature.T @@ -529,39 +509,36 @@ def clean_text_inf(text, language, version): def get_bert_inf(phones, word2ph, norm_text, language): language = language.replace("all_", "") if language == "zh": - bert = get_bert_feature(norm_text, word2ph).to(device) # .to(dtype) + bert = get_bert_feature(norm_text, word2ph).to(device) else: bert = torch.zeros( (1024, len(phones)), - dtype=torch.float16 if is_half == True else torch.float32, + dtype=torch.float16 if is_half is True else torch.float32, ).to(device) return bert -from text import chinese - - def get_phones_and_bert(text, language, version, final=False): - text = re.sub(r' {2,}', ' ', text) + text = re.sub(r" {2,}", " ", text) textlist = [] langlist = [] if language == "all_zh": - for tmp in LangSegmenter.getTexts(text,"zh"): + for tmp in LangSegmenter.getTexts(text, "zh"): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "all_yue": - for tmp in LangSegmenter.getTexts(text,"zh"): + for tmp in LangSegmenter.getTexts(text, "zh"): if tmp["lang"] == "zh": tmp["lang"] = "yue" langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "all_ja": - for tmp in LangSegmenter.getTexts(text,"ja"): + for tmp in LangSegmenter.getTexts(text, "ja"): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "all_ko": - for tmp in LangSegmenter.getTexts(text,"ko"): + for tmp in LangSegmenter.getTexts(text, "ko"): langlist.append(tmp["lang"]) 
textlist.append(tmp["text"]) elif language == "en": @@ -606,7 +583,7 @@ def get_phones_and_bert(text, language, version, final=False): if not final and len(phones) < 6: return get_phones_and_bert("." + text, language, version, final=True) - return phones, bert.to(torch.float16 if is_half == True else torch.float32), norm_text + return phones, bert.to(torch.float16 if is_half is True else torch.float32), norm_text class DictToAttrRecursive(dict): @@ -639,18 +616,10 @@ class DictToAttrRecursive(dict): def get_spepc(hps, filename, dtype, device, is_v2pro=False): sr1 = int(hps.data.sampling_rate) - audio, sr0 = torchaudio.load(filename) - if sr0 != sr1: - audio = audio.to(device) - if audio.shape[0] == 2: - audio = audio.mean(0).unsqueeze(0) - audio = resample(audio, sr0, sr1, device) - else: - audio = audio.to(device) - if audio.shape[0] == 2: - audio = audio.mean(0).unsqueeze(0) + audio_n, _ = librosa.load(filename, sr=sr1) + audio = torch.from_numpy(audio_n).to(device).unsqueeze(0) - maxx = audio.abs().max() + maxx = float(audio.abs().max()) if maxx > 1: audio /= min(2, maxx) spec = spectrogram_torch( @@ -662,7 +631,7 @@ def get_spepc(hps, filename, dtype, device, is_v2pro=False): center=False, ) spec = spec.to(dtype) - if is_v2pro == True: + if is_v2pro is True: audio = resample(audio, sr1, 16000, device).to(dtype) return spec, audio @@ -700,8 +669,6 @@ def pack_ogg(audio_bytes, data, rate): with sf.SoundFile(audio_bytes, mode="w", samplerate=rate, channels=1, format="ogg") as audio_file: audio_file.write(data) - import threading - # See: https://docs.python.org/3/library/threading.html # The stack size of this thread is at least 32768 # If stack overflow error still occurs, just modify the `stack_size`. @@ -866,13 +833,13 @@ def get_tts_wav( if prompt_text[-1] not in splits: prompt_text += "。" if prompt_language != "en" else "." 
prompt_language, text = prompt_language, text.strip("\n")
-    dtype = torch.float16 if is_half == True else torch.float32
-    zero_wav = np.zeros(int(hps.data.sampling_rate * 0.3), dtype=np.float16 if is_half == True else np.float32)
+    dtype = torch.float16 if is_half is True else torch.float32
+    zero_wav = np.zeros(int(hps.data.sampling_rate * 0.3), dtype=np.float16 if is_half is True else np.float32)
     with torch.no_grad():
         wav16k, sr = librosa.load(ref_wav_path, sr=16000)
         wav16k = torch.from_numpy(wav16k)
         zero_wav_torch = torch.from_numpy(zero_wav)
-        if is_half == True:
+        if is_half is True:
             wav16k = wav16k.half().to(device)
             zero_wav_torch = zero_wav_torch.half().to(device)
         else:
@@ -889,7 +856,7 @@ def get_tts_wav(
         refers = []
         if is_v2pro:
             sv_emb = []
-            if sv_cn_model == None:
+            if sv_cn_model is None:
                 init_sv_cn()
         if inp_refs:
             for path in inp_refs:
@@ -897,14 +864,14 @@ def get_tts_wav(
                     refer, audio_tensor = get_spepc(hps, path.name, dtype, device, is_v2pro)
                     refers.append(refer)
                     if is_v2pro:
-                        sv_emb.append(sv_cn_model.compute_embedding3(audio_tensor))
+                        sv_emb.append(sv_cn_model.compute_embedding(audio_tensor))
                 except Exception as e:
                     logger.error(e)
         if len(refers) == 0:
             refers, audio_tensor = get_spepc(hps, ref_wav_path, dtype, device, is_v2pro)
             refers = [refers]
             if is_v2pro:
-                sv_emb = [sv_cn_model.compute_embedding3(audio_tensor)]
+                sv_emb = [sv_cn_model.compute_embedding(audio_tensor)]
     else:
         refer, audio_tensor = get_spepc(hps, ref_wav_path, dtype, device)

@@ -974,12 +941,12 @@ def get_tts_wav(
             phoneme_ids1 = torch.LongTensor(phones2).to(device).unsqueeze(0)

             fea_ref, ge = vq_model.decode_encp(prompt.unsqueeze(0), phoneme_ids0, refer)
-            ref_audio, sr = torchaudio.load(ref_wav_path)
+            tgt_sr = 24000 if version == "v3" else 32000
+            ref_audio, sr = torch.from_numpy(librosa.load(ref_wav_path, sr=tgt_sr)[0]).unsqueeze(0), tgt_sr  # librosa already resampled to tgt_sr; keep sr consistent for the check below
             ref_audio = ref_audio.to(device).float()
             if ref_audio.shape[0] == 2:
                 ref_audio = ref_audio.mean(0).unsqueeze(0)

-            tgt_sr = 24000 if version == "v3" else 32000
             if sr != tgt_sr:
                 ref_audio = resample(ref_audio, sr, tgt_sr, device)
             mel2 = mel_fn(ref_audio) if version == "v3" else mel_fn_v4(ref_audio)
@@ -1014,10 +981,10 @@ def get_tts_wav(
             cfm_res = torch.cat(cfm_resss, 2)
             cfm_res = denorm_spec(cfm_res)
             if version == "v3":
-                if bigvgan_model == None:
+                if bigvgan_model is None:
                     init_bigvgan()
             else:  # v4
-                if hifigan_model == None:
+                if hifigan_model is None:
                     init_hifigan()
             vocoder_model = bigvgan_model if version == "v3" else hifigan_model
             with torch.inference_mode():
@@ -1128,7 +1095,7 @@ def handle(
     if not default_refer.is_ready():
         return JSONResponse({"code": 400, "message": "未指定参考音频且接口无预设"}, status_code=400)

-    if cut_punc == None:
+    if cut_punc is None:
         text = cut_text(text, default_cut_punc)
     else:
         text = cut_text(text, cut_punc)
diff --git a/api_v2.py b/api_v2.py
index 5947df53..c84e1e58 100644
--- a/api_v2.py
+++ b/api_v2.py
@@ -98,30 +98,27 @@ RESP:
 """

+import argparse
 import os
+import signal
+import subprocess
 import sys
 import traceback
+import wave
+from io import BytesIO
 from typing import Generator

-now_dir = os.getcwd()
-sys.path.append(now_dir)
-sys.path.append("%s/GPT_SoVITS" % (now_dir))
-
-import argparse
-import subprocess
-import wave
-import signal
 import numpy as np
 import soundfile as sf
-from fastapi import FastAPI, Response
-from fastapi.responses import StreamingResponse, JSONResponse
 import uvicorn
-from io import BytesIO
-from tools.i18n.i18n import I18nAuto
-from GPT_SoVITS.TTS_infer_pack.TTS import TTS, TTS_Config
-from 
GPT_SoVITS.TTS_infer_pack.text_segmentation_method import get_method_names as get_cut_method_names +from fastapi import FastAPI, Response +from fastapi.responses import JSONResponse, StreamingResponse from pydantic import BaseModel +from GPT_SoVITS.TTS_infer_pack.text_segmentation_method import get_method_names as get_cut_method_names +from GPT_SoVITS.TTS_infer_pack.TTS import TTS, TTS_Config +from tools.i18n.i18n import I18nAuto + # print(sys.path) i18n = I18nAuto() cut_method_names = get_cut_method_names() diff --git a/config.py b/config.py index fdc11c0a..c2e5169a 100644 --- a/config.py +++ b/config.py @@ -1,18 +1,16 @@ import os import re +import subprocess import sys +import cpuinfo +import gradio as gr import torch -from tools.i18n.i18n import I18nAuto - -i18n = I18nAuto(language=os.environ.get("language", "Auto")) - - pretrained_sovits_name = { "v1": "GPT_SoVITS/pretrained_models/s2G488k.pth", "v2": "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth", - "v3": "GPT_SoVITS/pretrained_models/s2Gv3.pth", ###v3v4还要检查vocoder,算了。。。 + "v3": "GPT_SoVITS/pretrained_models/s2Gv3.pth", "v4": "GPT_SoVITS/pretrained_models/gsv-v4-pretrained/s2Gv4.pth", "v2Pro": "GPT_SoVITS/pretrained_models/v2Pro/s2Gv2Pro.pth", "v2ProPlus": "GPT_SoVITS/pretrained_models/v2Pro/s2Gv2ProPlus.pth", @@ -27,19 +25,13 @@ pretrained_gpt_name = { "v2ProPlus": "GPT_SoVITS/pretrained_models/s1v3.ckpt", } name2sovits_path = { - # i18n("不训练直接推v1底模!"): "GPT_SoVITS/pretrained_models/s2G488k.pth", - i18n("不训练直接推v2底模!"): "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth", - # i18n("不训练直接推v3底模!"): "GPT_SoVITS/pretrained_models/s2Gv3.pth", - # i18n("不训练直接推v4底模!"): "GPT_SoVITS/pretrained_models/gsv-v4-pretrained/s2Gv4.pth", - i18n("不训练直接推v2Pro底模!"): "GPT_SoVITS/pretrained_models/v2Pro/s2Gv2Pro.pth", - i18n("不训练直接推v2ProPlus底模!"): "GPT_SoVITS/pretrained_models/v2Pro/s2Gv2ProPlus.pth", + "不训练直接推v2底模!": "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth", + "不训练直接推v2Pro底模!": "GPT_SoVITS/pretrained_models/v2Pro/s2Gv2Pro.pth", + "不训练直接推v2ProPlus底模!": "GPT_SoVITS/pretrained_models/v2Pro/s2Gv2ProPlus.pth", } name2gpt_path = { - # i18n("不训练直接推v1底模!"):"GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt", - i18n( - "不训练直接推v2底模!" 
- ): "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", - i18n("不训练直接推v3底模!"): "GPT_SoVITS/pretrained_models/s1v3.ckpt", + "不训练直接推v2底模!": "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", + "不训练直接推v3底模!": "GPT_SoVITS/pretrained_models/s1v3.ckpt", } SoVITS_weight_root = [ "SoVITS_weights", @@ -77,48 +69,47 @@ GPT_weight_version2root = { def custom_sort_key(s): # 使用正则表达式提取字符串中的数字部分和非数字部分 - parts = re.split("(\d+)", s) + parts = re.split(r"(\d+)", s[-1]) # 将数字部分转换为整数,非数字部分保持不变 parts = [int(part) if part.isdigit() else part for part in parts] return parts -def get_weights_names(): - SoVITS_names = [] - for key in name2sovits_path: - if os.path.exists(name2sovits_path[key]): - SoVITS_names.append(key) +def get_weights_names(i18n): + SoVITS_names: list[tuple[str, str]] = [] + for key, value in name2sovits_path.items(): + if os.path.exists(value): + SoVITS_names.append((i18n(key), value)) for path in SoVITS_weight_root: if not os.path.exists(path): continue for name in os.listdir(path): if name.endswith(".pth"): - SoVITS_names.append("%s/%s" % (path, name)) - if not SoVITS_names: - SoVITS_names = [""] - GPT_names = [] - for key in name2gpt_path: - if os.path.exists(name2gpt_path[key]): - GPT_names.append(key) + SoVITS_names.append((f"{path}/{name}", f"{path}/{name}")) + GPT_names: list[tuple[str, str]] = [] + for key, value in name2gpt_path.items(): + if os.path.exists(value): + GPT_names.append((i18n(key), value)) for path in GPT_weight_root: if not os.path.exists(path): continue for name in os.listdir(path): if name.endswith(".ckpt"): - GPT_names.append("%s/%s" % (path, name)) + GPT_names.append((f"{path}/{name}", f"{path}/{name}")) + SoVITS_names = sorted(SoVITS_names, key=custom_sort_key) GPT_names = sorted(GPT_names, key=custom_sort_key) - if not GPT_names: - GPT_names = [""] + + for key, value in pretrained_sovits_name.items(): + if key in {"v3", "v4", "v1"}: + SoVITS_names.append((value, value)) + GPT_names.append((pretrained_gpt_name["v1"], pretrained_gpt_name["v1"])) return SoVITS_names, GPT_names -def change_choices(): - SoVITS_names, GPT_names = get_weights_names() - return {"choices": SoVITS_names, "__type__": "update"}, { - "choices": GPT_names, - "__type__": "update", - } +def change_choices(i18n): + SoVITS_names, GPT_names = get_weights_names(i18n) + return gr.update(choices=SoVITS_names), gr.update(choices=GPT_names) # 推理用的指定模型 @@ -145,10 +136,33 @@ webui_port_subfix = 9871 api_port = 9880 +def get_apple_silicon_name(): + result = subprocess.run(["sysctl", "-n", "machdep.cpu.brand_string"], capture_output=True, text=True) + return result.stdout.strip() + + +def get_dtype(idx: int): + if not torch.cuda.is_available(): + return torch.float32 + capability = torch.cuda.get_device_capability(idx) + major, minor = capability + sm_version = major + minor / 10.0 + if sm_version > 6.1: + return torch.float16 + return torch.float32 + + # Thanks to the contribution of @Karasukaigan and @XXXXRT666 def get_device_dtype_sm(idx: int) -> tuple[torch.device, torch.dtype, float, float]: - cpu = torch.device("cpu") + cpu = torch.device("cpu:0") cuda = torch.device(f"cuda:{idx}") + if torch.mps.is_available(): + return ( + torch.device("mps:0"), + torch.float16, + 100, + os.sysconf("SC_PHYS_PAGES") * os.sysconf("SC_PAGE_SIZE") / (1024**3), + ) if not torch.cuda.is_available(): return cpu, torch.float32, 0.0, 0.0 device_idx = idx @@ -161,7 +175,7 @@ def get_device_dtype_sm(idx: int) -> 
tuple[torch.device, torch.dtype, float, flo is_16_series = bool(re.search(r"16\d{2}", name)) and sm_version == 7.5 if mem_gb < 4 or sm_version < 5.3: return cpu, torch.float32, 0.0, 0.0 - if sm_version == 6.1 or is_16_series == True: + if sm_version == 6.1 or is_16_series is True: return cuda, torch.float32, sm_version, mem_gb if sm_version > 6.1: return cuda, torch.float16, sm_version, mem_gb @@ -172,7 +186,7 @@ IS_GPU = True GPU_INFOS: list[str] = [] GPU_INDEX: set[int] = set() GPU_COUNT = torch.cuda.device_count() -CPU_INFO: str = "0\tCPU " + i18n("CPU训练,较慢") +CPU_INFO: str = f"0\t{cpuinfo.get_cpu_info()['brand_raw']}" tmp: list[tuple[torch.device, torch.dtype, float, float]] = [] memset: set[float] = set() @@ -182,16 +196,22 @@ for i in range(max(GPU_COUNT, 1)): for j in tmp: device = j[0] memset.add(j[3]) - if device.type != "cpu": + if device.type == "cuda": GPU_INFOS.append(f"{device.index}\t{torch.cuda.get_device_name(device.index)}") GPU_INDEX.add(device.index) + elif device.type == "mps": + GPU_INFOS.append(f"0\t{get_apple_silicon_name()}") + GPU_INDEX.add(0) if not GPU_INFOS: IS_GPU = False GPU_INFOS.append(CPU_INFO) GPU_INDEX.add(0) +if torch.mps.is_available(): + infer_device = torch.device("mps:0") +else: + infer_device = max(tmp, key=lambda x: (x[2], x[3]))[0] -infer_device = max(tmp, key=lambda x: (x[2], x[3]))[0] is_half = any(dtype == torch.float16 for _, dtype, _, _ in tmp) @@ -216,3 +236,22 @@ class Config: self.webui_port_subfix = webui_port_subfix self.api_port = api_port + + +def get_implement(device: torch.device): + if torch.cuda.is_available(): + idx = device.index + capability = torch.cuda.get_device_capability(idx) + major, minor = capability + sm_version = major + minor / 10.0 + if sm_version >= 7.5: + return "flash_attn" + else: + if sys.platform == "linux": + return "sage_attn" + else: + return "naive" + elif torch.mps.is_available(): + return "mlx" + else: + return "naive" diff --git a/docs/cn/README.md b/docs/cn/README.md index 793734d8..afc18369 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -19,7 +19,6 @@ [![Change Log](https://img.shields.io/badge/Change%20Log-View%20Updates-blue?style=for-the-badge&logo=googledocs&logoColor=white)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/docs/en/Changelog_EN.md) [![License](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge&logo=opensourceinitiative)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE) - [**English**](../../README.md) | **中文简体** | [**日本語**](../ja/README.md) | [**한국어**](../ko/README.md) | [**Türkçe**](../tr/README.md) @@ -42,6 +41,15 @@ +## 推理速度 + +| Device | RTF | Batch Size | Backend | +| ----------- | ----- | ---------- | --------------------------- | +| RTX 5090 | 0.05 | 1 | Flash Attn Varlen CUDAGraph | +| Apple M4 | 0.21 | 1 | MLX Quantized Affined | +| RTX 4090 | 0.014 | 24 | Flash Attn Varlen CUDAGraph | +| RTX 4060 Ti | 0.028 | 28 | Flash Attn Varlen CUDAGraph | + **用户手册: [简体中文](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e) | [English](https://rentry.co/GPT-SoVITS-guide#/)** ## 安装 @@ -50,15 +58,13 @@ ### 测试通过的环境 -| Python Version | PyTorch Version | Device | -| -------------- | ---------------- | ------------- | -| Python 3.10 | PyTorch 2.5.1 | CUDA 12.4 | -| Python 3.11 | PyTorch 2.5.1 | CUDA 12.4 | -| Python 3.11 | PyTorch 2.7.0 | CUDA 12.8 | -| Python 3.9 | PyTorch 2.8.0dev | CUDA 12.8 | -| Python 3.9 | PyTorch 2.5.1 | Apple silicon | -| Python 3.11 | PyTorch 2.7.0 | Apple silicon | -| Python 3.9 | PyTorch 2.2.2 | CPU | +| Python 
Version | PyTorch Version | Device | +| -------------- | --------------- | ------------- | +| Python 3.10 | PyTorch 2.5.1 | CUDA 12.4 | +| Python 3.11 | PyTorch 2.5.1 | CUDA 12.4 | +| Python 3.11 | PyTorch 2.7.0 | CUDA 12.8 | +| Python 3.11 | PyTorch 2.8.0 | Apple Silicon | +| Python 3.10 | PyTorch 2.8.0 | CPU | ### Windows @@ -89,7 +95,7 @@ bash install.sh --device --source --source [--download-uvr5] +bash install.sh --device --source [--download-uvr5] ``` ### 手动安装 @@ -110,13 +116,13 @@ pip install -r requirements.txt ```bash conda activate GPTSoVits -conda install ffmpeg +conda install ffmpeg=7 -c conda-forge ``` ##### Ubuntu/Debian 用户 ```bash -sudo apt install ffmpeg +sudo apt install ffmpeg=7 sudo apt install libsox-dev ``` @@ -206,7 +212,7 @@ docker exec -it ``` -若想使用 V1,则 - -```bash -python webui.py v1 -``` - -或者在 webUI 内动态切换 - ### 微调 #### 现已支持自动填充路径 @@ -267,7 +265,7 @@ python webui.py v1 #### 其他 ```bash -python GPT_SoVITS/inference_webui.py +PYTHONPATH=. python GPT_SoVITS/inference_webui.py -b -p ``` 或者 @@ -320,7 +318,7 @@ python webui.py 3. 从[huggingface](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)下载这些 v3 新增预训练模型 (s1v3.ckpt, s2Gv3.pth and models--nvidia--bigvgan_v2_24khz_100band_256x folder)将他们放到`GPT_SoVITS/pretrained_models`目录下 - 如果想用音频超分功能缓解 v3 模型生成 24k 音频觉得闷的问题, 需要下载额外的模型参数, 参考[how to download](../../tools/AP_BWE_main/24kto48k/readme.txt) + 如果想用音频超分功能缓解 v3 模型生成 24k 音频觉得闷的问题, 需要下载额外的模型参数, 参考[how to download](../../tools/AP_BWE/24kto48k/readme.txt) ## V4 更新说明 @@ -382,11 +380,6 @@ python webui.py python tools/uvr5/webui.py "" ``` - - 这是使用命令行完成数据集的音频切分的方式 ```bash diff --git a/docs/ja/README.md b/docs/ja/README.md index 1ee9abb4..a37b817d 100644 --- a/docs/ja/README.md +++ b/docs/ja/README.md @@ -19,14 +19,13 @@ [![Change Log](https://img.shields.io/badge/Change%20Log-View%20Updates-blue?style=for-the-badge&logo=googledocs&logoColor=white)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/docs/en/Changelog_EN.md) [![License](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge&logo=opensourceinitiative)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE) - [**English**](../../README.md) | [**中文简体**](../cn/README.md) | **日本語** | [**한국어**](../ko/README.md) | [**Türkçe**](../tr/README.md) --- -## 機能: +## 機能 1. **Zero-Shot TTS:** たった 5 秒間の音声サンプルで、即座にテキストからその音声に変換できます. 
@@ -40,7 +39,16 @@ 声の事前学習無しかつ Few-Shot でトレーニングされたモデルのデモ: -https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb + + +## 推論速度 + +| Device | RTF | Batch Size | Backend | +| ----------- | ----- | ---------- | --------------------------- | +| RTX 5090 | 0.05 | 1 | Flash Attn Varlen CUDAGraph | +| Apple M4 | 0.21 | 1 | MLX Quantized Affined | +| RTX 4090 | 0.014 | 24 | Flash Attn Varlen CUDAGraph | +| RTX 4060 Ti | 0.028 | 28 | Flash Attn Varlen CUDAGraph | **ユーザーマニュアル: [简体中文](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e) | [English](https://rentry.co/GPT-SoVITS-guide#/)** @@ -48,15 +56,13 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350- ### テスト済みの環境 -| Python Version | PyTorch Version | Device | -| -------------- | ---------------- | ------------- | -| Python 3.10 | PyTorch 2.5.1 | CUDA 12.4 | -| Python 3.11 | PyTorch 2.5.1 | CUDA 12.4 | -| Python 3.11 | PyTorch 2.7.0 | CUDA 12.8 | -| Python 3.9 | PyTorch 2.8.0dev | CUDA 12.8 | -| Python 3.9 | PyTorch 2.5.1 | Apple silicon | -| Python 3.11 | PyTorch 2.7.0 | Apple silicon | -| Python 3.9 | PyTorch 2.2.2 | CPU | +| Python Version | PyTorch Version | Device | +| -------------- | --------------- | ------------- | +| Python 3.10 | PyTorch 2.5.1 | CUDA 12.4 | +| Python 3.11 | PyTorch 2.5.1 | CUDA 12.4 | +| Python 3.11 | PyTorch 2.7.0 | CUDA 12.8 | +| Python 3.11 | PyTorch 2.8.0 | Apple Silicon | +| Python 3.10 | PyTorch 2.8.0 | CPU | ### Windows @@ -79,7 +85,7 @@ bash install.sh --device --source --source [--download-uvr5] +bash install.sh --device --source [--download-uvr5] ``` ### 手動インストール @@ -100,13 +106,13 @@ pip install -r requirements.txt ```bash conda activate GPTSoVits -conda install ffmpeg +conda install ffmpeg=7 -c conda-forge ``` ##### Ubuntu/Debian ユーザー ```bash -sudo apt install ffmpeg +sudo apt install ffmpeg=7 sudo apt install libsox-dev ``` @@ -194,7 +200,7 @@ docker exec -it ``` -V1 に切り替えたい場合は - -```bash -python webui.py v1 <言語(オプション)> -``` - -または WebUI で手動でバージョンを切り替えてください. - ### 微調整 #### パス自動補完のサポート @@ -253,13 +251,13 @@ python webui.py v1 <言語(オプション)> #### その他 ```bash -python GPT_SoVITS/inference_webui.py <言語(オプション)> +PYTHONPATH=. python GPT_SoVITS/inference_webui.py -b -p ``` または ```bash -python webui.py +PYTHONPATH=. python webui.py ``` その後、`1-GPT-SoVITS-TTS/1C-inference`で推論 webui を開きます. @@ -306,7 +304,7 @@ v2 環境から v3 を使用する方法: 3. v3 の事前学習済みモデル (s1v3.ckpt、s2Gv3.pth、models--nvidia--bigvgan_v2_24khz_100band_256x フォルダ) を[Huggingface](https://huggingface.co/lj1995/GPT-SoVITS/tree/main) からダウンロードし、GPT_SoVITS/pretrained_models フォルダに配置します. - 追加: 音声超解像モデルについては、[ダウンロード方法](../../tools/AP_BWE_main/24kto48k/readme.txt)を参照してください. + 追加: 音声超解像モデルについては、[ダウンロード方法](../../tools/AP_BWE/24kto48k/readme.txt)を参照してください. ## V4 リリースノート diff --git a/docs/ko/README.md b/docs/ko/README.md index 9ff32f83..afd85d4d 100644 --- a/docs/ko/README.md +++ b/docs/ko/README.md @@ -19,14 +19,13 @@ [![Change Log](https://img.shields.io/badge/Change%20Log-View%20Updates-blue?style=for-the-badge&logo=googledocs&logoColor=white)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/docs/en/Changelog_EN.md) [![License](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge&logo=opensourceinitiative)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE) - [**English**](../../README.md) | [**中文简体**](../cn/README.md) | [**日本語**](../ja/README.md) | **한국어** | [**Türkçe**](../tr/README.md) --- -## 기능: +## 기능 1. **제로샷 텍스트 음성 변환 (TTS):** 5초의 음성 샘플을 입력하면 즉시 텍스트를 음성으로 변환할 수 있습니다. 
@@ -40,7 +39,16 @@ 보지 못한 발화자의 퓨샷(few-shot) 파인튜닝 데모: -https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb + + +## 추론 속도 + +| Device | RTF | Batch Size | Backend | +| ----------- | ----- | ---------- | --------------------------- | +| RTX 5090 | 0.05 | 1 | Flash Attn Varlen CUDAGraph | +| Apple M4 | 0.21 | 1 | MLX Quantized Affined | +| RTX 4090 | 0.014 | 24 | Flash Attn Varlen CUDAGraph | +| RTX 4060 Ti | 0.028 | 28 | Flash Attn Varlen CUDAGraph | **사용자 설명서: [简体中文](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e) | [English](https://rentry.co/GPT-SoVITS-guide#/)** @@ -54,8 +62,8 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350- | Python 3.11 | PyTorch 2.5.1 | CUDA 12.4 | | Python 3.11 | PyTorch 2.7.0 | CUDA 12.8 | | Python 3.9 | PyTorch 2.8.0dev | CUDA 12.8 | -| Python 3.9 | PyTorch 2.5.1 | Apple silicon | -| Python 3.11 | PyTorch 2.7.0 | Apple silicon | +| Python 3.9 | PyTorch 2.5.1 | Apple Silicon | +| Python 3.11 | PyTorch 2.7.0 | Apple Silicon | | Python 3.9 | PyTorch 2.2.2 | CPU | ### Windows @@ -85,7 +93,7 @@ bash install.sh --device --source --source [--download-uvr5] +bash install.sh --device --source [--download-uvr5] ``` ### 수동 설치 @@ -106,13 +114,13 @@ pip install -r requirements.txt ```bash conda activate GPTSoVits -conda install ffmpeg +conda install ffmpeg=7 -c conda-forge ``` ##### Ubuntu/Debian 사용자 ```bash -sudo apt install ffmpeg +sudo apt install ffmpeg=7 sudo apt install libsox-dev ``` @@ -200,7 +208,7 @@ docker exec -it ``` -V1으로 전환하려면, - -```bash -python webui.py v1 <언어(옵션)> -``` - -또는 WebUI에서 수동으로 버전을 전환하십시오. - ### 미세 조정 #### 경로 자동 채우기가 지원됩니다 @@ -259,7 +259,7 @@ python webui.py v1 <언어(옵션)> #### 기타 ```bash -python GPT_SoVITS/inference_webui.py <언어(옵션)> +PYTHONPATH=. python GPT_SoVITS/inference_webui.py -b -p ``` 또는 @@ -312,7 +312,7 @@ v2 환경에서 v3 사용하기: 3. v3 사전 훈련된 모델(s1v3.ckpt, s2Gv3.pth, 그리고 models--nvidia--bigvgan_v2_24khz_100band_256x 폴더)을 [huggingface](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)에서 다운로드하여 `GPT_SoVITS/pretrained_models` 폴더에 넣습니다. - 추가: 오디오 슈퍼 해상도 모델에 대해서는 [다운로드 방법](../../tools/AP_BWE_main/24kto48k/readme.txt)을 참고하세요. + 추가: 오디오 슈퍼 해상도 모델에 대해서는 [다운로드 방법](../../tools/AP_BWE/24kto48k/readme.txt)을 참고하세요. ## V4 릴리스 노트 diff --git a/docs/tr/README.md b/docs/tr/README.md index b80c764b..51a090cd 100644 --- a/docs/tr/README.md +++ b/docs/tr/README.md @@ -25,7 +25,7 @@ Güçlü Birkaç Örnekli Ses Dönüştürme ve Metinden Konuşmaya Web Arayüz --- -## Özellikler: +## Özellikler 1. **Sıfır Örnekli Metinden Konuşmaya:** 5 saniyelik bir vokal örneği girin ve anında metinden konuşmaya dönüşümünü deneyimleyin. 
@@ -39,7 +39,16 @@ Güçlü Birkaç Örnekli Ses Dönüştürme ve Metinden Konuşmaya Web Arayüz Görünmeyen konuşmacılar birkaç örnekli ince ayar demosu: -https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb + + +## çıkarım hızı + +| Device | RTF | Batch Size | Backend | +| ----------- | ----- | ---------- | --------------------------- | +| RTX 5090 | 0.05 | 1 | Flash Attn Varlen CUDAGraph | +| Apple M4 | 0.21 | 1 | MLX Quantized Affined | +| RTX 4090 | 0.014 | 24 | Flash Attn Varlen CUDAGraph | +| RTX 4060 Ti | 0.028 | 28 | Flash Attn Varlen CUDAGraph | **Kullanıcı Kılavuzu: [简体中文](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e) | [English](https://rentry.co/GPT-SoVITS-guide#/)** @@ -47,15 +56,13 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350- ### Test Edilmiş Ortamlar -| Python Version | PyTorch Version | Device | -| -------------- | ---------------- | ------------- | -| Python 3.10 | PyTorch 2.5.1 | CUDA 12.4 | -| Python 3.11 | PyTorch 2.5.1 | CUDA 12.4 | -| Python 3.11 | PyTorch 2.7.0 | CUDA 12.8 | -| Python 3.9 | PyTorch 2.8.0dev | CUDA 12.8 | -| Python 3.9 | PyTorch 2.5.1 | Apple silicon | -| Python 3.11 | PyTorch 2.7.0 | Apple silicon | -| Python 3.9 | PyTorch 2.2.2 | CPU | +| Python Version | PyTorch Version | Device | +| -------------- | --------------- | ------------- | +| Python 3.10 | PyTorch 2.5.1 | CUDA 12.4 | +| Python 3.11 | PyTorch 2.5.1 | CUDA 12.4 | +| Python 3.11 | PyTorch 2.7.0 | CUDA 12.8 | +| Python 3.11 | PyTorch 2.8.0 | Apple Silicon | +| Python 3.10 | PyTorch 2.8.0 | CPU | ### Windows @@ -84,7 +91,7 @@ Aşağıdaki komutları çalıştırarak programı yükleyin: ```bash conda create -n GPTSoVits python=3.10 conda activate GPTSoVits -bash install.sh --device --source [--download-uvr5] +bash install.sh --device --source [--download-uvr5] ``` ### El ile Yükleme @@ -105,13 +112,13 @@ pip install -r requirements.txt ```bash conda activate GPTSoVits -conda install ffmpeg +conda install ffmpeg=7 -c conda-forge ``` ##### Ubuntu/Debian Kullanıcıları ```bash -sudo apt install ffmpeg +sudo apt install ffmpeg=7 sudo apt install libsox-dev ``` @@ -232,14 +239,6 @@ V1'e geçmek istiyorsanız, `go-webui-v1.bat` dosyasına çift tıklayın veya ` python webui.py ``` -V1'e geçmek istiyorsanız, - -```bash -python webui.py v1 -``` - -veya WebUI'de manuel olarak sürüm değiştirin. - ### İnce Ayar #### Yol Otomatik Doldurma artık destekleniyor @@ -259,13 +258,13 @@ veya WebUI'de manuel olarak sürüm değiştirin. #### Diğerleri -```bash -python GPT_SoVITS/inference_webui.py +```text +PYTHONPATH=. python GPT_SoVITS/inference_webui.py -b -p ``` VEYA -```bash +```text python webui.py ``` @@ -313,7 +312,7 @@ V2 ortamında V3 kullanımı: 3. [huggingface](https://huggingface.co/lj1995/GPT-SoVITS/tree/main) üzerinden v3 önceden eğitilmiş modellerini (s1v3.ckpt, s2Gv3.pth ve models--nvidia--bigvgan_v2_24khz_100band_256x klasörünü) indirin ve `GPT_SoVITS/pretrained_models` dizinine yerleştirin. - ek: Ses Süper Çözünürlük modeli için [nasıl indirileceği](../../tools/AP_BWE_main/24kto48k/readme.txt) hakkında bilgi alabilirsiniz. + ek: Ses Süper Çözünürlük modeli için [nasıl indirileceği](../../tools/AP_BWE/24kto48k/readme.txt) hakkında bilgi alabilirsiniz. 
## V4 Sürüm Notları diff --git a/go-webui.bat b/go-webui.bat index c1c81089..6918cc5f 100644 --- a/go-webui.bat +++ b/go-webui.bat @@ -1,6 +1,7 @@ set "SCRIPT_DIR=%~dp0" set "SCRIPT_DIR=%SCRIPT_DIR:~0,-1%" cd /d "%SCRIPT_DIR%" -set "PATH=%SCRIPT_DIR%\runtime;%PATH%" +set "PATH=%SCRIPT_DIR%\runtime" +set "PYTHONPATH=%SCRIPT_DIR%" runtime\python.exe -I webui.py zh_CN pause diff --git a/go-webui.ps1 b/go-webui.ps1 index 09103425..42d336e3 100644 --- a/go-webui.ps1 +++ b/go-webui.ps1 @@ -2,6 +2,7 @@ $ErrorActionPreference = "SilentlyContinue" chcp 65001 Set-Location $PSScriptRoot $runtimePath = Join-Path $PSScriptRoot "runtime" -$env:PATH = "$runtimePath;$env:PATH" +$env:PATH = "$runtimePath" +$env:PYTHONPATH = "$runtimePath" & "$runtimePath\python.exe" -I "$PSScriptRoot\webui.py" zh_CN pause diff --git a/gpt-sovits_kaggle.ipynb b/gpt-sovits_kaggle.ipynb deleted file mode 100644 index 764c23c4..00000000 --- a/gpt-sovits_kaggle.ipynb +++ /dev/null @@ -1,243 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "9fd922fb", - "metadata": {}, - "source": [ - "# Deprecated" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "45857cb2", - "metadata": { - "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19", - "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5", - "execution": { - "iopub.execute_input": "2024-02-18T14:43:46.735480Z", - "iopub.status.busy": "2024-02-18T14:43:46.735183Z", - "iopub.status.idle": "2024-02-18T14:48:10.724175Z", - "shell.execute_reply": "2024-02-18T14:48:10.723059Z" - }, - "papermill": { - "duration": 263.994935, - "end_time": "2024-02-18T14:48:10.726613", - "exception": false, - "start_time": "2024-02-18T14:43:46.731678", - "status": "completed" - }, - "tags": [] - }, - "outputs": [], - "source": [ - "!git clone https://github.com/RVC-Boss/GPT-SoVITS.git\n", - "%cd GPT-SoVITS\n", - "!apt-get update && apt-get install -y --no-install-recommends tzdata ffmpeg libsox-dev parallel aria2 git git-lfs && git lfs install\n", - "!pip install -r requirements.txt\n", - "!pip install -r extra-req.txt --no-deps" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b9d346b4", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-18T14:48:10.815802Z", - "iopub.status.busy": "2024-02-18T14:48:10.814899Z", - "iopub.status.idle": "2024-02-18T14:50:31.253276Z", - "shell.execute_reply": "2024-02-18T14:50:31.252024Z" - }, - "papermill": { - "duration": 140.484893, - "end_time": "2024-02-18T14:50:31.255720", - "exception": false, - "start_time": "2024-02-18T14:48:10.770827", - "status": "completed" - }, - "tags": [] - }, - "outputs": [], - "source": [ - "# @title Download pretrained models 下载预训练模型\n", - "!mkdir -p /kaggle/working/GPT-SoVITS/GPT_SoVITS/pretrained_models\n", - "!mkdir -p /kaggle/working/GPT-SoVITS/tools/asr/models\n", - "!mkdir -p /kaggle/working/GPT-SoVITS/tools/uvr5\n", - "%cd /kaggle/working/GPT-SoVITS/GPT_SoVITS/pretrained_models\n", - "!git clone https://huggingface.co/lj1995/GPT-SoVITS\n", - "%cd /kaggle/working/GPT-SoVITS/tools/asr/models\n", - "!git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git\n", - "!git clone https://www.modelscope.cn/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch.git\n", - "!git clone https://www.modelscope.cn/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git\n", - "# # @title UVR5 pretrains 安装uvr5模型\n", - "%cd /kaggle/working/GPT-SoVITS/tools/uvr5\n", - "!git clone https://huggingface.co/Delik/uvr5_weights\n", - "!git 
config core.sparseCheckout true\n", - "!mv /kaggle/working/GPT-SoVITS/GPT_SoVITS/pretrained_models/GPT-SoVITS/* /kaggle/working/GPT-SoVITS/GPT_SoVITS/pretrained_models/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ea94d245", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-18T14:29:01.071549Z", - "iopub.status.busy": "2024-02-18T14:29:01.070592Z", - "iopub.status.idle": "2024-02-18T14:40:45.318368Z", - "shell.execute_reply": "2024-02-18T14:40:45.317130Z", - "shell.execute_reply.started": "2024-02-18T14:29:01.071512Z" - }, - "papermill": { - "duration": null, - "end_time": null, - "exception": false, - "start_time": "2024-02-18T14:50:31.309013", - "status": "running" - }, - "tags": [] - }, - "outputs": [], - "source": [ - "# @title launch WebUI 启动WebUI\n", - "%cd /kaggle/working/GPT-SoVITS/\n", - "!npm install -g localtunnel\n", - "import subprocess\n", - "import threading\n", - "import time\n", - "import socket\n", - "import urllib.request\n", - "\n", - "\n", - "def iframe_thread(port):\n", - " while True:\n", - " time.sleep(0.5)\n", - " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", - " result = sock.connect_ex((\"127.0.0.1\", port))\n", - " if result == 0:\n", - " break\n", - " sock.close()\n", - "\n", - " from colorama import Fore, Style\n", - " print(\n", - " Fore.GREEN + \"\\nIP: \",\n", - " Fore.RED,\n", - " urllib.request.urlopen(\"https://ipv4.icanhazip.com\").read().decode(\"utf8\").strip(\"\\n\"),\n", - " \"\\n\",\n", - " Style.RESET_ALL,\n", - " )\n", - " p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n", - " for line in p.stdout:\n", - " print(line.decode(), end=\"\")\n", - "\n", - "\n", - "threading.Thread(target=iframe_thread, daemon=True, args=(9874,)).start()\n", - "\n", - "!python webui.py" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dda88a6d", - "metadata": { - "execution": { - "iopub.execute_input": "2024-02-18T14:40:56.880608Z", - "iopub.status.busy": "2024-02-18T14:40:56.879879Z" - }, - "papermill": { - "duration": null, - "end_time": null, - "exception": null, - "start_time": null, - "status": "pending" - }, - "tags": [] - }, - "outputs": [], - "source": [ - "# 开启推理页面\n", - "%cd /kaggle/working/GPT-SoVITS/\n", - "!npm install -g localtunnel\n", - "import threading\n", - "\n", - "\n", - "def iframe_thread(port):\n", - " while True:\n", - " time.sleep(0.5)\n", - " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", - " result = sock.connect_ex((\"127.0.0.1\", port))\n", - " if result == 0:\n", - " break\n", - " sock.close()\n", - "\n", - " from colorama import Fore, Style\n", - " print(\n", - " Fore.GREEN + \"\\nIP: \",\n", - " Fore.RED,\n", - " urllib.request.urlopen(\"https://ipv4.icanhazip.com\").read().decode(\"utf8\").strip(\"\\n\"),\n", - " \"\\n\",\n", - " Style.RESET_ALL,\n", - " )\n", - " p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n", - " for line in p.stdout:\n", - " print(line.decode(), end=\"\")\n", - "\n", - "\n", - "threading.Thread(target=iframe_thread, daemon=True, args=(9872,)).start()\n", - "\n", - "!python ./GPT_SoVITS/inference_webui.py" - ] - } - ], - "metadata": { - "kaggle": { - "accelerator": "nvidiaTeslaT4", - "dataSources": [ - { - "datasetId": 4459328, - "sourceId": 7649639, - "sourceType": "datasetVersion" - } - ], - "dockerImageVersionId": 30646, - "isGpuEnabled": true, - "isInternetEnabled": true, - "language": "python", - "sourceType": "notebook" - }, - 
"kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.13" - }, - "papermill": { - "default_parameters": {}, - "duration": null, - "end_time": null, - "environment_variables": {}, - "exception": null, - "input_path": "__notebook__.ipynb", - "output_path": "__notebook__.ipynb", - "parameters": {}, - "start_time": "2024-02-18T14:43:44.011910", - "version": "2.5.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/install.ps1 b/install.ps1 index 9c33ace8..9aaf700f 100644 --- a/install.ps1 +++ b/install.ps1 @@ -40,11 +40,21 @@ function Write-Info($msg) { Write-Host "[INFO]:" -ForegroundColor Green -NoNewline Write-Host " $msg" } +function Write-Warning($msg) { + Write-Host "[Warning]:" -ForegroundColor Yellow -NoNewline + Write-Host " $msg" +} function Write-Success($msg) { Write-Host "[SUCCESS]:" -ForegroundColor Blue -NoNewline Write-Host " $msg" } +python -c "import sys; sys.exit(0 if sys.version_info >= (3,10) else 1)" +if ($LASTEXITCODE -ne 0) { + Write-Error "Python version < 3.10" + exit 1 +} + function Invoke-Conda { param ( @@ -137,7 +147,7 @@ chcp 65001 Set-Location $PSScriptRoot Write-Info "Installing FFmpeg & CMake..." -Invoke-Conda ffmpeg cmake +Invoke-Conda ffmpeg=7 cmake vc14_runtime Write-Success "FFmpeg & CMake Installed" $PretrainedURL = "" @@ -208,12 +218,30 @@ if ($DownloadUVR5) { switch ($Device) { "CU128" { + $cudaLine = nvidia-smi | Select-String "CUDA Version" + $version = ($cudaLine -split "CUDA Version:")[1].Trim() + Write-Info "Maximum CUDA Version Supported By Current Driver: $version" + if ([version](nvidia-smi | Select-String "CUDA Version" | ForEach-Object { ($_ -split "CUDA Version:")[1].Trim() }) -ge [version]"12.8") { + Write-Warning "CUDA 12.8 Is Not Supported By Current Driver" + } Write-Info "Installing PyTorch For CUDA 12.8..." Invoke-Pip torch torchaudio --index-url "https://download.pytorch.org/whl/cu128" + Invoke-Conda cuda-nvcc=12.8 + Invoke-Pip psutil ninja packaging wheel "setuptools>=42" + Invoke-Pip flash-attn -i https://xxxxrt666.github.io/PIP-Index/ --no-build-isolation } "CU126" { + $cudaLine = nvidia-smi | Select-String "CUDA Version" + $version = ($cudaLine -split "CUDA Version:")[1].Trim() + Write-Info "Maximum CUDA Version Supported By Current Driver: $version" + if ([version](nvidia-smi | Select-String "CUDA Version" | ForEach-Object { ($_ -split "CUDA Version:")[1].Trim() }) -ge [version]"12.8") { + Write-Warning "CUDA 12.6 Is Not Supported By Current Driver" + } Write-Info "Installing PyTorch For CUDA 12.6..." Invoke-Pip torch torchaudio --index-url "https://download.pytorch.org/whl/cu126" + Invoke-Conda cuda-nvcc=12.6 + Invoke-Pip psutil ninja packaging wheel "setuptools>=42" + Invoke-Pip flash-attn -i https://xxxxrt666.github.io/PIP-Index/ --no-build-isolation } "CPU" { Write-Info "Installing PyTorch For CPU..." @@ -223,6 +251,7 @@ switch ($Device) { Write-Success "PyTorch Installed" Write-Info "Installing Python Dependencies From requirements.txt..." 
+Invoke-Pip --pre torchcodec --index-url https://download.pytorch.org/whl/nightly/cpu Invoke-Pip -r extra-req.txt --no-deps Invoke-Pip -r requirements.txt Write-Success "Python Dependencies Installed" diff --git a/install.sh b/install.sh index 7d80ec28..96214d41 100644 --- a/install.sh +++ b/install.sh @@ -34,7 +34,7 @@ on_error() { run_conda_quiet() { local output output=$(conda install --yes --quiet -c conda-forge "$@" 2>&1) || { - echo -e "${ERROR} Conda install failed:\n$output" + echo -e "${ERROR}Conda install failed:\n$output" exit 1 } } @@ -42,7 +42,7 @@ run_conda_quiet() { run_pip_quiet() { local output output=$(pip install "$@" 2>&1) || { - echo -e "${ERROR} Pip install failed:\n$output" + echo -e "${ERROR}Pip install failed:\n$output" exit 1 } } @@ -51,7 +51,7 @@ run_wget_quiet() { if wget --tries=25 --wait=5 --read-timeout=40 -q --show-progress "$@" 2>&1; then tput cuu1 && tput el else - echo -e "${ERROR} Wget failed" + echo -e "${ERROR}Wget failed" exit 1 fi } @@ -75,16 +75,21 @@ print_help() { echo "Usage: bash install.sh [OPTIONS]" echo "" echo "Options:" - echo " --device CU126|CU128|ROCM|MPS|CPU Specify the Device (REQUIRED)" + echo " --device CU126|CU128|ROCM|MLX|CPU Specify the Device (REQUIRED)" echo " --source HF|HF-Mirror|ModelScope Specify the model source (REQUIRED)" echo " --download-uvr5 Enable downloading the UVR5 model" echo " -h, --help Show this help message and exit" echo "" echo "Examples:" echo " bash install.sh --device CU128 --source HF --download-uvr5" - echo " bash install.sh --device MPS --source ModelScope" + echo " bash install.sh --device MLX --source ModelScope" } +if ! python3 -c "import sys; sys.exit(0 if sys.version_info >= (3,10) else 1)"; then + echo -e "${ERROR}Python version < 3.10" + exit 1 +fi + # Show help if no arguments provided if [[ $# -eq 0 ]]; then print_help @@ -106,7 +111,7 @@ while [[ $# -gt 0 ]]; do USE_MODELSCOPE=true ;; *) - echo -e "${ERROR}Error: Invalid Download Source: $2" + echo -e "${ERROR}Invalid Download Source: $2" echo -e "${ERROR}Choose From: [HF, HF-Mirror, ModelScope]" exit 1 ;; @@ -126,15 +131,15 @@ while [[ $# -gt 0 ]]; do ROCM) USE_ROCM=true ;; - MPS) - USE_CPU=true + MLX) + USE_MLX=true ;; CPU) USE_CPU=true ;; *) - echo -e "${ERROR}Error: Invalid Device: $2" - echo -e "${ERROR}Choose From: [CU126, CU128, ROCM, MPS, CPU]" + echo -e "${ERROR}Invalid Device: $2" + echo -e "${ERROR}Choose From: [CU126, CU128, ROCM, MLX, CPU]" exit 1 ;; esac @@ -157,15 +162,15 @@ while [[ $# -gt 0 ]]; do esac done -if ! $USE_CUDA && ! $USE_ROCM && ! $USE_CPU; then - echo -e "${ERROR}Error: Device is REQUIRED" +if ! $USE_CUDA && ! $USE_ROCM && ! $USE_MLX && ! $USE_CPU; then + echo -e "${ERROR}Device is REQUIRED" echo "" print_help exit 1 fi if ! $USE_HF && ! $USE_HF_MIRROR && ! $USE_MODELSCOPE; then - echo -e "${ERROR}Error: Download Source is REQUIRED" + echo -e "${ERROR}Download Source is REQUIRED" echo "" print_help exit 1 @@ -215,14 +220,14 @@ else else XCODE_PATH=$(xcode-select -p) if [[ "$XCODE_PATH" == *"Xcode.app"* ]]; then - echo -e "${WARNING} Detected Xcode path: $XCODE_PATH" - echo -e "${WARNING} If your Xcode version does not match your macOS version, it may cause unexpected issues during compilation or package builds." + echo -e "${WARNING}Detected Xcode path: $XCODE_PATH" + echo -e "${WARNING}If your Xcode version does not match your macOS version, it may cause unexpected issues during compilation or package builds." fi fi fi echo -e "${INFO}Installing FFmpeg & CMake..." 
-run_conda_quiet ffmpeg cmake make
+run_conda_quiet ffmpeg=7 cmake make
 echo -e "${SUCCESS}FFmpeg & CMake Installed"

 echo -e "${INFO}Installing unzip..."
@@ -296,7 +301,7 @@ fi
 if [ "$USE_CUDA" = true ] && [ "$WORKFLOW" = false ]; then
     echo -e "${INFO}Checking For Nvidia Driver Installation..."
     if command -v nvidia-smi &>/dev/null; then
-        echo "${INFO}Nvidia Driver Founded"
+        echo -e "${INFO}Nvidia Driver Found"
     else
         echo -e "${WARNING}Nvidia Driver Not Found, Fallback to CPU"
         USE_CUDA=false
@@ -322,13 +327,29 @@ if [ "$USE_ROCM" = true ] && [ "$WORKFLOW" = false ]; then
 fi

 if [ "$USE_CUDA" = true ] && [ "$WORKFLOW" = false ]; then
+    CUDAVERSION=$(nvidia-smi | grep "CUDA Version" | sed -E 's/.*CUDA Version: ([0-9]+\.[0-9]+).*/\1/')
+    echo -e "${INFO}Maximum CUDA Version Supported By Current Driver: $CUDAVERSION"
     if [ "$CUDA" = 128 ]; then
+        if awk "BEGIN {exit !($CUDAVERSION < 12.8)}"; then
+            echo -e "${WARNING}CUDA 12.8 Is Not Supported By Current Driver"
+        fi
         echo -e "${INFO}Installing PyTorch For CUDA 12.8..."
         run_pip_quiet torch torchaudio --index-url "https://download.pytorch.org/whl/cu128"
+        run_conda_quiet cuda-nvcc=12.8
     elif [ "$CUDA" = 126 ]; then
+        if awk "BEGIN {exit !($CUDAVERSION < 12.6)}"; then
+            echo -e "${WARNING}CUDA 12.6 Is Not Supported By Current Driver"
+        fi
         echo -e "${INFO}Installing PyTorch For CUDA 12.6..."
         run_pip_quiet torch torchaudio --index-url "https://download.pytorch.org/whl/cu126"
+        run_conda_quiet cuda-nvcc=12.6
     fi
+    run_pip_quiet psutil ninja packaging wheel "setuptools>=42"
+    run_pip_quiet flash-attn -i https://xxxxrt666.github.io/PIP-Index/ --no-build-isolation
+elif [ "$USE_MLX" = true ] && [ "$WORKFLOW" = false ]; then
+    echo -e "${INFO}Installing MLX & PyTorch For MPS..."
+    run_pip_quiet torch torchaudio --index-url "https://download.pytorch.org/whl/cpu"
+    run_pip_quiet mlx
 elif [ "$USE_ROCM" = true ] && [ "$WORKFLOW" = false ]; then
     echo -e "${INFO}Installing PyTorch For ROCm 6.2..."
     run_pip_quiet torch torchaudio --index-url "https://download.pytorch.org/whl/rocm6.2"
@@ -345,6 +366,8 @@ echo -e "${INFO}Installing Python Dependencies From requirements.txt..."
hash -r +run_pip_quiet torchcodec + run_pip_quiet -r extra-req.txt --no-deps run_pip_quiet -r requirements.txt diff --git a/requirements.txt b/requirements.txt index 90e4957d..14a6b95a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,45 +1,39 @@ --no-binary=opencc -numpy<2.0 -scipy -tensorboard -librosa==0.10.2 -numba -pytorch-lightning>=2.4 -gradio<5 + +cn2an ffmpeg-python +g2pk2 +g2p_en +jieba_fast +kernels +ko_pron +modelscope +opencc +peft +py-cpuinfo +pypinyin +split-lang +torchcodec +transformers +tensorboard +ToJyutping +wordsegment +x_transformers + onnxruntime; platform_machine == "aarch64" or platform_machine == "arm64" onnxruntime-gpu; platform_machine == "x86_64" or platform_machine == "AMD64" -tqdm -funasr==1.0.27 -cn2an -pypinyin -pyopenjtalk>=0.4.1 -g2p_en -torchaudio -modelscope==1.10.0 -sentencepiece -transformers>=4.43,<=4.50 -peft -chardet -PyYAML -psutil -jieba_fast -jieba -split-lang -fast_langdetect>=0.3.1 -wordsegment -rotary_embedding_torch -ToJyutping -g2pk2 -ko_pron -opencc python_mecab_ko; sys_platform != 'win32' -fastapi[standard]>=0.115.2 -x_transformers -torchmetrics<=1.5 -pydantic<=2.10.6 -ctranslate2>=4.0,<5 -huggingface_hub>=0.13 -tokenizers>=0.13,<1 + av>=11 -tqdm +ctranslate2>=4.0,<5 +fastapi[standard]>=0.115.2 +fast_langdetect>=0.3.1 +funasr==1.0.27 +gradio==5.25.0 +librosa==0.10.2 +matplotlib>=3.10 +numpy<2.0 +pydantic<=2.10.6 +pyopenjtalk>=0.4.1 +pytorch-lightning>=2.4 +torchmetrics<=1.5 \ No newline at end of file diff --git a/tools/AP_BWE_main/24kto48k/readme.txt b/tools/AP_BWE/24kto48k/readme.txt similarity index 100% rename from tools/AP_BWE_main/24kto48k/readme.txt rename to tools/AP_BWE/24kto48k/readme.txt diff --git a/tools/AP_BWE_main/LICENSE b/tools/AP_BWE/LICENSE similarity index 100% rename from tools/AP_BWE_main/LICENSE rename to tools/AP_BWE/LICENSE diff --git a/tools/AP_BWE_main/README.md b/tools/AP_BWE/README.md similarity index 100% rename from tools/AP_BWE_main/README.md rename to tools/AP_BWE/README.md diff --git a/tools/AP_BWE_main/datasets1/__init__.py b/tools/AP_BWE/datasets1/__init__.py similarity index 100% rename from tools/AP_BWE_main/datasets1/__init__.py rename to tools/AP_BWE/datasets1/__init__.py diff --git a/tools/AP_BWE/datasets1/dataset.py b/tools/AP_BWE/datasets1/dataset.py new file mode 100644 index 00000000..6cf85e68 --- /dev/null +++ b/tools/AP_BWE/datasets1/dataset.py @@ -0,0 +1,31 @@ +import torch + + +def amp_pha_stft(audio, n_fft, hop_size, win_size, center=True): + hann_window = torch.hann_window(win_size).to(audio.device) + stft_spec = torch.stft( + audio, + n_fft, + hop_length=hop_size, + win_length=win_size, + window=hann_window, + center=center, + pad_mode="reflect", + normalized=False, + return_complex=True, + ) + log_amp = torch.log(torch.abs(stft_spec) + 1e-4) + pha = torch.angle(stft_spec) + + com = torch.stack((torch.exp(log_amp) * torch.cos(pha), torch.exp(log_amp) * torch.sin(pha)), dim=-1) + + return log_amp, pha, com + + +def amp_pha_istft(log_amp, pha, n_fft, hop_size, win_size, center=True): + amp = torch.exp(log_amp) + com = torch.complex(amp * torch.cos(pha), amp * torch.sin(pha)) + hann_window = torch.hann_window(win_size).to(com.device) + audio = torch.istft(com, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window, center=center) + + return audio diff --git a/tools/AP_BWE_main/models/__init__.py b/tools/AP_BWE/models/__init__.py similarity index 100% rename from tools/AP_BWE_main/models/__init__.py rename to tools/AP_BWE/models/__init__.py diff --git 
a/tools/AP_BWE_main/models/model.py b/tools/AP_BWE/models/model.py similarity index 98% rename from tools/AP_BWE_main/models/model.py rename to tools/AP_BWE/models/model.py index e5386001..51f7aa2f 100644 --- a/tools/AP_BWE_main/models/model.py +++ b/tools/AP_BWE/models/model.py @@ -1,7 +1,11 @@ +from typing import List, Tuple + +import numpy as np import torch -import torch.nn.functional as F import torch.nn as nn -from torch.nn.utils import weight_norm, spectral_norm +import torch.nn.functional as F +from torch.nn.utils import spectral_norm +from torch.nn.utils import weight_norm # from utils import init_weights, get_padding @@ -15,9 +19,6 @@ def init_weights(m, mean=0.0, std=0.01): m.weight.data.normal_(mean, std) -import numpy as np -from typing import Tuple, List - LRELU_SLOPE = 0.1 @@ -148,7 +149,7 @@ class DiscriminatorP(torch.nn.Module): def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): super(DiscriminatorP, self).__init__() self.period = period - norm_f = weight_norm if use_spectral_norm == False else spectral_norm + norm_f = weight_norm if use_spectral_norm is False else spectral_norm self.convs = nn.ModuleList( [ norm_f(nn.Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), diff --git a/tools/AP_BWE_main/datasets1/dataset.py b/tools/AP_BWE_main/datasets1/dataset.py deleted file mode 100644 index 40f993b5..00000000 --- a/tools/AP_BWE_main/datasets1/dataset.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -import random -import torch -import torchaudio -import torch.utils.data -import torchaudio.functional as aF - - -def amp_pha_stft(audio, n_fft, hop_size, win_size, center=True): - hann_window = torch.hann_window(win_size).to(audio.device) - stft_spec = torch.stft( - audio, - n_fft, - hop_length=hop_size, - win_length=win_size, - window=hann_window, - center=center, - pad_mode="reflect", - normalized=False, - return_complex=True, - ) - log_amp = torch.log(torch.abs(stft_spec) + 1e-4) - pha = torch.angle(stft_spec) - - com = torch.stack((torch.exp(log_amp) * torch.cos(pha), torch.exp(log_amp) * torch.sin(pha)), dim=-1) - - return log_amp, pha, com - - -def amp_pha_istft(log_amp, pha, n_fft, hop_size, win_size, center=True): - amp = torch.exp(log_amp) - com = torch.complex(amp * torch.cos(pha), amp * torch.sin(pha)) - hann_window = torch.hann_window(win_size).to(com.device) - audio = torch.istft(com, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window, center=center) - - return audio - - -def get_dataset_filelist(a): - with open(a.input_training_file, "r", encoding="utf-8") as fi: - training_indexes = [x.split("|")[0] for x in fi.read().split("\n") if len(x) > 0] - - with open(a.input_validation_file, "r", encoding="utf-8") as fi: - validation_indexes = [x.split("|")[0] for x in fi.read().split("\n") if len(x) > 0] - - return training_indexes, validation_indexes - - -class Dataset(torch.utils.data.Dataset): - def __init__( - self, - training_indexes, - wavs_dir, - segment_size, - hr_sampling_rate, - lr_sampling_rate, - split=True, - shuffle=True, - n_cache_reuse=1, - device=None, - ): - self.audio_indexes = training_indexes - random.seed(1234) - if shuffle: - random.shuffle(self.audio_indexes) - self.wavs_dir = wavs_dir - self.segment_size = segment_size - self.hr_sampling_rate = hr_sampling_rate - self.lr_sampling_rate = lr_sampling_rate - self.split = split - self.cached_wav = None - self.n_cache_reuse = n_cache_reuse - self._cache_ref_count = 0 - self.device = device - - def __getitem__(self, index): - filename 
= self.audio_indexes[index] - if self._cache_ref_count == 0: - audio, orig_sampling_rate = torchaudio.load(os.path.join(self.wavs_dir, filename + ".wav")) - self.cached_wav = audio - self._cache_ref_count = self.n_cache_reuse - else: - audio = self.cached_wav - self._cache_ref_count -= 1 - - if orig_sampling_rate == self.hr_sampling_rate: - audio_hr = audio - else: - audio_hr = aF.resample(audio, orig_freq=orig_sampling_rate, new_freq=self.hr_sampling_rate) - - audio_lr = aF.resample(audio, orig_freq=orig_sampling_rate, new_freq=self.lr_sampling_rate) - audio_lr = aF.resample(audio_lr, orig_freq=self.lr_sampling_rate, new_freq=self.hr_sampling_rate) - audio_lr = audio_lr[:, : audio_hr.size(1)] - - if self.split: - if audio_hr.size(1) >= self.segment_size: - max_audio_start = audio_hr.size(1) - self.segment_size - audio_start = random.randint(0, max_audio_start) - audio_hr = audio_hr[:, audio_start : audio_start + self.segment_size] - audio_lr = audio_lr[:, audio_start : audio_start + self.segment_size] - else: - audio_hr = torch.nn.functional.pad(audio_hr, (0, self.segment_size - audio_hr.size(1)), "constant") - audio_lr = torch.nn.functional.pad(audio_lr, (0, self.segment_size - audio_lr.size(1)), "constant") - - return (audio_hr.squeeze(), audio_lr.squeeze()) - - def __len__(self): - return len(self.audio_indexes) diff --git a/tools/__init__.py b/tools/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tools/assets.py b/tools/assets.py index b2c302fe..de1b8557 100644 --- a/tools/assets.py +++ b/tools/assets.py @@ -51,7 +51,7 @@ footer * { top_html = """
-
{}
+
{}
@@ -60,9 +60,9 @@ top_html = """
 
 
-    
+    
 
-    
+    
 
diff --git a/tools/audio_sr.py b/tools/audio_sr.py
index 58df6d20..3f9f88ae 100644
--- a/tools/audio_sr.py
+++ b/tools/audio_sr.py
@@ -1,24 +1,23 @@
 from __future__ import absolute_import, division, print_function, unicode_literals
-import sys
+
+import json
 import os

-AP_BWE_main_dir_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "AP_BWE_main")
-sys.path.append(AP_BWE_main_dir_path)
-import json
 import torch
-import torchaudio.functional as aF

-# from attrdict import AttrDict####will be bug in py3.10
+import torchaudio

-from datasets1.dataset import amp_pha_stft, amp_pha_istft
-from models.model import APNet_BWE_Model
+from tools.AP_BWE.datasets1.dataset import amp_pha_istft, amp_pha_stft
+from tools.AP_BWE.models.model import APNet_BWE_Model
+
+AP_BWE_dir_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "AP_BWE")


 class AP_BWE:
     def __init__(self, device, DictToAttrRecursive, checkpoint_file=None):
-        if checkpoint_file == None:
-            checkpoint_file = "%s/24kto48k/g_24kto48k.zip" % (AP_BWE_main_dir_path)
-        if os.path.exists(checkpoint_file) == False:
-            raise FileNotFoundError
+        if checkpoint_file is None:
+            checkpoint_file = f"{AP_BWE_dir_path}/24kto48k/g_24kto48k.zip"
+        if os.path.exists(checkpoint_file) is False:
+            raise FileNotFoundError()
         config_file = os.path.join(os.path.split(checkpoint_file)[0], "config.json")
         with open(config_file) as f:
             data = f.read()
@@ -39,12 +38,8 @@ class AP_BWE:
         return self

     def __call__(self, audio, orig_sampling_rate):
-        with torch.no_grad():
-            # audio, orig_sampling_rate = torchaudio.load(inp_path)
-            # audio = audio.to(self.device)
-            audio = aF.resample(audio, orig_freq=orig_sampling_rate, new_freq=self.h.hr_sampling_rate)
-            amp_nb, pha_nb, com_nb = amp_pha_stft(audio, self.h.n_fft, self.h.hop_size, self.h.win_size)
-            amp_wb_g, pha_wb_g, com_wb_g = self.model(amp_nb, pha_nb)
-            audio_hr_g = amp_pha_istft(amp_wb_g, pha_wb_g, self.h.n_fft, self.h.hop_size, self.h.win_size)
-            # sf.write(opt_path, audio_hr_g.squeeze().cpu().numpy(), self.h.hr_sampling_rate, 'PCM_16')
-            return audio_hr_g.squeeze().cpu().numpy(), self.h.hr_sampling_rate
+        audio = torchaudio.functional.resample(audio, orig_freq=orig_sampling_rate, new_freq=self.h.hr_sampling_rate)
+        amp_nb, pha_nb, com_nb = amp_pha_stft(audio, self.h.n_fft, self.h.hop_size, self.h.win_size)
+        amp_wb_g, pha_wb_g, com_wb_g = self.model(amp_nb, pha_nb)
+        audio_hr_g = amp_pha_istft(amp_wb_g, pha_wb_g, self.h.n_fft, self.h.hop_size, self.h.win_size)
+        return audio_hr_g.squeeze().cpu().numpy(), self.h.hr_sampling_rate
diff --git a/tools/i18n/i18n.py b/tools/i18n/i18n.py
index 4cd123f3..815e7db4 100644
--- a/tools/i18n/i18n.py
+++ b/tools/i18n/i18n.py
@@ -2,7 +2,7 @@ import json
 import locale
 import os

-I18N_JSON_DIR: os.PathLike = os.path.join(os.path.dirname(os.path.relpath(__file__)), "locale")
+I18N_JSON_DIR: str = os.path.join(os.path.dirname(os.path.relpath(__file__)), "locale")


 def load_language_list(language):
@@ -29,7 +29,7 @@ class I18nAuto:
             self.language = language
         self.language_map = load_language_list(language)

-    def __call__(self, key):
+    def __call__(self, key: str) -> str:
         return self.language_map.get(key, key)

     def __repr__(self):
diff --git a/tools/i18n/locale/en_US.json b/tools/i18n/locale/en_US.json
index 24d24de4..3aa8676b 100644
--- a/tools/i18n/locale/en_US.json
+++ b/tools/i18n/locale/en_US.json
@@ -1,11 +1,11 @@
 {
     "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb): Best choice for dual-channel 
reverberation, cannot remove single-channel reverberation;", "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho: Removes delay effects. Aggressive mode removes more thoroughly than Normal mode. DeReverb additionally removes reverberation, can remove mono reverberation, but does not clean heavily high-frequency plate reverberation.", - "*实验/模型名": "*Experiment/model name", - "*文本标注文件": "*Text labelling file", - "*训练集音频文件目录": "*Audio dataset folder", - "*请上传并填写参考信息": "*Please upload and fill reference information", - "*请填写需要合成的目标文本和语种模式": "*Please fill in the target text and language mode for synthesis", + "实验/模型名": "Experiment/model name", + "文本标注文件": "Text labelling file", + "训练集音频文件目录": "Audio dataset folder", + "请上传并填写参考信息": "Please upload and fill reference information", + "请填写需要合成的目标文本和语种模式": "Please fill in the target text and language mode for synthesis", ".限制范围越小判别效果越好。": "Less Multilingual is better", "1-GPT-SoVITS-TTS": "1-GPT-SOVITS-TTS", "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. The DeEcho-DeReverb model's processing time is nearly twice that of the other two DeEcho models.", @@ -22,7 +22,7 @@ "GPT 训练: 模型权重文件在 GPT_weights/": "GPT Training: Model Weights saved in GPT_weights/", "GPT模型列表": "GPT weight list", "GPT训练": "GPT Training", - "GPT采样参数(无参考文本时不要太低。不懂就用默认):": "GPT sampling parameters (not too low when there's no reference text. Use default if unsure):", + "GPT采样参数(不懂就用默认):": "GPT sampling parameters", "GPU卡号,只能填1个整数": "GPU number, can only input ONE integer", "GPU卡号以-分割,每个卡号一个进程": "GPU number is separated by -, each GPU will run one process ", "LoRA秩": "LoRA Rank", @@ -89,7 +89,7 @@ "参考音频在3~10秒范围外,请更换!": "Reference audio is outside the 3-10 second range, please choose another one!", "参考音频的文本": "Text for reference audio", "参考音频的语种": "Language for reference audio", - "句间停顿秒数": "Pause Duration between Sentences (Seconds)", + "句间停顿秒数": "Sentence Pause Duration", "可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。": "Optional: Upload multiple reference audio files by dragging and dropping them (recommended to be of the same gender), and average their tone. If this option is left blank, the tone will be controlled by the single reference audio on the left. If fine-tuning the model, it is recommended that all reference audio files have tones within the fine-tuning training set; the pretrained model can be ignored.", "合成语音": "Start inference", "合成音频": "Synthesize Audio", @@ -142,7 +142,7 @@ "是否仅保存最新的权重文件以节省硬盘空间": "Save only the latest weight file to save disk space", "是否在每次保存时间点将最终小模型保存至weights文件夹": "Save a small final model to the 'weights' folder at each save point:", "是否开启DPO训练选项(实验性)": "Enable DPO Training (Experimental)", - "是否直接对上次合成结果调整语速和音色。防止随机性。": "Adjust the speech rate and tone of the last synthesis result to prevent randomness.", + "是否直接对上次合成结果调整语速和音色": "Adjust the speech rate and tone of the last synthesis result", "显卡信息": "GPU Information", "未下载模型": "Model Not Downloaded", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.": "This software is open source under the MIT license. The author does not have any control over the software. 
Users who use the software and distribute the sounds exported by the software are solely responsible.", @@ -222,5 +222,6 @@ "预训练SoVITS-D模型路径": "Pretrained SoVITS-D Model Path", "预训练SoVITS-G模型路径": "Pretrained SoVITS-G Model Path", "预训练中文BERT模型路径": "Pretrained Chinese BERT Model Path", - "预训练模型路径": "Pretrained Model Path" + "预训练模型路径": "Pretrained Model Path", + "推理后端": "Inference Backend" } diff --git a/tools/i18n/locale/es_ES.json b/tools/i18n/locale/es_ES.json index 18c8af99..d60ee915 100644 --- a/tools/i18n/locale/es_ES.json +++ b/tools/i18n/locale/es_ES.json @@ -1,11 +1,11 @@ { "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net (onnx_dereverb): reverberación estéreo, la mejor opción; no puede eliminar reverberación mono", "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho: Eliminar el efecto de retardo. Aggressive elimina más que Normal, DeReverb elimina reverberación adicional, puede eliminar reverberación mono, pero no limpia bien la reverberación de placa de alta frecuencia", - "*实验/模型名": "*Nombre del experimento/modelo", - "*文本标注文件": "*Archivo de etiquetado de texto", - "*训练集音频文件目录": "*Directorio de archivos de audio de entrenamiento", - "*请上传并填写参考信息": "*Por favor, suba y complete la información de referencia", - "*请填写需要合成的目标文本和语种模式": "*Por favor, complete el texto objetivo a sintetizar y el modo de idioma", + "实验/模型名": "Nombre del experimento/modelo", + "文本标注文件": "Archivo de etiquetado de texto", + "训练集音频文件目录": "Directorio de archivos de audio de entrenamiento", + "请上传并填写参考信息": "Por favor, suba y complete la información de referencia", + "请填写需要合成的目标文本和语种模式": "Por favor, complete el texto objetivo a sintetizar y el modo de idioma", ".限制范围越小判别效果越好。": ".Cuanto más pequeño sea el rango, mejor será el efecto de discriminación.", "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. El modelo DeEcho-DeReverb tarda casi el doble que los otros dos modelos DeEcho", @@ -22,7 +22,7 @@ "GPT 训练: 模型权重文件在 GPT_weights/": "Entrenamiento de GPT: los archivos de pesos del modelo están en GPT_weights/", "GPT模型列表": "Lista de modelos GPT", "GPT训练": "Entrenamiento de GPT", - "GPT采样参数(无参考文本时不要太低。不懂就用默认):": "Parámetros de muestreo de GPT (no demasiado bajos cuando no hay texto de referencia. Use los valores por defecto si no está seguro):", + "GPT采样参数(不懂就用默认):": "Parámetros de muestreo de GPT (no demasiado bajos cuando no hay texto de referencia. Use los valores por defecto si no está seguro):", "GPU卡号,只能填1个整数": "Número de tarjeta GPU, solo se puede ingresar un número entero", "GPU卡号以-分割,每个卡号一个进程": "Número de tarjeta GPU separado por '-', cada número de tarjeta es un proceso", "LoRA秩": "Rango de LoRA", @@ -142,7 +142,7 @@ "是否仅保存最新的权重文件以节省硬盘空间": "¿Guardar solo el último archivo de pesos más reciente para ahorrar espacio en disco?", "是否在每次保存时间点将最终小模型保存至weights文件夹": "¿Guardar el modelo final pequeño en la carpeta de pesos en cada punto de guardado?", "是否开启DPO训练选项(实验性)": "¿Habilitar la opción de entrenamiento dpo (experimental)?", - "是否直接对上次合成结果调整语速和音色。防止随机性。": "¿Ajustar directamente la velocidad del habla y el tono del último resultado de síntesis? Para prevenir la aleatoriedad.", + "是否直接对上次合成结果调整语速和音色": "¿Ajustar directamente la velocidad del habla y el tono del último resultado de síntesis? Para prevenir la aleatoriedad.", "显卡信息": "Información de la tarjeta gráfica", "未下载模型": "Modelo no descargado", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.": "Este software es de código abierto bajo la licencia MIT. 
El autor no tiene control sobre el software. El usuario que lo utilice o distribuya, y el que genere sonidos a partir del software, asume toda la responsabilidad.", diff --git a/tools/i18n/locale/fr_FR.json b/tools/i18n/locale/fr_FR.json index 1d55a56b..dccf7c96 100644 --- a/tools/i18n/locale/fr_FR.json +++ b/tools/i18n/locale/fr_FR.json @@ -1,11 +1,11 @@ { "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1) MDX-Net (onnx_dereverb) : C'est le meilleur choix pour la réverbération à deux canaux, mais il ne peut pas éliminer la réverbération à un seul canal;", "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho : Supprime les effets de délai. Aggressive est plus exhaustif que Normal dans la suppression, DeReverb élimine également la réverbération, peut supprimer la réverbération monocanal, mais n'élimine pas complètement la réverbération de plaque à haute fréquence.", - "*实验/模型名": "*Nom de l'expérience/modèle", - "*文本标注文件": "*Fichier d'annotation de texte", - "*训练集音频文件目录": "*Répertoire des fichiers audio d'entraînement", - "*请上传并填写参考信息": "*Veuillez télécharger et remplir les informations de référence", - "*请填写需要合成的目标文本和语种模式": "*Veuillez saisir le texte cible à synthétiser et le mode de langue.", + "实验/模型名": "Nom de l'expérience/modèle", + "文本标注文件": "Fichier d'annotation de texte", + "训练集音频文件目录": "Répertoire des fichiers audio d'entraînement", + "请上传并填写参考信息": "Veuillez télécharger et remplir les informations de référence", + "请填写需要合成的目标文本和语种模式": "Veuillez saisir le texte cible à synthétiser et le mode de langue.", ".限制范围越小判别效果越好。": "Moins il y a de langues, mieux c'est", "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. Le temps de traitement du modèle DeEcho-DeReverb est presque le double de celui des deux autres modèles DeEcho;", @@ -22,7 +22,7 @@ "GPT 训练: 模型权重文件在 GPT_weights/": "Entraînement GPT : les poids du modèle sont dans GPT_weights/", "GPT模型列表": "Liste des modèles GPT", "GPT训练": "Entraînement GPT", - "GPT采样参数(无参考文本时不要太低。不懂就用默认):": "Paramètres d'échantillonnage de GPT (ne pas mettre trop bas lorsqu'il n'y a pas de texte de référence. Utilisez les valeurs par défaut si vous n'êtes pas sûr):", + "GPT采样参数(不懂就用默认):": "Paramètres d'échantillonnage de GPT (ne pas mettre trop bas lorsqu'il n'y a pas de texte de référence. Utilisez les valeurs par défaut si vous n'êtes pas sûr):", "GPU卡号,只能填1个整数": "Numéro de carte GPU, ne peut contenir qu'un seul entier", "GPU卡号以-分割,每个卡号一个进程": "Numéro de carte GPU séparé par des tirets, un processus par numéro de carte", "LoRA秩": "Rang LoRA", @@ -142,7 +142,7 @@ "是否仅保存最新的权重文件以节省硬盘空间": "Faut-il ne conserver que les derniers fichiers de poids pour économiser de l'espace disque ?", "是否在每次保存时间点将最终小模型保存至weights文件夹": "Sauvegarder le petit modèle final dans le dossier weights à chaque point de sauvegarde", "是否开启DPO训练选项(实验性)": "Activer l'option d'entraînement DPO (expérimental) ?", - "是否直接对上次合成结果调整语速和音色。防止随机性。": "Ajuster la vitesse de parole et la tonalité du dernier résultat de synthèse pour prévenir l'aléatoire.", + "是否直接对上次合成结果调整语速和音色": "Ajuster la vitesse de parole et la tonalité du dernier résultat de synthèse pour prévenir l'aléatoire.", "显卡信息": "Informations sur la carte graphique", "未下载模型": "Modèle non téléchargé", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.": "Ce logiciel est open-source sous licence MIT. L'auteur n'exerce aucun contrôle sur le logiciel. 
L'utilisateur et toute personne diffusant les sorties audio générées sont entièrement responsables.", diff --git a/tools/i18n/locale/it_IT.json b/tools/i18n/locale/it_IT.json index 1c236475..b3d99e64 100644 --- a/tools/i18n/locale/it_IT.json +++ b/tools/i18n/locale/it_IT.json @@ -1,11 +1,11 @@ { "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net (onnx_dereverb): È la scelta migliore per la riverberazione a due canali, ma non può rimuovere la riverberazione a canale singolo;", "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho: Rimuove gli effetti di ritardo. Aggressive è più completo di Normal nella rimozione, DeReverb rimuove ulteriormente la riverberazione, può rimuovere la riverberazione a canale singolo, ma non rimuove completamente la riverberazione a piastra ad alta frequenza.", - "*实验/模型名": "*Nome dell'esperimento/modello", - "*文本标注文件": "*File di annotazione del testo", - "*训练集音频文件目录": "*Directory dei file audio del set di addestramento", - "*请上传并填写参考信息": "*Carica e compila le informazioni di riferimento", - "*请填写需要合成的目标文本和语种模式": "*Si prega di inserire il testo di destinazione da sintetizzare e la modalità lingua", + "实验/模型名": "Nome dell'esperimento/modello", + "文本标注文件": "File di annotazione del testo", + "训练集音频文件目录": "Directory dei file audio del set di addestramento", + "请上传并填写参考信息": "Carica e compila le informazioni di riferimento", + "请填写需要合成的目标文本和语种模式": "Si prega di inserire il testo di destinazione da sintetizzare e la modalità lingua", ".限制范围越小判别效果越好。": "Meno multilingue è meglio", "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. Il tempo di elaborazione del modello DeEcho-DeReverb è quasi il doppio di quello degli altri due modelli DeEcho;", @@ -22,7 +22,7 @@ "GPT 训练: 模型权重文件在 GPT_weights/": "Addestramento GPT: i pesi del modello sono in GPT_weights/", "GPT模型列表": "Elenco dei modelli GPT", "GPT训练": "Addestramento GPT", - "GPT采样参数(无参考文本时不要太低。不懂就用默认):": "Parametri di campionamento di GPT (non troppo bassi quando non c'è testo di riferimento. Utilizzare i valori predefiniti in caso di incertezza):", + "GPT采样参数(不懂就用默认):": "Parametri di campionamento di GPT (non troppo bassi quando non c'è testo di riferimento. Utilizzare i valori predefiniti in caso di incertezza):", "GPU卡号,只能填1个整数": "Numero della scheda grafica, può essere inserito solo un numero intero", "GPU卡号以-分割,每个卡号一个进程": "Numero di GPU separati da '-'; ogni numero corrisponde a un processo", "LoRA秩": "Rango LoRA", @@ -142,7 +142,7 @@ "是否仅保存最新的权重文件以节省硬盘空间": "Salvare solo i file di pesi più recenti per risparmiare spazio su disco?", "是否在每次保存时间点将最终小模型保存至weights文件夹": "Salvare il modello finale più piccolo nella cartella weights ad ogni punto di salvataggio", "是否开启DPO训练选项(实验性)": "Attivare l'opzione di addestramento DPO (sperimentale)?", - "是否直接对上次合成结果调整语速和音色。防止随机性。": "Regola la velocità del parlato e il tono dell'ultimo risultato di sintesi per prevenire la casualità.", + "是否直接对上次合成结果调整语速和音色": "Regola la velocità del parlato e il tono dell'ultimo risultato di sintesi per prevenire la casualità.", "显卡信息": "Informazioni sulla scheda grafica", "未下载模型": "Modello non scaricato", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.": "Questo software è open-source sotto licenza MIT. L'autore non esercita alcun controllo sul software. 
L'utente e chiunque diffonda gli output audio generati sono pienamente responsabili.", diff --git a/tools/i18n/locale/ja_JP.json b/tools/i18n/locale/ja_JP.json index 4abe5606..b6283cb9 100644 --- a/tools/i18n/locale/ja_JP.json +++ b/tools/i18n/locale/ja_JP.json @@ -1,11 +1,11 @@ { "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb):二重チャンネルのリバーブに最適な選択ですが、単一チャンネルのリバーブは除去できません;", "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:遅延効果を除去します。AggressiveはNormalよりも徹底的に除去し、DeReverbは追加でリバーブを除去し、モノラルリバーブを除去できますが、高周波数のプレートリバーブは完全には除去できません。", - "*实验/模型名": "*実験/モデル名", - "*文本标注文件": "*テキスト注釈ファイル", - "*训练集音频文件目录": "*トレーニングデータのオーディオファイルディレクトリ", - "*请上传并填写参考信息": "*参照情報をアップロードして記入してください", - "*请填写需要合成的目标文本和语种模式": "*合成対象テキストと言語モードを入力してください", + "实验/模型名": "実験/モデル名", + "文本标注文件": "テキスト注釈ファイル", + "训练集音频文件目录": "トレーニングデータのオーディオファイルディレクトリ", + "请上传并填写参考信息": "参照情報をアップロードして記入してください", + "请填写需要合成的目标文本和语种模式": "合成対象テキストと言語モードを入力してください", ".限制范围越小判别效果越好。": "多言語対応を減らした方が良い", "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1、DeEcho-DeReverbモデルの処理時間は、他の2つのDeEchoモデルのほぼ2倍です;", @@ -22,7 +22,7 @@ "GPT 训练: 模型权重文件在 GPT_weights/": "GPT トレーニング: モデルの重みファイルは GPT_weights/ にあります", "GPT模型列表": "GPTモデルリスト", "GPT训练": "GPTトレーニング", - "GPT采样参数(无参考文本时不要太低。不懂就用默认):": "GPT サンプリングパラメーター(参照テキストがない場合はあまり低くしないでください。わからない場合はデフォルトを使用してください):", + "GPT采样参数(不懂就用默认):": "GPT サンプリングパラメーター(参照テキストがない場合はあまり低くしないでください。わからない場合はデフォルトを使用してください):", "GPU卡号,只能填1个整数": "GPU番号、1つの整数しか入力できません", "GPU卡号以-分割,每个卡号一个进程": "GPUカード番号はハイフンで区切り、各カード番号ごとに1つのプロセスが実行されます", "LoRA秩": "LoRAランク", @@ -142,7 +142,7 @@ "是否仅保存最新的权重文件以节省硬盘空间": "最新の重みファイルのみを保存し、ディスクスペースを節約しますか?", "是否在每次保存时间点将最终小模型保存至weights文件夹": "各保存時間点で最終的な小さなモデルをweightsフォルダに保存するかどうか", "是否开启DPO训练选项(实验性)": "DPO トレーニングオプションを有効にしますか?(実験的)", - "是否直接对上次合成结果调整语速和音色。防止随机性。": "ランダム性を防ぐために、前回の合成結果のスピーチ速度とトーンを調整します。", + "是否直接对上次合成结果调整语速和音色": "ランダム性を防ぐために、前回の合成結果のスピーチ速度とトーンを調整します。", "显卡信息": "グラフィックカード情報", "未下载模型": "モデルがダウンロードされていません", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.": "このソフトウェアはMITライセンスでオープンソース化されており、作者はソフトウェアに対して一切の制御権を持っていません。ソフトウェアを使用する者、ソフトウェアから導出される音声を広める者は、自己責任で行ってください。", diff --git a/tools/i18n/locale/ko_KR.json b/tools/i18n/locale/ko_KR.json index f2a78336..35119aca 100644 --- a/tools/i18n/locale/ko_KR.json +++ b/tools/i18n/locale/ko_KR.json @@ -1,11 +1,11 @@ { "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net (onnx_dereverb): 듀얼 채널 리버브에는 가장 적합하지만, 싱글 채널 리버브는 제거할 수 없습니다", "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:지연 효과를 제거합니다. Aggressive는 Normal보다 더 철저하게 제거하며, DeReverb는 추가로 리버브를 제거하여 단일 채널 리버브를 제거할 수 있지만 고주파 리버브는 완전히 제거하지 못합니다.", - "*实验/模型名": "*실험/모델 이름", - "*文本标注文件": "*텍스트 주석 파일", - "*训练集音频文件目录": "*훈련 세트 오디오 파일 디렉터리", - "*请上传并填写参考信息": "*참고 정보를 업로드하고 입력하십시오", - "*请填写需要合成的目标文本和语种模式": "*합성할 목표 텍스트와 언어 모드를 입력하세요", + "实验/模型名": "실험/모델 이름", + "文本标注文件": "텍스트 주석 파일", + "训练集音频文件目录": "훈련 세트 오디오 파일 디렉터리", + "请上传并填写参考信息": "참고 정보를 업로드하고 입력하십시오", + "请填写需要合成的目标文本和语种模式": "합성할 목표 텍스트와 언어 모드를 입력하세요", ".限制范围越小判别效果越好。": "다언어 지원을 줄이는 것이 더 좋습니다", "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. DeEcho-DeReverb 모델의 처리 시간은 다른 두 DeEcho 모델의 거의 두 배입니다;", @@ -22,7 +22,7 @@ "GPT 训练: 模型权重文件在 GPT_weights/": "GPT 훈련: 모델 가중치 파일은 GPT_weights/에 있습니다", "GPT模型列表": "GPT 모델 목록", "GPT训练": "GPT훈련", - "GPT采样参数(无参考文本时不要太低。不懂就用默认):": "GPT 샘플링 매개변수 (참조 텍스트가 없을 때 너무 낮게 설정하지 마십시오. 
확실하지 않으면 기본값을 사용하십시오):", + "GPT采样参数(不懂就用默认):": "GPT 샘플링 매개변수 (참조 텍스트가 없을 때 너무 낮게 설정하지 마십시오. 확실하지 않으면 기본값을 사용하십시오):", "GPU卡号,只能填1个整数": "GPU 카드 번호, 1개의 정수만 입력 가능", "GPU卡号以-分割,每个卡号一个进程": "GPU 카드 번호는 -로 구분되며 각 카드 번호에 하나의 프로세스가 있어야 함", "LoRA秩": "LoRA 랭크", @@ -142,7 +142,7 @@ "是否仅保存最新的权重文件以节省硬盘空间": "디스크 공간을 절약하기 위해 최신 가중치 파일만 저장할지 여부", "是否在每次保存时间点将最终小模型保存至weights文件夹": "각 저장 시간에 최종 작은 모델을 weights 폴더에 저장할지 여부", "是否开启DPO训练选项(实验性)": "DPO 훈련 옵션 활성화 여부 (실험적 기능)", - "是否直接对上次合成结果调整语速和音色。防止随机性。": "랜덤성을 방지하기 위해 마지막 합성 결과의 말하기 속도와 톤을 조정합니다.", + "是否直接对上次合成结果调整语速和音色": "랜덤성을 방지하기 위해 마지막 합성 결과의 말하기 속도와 톤을 조정합니다.", "显卡信息": "그래픽 카드 정보", "未下载模型": "모델이 다운로드되지 않음", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.": "본 소프트웨어는 MIT 라이선스로 오픈소스이며, 개발자는 소프트웨어에 대한 어떠한 통제력도 가지지 않습니다. 사용자는 소프트웨어를 이용하거나 이를 통해 생성된 음성을 배포할 경우 모든 책임을 집니다.", diff --git a/tools/i18n/locale/pt_BR.json b/tools/i18n/locale/pt_BR.json index 987ec468..d60e6441 100644 --- a/tools/i18n/locale/pt_BR.json +++ b/tools/i18n/locale/pt_BR.json @@ -1,11 +1,11 @@ { "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net (onnx_dereverb): É a melhor opção para reverberação de dois canais, mas não pode remover a reverberação de um único canal;", "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:Remove os efeitos de atraso. Aggressive é mais completo que Normal na remoção, DeReverb remove adicionalmente a reverberação, pode remover a reverberação de um canal único, mas não remove completamente a reverberação de placa de alta frequência.", - "*实验/模型名": "*Nome do experimento/modelo", - "*文本标注文件": "*Arquivo de marcação de texto", - "*训练集音频文件目录": "*Diretório de arquivos de áudio do conjunto de treinamento", - "*请上传并填写参考信息": "Por favor, faça o upload e preencha as informações de referência", - "*请填写需要合成的目标文本和语种模式": "*Por favor, insira o texto alvo a ser sintetizado e o modo de idioma.", + "实验/模型名": "Nome do experimento/modelo", + "文本标注文件": "Arquivo de marcação de texto", + "训练集音频文件目录": "Diretório de arquivos de áudio do conjunto de treinamento", + "请上传并填写参考信息": "Por favor, faça o upload e preencha as informações de referência", + "请填写需要合成的目标文本和语种模式": "*Por favor, insira o texto alvo a ser sintetizado e o modo de idioma.", ".限制范围越小判别效果越好。": "Menos multilinguismo é melhor", "1-GPT-SoVITS-TTS": "1-GPT-SOVITS-TTS", "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. O tempo de processamento do modelo DeEcho-DeReverb é quase o dobro dos outros dois modelos DeEcho;", @@ -22,7 +22,7 @@ "GPT 训练: 模型权重文件在 GPT_weights/": "Treinamento GPT: O arquivo de pesos do modelo está em GPT_weights/", "GPT模型列表": "Lista de modelos GPT", "GPT训练": "Treinamento GPT", - "GPT采样参数(无参考文本时不要太低。不懂就用默认):": "Parâmetros de amostragem do GPT (não muito baixos quando não houver texto de referência. Use o padrão se não tiver certeza):", + "GPT采样参数(不懂就用默认):": "Parâmetros de amostragem do GPT (não muito baixos quando não houver texto de referência. 
Use o padrão se não tiver certeza):", "GPU卡号,只能填1个整数": "Número da placa de vídeo, só é possível preencher com um número inteiro", "GPU卡号以-分割,每个卡号一个进程": "Número da placa de vídeo dividido por-, cada número de placa é um processo", "LoRA秩": "Classificação LoRA", @@ -142,7 +142,7 @@ "是否仅保存最新的权重文件以节省硬盘空间": "Deseja salvar apenas os arquivos de pesos mais recentes para economizar espaço em disco?", "是否在每次保存时间点将最终小模型保存至weights文件夹": "Se deve salvar o modelo pequeno final na pasta Weights em cada ponto de salvamento de tempo", "是否开启DPO训练选项(实验性)": "Ativar a opção de treinamento DPO (experimental)?", - "是否直接对上次合成结果调整语速和音色。防止随机性。": "Ajuste a velocidade da fala e o tom do último resultado de síntese para evitar aleatoriedade.", + "是否直接对上次合成结果调整语速和音色": "Ajuste a velocidade da fala e o tom do último resultado de síntese para evitar aleatoriedade.", "显卡信息": "Informações da placa de vídeo", "未下载模型": "Modelo não baixado", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.": "Este software é de código aberto sob a licença MIT, e o autor não tem controle sobre seu uso. O usuário e qualquer pessoa que distribua áudio gerado pelo software são totalmente responsáveis.", diff --git a/tools/i18n/locale/ru_RU.json b/tools/i18n/locale/ru_RU.json index 5d016987..055f5e06 100644 --- a/tools/i18n/locale/ru_RU.json +++ b/tools/i18n/locale/ru_RU.json @@ -1,11 +1,11 @@ { "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb):Это лучший выбор для реверберации с двумя каналами, но он не может устранить реверберацию с одним каналом;", "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:Устраняет эффект задержки. Aggressive устраняет более тщательно, чем Normal, DeReverb дополнительно устраняет реверберацию, может устранить реверберацию с одного канала, но не полностью устраняет высокочастотную реверберацию.", - "*实验/模型名": "*Название эксперимента/модели", - "*文本标注文件": "*Файл текстовой аннотации", - "*训练集音频文件目录": "*Директория аудиофайлов обучающего набора", - "*请上传并填写参考信息": "*Пожалуйста, загрузите и заполните референтные данные", - "*请填写需要合成的目标文本和语种模式": "*Пожалуйста, введите целевой текст для синтеза и режим языка", + "实验/模型名": "Название эксперимента/модели", + "文本标注文件": "Файл текстовой аннотации", + "训练集音频文件目录": "Директория аудиофайлов обучающего набора", + "请上传并填写参考信息": "Пожалуйста, загрузите и заполните референтные данные", + "请填写需要合成的目标文本和语种模式": "Пожалуйста, введите целевой текст для синтеза и режим языка", ".限制范围越小判别效果越好。": "Чем меньше языков, тем лучше", "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. Время обработки модели DeEcho-DeReverb почти вдвое больше, чем у двух других моделей DeEcho;", @@ -22,7 +22,7 @@ "GPT 训练: 模型权重文件在 GPT_weights/": "Обучение GPT: файлы весов модели находятся в GPT_weights/", "GPT模型列表": "Список моделей GPT", "GPT训练": "Обучение GPT", - "GPT采样参数(无参考文本时不要太低。不懂就用默认):": "Параметры выборки GPT (не устанавливайте слишком низкие значения, если нет ссылочного текста. Используйте значения по умолчанию, если не уверены):", + "GPT采样参数(不懂就用默认):": "Параметры выборки GPT (не устанавливайте слишком низкие значения, если нет ссылочного текста. 
Используйте значения по умолчанию, если не уверены):", "GPU卡号,只能填1个整数": "Номер GPU, можно указать только одно целое число", "GPU卡号以-分割,每个卡号一个进程": "Номера GPU разделяются дефисом, на каждый номер отдельный процесс", "LoRA秩": "Ранг LoRA", @@ -142,7 +142,7 @@ "是否仅保存最新的权重文件以节省硬盘空间": "Сохранить только последние файлы весов для экономии дискового пространства?", "是否在每次保存时间点将最终小模型保存至weights文件夹": "Сохранять финальную версию модели в папке weights на каждом этапе сохранения?", "是否开启DPO训练选项(实验性)": "Включить опцию обучения DPO (экспериментально)?", - "是否直接对上次合成结果调整语速和音色。防止随机性。": "Настройте скорость речи и тон последнего результата синтеза, чтобы избежать случайности.", + "是否直接对上次合成结果调整语速和音色": "Настройте скорость речи и тон последнего результата синтеза, чтобы избежать случайности.", "显卡信息": "Информация о видеокарте", "未下载模型": "Модель не загружена", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.": "Эта программа распространяется с открытым исходным кодом по лицензии MIT, и автор не несёт ответственности за её использование. Пользователь и распространитель звука, созданного программой, несут полную ответственность.", diff --git a/tools/i18n/locale/tr_TR.json b/tools/i18n/locale/tr_TR.json index 41c5684d..b7113851 100644 --- a/tools/i18n/locale/tr_TR.json +++ b/tools/i18n/locale/tr_TR.json @@ -1,11 +1,11 @@ { "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb):İki kanallı yankılar için en iyi seçimdir, ancak tek kanallı yankıları ortadan kaldıramaz;", "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:Gecikme etkilerini giderir. Aggressive, Normal'dan daha kapsamlı bir şekilde giderir, DeReverb ek olarak yankıyı giderir, tek kanallı yankıyı giderebilir, ancak yüksek frekanslı plaka yankısını tamamen gideremez.", - "*实验/模型名": "*Deney/model adı", - "*文本标注文件": "*Metin etiketleme dosyası", - "*训练集音频文件目录": "*Eğitim seti ses dosyası dizini", - "*请上传并填写参考信息": "*Lütfen referans bilgilerini yükleyin ve doldurun", - "*请填写需要合成的目标文本和语种模式": "*Lütfen sentezlenecek hedef metni ve dil modunu giriniz.", + "实验/模型名": "Deney/model adı", + "文本标注文件": "Metin etiketleme dosyası", + "训练集音频文件目录": "Eğitim seti ses dosyası dizini", + "请上传并填写参考信息": "Lütfen referans bilgilerini yükleyin ve doldurun", + "请填写需要合成的目标文本和语种模式": "Lütfen sentezlenecek hedef metni ve dil modunu giriniz.", ".限制范围越小判别效果越好。": "Daha az çok dilli olmak daha iyidir", "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1. DeEcho-DeReverb modelinin işleme süresi, diğer iki DeEcho modelinin neredeyse iki katıdır;", @@ -22,7 +22,7 @@ "GPT 训练: 模型权重文件在 GPT_weights/": "GPT Eğitimi: Model ağırlık dosyaları GPT_weights/ içinde", "GPT模型列表": "GPT model listesi", "GPT训练": "GPT Eğitimi", - "GPT采样参数(无参考文本时不要太低。不懂就用默认):": "GPT örnekleme parametreleri (referans metin olmadığında çok düşük olmamalıdır. Emin değilseniz varsayılanı kullanın):", + "GPT采样参数(不懂就用默认):": "GPT örnekleme parametreleri (referans metin olmadığında çok düşük olmamalıdır. 
Emin değilseniz varsayılanı kullanın):", "GPU卡号,只能填1个整数": "GPU kart numarası, sadece bir tamsayı girilebilir", "GPU卡号以-分割,每个卡号一个进程": "GPU kart numaraları - ile ayrılır, her kart numarası için bir işlem", "LoRA秩": "LoRA Derecesi", @@ -142,7 +142,7 @@ "是否仅保存最新的权重文件以节省硬盘空间": "Sadece en son ağırlık dosyasını kaydedip sabit disk alanı tasarrufu sağlamak isterseniz", "是否在每次保存时间点将最终小模型保存至weights文件夹": "Her kayıt zamanında son küçük modelin weights klasörüne kaydedilmesi gerekiyor mu", "是否开启DPO训练选项(实验性)": "DPO Eğitim Seçeneğini Açmak (Deneysel)", - "是否直接对上次合成结果调整语速和音色。防止随机性。": "Rastgeleliği önlemek için son sentez sonucunun konuşma hızını ve tonunu ayarlayın.", + "是否直接对上次合成结果调整语速和音色": "Rastgeleliği önlemek için son sentez sonucunun konuşma hızını ve tonunu ayarlayın.", "显卡信息": "Ekran kartı bilgisi", "未下载模型": "Model İndirilmedi", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.": "Bu yazılım MIT lisansı ile açık kaynak olarak sunulmuştur, yazar yazılım üzerinde herhangi bir kontrol sahibi değildir, yazılımı kullanan veya yazılımın çıktısını dağıtan kişiler tüm sorumluluğu üstlenir.", diff --git a/tools/i18n/locale/zh_CN.json b/tools/i18n/locale/zh_CN.json index 8ede7647..d56e0572 100644 --- a/tools/i18n/locale/zh_CN.json +++ b/tools/i18n/locale/zh_CN.json @@ -1,11 +1,11 @@ { "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;", "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:去除延迟效果。Aggressive 比 Normal 去除得更彻底,DeReverb 额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。", - "*实验/模型名": "*实验/模型名", - "*文本标注文件": "*文本标注文件", - "*训练集音频文件目录": "*训练集音频文件目录", - "*请上传并填写参考信息": "*请上传并填写参考信息", - "*请填写需要合成的目标文本和语种模式": "*请填写需要合成的目标文本和语种模式", + "实验/模型名": "实验/模型名", + "文本标注文件": "文本标注文件", + "训练集音频文件目录": "训练集音频文件目录", + "请上传并填写参考信息": "请上传并填写参考信息", + "请填写需要合成的目标文本和语种模式": "请填写需要合成的目标文本和语种模式", ".限制范围越小判别效果越好。": ".限制范围越小判别效果越好。", "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;", @@ -22,7 +22,7 @@ "GPT 训练: 模型权重文件在 GPT_weights/": "GPT 训练: 模型权重文件在 GPT_weights/", "GPT模型列表": "GPT模型列表", "GPT训练": "GPT训练", - "GPT采样参数(无参考文本时不要太低。不懂就用默认):": "GPT采样参数(无参考文本时不要太低。不懂就用默认):", + "GPT采样参数(不懂就用默认):": "GPT采样参数(不懂就用默认):", "GPU卡号,只能填1个整数": "GPU卡号,只能填1个整数", "GPU卡号以-分割,每个卡号一个进程": "GPU卡号以-分割,每个卡号一个进程", "LoRA秩": "LoRA秩", @@ -142,7 +142,7 @@ "是否仅保存最新的权重文件以节省硬盘空间": "是否仅保存最新的权重文件以节省硬盘空间", "是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存时间点将最终小模型保存至weights文件夹", "是否开启DPO训练选项(实验性)": "是否开启DPO训练选项(实验性)", - "是否直接对上次合成结果调整语速和音色。防止随机性。": "是否直接对上次合成结果调整语速和音色。防止随机性。", + "是否直接对上次合成结果调整语速和音色": "是否直接对上次合成结果调整语速和音色", "显卡信息": "显卡信息", "未下载模型": "未下载模型", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.": "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.", @@ -222,5 +222,6 @@ "预训练SoVITS-D模型路径": "预训练SoVITS-D模型路径", "预训练SoVITS-G模型路径": "预训练SoVITS-G模型路径", "预训练中文BERT模型路径": "预训练中文BERT模型路径", - "预训练模型路径": "预训练模型路径" + "预训练模型路径": "预训练模型路径", + "推理后端": "推理后端" } diff --git a/tools/i18n/locale/zh_HK.json b/tools/i18n/locale/zh_HK.json index eb9fc810..6acc9ca4 100644 --- a/tools/i18n/locale/zh_HK.json +++ b/tools/i18n/locale/zh_HK.json @@ -1,11 +1,11 @@ { "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb):對於雙通道混響是最佳選擇,但不能去除單通道混響;", "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho: 去除延遲效果。Aggressive 比 Normal 去除得更徹底,DeReverb 額外去除混響,可去除單聲道混響,但對高頻重的板式混響去不乾淨。", - "*实验/模型名": "*實驗/模型名", - "*文本标注文件": "*文本標注文件", - "*训练集音频文件目录": "*訓練集音頻文件目錄", - 
"*请上传并填写参考信息": "*請上傳並填寫參考信息", - "*请填写需要合成的目标文本和语种模式": "請填寫需要合成的目標文本和語言模式", + "实验/模型名": "實驗/模型名", + "文本标注文件": "文本標注文件", + "训练集音频文件目录": "訓練集音頻文件目錄", + "请上传并填写参考信息": "請上傳並填寫參考信息", + "请填写需要合成的目标文本和语种模式": "請填寫需要合成的目標文本和語言模式", ".限制范围越小判别效果越好。": ".限制范围越小判别效果越好。", "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1、DeEcho-DeReverb 模型的處理時間是另外兩個 DeEcho 模型的接近兩倍;", @@ -22,7 +22,7 @@ "GPT 训练: 模型权重文件在 GPT_weights/": "GPT 訓練: 模型權重檔案在 GPT_weights/", "GPT模型列表": "GPT模型列表", "GPT训练": "GPT訓練", - "GPT采样参数(无参考文本时不要太低。不懂就用默认):": "GPT 采样参数(无参考文本时不要太低。不懂就用默认):", + "GPT采样参数(不懂就用默认):": "GPT 采样参数(无参考文本时不要太低。不懂就用默认):", "GPU卡号,只能填1个整数": "GPU卡號,只能填1個整數", "GPU卡号以-分割,每个卡号一个进程": "GPU卡號以-分割,每個卡號一個進程", "LoRA秩": "LoRA秩", @@ -142,7 +142,7 @@ "是否仅保存最新的权重文件以节省硬盘空间": "是否僅保存最新的權重文件以節省硬碟空間", "是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存時間點將最終小模型保存至weights文件夾", "是否开启DPO训练选项(实验性)": "是否開啟DPO訓練選項(實驗性)", - "是否直接对上次合成结果调整语速和音色。防止随机性。": "是否直接對上次合成結果調整語速和音色,以防止隨機性。", + "是否直接对上次合成结果调整语速和音色": "是否直接對上次合成結果調整語速和音色,以防止隨機性。", "显卡信息": "顯卡信息", "未下载模型": "未下載模型", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.": "本軟體以MIT協議開源, 作者不對軟體具備任何控制力, 使用軟體者、傳播軟體導出的聲音者自負全責.", diff --git a/tools/i18n/locale/zh_SG.json b/tools/i18n/locale/zh_SG.json index d2ca6ae9..d948be42 100644 --- a/tools/i18n/locale/zh_SG.json +++ b/tools/i18n/locale/zh_SG.json @@ -1,11 +1,11 @@ { "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb):對於雙通道混響是最好的選擇,不能去除單通道混響;", "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:去除延遲效果。Aggressive 比 Normal 去除得更徹底,DeReverb 額外去除混響,可去除單聲道混響,但是對高頻重的板式混響去不乾淨。", - "*实验/模型名": "*實驗/模型名", - "*文本标注文件": "*文本標註文件", - "*训练集音频文件目录": "*訓練集音頻文件目錄", - "*请上传并填写参考信息": "*請上傳並填寫參考信息", - "*请填写需要合成的目标文本和语种模式": "請填寫需要合成的目標文本和語言模式", + "实验/模型名": "實驗/模型名", + "文本标注文件": "文本標註文件", + "训练集音频文件目录": "訓練集音頻文件目錄", + "请上传并填写参考信息": "請上傳並填寫參考信息", + "请填写需要合成的目标文本和语种模式": "請填寫需要合成的目標文本和語言模式", ".限制范围越小判别效果越好。": ".限制范围越小判别效果越好。", "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1、DeEcho-DeReverb 模型的耗時是另外兩個 DeEcho 模型的接近兩倍;", @@ -22,7 +22,7 @@ "GPT 训练: 模型权重文件在 GPT_weights/": "GPT 训练: 模型权重文件在 GPT_weights/ 目錄下", "GPT模型列表": "GPT模型列表", "GPT训练": "GPT訓練", - "GPT采样参数(无参考文本时不要太低。不懂就用默认):": "GPT 采样参数(无参考文本时不要太低。不懂就用默认):", + "GPT采样参数(不懂就用默认):": "GPT 采样参数(无参考文本时不要太低。不懂就用默认):", "GPU卡号,只能填1个整数": "GPU卡號,只能填1個整數", "GPU卡号以-分割,每个卡号一个进程": "GPU卡號以-分割,每個卡號一個進程", "LoRA秩": "LoRA秩", @@ -142,7 +142,7 @@ "是否仅保存最新的权重文件以节省硬盘空间": "是否僅保存最新的權重文件以節省硬碟空間", "是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存時間點將最終小模型保存至weights文件夾", "是否开启DPO训练选项(实验性)": "是否開啟DPO訓練選項(實驗性)", - "是否直接对上次合成结果调整语速和音色。防止随机性。": "是否直接對上次合成結果調整語速和音色,以防止隨機性。", + "是否直接对上次合成结果调整语速和音色": "是否直接對上次合成結果調整語速和音色,以防止隨機性。", "显卡信息": "顯卡資訊", "未下载模型": "未下載模型", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.": "本軟體以MIT協議開源, 作者不對軟體具備任何控制力, 使用軟體者、傳播軟體導出的聲音者自負全責.", diff --git a/tools/i18n/locale/zh_TW.json b/tools/i18n/locale/zh_TW.json index e78e6df3..c2006725 100644 --- a/tools/i18n/locale/zh_TW.json +++ b/tools/i18n/locale/zh_TW.json @@ -1,11 +1,11 @@ { "(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;": "(1)MDX-Net(onnx_dereverb):對於雙通道混響是最好的選擇,不能去除單通道混響;", "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。": "(234)DeEcho:去除延遲效果。Aggressive 比 Normal 去除得更徹底,DeReverb 額外去除混響,可去除單聲道混響,但是對高頻重的板式混響去不乾淨。", - "*实验/模型名": "*實驗/模型名", - "*文本标注文件": "*文本標注文件", - "*训练集音频文件目录": "*訓練集音頻文件目錄", - "*请上传并填写参考信息": "*請上傳並填寫參考資訊", - "*请填写需要合成的目标文本和语种模式": "請填寫需要合成的目標文本和語言模式", + "实验/模型名": "實驗/模型名", + "文本标注文件": "文本標注文件", 
+ "训练集音频文件目录": "訓練集音頻文件目錄", + "请上传并填写参考信息": "請上傳並填寫參考資訊", + "请填写需要合成的目标文本和语种模式": "請填寫需要合成的目標文本和語言模式", ".限制范围越小判别效果越好。": ".限制范围越小判别效果越好。", "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;": "1、DeEcho-DeReverb 模型的耗時是另外兩個 DeEcho 模型的接近兩倍;", @@ -22,7 +22,7 @@ "GPT 训练: 模型权重文件在 GPT_weights/": "GPT 訓練: 模型權重文件在 GPT_weights/", "GPT模型列表": "GPT模型列表", "GPT训练": "GPT訓練", - "GPT采样参数(无参考文本时不要太低。不懂就用默认):": "GPT 采样参数(无参考文本时不要太低。不懂就用默认):", + "GPT采样参数(不懂就用默认):": "GPT 采样参数(无参考文本时不要太低。不懂就用默认):", "GPU卡号,只能填1个整数": "GPU卡號,只能填1個整數", "GPU卡号以-分割,每个卡号一个进程": "GPU卡號以-分割,每個卡號一個進程", "LoRA秩": "LoRA階", @@ -142,7 +142,7 @@ "是否仅保存最新的权重文件以节省硬盘空间": "是否僅保存最新的權重文件以節省硬盤空間", "是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存時間點將最終小模型保存至weights文件夾", "是否开启DPO训练选项(实验性)": "是否開啟DPO訓練選項(實驗性)", - "是否直接对上次合成结果调整语速和音色。防止随机性。": "是否直接對上次合成結果調整語速和音色,以防止隨機性。", + "是否直接对上次合成结果调整语速和音色": "是否直接對上次合成結果調整語速和音色,以防止隨機性。", "显卡信息": "顯卡資訊", "未下载模型": "未下載模型", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.": "本軟體以MIT協議開源,作者不對軟體具備任何控制力,使用軟體者、傳播軟體導出的聲音者自負全責。", diff --git a/tools/my_utils.py b/tools/my_utils.py index 04f1a98a..12ab7dc0 100644 --- a/tools/my_utils.py +++ b/tools/my_utils.py @@ -1,12 +1,15 @@ import ctypes +import io import os import sys from pathlib import Path +from typing import IO, Union import ffmpeg import gradio as gr import numpy as np import pandas as pd +from torch.serialization import _opener from tools.i18n.i18n import I18nAuto @@ -37,18 +40,16 @@ def load_audio(file, sr): return np.frombuffer(out, np.float32).flatten() -def clean_path(path_str: str): +def clean_path(path_str: str) -> str: if path_str.endswith(("\\", "/")): return clean_path(path_str[0:-1]) path_str = path_str.replace("/", os.sep).replace("\\", os.sep) - return path_str.strip( - " '\n\"\u202a" - ) # path_str.strip(" ").strip('\'').strip("\n").strip('"').strip(" ").strip("\u202a") + return path_str.strip(" '\n\"\u202a") def check_for_existance(file_list: list = None, is_train=False, is_dataset_processing=False): files_status = [] - if is_train == True and file_list: + if is_train is True and file_list: file_list.append(os.path.join(file_list[0], "2-name2text.txt")) file_list.append(os.path.join(file_list[0], "3-bert")) file_list.append(os.path.join(file_list[0], "4-cnhubert")) @@ -229,3 +230,127 @@ def load_nvrtc(): print(f"[INFO] Loaded: {so_path}") except OSError as e: print(f"[WARNING] Failed to load {so_path}: {e}") + + +class DictToAttrRecursive(dict): + def __init__(self, input_dict): + super().__init__(input_dict) + for key, value in input_dict.items(): + if isinstance(value, dict): + value = DictToAttrRecursive(value) + self[key] = value + setattr(self, key, value) + + def __getattr__(self, item): + try: + return self[item] + except KeyError: + raise AttributeError(f"Attribute {item} not found") + + def __setattr__(self, key, value): + if isinstance(value, dict): + value = DictToAttrRecursive(value) + super(DictToAttrRecursive, self).__setitem__(key, value) + super().__setattr__(key, value) + + def __delattr__(self, item): + try: + del self[item] + except KeyError: + raise AttributeError(f"Attribute {item} not found") + + +class _HeadOverlay(io.IOBase, IO): + def __init__(self, base: IO[bytes], patch: bytes = b"PK", offset: int = 0): + super(io.IOBase, self).__init__() + if not base.readable(): + raise ValueError("Base stream must be readable") + + self._base = base + self._patch = patch + self._off = offset + + def readable(self) -> bool: + return True + + def writable(self) -> bool: + return False + + def seekable(self) -> bool: + 
try: + return self._base.seekable() + except Exception: + return False + + def tell(self) -> int: + return self._base.tell() + + def seek(self, pos: int, whence: int = os.SEEK_SET) -> int: + return self._base.seek(pos, whence) + + def read(self, size: int = -1) -> bytes: + start = self._base.tell() + data = self._base.read(size) + if not data: + return data + + end = start + len(data) + ps, pe = self._off, self._off + len(self._patch) + a, b = max(start, ps), min(end, pe) + if a < b: + buf = bytearray(data) + s_rel = a - start + e_rel = b - start + p_rel = a - ps + buf[s_rel:e_rel] = self._patch[p_rel : p_rel + (e_rel - s_rel)] + return bytes(buf) + return data + + def readinto(self, b) -> int: + start: int = self._base.tell() + nread = self._base.readinto(b) # type: ignore + + end = start + nread + ps, pe = self._off, self._off + len(self._patch) + a, c = max(start, ps), min(end, pe) + if a < c: + mv = memoryview(b) + s_rel = a - start + e_rel = c - start + p_rel = a - ps + mv[s_rel:e_rel] = self._patch[p_rel : p_rel + (e_rel - s_rel)] + + return nread + + def close(self) -> None: + try: + self._base.close() + finally: + super().close() + + def flush(self) -> None: + try: + self._base.flush() + except Exception: + pass + + def write(self, b) -> int: + raise io.UnsupportedOperation("not writable") + + @property + def raw(self): + return self._base + + def __getattr__(self, name): + return None + + +class _open_file(_opener[IO[bytes]]): + def __init__(self, name: Union[str, os.PathLike[str]], mode: str) -> None: + f = open(name, mode) + if "r" in mode: + f = _HeadOverlay(f, b"PK", 0) + super().__init__(f) + + def __exit__(self, *args): + self.file_like.close() diff --git a/tools/subfix_webui.py b/tools/subfix_webui.py index 51a7dfad..68bad58e 100644 --- a/tools/subfix_webui.py +++ b/tools/subfix_webui.py @@ -1,26 +1,20 @@ -import sys -from tools.i18n.i18n import I18nAuto, scan_language_list - -language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else "Auto" -i18n = I18nAuto(language=language) import argparse import copy import json import os +import sys import uuid -try: - import gradio.analytics as analytics - - analytics.version_check = lambda: None -except: - ... 
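The two helpers added to tools/my_utils.py above are easier to follow with a small standalone check. DictToAttrRecursive exposes nested dict keys as attributes while remaining a plain dict, and _HeadOverlay wraps a readable stream so that reads come back with a byte patch applied (b"PK" at offset 0 by default), i.e. consumers that sniff the ZIP magic number see a valid header even if the underlying file's first bytes differ; _open_file then plugs such a stream into torch.serialization's private _opener machinery, presumably so torch.load can still read checkpoints whose leading magic was altered. The sketch below assumes the patch is applied and is run from the repository root so both helpers are importable:

import io

from tools.my_utils import DictToAttrRecursive, _HeadOverlay

# Nested dicts become attribute-accessible and stay dict-like.
cfg = DictToAttrRecursive({"model": {"hidden_dim": 512}, "sr": 48000})
assert cfg.model.hidden_dim == 512 and cfg["sr"] == 48000

# Reads are returned with the first two bytes overlaid by b"PK",
# so ZIP-magic sniffing succeeds regardless of the real header.
raw = io.BytesIO(b"XX\x03\x04rest-of-archive")
patched = _HeadOverlay(raw, b"PK", 0)
assert patched.read(4) == b"PK\x03\x04"

patched.seek(0)
buf = bytearray(4)
patched.readinto(buf)  # readinto() applies the same overlay
assert bytes(buf) == b"PK\x03\x04"

Since _opener is an underscore-prefixed (private) torch.serialization class, this shim may need adjusting across torch versions.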
- import gradio as gr import librosa import numpy as np import soundfile +from tools.i18n.i18n import I18nAuto, scan_language_list + +language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else "Auto" +i18n = I18nAuto(language=language) + g_json_key_text = "" g_json_key_path = "" g_load_file = "" @@ -113,7 +107,7 @@ def b_delete_audio(*checkbox_list): change = False for i, checkbox in reversed(list(enumerate(checkbox_list))): if g_index + i < len(g_data_json): - if checkbox == True: + if checkbox is True: g_data_json.pop(g_index + i) change = True @@ -150,7 +144,7 @@ def b_audio_split(audio_breakpoint, *checkbox_list): global g_data_json, g_max_json_index checked_index = [] for i, checkbox in enumerate(checkbox_list): - if checkbox == True and g_index + i < len(g_data_json): + if checkbox is True and g_index + i < len(g_data_json): checked_index.append(g_index + i) if len(checked_index) == 1: index = checked_index[0] @@ -182,7 +176,7 @@ def b_merge_audio(interval_r, *checkbox_list): audios_path = [] audios_text = [] for i, checkbox in enumerate(checkbox_list): - if checkbox == True and g_index + i < len(g_data_json): + if checkbox is True and g_index + i < len(g_data_json): checked_index.append(g_index + i) if len(checked_index) > 1: @@ -314,7 +308,7 @@ if __name__ == "__main__": "Submit Text: 将当前页所有文本框内容手工保存到内存和文件(翻页前后或者退出标注页面前如果没点这个按钮,你再翻回来就回滚了,白忙活。)" ) ) - with gr.Row(): + with gr.Row(equal_height=True): btn_change_index = gr.Button("Change Index") btn_submit_change = gr.Button("Submit Text") btn_merge_audio = gr.Button("Merge Audio") @@ -322,7 +316,7 @@ if __name__ == "__main__": btn_previous_index = gr.Button("Previous Index") btn_next_index = gr.Button("Next Index") - with gr.Row(): + with gr.Row(equal_height=True): index_slider = gr.Slider(minimum=0, maximum=g_max_json_index, value=g_index, step=1, label="Index", scale=3) splitpoint_slider = gr.Slider( minimum=0, maximum=120.0, value=0, step=0.1, label="Audio Split Point(s)", scale=3 @@ -331,18 +325,23 @@ if __name__ == "__main__": btn_save_json = gr.Button("Save File", visible=True, scale=1) btn_invert_selection = gr.Button("Invert Selection", scale=1) - with gr.Row(): + with gr.Row(equal_height=True): with gr.Column(): for _ in range(0, g_batch): - with gr.Row(): + with gr.Row(equal_height=True): text = gr.Textbox(label="Text", visible=True, scale=5) - audio_output = gr.Audio(label="Output Audio", visible=True, scale=5) + audio_output = gr.Audio( + label="Output Audio", + visible=True, + scale=5, + waveform_options={"show_recording_waveform": False}, + ) audio_check = gr.Checkbox(label="Yes", show_label=True, info="Choose Audio", scale=1) g_text_list.append(text) g_audio_list.append(audio_output) g_checkbox_list.append(audio_check) - with gr.Row(): + with gr.Row(equal_height=True): batchsize_slider = gr.Slider( minimum=1, maximum=g_batch, value=g_batch, step=1, label="Batch Size", scale=3, interactive=False ) diff --git a/tools/uvr5/bs_roformer/__init__.py b/tools/uvr5/bs_roformer/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tools/uvr5/bsroformer.py b/tools/uvr5/bsroformer.py index ddcbfa74..5076ef0b 100644 --- a/tools/uvr5/bsroformer.py +++ b/tools/uvr5/bsroformer.py @@ -1,6 +1,5 @@ # This code is modified from https://github.com/ZFTurbo/ import os -import warnings import librosa import numpy as np @@ -10,8 +9,6 @@ import torch.nn as nn import yaml from tqdm import tqdm -warnings.filterwarnings("ignore") - class Roformer_Loader: def get_config(self, config_path): @@ -295,7 +292,7 @@ class 
Roformer_Loader: state_dict = torch.load(model_path, map_location="cpu") model.load_state_dict(state_dict) - if is_half == False: + if is_half is False: self.model = model.to(device) else: self.model = model.half().to(device) diff --git a/tools/uvr5/lib/lib_v5/dataset.py b/tools/uvr5/lib/lib_v5/dataset.py deleted file mode 100644 index 1a30eec7..00000000 --- a/tools/uvr5/lib/lib_v5/dataset.py +++ /dev/null @@ -1,167 +0,0 @@ -import os -import random - -import numpy as np -import torch -import torch.utils.data -from tqdm import tqdm - -from . import spec_utils - - -class VocalRemoverValidationSet(torch.utils.data.Dataset): - def __init__(self, patch_list): - self.patch_list = patch_list - - def __len__(self): - return len(self.patch_list) - - def __getitem__(self, idx): - path = self.patch_list[idx] - data = np.load(path) - - X, y = data["X"], data["y"] - - X_mag = np.abs(X) - y_mag = np.abs(y) - - return X_mag, y_mag - - -def make_pair(mix_dir, inst_dir): - input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"] - - X_list = sorted( - [os.path.join(mix_dir, fname) for fname in os.listdir(mix_dir) if os.path.splitext(fname)[1] in input_exts] - ) - y_list = sorted( - [os.path.join(inst_dir, fname) for fname in os.listdir(inst_dir) if os.path.splitext(fname)[1] in input_exts] - ) - - filelist = list(zip(X_list, y_list)) - - return filelist - - -def train_val_split(dataset_dir, split_mode, val_rate, val_filelist): - if split_mode == "random": - filelist = make_pair( - os.path.join(dataset_dir, "mixtures"), - os.path.join(dataset_dir, "instruments"), - ) - - random.shuffle(filelist) - - if len(val_filelist) == 0: - val_size = int(len(filelist) * val_rate) - train_filelist = filelist[:-val_size] - val_filelist = filelist[-val_size:] - else: - train_filelist = [pair for pair in filelist if list(pair) not in val_filelist] - elif split_mode == "subdirs": - if len(val_filelist) != 0: - raise ValueError("The `val_filelist` option is not available in `subdirs` mode") - - train_filelist = make_pair( - os.path.join(dataset_dir, "training/mixtures"), - os.path.join(dataset_dir, "training/instruments"), - ) - - val_filelist = make_pair( - os.path.join(dataset_dir, "validation/mixtures"), - os.path.join(dataset_dir, "validation/instruments"), - ) - - return train_filelist, val_filelist - - -def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha): - perm = np.random.permutation(len(X)) - for i, idx in enumerate(tqdm(perm)): - if np.random.uniform() < reduction_rate: - y[idx] = spec_utils.reduce_vocal_aggressively(X[idx], y[idx], reduction_mask) - - if np.random.uniform() < 0.5: - # swap channel - X[idx] = X[idx, ::-1] - y[idx] = y[idx, ::-1] - if np.random.uniform() < 0.02: - # mono - X[idx] = X[idx].mean(axis=0, keepdims=True) - y[idx] = y[idx].mean(axis=0, keepdims=True) - if np.random.uniform() < 0.02: - # inst - X[idx] = y[idx] - - if np.random.uniform() < mixup_rate and i < len(perm) - 1: - lam = np.random.beta(mixup_alpha, mixup_alpha) - X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]] - y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]] - - return X, y - - -def make_padding(width, cropsize, offset): - left = offset - roi_size = cropsize - left * 2 - if roi_size == 0: - roi_size = cropsize - right = roi_size - (width % roi_size) + left - - return left, right, roi_size - - -def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset): - len_dataset = patches * len(filelist) - - X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) 
- y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - starts = np.random.randint(0, X_pad.shape[2] - cropsize, patches) - ends = starts + cropsize - for j in range(patches): - idx = i * patches + j - X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]] - y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]] - - return X_dataset, y_dataset - - -def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset): - patch_list = [] - patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format(cropsize, sr, hop_length, n_fft, offset) - os.makedirs(patch_dir, exist_ok=True) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - basename = os.path.splitext(os.path.basename(X_path))[0] - - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - len_dataset = int(np.ceil(X.shape[2] / roi_size)) - for j in range(len_dataset): - outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j)) - start = j * roi_size - if not os.path.exists(outpath): - np.savez( - outpath, - X=X_pad[:, :, start : start + cropsize], - y=y_pad[:, :, start : start + cropsize], - ) - patch_list.append(outpath) - - return VocalRemoverValidationSet(patch_list) diff --git a/tools/uvr5/lib/lib_v5/layers.py b/tools/uvr5/lib/lib_v5/layers.py deleted file mode 100644 index 2b9101e0..00000000 --- a/tools/uvr5/lib/lib_v5/layers.py +++ /dev/null @@ -1,106 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[0], dilations[0], activ=activ) - self.conv4 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[1], dilations[1], activ=activ) - self.conv5 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) - self.bottleneck = nn.Sequential(Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate(self.conv1(x), size=(h, w), mode="bilinear", align_corners=True) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/tools/uvr5/lib/lib_v5/layers_123812KB.py b/tools/uvr5/lib/lib_v5/layers_123812KB.py deleted file mode 100644 index 2b9101e0..00000000 --- a/tools/uvr5/lib/lib_v5/layers_123812KB.py +++ /dev/null @@ -1,106 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[0], dilations[0], activ=activ) - self.conv4 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[1], dilations[1], activ=activ) - self.conv5 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) - self.bottleneck = nn.Sequential(Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate(self.conv1(x), size=(h, w), mode="bilinear", align_corners=True) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/tools/uvr5/lib/lib_v5/layers_33966KB.py b/tools/uvr5/lib/lib_v5/layers_33966KB.py deleted file mode 100644 index 43977778..00000000 --- a/tools/uvr5/lib/lib_v5/layers_33966KB.py +++ /dev/null @@ -1,110 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[0], dilations[0], activ=activ) - self.conv4 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[1], dilations[1], activ=activ) - self.conv5 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) - self.conv6 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) - self.conv7 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) - self.bottleneck = nn.Sequential(Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate(self.conv1(x), size=(h, w), mode="bilinear", align_corners=True) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - feat6 = self.conv6(x) - feat7 = self.conv7(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/tools/uvr5/lib/lib_v5/layers_537227KB.py b/tools/uvr5/lib/lib_v5/layers_537227KB.py deleted file mode 100644 index 43977778..00000000 --- a/tools/uvr5/lib/lib_v5/layers_537227KB.py +++ /dev/null @@ -1,110 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[0], dilations[0], activ=activ) - self.conv4 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[1], dilations[1], activ=activ) - self.conv5 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) - self.conv6 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) - self.conv7 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) - self.bottleneck = nn.Sequential(Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate(self.conv1(x), size=(h, w), mode="bilinear", align_corners=True) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - feat6 = self.conv6(x) - feat7 = self.conv7(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/tools/uvr5/lib/lib_v5/layers_537238KB.py b/tools/uvr5/lib/lib_v5/layers_537238KB.py deleted file mode 100644 index 43977778..00000000 --- a/tools/uvr5/lib/lib_v5/layers_537238KB.py +++ /dev/null @@ -1,110 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[0], dilations[0], activ=activ) - self.conv4 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[1], dilations[1], activ=activ) - self.conv5 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) - self.conv6 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) - self.conv7 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) - self.bottleneck = nn.Sequential(Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate(self.conv1(x), size=(h, w), mode="bilinear", align_corners=True) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - feat6 = self.conv6(x) - feat7 = self.conv7(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/tools/uvr5/lib/lib_v5/layers_new.py b/tools/uvr5/lib/lib_v5/layers_new.py deleted file mode 100644 index 7d7005c0..00000000 --- a/tools/uvr5/lib/lib_v5/layers_new.py +++ /dev/null @@ -1,111 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) - - def __call__(self, x): - h = self.conv1(x) - h = self.conv2(h) - - return h - - -class Decoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): - super(Decoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - # self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - - h = self.conv1(x) - # h = self.conv2(h) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ) - self.conv3 = Conv2DBNActiv(nin, nout, 3, 1, dilations[0], dilations[0], activ=activ) - self.conv4 = Conv2DBNActiv(nin, nout, 3, 1, dilations[1], dilations[1], activ=activ) - self.conv5 = Conv2DBNActiv(nin, nout, 3, 1, dilations[2], dilations[2], activ=activ) - self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate(self.conv1(x), size=(h, w), mode="bilinear", align_corners=True) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - out = self.bottleneck(out) - - if self.dropout is not None: - out = self.dropout(out) - - return out - - -class LSTMModule(nn.Module): - def __init__(self, nin_conv, nin_lstm, nout_lstm): - super(LSTMModule, self).__init__() - self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0) - self.lstm = nn.LSTM(input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True) - self.dense = nn.Sequential(nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU()) - - def forward(self, x): - N, _, nbins, nframes = x.size() - h = self.conv(x)[:, 0] # N, nbins, nframes - h = h.permute(2, 0, 1) # nframes, N, nbins - h, _ = self.lstm(h) - h = self.dense(h.reshape(-1, h.size()[-1])) # nframes * N, nbins - h = h.reshape(nframes, N, 1, nbins) - h = h.permute(1, 2, 3, 0) - - return h diff --git a/tools/uvr5/lib/lib_v5/modelparams/1band_sr16000_hl512.json b/tools/uvr5/lib/lib_v5/modelparams/1band_sr16000_hl512.json deleted file mode 100644 index 72cb4499..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/1band_sr16000_hl512.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - 
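Reviewer note: the LSTMModule in layers_new.py (deleted above) squeezes the channel axis with a 1x1 conv, runs a bidirectional LSTM over time frames with the frequency bins as features, and projects each frame back to nbins, producing one extra channel that the network concatenates with the decoder output. The permutes are easy to get wrong, so here is the shape flow as a condensed, runnable restatement (Conv2DBNActiv replaced by a plain Conv2d; sizes are hypothetical):

    import torch
    from torch import nn

    class TinyLSTMModule(nn.Module):
        def __init__(self, nin_conv, nin_lstm, nout_lstm):
            super().__init__()
            self.conv = nn.Conv2d(nin_conv, 1, 1)
            self.lstm = nn.LSTM(input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True)
            self.dense = nn.Sequential(nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU())

        def forward(self, x):
            N, _, nbins, nframes = x.shape
            h = self.conv(x)[:, 0]          # (N, nbins, nframes)
            h = h.permute(2, 0, 1)          # (nframes, N, nbins): LSTM expects time first
            h, _ = self.lstm(h)             # (nframes, N, nout_lstm) from the two directions
            h = self.dense(h.reshape(-1, h.size(-1)))
            return h.reshape(nframes, N, 1, nbins).permute(1, 2, 3, 0)  # (N, 1, nbins, nframes)

    y = TinyLSTMModule(16, 128, 64)(torch.randn(2, 16, 128, 50))
    print(y.shape)  # torch.Size([2, 1, 128, 50])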
"bins": 1024, - "unstable_bins": 0, - "reduction_bins": 0, - "band": { - "1": { - "sr": 16000, - "hl": 512, - "n_fft": 2048, - "crop_start": 0, - "crop_stop": 1024, - "hpf_start": -1, - "res_type": "sinc_best" - } - }, - "sr": 16000, - "pre_filter_start": 1023, - "pre_filter_stop": 1024 -} \ No newline at end of file diff --git a/tools/uvr5/lib/lib_v5/modelparams/1band_sr32000_hl512.json b/tools/uvr5/lib/lib_v5/modelparams/1band_sr32000_hl512.json deleted file mode 100644 index 3c00ecf0..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/1band_sr32000_hl512.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "bins": 1024, - "unstable_bins": 0, - "reduction_bins": 0, - "band": { - "1": { - "sr": 32000, - "hl": 512, - "n_fft": 2048, - "crop_start": 0, - "crop_stop": 1024, - "hpf_start": -1, - "res_type": "kaiser_fast" - } - }, - "sr": 32000, - "pre_filter_start": 1000, - "pre_filter_stop": 1021 -} \ No newline at end of file diff --git a/tools/uvr5/lib/lib_v5/modelparams/1band_sr33075_hl384.json b/tools/uvr5/lib/lib_v5/modelparams/1band_sr33075_hl384.json deleted file mode 100644 index 55666ac9..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/1band_sr33075_hl384.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "bins": 1024, - "unstable_bins": 0, - "reduction_bins": 0, - "band": { - "1": { - "sr": 33075, - "hl": 384, - "n_fft": 2048, - "crop_start": 0, - "crop_stop": 1024, - "hpf_start": -1, - "res_type": "sinc_best" - } - }, - "sr": 33075, - "pre_filter_start": 1000, - "pre_filter_stop": 1021 -} \ No newline at end of file diff --git a/tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl1024.json b/tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl1024.json deleted file mode 100644 index 665abe20..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl1024.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "bins": 1024, - "unstable_bins": 0, - "reduction_bins": 0, - "band": { - "1": { - "sr": 44100, - "hl": 1024, - "n_fft": 2048, - "crop_start": 0, - "crop_stop": 1024, - "hpf_start": -1, - "res_type": "sinc_best" - } - }, - "sr": 44100, - "pre_filter_start": 1023, - "pre_filter_stop": 1024 -} \ No newline at end of file diff --git a/tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl256.json b/tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl256.json deleted file mode 100644 index 0e8b16f8..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl256.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "bins": 256, - "unstable_bins": 0, - "reduction_bins": 0, - "band": { - "1": { - "sr": 44100, - "hl": 256, - "n_fft": 512, - "crop_start": 0, - "crop_stop": 256, - "hpf_start": -1, - "res_type": "sinc_best" - } - }, - "sr": 44100, - "pre_filter_start": 256, - "pre_filter_stop": 256 -} \ No newline at end of file diff --git a/tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl512.json b/tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl512.json deleted file mode 100644 index 3b38fcaf..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl512.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "bins": 1024, - "unstable_bins": 0, - "reduction_bins": 0, - "band": { - "1": { - "sr": 44100, - "hl": 512, - "n_fft": 2048, - "crop_start": 0, - "crop_stop": 1024, - "hpf_start": -1, - "res_type": "sinc_best" - } - }, - "sr": 44100, - "pre_filter_start": 1023, - "pre_filter_stop": 1024 -} \ No newline at end of file diff --git a/tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl512_cut.json b/tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl512_cut.json deleted file mode 100644 index 630df352..00000000 --- 
a/tools/uvr5/lib/lib_v5/modelparams/1band_sr44100_hl512_cut.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "bins": 1024, - "unstable_bins": 0, - "reduction_bins": 0, - "band": { - "1": { - "sr": 44100, - "hl": 512, - "n_fft": 2048, - "crop_start": 0, - "crop_stop": 700, - "hpf_start": -1, - "res_type": "sinc_best" - } - }, - "sr": 44100, - "pre_filter_start": 1023, - "pre_filter_stop": 700 -} \ No newline at end of file diff --git a/tools/uvr5/lib/lib_v5/modelparams/2band_32000.json b/tools/uvr5/lib/lib_v5/modelparams/2band_32000.json deleted file mode 100644 index ab9cf115..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/2band_32000.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "bins": 768, - "unstable_bins": 7, - "reduction_bins": 705, - "band": { - "1": { - "sr": 6000, - "hl": 66, - "n_fft": 512, - "crop_start": 0, - "crop_stop": 240, - "lpf_start": 60, - "lpf_stop": 118, - "res_type": "sinc_fastest" - }, - "2": { - "sr": 32000, - "hl": 352, - "n_fft": 1024, - "crop_start": 22, - "crop_stop": 505, - "hpf_start": 44, - "hpf_stop": 23, - "res_type": "sinc_medium" - } - }, - "sr": 32000, - "pre_filter_start": 710, - "pre_filter_stop": 731 -} diff --git a/tools/uvr5/lib/lib_v5/modelparams/2band_44100_lofi.json b/tools/uvr5/lib/lib_v5/modelparams/2band_44100_lofi.json deleted file mode 100644 index 7faa216d..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/2band_44100_lofi.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "bins": 512, - "unstable_bins": 7, - "reduction_bins": 510, - "band": { - "1": { - "sr": 11025, - "hl": 160, - "n_fft": 768, - "crop_start": 0, - "crop_stop": 192, - "lpf_start": 41, - "lpf_stop": 139, - "res_type": "sinc_fastest" - }, - "2": { - "sr": 44100, - "hl": 640, - "n_fft": 1024, - "crop_start": 10, - "crop_stop": 320, - "hpf_start": 47, - "hpf_stop": 15, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 510, - "pre_filter_stop": 512 -} diff --git a/tools/uvr5/lib/lib_v5/modelparams/2band_48000.json b/tools/uvr5/lib/lib_v5/modelparams/2band_48000.json deleted file mode 100644 index 7e781750..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/2band_48000.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "bins": 768, - "unstable_bins": 7, - "reduction_bins": 705, - "band": { - "1": { - "sr": 6000, - "hl": 66, - "n_fft": 512, - "crop_start": 0, - "crop_stop": 240, - "lpf_start": 60, - "lpf_stop": 240, - "res_type": "sinc_fastest" - }, - "2": { - "sr": 48000, - "hl": 528, - "n_fft": 1536, - "crop_start": 22, - "crop_stop": 505, - "hpf_start": 82, - "hpf_stop": 22, - "res_type": "sinc_medium" - } - }, - "sr": 48000, - "pre_filter_start": 710, - "pre_filter_stop": 731 -} \ No newline at end of file diff --git a/tools/uvr5/lib/lib_v5/modelparams/3band_44100.json b/tools/uvr5/lib/lib_v5/modelparams/3band_44100.json deleted file mode 100644 index d881d767..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/3band_44100.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "bins": 768, - "unstable_bins": 5, - "reduction_bins": 733, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 768, - "crop_start": 0, - "crop_stop": 278, - "lpf_start": 28, - "lpf_stop": 140, - "res_type": "polyphase" - }, - "2": { - "sr": 22050, - "hl": 256, - "n_fft": 768, - "crop_start": 14, - "crop_stop": 322, - "hpf_start": 70, - "hpf_stop": 14, - "lpf_start": 283, - "lpf_stop": 314, - "res_type": "polyphase" - }, - "3": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 131, - "crop_stop": 313, - "hpf_start": 154, - "hpf_stop": 141, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - 
"pre_filter_start": 757, - "pre_filter_stop": 768 -} diff --git a/tools/uvr5/lib/lib_v5/modelparams/3band_44100_mid.json b/tools/uvr5/lib/lib_v5/modelparams/3band_44100_mid.json deleted file mode 100644 index 77ec1985..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/3band_44100_mid.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "mid_side": true, - "bins": 768, - "unstable_bins": 5, - "reduction_bins": 733, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 768, - "crop_start": 0, - "crop_stop": 278, - "lpf_start": 28, - "lpf_stop": 140, - "res_type": "polyphase" - }, - "2": { - "sr": 22050, - "hl": 256, - "n_fft": 768, - "crop_start": 14, - "crop_stop": 322, - "hpf_start": 70, - "hpf_stop": 14, - "lpf_start": 283, - "lpf_stop": 314, - "res_type": "polyphase" - }, - "3": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 131, - "crop_stop": 313, - "hpf_start": 154, - "hpf_stop": 141, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 757, - "pre_filter_stop": 768 -} diff --git a/tools/uvr5/lib/lib_v5/modelparams/3band_44100_msb2.json b/tools/uvr5/lib/lib_v5/modelparams/3band_44100_msb2.json deleted file mode 100644 index 85ee8a7d..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/3band_44100_msb2.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "mid_side_b2": true, - "bins": 640, - "unstable_bins": 7, - "reduction_bins": 565, - "band": { - "1": { - "sr": 11025, - "hl": 108, - "n_fft": 1024, - "crop_start": 0, - "crop_stop": 187, - "lpf_start": 92, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "2": { - "sr": 22050, - "hl": 216, - "n_fft": 768, - "crop_start": 0, - "crop_stop": 212, - "hpf_start": 68, - "hpf_stop": 34, - "lpf_start": 174, - "lpf_stop": 209, - "res_type": "polyphase" - }, - "3": { - "sr": 44100, - "hl": 432, - "n_fft": 640, - "crop_start": 66, - "crop_stop": 307, - "hpf_start": 86, - "hpf_stop": 72, - "res_type": "kaiser_fast" - } - }, - "sr": 44100, - "pre_filter_start": 639, - "pre_filter_stop": 640 -} diff --git a/tools/uvr5/lib/lib_v5/modelparams/4band_44100.json b/tools/uvr5/lib/lib_v5/modelparams/4band_44100.json deleted file mode 100644 index df123754..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/4band_44100.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "bins": 768, - "unstable_bins": 7, - "reduction_bins": 668, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 1024, - "crop_start": 0, - "crop_stop": 186, - "lpf_start": 37, - "lpf_stop": 73, - "res_type": "polyphase" - }, - "2": { - "sr": 11025, - "hl": 128, - "n_fft": 512, - "crop_start": 4, - "crop_stop": 185, - "hpf_start": 36, - "hpf_stop": 18, - "lpf_start": 93, - "lpf_stop": 185, - "res_type": "polyphase" - }, - "3": { - "sr": 22050, - "hl": 256, - "n_fft": 512, - "crop_start": 46, - "crop_stop": 186, - "hpf_start": 93, - "hpf_stop": 46, - "lpf_start": 164, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 121, - "crop_stop": 382, - "hpf_start": 138, - "hpf_stop": 123, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 740, - "pre_filter_stop": 768 -} diff --git a/tools/uvr5/lib/lib_v5/modelparams/4band_44100_mid.json b/tools/uvr5/lib/lib_v5/modelparams/4band_44100_mid.json deleted file mode 100644 index e91b699e..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/4band_44100_mid.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "bins": 768, - "unstable_bins": 7, - "mid_side": true, - "reduction_bins": 668, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 1024, - "crop_start": 0, 
- "crop_stop": 186, - "lpf_start": 37, - "lpf_stop": 73, - "res_type": "polyphase" - }, - "2": { - "sr": 11025, - "hl": 128, - "n_fft": 512, - "crop_start": 4, - "crop_stop": 185, - "hpf_start": 36, - "hpf_stop": 18, - "lpf_start": 93, - "lpf_stop": 185, - "res_type": "polyphase" - }, - "3": { - "sr": 22050, - "hl": 256, - "n_fft": 512, - "crop_start": 46, - "crop_stop": 186, - "hpf_start": 93, - "hpf_stop": 46, - "lpf_start": 164, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 121, - "crop_stop": 382, - "hpf_start": 138, - "hpf_stop": 123, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 740, - "pre_filter_stop": 768 -} diff --git a/tools/uvr5/lib/lib_v5/modelparams/4band_44100_msb.json b/tools/uvr5/lib/lib_v5/modelparams/4band_44100_msb.json deleted file mode 100644 index f852f280..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/4band_44100_msb.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "mid_side_b": true, - "bins": 768, - "unstable_bins": 7, - "reduction_bins": 668, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 1024, - "crop_start": 0, - "crop_stop": 186, - "lpf_start": 37, - "lpf_stop": 73, - "res_type": "polyphase" - }, - "2": { - "sr": 11025, - "hl": 128, - "n_fft": 512, - "crop_start": 4, - "crop_stop": 185, - "hpf_start": 36, - "hpf_stop": 18, - "lpf_start": 93, - "lpf_stop": 185, - "res_type": "polyphase" - }, - "3": { - "sr": 22050, - "hl": 256, - "n_fft": 512, - "crop_start": 46, - "crop_stop": 186, - "hpf_start": 93, - "hpf_stop": 46, - "lpf_start": 164, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 121, - "crop_stop": 382, - "hpf_start": 138, - "hpf_stop": 123, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 740, - "pre_filter_stop": 768 -} \ No newline at end of file diff --git a/tools/uvr5/lib/lib_v5/modelparams/4band_44100_msb2.json b/tools/uvr5/lib/lib_v5/modelparams/4band_44100_msb2.json deleted file mode 100644 index f852f280..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/4band_44100_msb2.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "mid_side_b": true, - "bins": 768, - "unstable_bins": 7, - "reduction_bins": 668, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 1024, - "crop_start": 0, - "crop_stop": 186, - "lpf_start": 37, - "lpf_stop": 73, - "res_type": "polyphase" - }, - "2": { - "sr": 11025, - "hl": 128, - "n_fft": 512, - "crop_start": 4, - "crop_stop": 185, - "hpf_start": 36, - "hpf_stop": 18, - "lpf_start": 93, - "lpf_stop": 185, - "res_type": "polyphase" - }, - "3": { - "sr": 22050, - "hl": 256, - "n_fft": 512, - "crop_start": 46, - "crop_stop": 186, - "hpf_start": 93, - "hpf_stop": 46, - "lpf_start": 164, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 121, - "crop_stop": 382, - "hpf_start": 138, - "hpf_stop": 123, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 740, - "pre_filter_stop": 768 -} \ No newline at end of file diff --git a/tools/uvr5/lib/lib_v5/modelparams/4band_44100_reverse.json b/tools/uvr5/lib/lib_v5/modelparams/4band_44100_reverse.json deleted file mode 100644 index 7a07d554..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/4band_44100_reverse.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "reverse": true, - "bins": 768, - "unstable_bins": 7, - "reduction_bins": 668, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 1024, - 
"crop_start": 0, - "crop_stop": 186, - "lpf_start": 37, - "lpf_stop": 73, - "res_type": "polyphase" - }, - "2": { - "sr": 11025, - "hl": 128, - "n_fft": 512, - "crop_start": 4, - "crop_stop": 185, - "hpf_start": 36, - "hpf_stop": 18, - "lpf_start": 93, - "lpf_stop": 185, - "res_type": "polyphase" - }, - "3": { - "sr": 22050, - "hl": 256, - "n_fft": 512, - "crop_start": 46, - "crop_stop": 186, - "hpf_start": 93, - "hpf_stop": 46, - "lpf_start": 164, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 121, - "crop_stop": 382, - "hpf_start": 138, - "hpf_stop": 123, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 740, - "pre_filter_stop": 768 -} \ No newline at end of file diff --git a/tools/uvr5/lib/lib_v5/modelparams/4band_44100_sw.json b/tools/uvr5/lib/lib_v5/modelparams/4band_44100_sw.json deleted file mode 100644 index ba0cf342..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/4band_44100_sw.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "stereo_w": true, - "bins": 768, - "unstable_bins": 7, - "reduction_bins": 668, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 1024, - "crop_start": 0, - "crop_stop": 186, - "lpf_start": 37, - "lpf_stop": 73, - "res_type": "polyphase" - }, - "2": { - "sr": 11025, - "hl": 128, - "n_fft": 512, - "crop_start": 4, - "crop_stop": 185, - "hpf_start": 36, - "hpf_stop": 18, - "lpf_start": 93, - "lpf_stop": 185, - "res_type": "polyphase" - }, - "3": { - "sr": 22050, - "hl": 256, - "n_fft": 512, - "crop_start": 46, - "crop_stop": 186, - "hpf_start": 93, - "hpf_stop": 46, - "lpf_start": 164, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 121, - "crop_stop": 382, - "hpf_start": 138, - "hpf_stop": 123, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 740, - "pre_filter_stop": 768 -} \ No newline at end of file diff --git a/tools/uvr5/lib/lib_v5/modelparams/4band_v2_sn.json b/tools/uvr5/lib/lib_v5/modelparams/4band_v2_sn.json deleted file mode 100644 index 2e5c770f..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/4band_v2_sn.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "bins": 672, - "unstable_bins": 8, - "reduction_bins": 637, - "band": { - "1": { - "sr": 7350, - "hl": 80, - "n_fft": 640, - "crop_start": 0, - "crop_stop": 85, - "lpf_start": 25, - "lpf_stop": 53, - "res_type": "polyphase" - }, - "2": { - "sr": 7350, - "hl": 80, - "n_fft": 320, - "crop_start": 4, - "crop_stop": 87, - "hpf_start": 25, - "hpf_stop": 12, - "lpf_start": 31, - "lpf_stop": 62, - "res_type": "polyphase" - }, - "3": { - "sr": 14700, - "hl": 160, - "n_fft": 512, - "crop_start": 17, - "crop_stop": 216, - "hpf_start": 48, - "hpf_stop": 24, - "lpf_start": 139, - "lpf_stop": 210, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 480, - "n_fft": 960, - "crop_start": 78, - "crop_stop": 383, - "hpf_start": 130, - "hpf_stop": 86, - "convert_channels": "stereo_n", - "res_type": "kaiser_fast" - } - }, - "sr": 44100, - "pre_filter_start": 668, - "pre_filter_stop": 672 -} \ No newline at end of file diff --git a/tools/uvr5/lib/lib_v5/modelparams/ensemble.json b/tools/uvr5/lib/lib_v5/modelparams/ensemble.json deleted file mode 100644 index ee69beb4..00000000 --- a/tools/uvr5/lib/lib_v5/modelparams/ensemble.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "mid_side_b2": true, - "bins": 1280, - "unstable_bins": 7, - "reduction_bins": 565, - "band": { - "1": { - "sr": 11025, - "hl": 108, - "n_fft": 2048, - 
"crop_start": 0, - "crop_stop": 374, - "lpf_start": 92, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "2": { - "sr": 22050, - "hl": 216, - "n_fft": 1536, - "crop_start": 0, - "crop_stop": 424, - "hpf_start": 68, - "hpf_stop": 34, - "lpf_start": 348, - "lpf_stop": 418, - "res_type": "polyphase" - }, - "3": { - "sr": 44100, - "hl": 432, - "n_fft": 1280, - "crop_start": 132, - "crop_stop": 614, - "hpf_start": 172, - "hpf_stop": 144, - "res_type": "polyphase" - } - }, - "sr": 44100, - "pre_filter_start": 1280, - "pre_filter_stop": 1280 -} \ No newline at end of file diff --git a/tools/uvr5/lib/lib_v5/nets.py b/tools/uvr5/lib/lib_v5/nets.py deleted file mode 100644 index 42d7807a..00000000 --- a/tools/uvr5/lib/lib_v5/nets.py +++ /dev/null @@ -1,121 +0,0 @@ -import layers -import torch -import torch.nn.functional as F -from torch import nn - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 16) - self.stg1_high_band_net = BaseASPPNet(2, 16) - - self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(8, 16) - - self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(16, 32) - - self.out = nn.Conv2d(32, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(16, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(16, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, 
aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/tools/uvr5/lib/lib_v5/nets_123812KB.py b/tools/uvr5/lib/lib_v5/nets_123812KB.py deleted file mode 100644 index 167d4cb2..00000000 --- a/tools/uvr5/lib/lib_v5/nets_123812KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . import layers_123821KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 32) - self.stg1_high_band_net = BaseASPPNet(2, 32) - - self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(16, 32) - - self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(32, 64) - - self.out = nn.Conv2d(64, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): 
- h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/tools/uvr5/lib/lib_v5/nets_123821KB.py b/tools/uvr5/lib/lib_v5/nets_123821KB.py deleted file mode 100644 index 167d4cb2..00000000 --- a/tools/uvr5/lib/lib_v5/nets_123821KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . import layers_123821KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 32) - self.stg1_high_band_net = BaseASPPNet(2, 32) - - self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(16, 32) - - self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(32, 64) - - self.out = nn.Conv2d(64, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git 
a/tools/uvr5/lib/lib_v5/nets_33966KB.py b/tools/uvr5/lib/lib_v5/nets_33966KB.py deleted file mode 100644 index 73a5b836..00000000 --- a/tools/uvr5/lib/lib_v5/nets_33966KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . import layers_33966KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16, 32)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 16) - self.stg1_high_band_net = BaseASPPNet(2, 16) - - self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(8, 16) - - self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(16, 32) - - self.out = nn.Conv2d(32, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(16, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(16, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/tools/uvr5/lib/lib_v5/nets_537227KB.py b/tools/uvr5/lib/lib_v5/nets_537227KB.py deleted file mode 100644 index 9bb1df1e..00000000 --- a/tools/uvr5/lib/lib_v5/nets_537227KB.py +++ /dev/null @@ 
-1,122 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . import layers_537238KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 64) - self.stg1_high_band_net = BaseASPPNet(2, 64) - - self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(32, 64) - - self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(64, 128) - - self.out = nn.Conv2d(128, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/tools/uvr5/lib/lib_v5/nets_537238KB.py b/tools/uvr5/lib/lib_v5/nets_537238KB.py deleted file mode 100644 index 9bb1df1e..00000000 --- a/tools/uvr5/lib/lib_v5/nets_537238KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
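Reviewer note: the CascadedASPPNet variants in these deleted files differ only in channel widths; the control flow is identical, including the final step that pads the mask from the n_fft // 2 processed bins back to the n_fft // 2 + 1 bins of the full spectrogram using replicate padding. The pad tuple reads from the last dimension backwards, which is easy to misread, so a sketch of just that step:

    import torch
    import torch.nn.functional as F

    n_fft = 2048
    max_bin, output_bin = n_fft // 2, n_fft // 2 + 1     # 1024 processed bins, 1025 in the output

    mask = torch.rand(1, 2, max_bin, 100)                # (batch, channels, bins, frames)
    pad = (0, 0, 0, output_bin - mask.size(2))           # (w_left, w_right, h_top, h_bottom)
    mask = F.pad(mask, pad, mode="replicate")            # repeat the highest-frequency row once
    print(mask.shape)                                    # torch.Size([1, 2, 1025, 100])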
import layers_537238KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 64) - self.stg1_high_band_net = BaseASPPNet(2, 64) - - self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(32, 64) - - self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(64, 128) - - self.out = nn.Conv2d(128, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/tools/uvr5/lib/lib_v5/nets_new.py b/tools/uvr5/lib/lib_v5/nets_new.py deleted file mode 100644 index ba1a5599..00000000 --- a/tools/uvr5/lib/lib_v5/nets_new.py +++ /dev/null @@ -1,125 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import layers_new - - -class BaseNet(nn.Module): - def __init__(self, nin, nout, nin_lstm, nout_lstm, dilations=((4, 2), (8, 4), (12, 6))): - super(BaseNet, self).__init__() - self.enc1 = layers_new.Conv2DBNActiv(nin, nout, 3, 1, 1) - self.enc2 = layers_new.Encoder(nout, nout * 2, 3, 2, 1) - self.enc3 = layers_new.Encoder(nout * 2, nout * 4, 3, 2, 1) - self.enc4 = layers_new.Encoder(nout * 4, nout * 6, 3, 2, 1) - self.enc5 = layers_new.Encoder(nout * 6, nout * 8, 3, 2, 1) - - self.aspp = layers_new.ASPPModule(nout * 8, nout * 8, dilations, dropout=True) - - self.dec4 = layers_new.Decoder(nout * (6 + 8), nout * 6, 3, 1, 1) - self.dec3 = layers_new.Decoder(nout * (4 + 6), nout * 4, 3, 1, 1) - self.dec2 = layers_new.Decoder(nout * (2 + 4), nout * 2, 3, 1, 1) - self.lstm_dec2 = layers_new.LSTMModule(nout * 2, nin_lstm, nout_lstm) - self.dec1 = layers_new.Decoder(nout * (1 + 2) + 1, nout * 1, 3, 1, 1) - - def __call__(self, x): - e1 = self.enc1(x) - e2 = self.enc2(e1) - e3 = self.enc3(e2) - e4 = self.enc4(e3) - e5 = self.enc5(e4) - - h = self.aspp(e5) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = torch.cat([h, self.lstm_dec2(h)], dim=1) - h = self.dec1(h, e1) - - return h - - -class CascadedNet(nn.Module): - def __init__(self, n_fft, nout=32, nout_lstm=128): - super(CascadedNet, self).__init__() - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - self.nin_lstm = self.max_bin // 2 - self.offset = 64 - - self.stg1_low_band_net = nn.Sequential( - BaseNet(2, nout // 2, self.nin_lstm // 2, nout_lstm), - layers_new.Conv2DBNActiv(nout // 2, nout // 4, 1, 1, 0), - ) - - self.stg1_high_band_net = BaseNet(2, nout // 4, self.nin_lstm // 2, nout_lstm // 2) - - self.stg2_low_band_net = nn.Sequential( - BaseNet(nout // 4 + 2, nout, self.nin_lstm // 2, nout_lstm), - layers_new.Conv2DBNActiv(nout, nout // 2, 1, 1, 0), - ) - self.stg2_high_band_net = BaseNet(nout // 4 + 2, nout // 2, self.nin_lstm // 2, nout_lstm // 2) - - self.stg3_full_band_net = BaseNet(3 * nout // 4 + 2, nout, self.nin_lstm, nout_lstm) - - self.out = nn.Conv2d(nout, 2, 1, bias=False) - self.aux_out = nn.Conv2d(3 * nout // 4, 2, 1, bias=False) - - def forward(self, x): - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - l1_in = x[:, :, :bandw] - h1_in = x[:, :, bandw:] - l1 = self.stg1_low_band_net(l1_in) - h1 = self.stg1_high_band_net(h1_in) - aux1 = torch.cat([l1, h1], dim=2) - - l2_in = torch.cat([l1_in, l1], dim=1) - h2_in = torch.cat([h1_in, h1], dim=1) - l2 = self.stg2_low_band_net(l2_in) - h2 = self.stg2_high_band_net(h2_in) - aux2 = torch.cat([l2, h2], dim=2) - - f3_in = torch.cat([x, aux1, aux2], dim=1) - f3 = self.stg3_full_band_net(f3_in) - - mask = torch.sigmoid(self.out(f3)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux = torch.cat([aux1, aux2], dim=1) - aux = torch.sigmoid(self.aux_out(aux)) - aux = F.pad( - input=aux, - pad=(0, 0, 0, self.output_bin - aux.size()[2]), - mode="replicate", - ) - return mask, aux - else: - return mask - - def predict_mask(self, x): - mask = self.forward(x) - - if self.offset > 0: - mask = mask[:, :, :, self.offset : -self.offset] - assert mask.size()[3] > 0 - - return mask - - def predict(self, x, aggressiveness=None): - mask = self.forward(x) - pred_mag = x * mask - - if self.offset > 0: - pred_mag = pred_mag[:, :, :, self.offset : -self.offset] - assert pred_mag.size()[3] > 0 - - return pred_mag diff --git a/tools/uvr5/lib/name_params.json 
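Reviewer note: nets_new.CascadedNet.predict and predict_mask (deleted above), like the older predict methods, crop self.offset frames from both ends of the time axis before returning, discarding border frames whose receptive field extends past the chunk. A sketch of the trim and the constraint it places on callers:

    import torch

    offset = 64                                    # CascadedNet uses 64; the older CascadedASPPNet uses 128
    pred_mag = torch.rand(1, 2, 1025, 512)         # (batch, channels, bins, frames)
    trimmed = pred_mag[:, :, :, offset:-offset]    # drop border frames with truncated context
    assert trimmed.size(3) == 512 - 2 * offset     # callers must feed more than 2 * offset frames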
b/tools/uvr5/lib/name_params.json deleted file mode 100644 index 4e5ee7be..00000000 --- a/tools/uvr5/lib/name_params.json +++ /dev/null @@ -1,263 +0,0 @@ -{ - "equivalent" : [ - { - "model_hash_name" : [ - { - "hash_name": "47939caf0cfe52a0e81442b85b971dfd", - "model_params": "lib/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100" - }, - { - "hash_name": "4e4ecb9764c50a8c414fee6e10395bbe", - "model_params": "lib/lib_v5/modelparams/4band_v2.json", - "param_name": "4band_v2" - }, - { - "hash_name": "ca106edd563e034bde0bdec4bb7a4b36", - "model_params": "lib/lib_v5/modelparams/4band_v2.json", - "param_name": "4band_v2" - }, - { - "hash_name": "e60a1e84803ce4efc0a6551206cc4b71", - "model_params": "lib/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100" - }, - { - "hash_name": "a82f14e75892e55e994376edbf0c8435", - "model_params": "lib/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100" - }, - { - "hash_name": "6dd9eaa6f0420af9f1d403aaafa4cc06", - "model_params": "lib/lib_v5/modelparams/4band_v2_sn.json", - "param_name": "4band_v2_sn" - }, - { - "hash_name": "08611fb99bd59eaa79ad27c58d137727", - "model_params": "lib/lib_v5/modelparams/4band_v2_sn.json", - "param_name": "4band_v2_sn" - }, - { - "hash_name": "5c7bbca45a187e81abbbd351606164e5", - "model_params": "lib/lib_v5/modelparams/3band_44100_msb2.json", - "param_name": "3band_44100_msb2" - }, - { - "hash_name": "d6b2cb685a058a091e5e7098192d3233", - "model_params": "lib/lib_v5/modelparams/3band_44100_msb2.json", - "param_name": "3band_44100_msb2" - }, - { - "hash_name": "c1b9f38170a7c90e96f027992eb7c62b", - "model_params": "lib/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100" - }, - { - "hash_name": "c3448ec923fa0edf3d03a19e633faa53", - "model_params": "lib/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100" - }, - { - "hash_name": "68aa2c8093d0080704b200d140f59e54", - "model_params": "lib/lib_v5/modelparams/3band_44100.json", - "param_name": "3band_44100" - }, - { - "hash_name": "fdc83be5b798e4bd29fe00fe6600e147", - "model_params": "lib/lib_v5/modelparams/3band_44100_mid.json", - "param_name": "3band_44100_mid.json" - }, - { - "hash_name": "2ce34bc92fd57f55db16b7a4def3d745", - "model_params": "lib/lib_v5/modelparams/3band_44100_mid.json", - "param_name": "3band_44100_mid.json" - }, - { - "hash_name": "52fdca89576f06cf4340b74a4730ee5f", - "model_params": "lib/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100.json" - }, - { - "hash_name": "41191165b05d38fc77f072fa9e8e8a30", - "model_params": "lib/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100.json" - }, - { - "hash_name": "89e83b511ad474592689e562d5b1f80e", - "model_params": "lib/lib_v5/modelparams/2band_32000.json", - "param_name": "2band_32000.json" - }, - { - "hash_name": "0b954da81d453b716b114d6d7c95177f", - "model_params": "lib/lib_v5/modelparams/2band_32000.json", - "param_name": "2band_32000.json" - } - - ], - "v4 Models": [ - { - "hash_name": "6a00461c51c2920fd68937d4609ed6c8", - "model_params": "lib/lib_v5/modelparams/1band_sr16000_hl512.json", - "param_name": "1band_sr16000_hl512" - }, - { - "hash_name": "0ab504864d20f1bd378fe9c81ef37140", - "model_params": "lib/lib_v5/modelparams/1band_sr32000_hl512.json", - "param_name": "1band_sr32000_hl512" - }, - { - "hash_name": "7dd21065bf91c10f7fccb57d7d83b07f", - "model_params": "lib/lib_v5/modelparams/1band_sr32000_hl512.json", - "param_name": "1band_sr32000_hl512" - }, - { - "hash_name": "80ab74d65e515caa3622728d2de07d23", 
- "model_params": "lib/lib_v5/modelparams/1band_sr32000_hl512.json", - "param_name": "1band_sr32000_hl512" - }, - { - "hash_name": "edc115e7fc523245062200c00caa847f", - "model_params": "lib/lib_v5/modelparams/1band_sr33075_hl384.json", - "param_name": "1band_sr33075_hl384" - }, - { - "hash_name": "28063e9f6ab5b341c5f6d3c67f2045b7", - "model_params": "lib/lib_v5/modelparams/1band_sr33075_hl384.json", - "param_name": "1band_sr33075_hl384" - }, - { - "hash_name": "b58090534c52cbc3e9b5104bad666ef2", - "model_params": "lib/lib_v5/modelparams/1band_sr44100_hl512.json", - "param_name": "1band_sr44100_hl512" - }, - { - "hash_name": "0cdab9947f1b0928705f518f3c78ea8f", - "model_params": "lib/lib_v5/modelparams/1band_sr44100_hl512.json", - "param_name": "1band_sr44100_hl512" - }, - { - "hash_name": "ae702fed0238afb5346db8356fe25f13", - "model_params": "lib/lib_v5/modelparams/1band_sr44100_hl1024.json", - "param_name": "1band_sr44100_hl1024" - } - ] - } - ], - "User Models" : [ - { - "1 Band": [ - { - "hash_name": "1band_sr16000_hl512", - "model_params": "lib/lib_v5/modelparams/1band_sr16000_hl512.json", - "param_name": "1band_sr16000_hl512" - }, - { - "hash_name": "1band_sr32000_hl512", - "model_params": "lib/lib_v5/modelparams/1band_sr32000_hl512.json", - "param_name": "1band_sr16000_hl512" - }, - { - "hash_name": "1band_sr33075_hl384", - "model_params": "lib/lib_v5/modelparams/1band_sr33075_hl384.json", - "param_name": "1band_sr33075_hl384" - }, - { - "hash_name": "1band_sr44100_hl256", - "model_params": "lib/lib_v5/modelparams/1band_sr44100_hl256.json", - "param_name": "1band_sr44100_hl256" - }, - { - "hash_name": "1band_sr44100_hl512", - "model_params": "lib/lib_v5/modelparams/1band_sr44100_hl512.json", - "param_name": "1band_sr44100_hl512" - }, - { - "hash_name": "1band_sr44100_hl1024", - "model_params": "lib/lib_v5/modelparams/1band_sr44100_hl1024.json", - "param_name": "1band_sr44100_hl1024" - } - ], - "2 Band": [ - { - "hash_name": "2band_44100_lofi", - "model_params": "lib/lib_v5/modelparams/2band_44100_lofi.json", - "param_name": "2band_44100_lofi" - }, - { - "hash_name": "2band_32000", - "model_params": "lib/lib_v5/modelparams/2band_32000.json", - "param_name": "2band_32000" - }, - { - "hash_name": "2band_48000", - "model_params": "lib/lib_v5/modelparams/2band_48000.json", - "param_name": "2band_48000" - } - ], - "3 Band": [ - { - "hash_name": "3band_44100", - "model_params": "lib/lib_v5/modelparams/3band_44100.json", - "param_name": "3band_44100" - }, - { - "hash_name": "3band_44100_mid", - "model_params": "lib/lib_v5/modelparams/3band_44100_mid.json", - "param_name": "3band_44100_mid" - }, - { - "hash_name": "3band_44100_msb2", - "model_params": "lib/lib_v5/modelparams/3band_44100_msb2.json", - "param_name": "3band_44100_msb2" - } - ], - "4 Band": [ - { - "hash_name": "4band_44100", - "model_params": "lib/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100" - }, - { - "hash_name": "4band_44100_mid", - "model_params": "lib/lib_v5/modelparams/4band_44100_mid.json", - "param_name": "4band_44100_mid" - }, - { - "hash_name": "4band_44100_msb", - "model_params": "lib/lib_v5/modelparams/4band_44100_msb.json", - "param_name": "4band_44100_msb" - }, - { - "hash_name": "4band_44100_msb2", - "model_params": "lib/lib_v5/modelparams/4band_44100_msb2.json", - "param_name": "4band_44100_msb2" - }, - { - "hash_name": "4band_44100_reverse", - "model_params": "lib/lib_v5/modelparams/4band_44100_reverse.json", - "param_name": "4band_44100_reverse" - }, - { - "hash_name": "4band_44100_sw", - 
"model_params": "lib/lib_v5/modelparams/4band_44100_sw.json", - "param_name": "4band_44100_sw" - }, - { - "hash_name": "4band_v2", - "model_params": "lib/lib_v5/modelparams/4band_v2.json", - "param_name": "4band_v2" - }, - { - "hash_name": "4band_v2_sn", - "model_params": "lib/lib_v5/modelparams/4band_v2_sn.json", - "param_name": "4band_v2_sn" - }, - { - "hash_name": "tmodelparam", - "model_params": "lib/lib_v5/modelparams/tmodelparam.json", - "param_name": "User Model Param Set" - } - ] - } - ] -} \ No newline at end of file diff --git a/tools/uvr5/mdxnet.py b/tools/uvr5/mdxnet.py index 98c75c1f..1e7e785c 100644 --- a/tools/uvr5/mdxnet.py +++ b/tools/uvr5/mdxnet.py @@ -1,7 +1,5 @@ -import os import logging - -logger = logging.getLogger(__name__) +import os import librosa import numpy as np @@ -9,6 +7,8 @@ import soundfile as sf import torch from tqdm import tqdm +logger = logging.getLogger(__name__) + cpu = torch.device("cpu") diff --git a/tools/uvr5/vr.py b/tools/uvr5/vr.py index 45429cca..68c39112 100644 --- a/tools/uvr5/vr.py +++ b/tools/uvr5/vr.py @@ -1,19 +1,19 @@ -import os - -parent_directory = os.path.dirname(os.path.abspath(__file__)) import logging - -logger = logging.getLogger(__name__) +import os import librosa import numpy as np import soundfile as sf import torch -from lib.lib_v5 import nets_61968KB as Nets -from lib.lib_v5 import spec_utils -from lib.lib_v5.model_param_init import ModelParameters -from lib.lib_v5.nets_new import CascadedNet -from lib.utils import inference + +from tools.uvr5.lib.lib_v5 import nets_61968KB as Nets +from tools.uvr5.lib.lib_v5 import spec_utils +from tools.uvr5.lib.lib_v5.model_param_init import ModelParameters +from tools.uvr5.lib.lib_v5.nets_new import CascadedNet +from tools.uvr5.lib.utils import inference + +logger = logging.getLogger(__name__) +parent_directory = os.path.dirname(os.path.abspath(__file__)) class AudioPre: @@ -106,7 +106,7 @@ class AudioPre: y_spec_m = pred * X_phase v_spec_m = X_spec_m - y_spec_m - if is_hp3 == True: + if is_hp3 is True: ins_root, vocal_root = vocal_root, ins_root if ins_root is not None: @@ -118,7 +118,7 @@ class AudioPre: else: wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) logger.info("%s instruments done" % name) - if is_hp3 == True: + if is_hp3 is True: head = "vocal_" else: head = "instrument_" @@ -149,7 +149,7 @@ class AudioPre: except: pass if vocal_root is not None: - if is_hp3 == True: + if is_hp3 is True: head = "instrument_" else: head = "vocal_" diff --git a/tools/uvr5/webui.py b/tools/uvr5/webui.py index f5f8d3f6..ed47ce10 100644 --- a/tools/uvr5/webui.py +++ b/tools/uvr5/webui.py @@ -1,23 +1,19 @@ -import logging import os +import sys import traceback +import ffmpeg import gradio as gr +import torch from tools.i18n.i18n import I18nAuto from tools.my_utils import clean_path +from tools.uvr5.bsroformer import Roformer_Loader +from tools.uvr5.mdxnet import MDXNetDereverb +from tools.uvr5.vr import AudioPre, AudioPreDeEcho i18n = I18nAuto() -logger = logging.getLogger(__name__) -import sys - -import ffmpeg -import torch -from bsroformer import Roformer_Loader -from mdxnet import MDXNetDereverb -from vr import AudioPre, AudioPreDeEcho - weight_uvr5_root = "tools/uvr5/uvr5_weights" uvr5_names = [] for name in os.listdir(weight_uvr5_root): @@ -78,7 +74,7 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format paths = [path.name for path in paths] for path in paths: inp_path = os.path.join(inp_root, path) - if os.path.isfile(inp_path) == False: + 
if os.path.isfile(inp_path) is False: continue need_reformat = 1 done = 0 @@ -168,7 +164,7 @@ with gr.Blocks(title="UVR5 WebUI", analytics_enabled=False) as app: "h4", ) ) - with gr.Row(): + with gr.Row(equal_height=True): with gr.Column(): model_choose = gr.Dropdown(label=i18n("模型"), choices=uvr5_names) dir_wav_input = gr.Textbox( @@ -197,9 +193,9 @@ with gr.Blocks(title="UVR5 WebUI", analytics_enabled=False) as app: interactive=True, ) with gr.Column(): - with gr.Row(): + with gr.Row(equal_height=True): but2 = gr.Button(i18n("转换"), variant="primary") - with gr.Row(): + with gr.Row(equal_height=True): vc_output4 = gr.Textbox(label=i18n("输出信息"), lines=3) but2.click( uvr, diff --git a/webui.py b/webui.py index 9a6aae5f..f81ac802 100644 --- a/webui.py +++ b/webui.py @@ -1,23 +1,91 @@ -import os -import sys - -os.environ["version"] = version = "v2Pro" -now_dir = os.getcwd() -sys.path.insert(0, now_dir) -import warnings - -warnings.filterwarnings("ignore") +import argparse import json +import os import platform +import re import shutil -import signal +import traceback +from functools import partial +from multiprocessing import cpu_count +from subprocess import Popen +import gradio as gr import psutil import torch import yaml +from config import ( + GPU_INDEX, + GPU_INFOS, + IS_GPU, + GPT_weight_root, + GPT_weight_version2root, + SoVITS_weight_root, + SoVITS_weight_version2root, + change_choices, + exp_root, + get_weights_names, + infer_device, + is_half, + is_share, + memset, + pretrained_gpt_name, + pretrained_sovits_name, + python_exec, + webui_port_infer_tts, + webui_port_main, + webui_port_subfix, + webui_port_uvr5, +) +from GPT_SoVITS.Accelerate import backends, console, logger +from tools import my_utils +from tools.asr.config import asr_dict +from tools.assets import css, js, top_html +from tools.i18n.i18n import I18nAuto, scan_language_list +from tools.my_utils import check_details, check_for_existance + +os.environ["PYTHONPATH"] = now_dir = os.getcwd() +os.environ["version"] = version = "v2Pro" os.environ["TORCH_DISTRIBUTED_DEBUG"] = "INFO" -torch.manual_seed(233333) +os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" +os.environ["all_proxy"] = "" + + +backends_gradio = [(b.replace("-", " "), b) for b in backends] + +_LANG_RE = re.compile(r"^[a-z]{2}[_-][A-Z]{2}$") + + +def lang_type(text: str) -> str: + if text == "Auto": + return text + if not _LANG_RE.match(text): + raise argparse.ArgumentTypeError(f"Unspported Format: {text}, Expected ll_CC/ll-CC") + ll, cc = re.split(r"[_-]", text) + language = f"{ll}_{cc}" + if language in scan_language_list(): + return language + else: + return "en_US" + + +def build_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser( + prog="python -s webui.py", + description="python -s webui.py zh_CN", + ) + p.add_argument( + "language", + nargs="?", + default="Auto", + type=lang_type, + help="Language Code, Such as zh_CN, en-US", + ) + return p + + +args = build_parser().parse_args() + tmp = os.path.join(now_dir, "TEMP") os.makedirs(tmp, exist_ok=True) os.environ["TEMP"] = tmp @@ -25,72 +93,18 @@ if os.path.exists(tmp): for name in os.listdir(tmp): if name == "jieba.cache": continue - path = "%s/%s" % (tmp, name) + path = f"{tmp}/{name}" delete = os.remove if os.path.isfile(path) else shutil.rmtree try: delete(path) except Exception as e: - print(str(e)) + console.print(e) pass -import site -import traceback -site_packages_roots = [] -for path in site.getsitepackages(): - if "packages" in path: - site_packages_roots.append(path) -if 
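A short aside on the new CLI parsing introduced above: the positional language argument is validated by an argparse type callable. Below is a minimal standalone sketch of that ll_CC/ll-CC validator pattern, with scan_language_list() stubbed out for the example (the real one lives in tools.i18n.i18n); it is an illustration, not code taken from the patch.

import argparse
import re

_LANG_RE = re.compile(r"^[a-z]{2}[_-][A-Z]{2}$")

def scan_language_list():
    # stub for this example; the real list is discovered from the i18n locale files
    return ["zh_CN", "en_US", "ja_JP", "ko_KR"]

def lang_type(text: str) -> str:
    # accept "Auto", "ll_CC" or "ll-CC"; fall back to en_US for unknown locales
    if text == "Auto":
        return text
    if not _LANG_RE.match(text):
        raise argparse.ArgumentTypeError(f"Unsupported format: {text}, expected ll_CC/ll-CC")
    ll, cc = re.split(r"[_-]", text)
    language = f"{ll}_{cc}"
    return language if language in scan_language_list() else "en_US"

parser = argparse.ArgumentParser(prog="python -s webui.py")
parser.add_argument("language", nargs="?", default="Auto", type=lang_type)
print(parser.parse_args(["zh-CN"]).language)  # prints zh_CN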
site_packages_roots == []: - site_packages_roots = ["%s/runtime/Lib/site-packages" % now_dir] -# os.environ["OPENBLAS_NUM_THREADS"] = "4" -os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" -os.environ["all_proxy"] = "" -for site_packages_root in site_packages_roots: - if os.path.exists(site_packages_root): - try: - with open("%s/users.pth" % (site_packages_root), "w") as f: - f.write( - # "%s\n%s/runtime\n%s/tools\n%s/tools/asr\n%s/GPT_SoVITS\n%s/tools/uvr5" - "%s\n%s/GPT_SoVITS/BigVGAN\n%s/tools\n%s/tools/asr\n%s/GPT_SoVITS\n%s/tools/uvr5" - % (now_dir, now_dir, now_dir, now_dir, now_dir, now_dir) - ) - break - except PermissionError: - traceback.print_exc() -import shutil -import subprocess -from subprocess import Popen -from tools.assets import css, js, top_html -from tools.i18n.i18n import I18nAuto, scan_language_list - -language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else "Auto" -os.environ["language"] = language +language = str(args.language) i18n = I18nAuto(language=language) -from multiprocessing import cpu_count - -from config import ( - GPU_INDEX, - GPU_INFOS, - IS_GPU, - exp_root, - infer_device, - is_half, - is_share, - memset, - python_exec, - webui_port_infer_tts, - webui_port_main, - webui_port_subfix, - webui_port_uvr5, -) -from tools import my_utils -from tools.my_utils import check_details, check_for_existance - -os.environ["HF_ENDPOINT"] = "https://hf-mirror.com" -os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" - -# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 当遇到mps不支持的步骤时使用cpu -import gradio as gr +change_choice = partial(change_choices, i18n=i18n) n_cpu = cpu_count() @@ -101,6 +115,8 @@ is_gpu_ok = IS_GPU v3v4set = {"v3", "v4"} +sv_path = "GPT_SoVITS/pretrained_models/sv/pretrained_eres2netv2w24s4ep4.ckpt" + def set_default(): global \ @@ -139,32 +155,9 @@ def set_default(): set_default() -gpus = "-".join(map(str, GPU_INDEX)) default_gpu_numbers = infer_device.index -def fix_gpu_number(input): # 将越界的number强制改到界内 - try: - if int(input) not in set_gpu_numbers: - return default_gpu_numbers - except: - return input - return input - - -def fix_gpu_numbers(inputs): - output = [] - try: - for input in inputs.split(","): - output.append(str(fix_gpu_number(input))) - return ",".join(output) - except: - return inputs - - -from config import pretrained_gpt_name, pretrained_sovits_name - - def check_pretrained_is_exist(version): pretrained_model_list = ( pretrained_sovits_name[version], @@ -175,71 +168,52 @@ def check_pretrained_is_exist(version): ) _ = "" for i in pretrained_model_list: - if "s2Dv3" not in i and "s2Dv4" not in i and os.path.exists(i) == False: + if "s2Dv3" not in i and "s2Dv4" not in i and os.path.exists(i) is False: _ += f"\n {i}" if _: - print("warning: ", i18n("以下模型不存在:") + _) + logger.warning(i18n("以下模型不存在:") + _) check_pretrained_is_exist(version) for key in pretrained_sovits_name.keys(): - if os.path.exists(pretrained_sovits_name[key]) == False: + if os.path.exists(pretrained_sovits_name[key]) is False: pretrained_sovits_name[key] = "" for key in pretrained_gpt_name.keys(): - if os.path.exists(pretrained_gpt_name[key]) == False: + if os.path.exists(pretrained_gpt_name[key]) is False: pretrained_gpt_name[key] = "" -from config import ( - GPT_weight_root, - GPT_weight_version2root, - SoVITS_weight_root, - SoVITS_weight_version2root, - change_choices, - get_weights_names, -) for root in SoVITS_weight_root + GPT_weight_root: os.makedirs(root, exist_ok=True) -SoVITS_names, GPT_names = get_weights_names() +SoVITS_names, GPT_names = 
get_weights_names(i18n) -p_label = None -p_uvr5 = None -p_asr = None -p_denoise = None -p_tts_inference = None +p_label: Popen | None = None +p_uvr5: Popen | None = None +p_asr: Popen | None = None +p_denoise: Popen | None = None +p_tts_inference: Popen | None = None -def kill_proc_tree(pid, including_parent=True): +def kill_process(pid: int, process_name=""): try: - parent = psutil.Process(pid) + p = psutil.Process(pid) except psutil.NoSuchProcess: - # Process already terminated return - children = parent.children(recursive=True) - for child in children: + for c in p.children(recursive=False): try: - os.kill(child.pid, signal.SIGTERM) # or signal.SIGKILL - except OSError: - pass - if including_parent: - try: - os.kill(parent.pid, signal.SIGTERM) # or signal.SIGKILL - except OSError: + c.kill() + c.wait(timeout=5) + except (psutil.NoSuchProcess, psutil.TimeoutExpired): pass + try: + p.kill() + p.wait(timeout=5) + except (psutil.NoSuchProcess, psutil.TimeoutExpired): + pass -system = platform.system() - - -def kill_process(pid, process_name=""): - if system == "Windows": - cmd = "taskkill /t /f /pid %s" % pid - # os.system(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) - else: - kill_proc_tree(pid) - print(process_name + i18n("进程已终止")) + console.print(process_name + i18n("进程已终止")) def process_info(process_name="", indicator=""): @@ -281,18 +255,18 @@ def change_label(path_list): ) yield ( process_info(process_name_subfix, "opened"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, + gr.update(visible=False), + gr.update(visible=True), ) - print(cmd) + console.print(cmd) p_label = Popen(cmd, shell=True) else: kill_process(p_label.pid, process_name_subfix) p_label = None yield ( process_info(process_name_subfix, "closed"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=True), + gr.update(visible=False), ) @@ -311,61 +285,80 @@ def change_uvr5(): ) yield ( process_info(process_name_uvr5, "opened"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, + gr.update(visible=False), + gr.update(visible=True), ) - print(cmd) + console.print(cmd) p_uvr5 = Popen(cmd, shell=True) else: kill_process(p_uvr5.pid, process_name_uvr5) p_uvr5 = None yield ( process_info(process_name_uvr5, "closed"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=True), + gr.update(visible=False), ) process_name_tts = i18n("TTS推理WebUI") -def change_tts_inference(bert_path, cnhubert_base_path, gpu_number, gpt_path, sovits_path, batched_infer_enabled): +def change_tts_inference( + gpu_number: int, + gpt_path: str, + sovits_path: str, + batched_infer_enabled: bool, + backends_dropdown: str, +): + console.print(gpt_path, sovits_path) global p_tts_inference + env = os.environ.copy() + cmd: list[str] = [python_exec, "-s"] if batched_infer_enabled: - cmd = '"%s" -s GPT_SoVITS/inference_webui_fast.py "%s"' % (python_exec, language) + # fmt: off + cmd.extend( + [ + "GPT_SoVITS/inference_webui_fast.py", language, + "-d", f"{infer_device.type}:{gpu_number}", + "-p", str(webui_port_infer_tts), + "--gpt", gpt_path, + "--sovits", sovits_path, + ] + ) # fmt: on else: - cmd = '"%s" -s GPT_SoVITS/inference_webui.py "%s"' % (python_exec, language) - # #####v3暂不支持加速推理 - # if version=="v3": - # cmd = '"%s" GPT_SoVITS/inference_webui.py "%s"'%(python_exec, language) + # fmt: off + cmd.extend( + [ + 
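For reference, a minimal standalone sketch of the psutil-based termination that the reworked kill_process above relies on, exercised against a throwaway sleeping child process; this is an illustration of the pattern, not code from the patch.

import subprocess
import sys
import psutil

def kill_tree(pid: int) -> None:
    # terminate the direct children first, then the process itself
    try:
        parent = psutil.Process(pid)
    except psutil.NoSuchProcess:
        return
    for child in parent.children(recursive=False):
        try:
            child.kill()
            child.wait(timeout=5)
        except (psutil.NoSuchProcess, psutil.TimeoutExpired):
            pass
    try:
        parent.kill()
        parent.wait(timeout=5)
    except (psutil.NoSuchProcess, psutil.TimeoutExpired):
        pass

if __name__ == "__main__":
    child = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(60)"])
    kill_tree(child.pid)
    print("terminated", child.pid)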
"GPT_SoVITS/inference_webui.py", language, + "-b", backends_dropdown, + "-d", f"{infer_device.type}:{gpu_number}", + "-p", str(webui_port_infer_tts), + "--gpt", gpt_path, + "--sovits", sovits_path, + ] + ) # fmt: on + + if is_share: + cmd.append("-s") + if p_tts_inference is None: - os.environ["gpt_path"] = gpt_path - os.environ["sovits_path"] = sovits_path - os.environ["cnhubert_base_path"] = cnhubert_base_path - os.environ["bert_path"] = bert_path - os.environ["_CUDA_VISIBLE_DEVICES"] = fix_gpu_number(gpu_number) - os.environ["is_half"] = str(is_half) - os.environ["infer_ttswebui"] = str(webui_port_infer_tts) - os.environ["is_share"] = str(is_share) yield ( process_info(process_name_tts, "opened"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, + gr.update(visible=False), + gr.update(visible=True), ) - print(cmd) - p_tts_inference = Popen(cmd, shell=True) + console.print(" ".join(cmd)) + p_tts_inference = Popen(cmd, env=env) else: kill_process(p_tts_inference.pid, process_name_tts) p_tts_inference = None yield ( process_info(process_name_tts, "closed"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=True), + gr.update(visible=False), ) -from tools.asr.config import asr_dict - process_name_asr = i18n("语音识别") @@ -386,32 +379,32 @@ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang, asr_ output_file_path = os.path.abspath(f"{output_folder}/{output_file_name}.list") yield ( process_info(process_name_asr, "opened"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - {"__type__": "update"}, - {"__type__": "update"}, - {"__type__": "update"}, + gr.update(visible=False), + gr.update(visible=True), + gr.skip(), + gr.skip(), + gr.skip(), ) - print(cmd) + console.print(cmd) p_asr = Popen(cmd, shell=True) p_asr.wait() p_asr = None yield ( process_info(process_name_asr, "finish"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, - {"__type__": "update", "value": output_file_path}, - {"__type__": "update", "value": output_file_path}, - {"__type__": "update", "value": asr_inp_dir}, + gr.update(visible=True), + gr.update(visible=False), + gr.update(value=output_file_path), + gr.update(value=output_file_path), + gr.update(value=asr_inp_dir), ) else: yield ( process_info(process_name_asr, "occupy"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - {"__type__": "update"}, - {"__type__": "update"}, - {"__type__": "update"}, + gr.update(visible=False), + gr.update(visible=True), + gr.skip(), + gr.skip(), + gr.skip(), ) @@ -422,68 +415,12 @@ def close_asr(): p_asr = None return ( process_info(process_name_asr, "closed"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=True), + gr.update(visible=False), ) -process_name_denoise = i18n("语音降噪") - - -def open_denoise(denoise_inp_dir, denoise_opt_dir): - global p_denoise - if p_denoise == None: - denoise_inp_dir = my_utils.clean_path(denoise_inp_dir) - denoise_opt_dir = my_utils.clean_path(denoise_opt_dir) - check_for_existance([denoise_inp_dir]) - cmd = '"%s" -s tools/cmd-denoise.py -i "%s" -o "%s" -p %s' % ( - python_exec, - denoise_inp_dir, - denoise_opt_dir, - "float16" if is_half == True else "float32", - ) - - yield ( - process_info(process_name_denoise, "opened"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - {"__type__": 
"update"}, - {"__type__": "update"}, - ) - print(cmd) - p_denoise = Popen(cmd, shell=True) - p_denoise.wait() - p_denoise = None - yield ( - process_info(process_name_denoise, "finish"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, - {"__type__": "update", "value": denoise_opt_dir}, - {"__type__": "update", "value": denoise_opt_dir}, - ) - else: - yield ( - process_info(process_name_denoise, "occupy"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - {"__type__": "update"}, - {"__type__": "update"}, - ) - - -def close_denoise(): - global p_denoise - if p_denoise is not None: - kill_process(p_denoise.pid, process_name_denoise) - p_denoise = None - return ( - process_info(process_name_denoise, "closed"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, - ) - - -p_train_SoVITS = None +p_train_SoVITS: Popen | None = None process_name_sovits = i18n("SoVITS训练") @@ -503,7 +440,7 @@ def open1Ba( lora_rank, ): global p_train_SoVITS - if p_train_SoVITS == None: + if p_train_SoVITS is None: exp_name = exp_name.rstrip(" ") config_file = ( "GPT_SoVITS/configs/s2.json" @@ -511,13 +448,13 @@ def open1Ba( else f"GPT_SoVITS/configs/s2{version}.json" ) with open(config_file) as f: - data = f.read() - data = json.loads(data) - s2_dir = "%s/%s" % (exp_root, exp_name) - os.makedirs("%s/logs_s2_%s" % (s2_dir, version), exist_ok=True) + config = f.read() + data: dict = json.loads(config) + s2_dir = f"{exp_root}/{exp_name}" + os.makedirs(f"{s2_dir}/logs_s2_{version}", exist_ok=True) if check_for_existance([s2_dir], is_train=True): check_details([s2_dir], is_train=True) - if is_half == False: + if is_half is False: data["train"]["fp16_run"] = False batch_size = max(1, batch_size // 2) data["train"]["batch_size"] = batch_size @@ -528,7 +465,6 @@ def open1Ba( data["train"]["if_save_latest"] = if_save_latest data["train"]["if_save_every_weights"] = if_save_every_weights data["train"]["save_every_epoch"] = save_every_epoch - data["train"]["gpu_numbers"] = gpu_numbers1Ba data["train"]["grad_ckpt"] = if_grad_ckpt data["train"]["lora_rank"] = lora_rank data["model"]["version"] = version @@ -536,55 +472,95 @@ def open1Ba( data["save_weight_dir"] = SoVITS_weight_version2root[version] data["name"] = exp_name data["version"] = version - tmp_config_path = "%s/tmp_s2.json" % tmp + tmp_config_path = f"{tmp}/tmp_s2.json" with open(tmp_config_path, "w") as f: f.write(json.dumps(data)) + + env = os.environ.copy() + env["CUDA_VISIBLE_DEVICES"] = str(gpu_numbers1Ba).strip("[]").replace(" ", "") + if version in ["v1", "v2", "v2Pro", "v2ProPlus"]: - cmd = '"%s" -s GPT_SoVITS/s2_train.py --config "%s"' % (python_exec, tmp_config_path) + cmd = [ + python_exec, + "-s", + "GPT_SoVITS/s2_train.py", + "--config", + tmp_config_path, + ] else: - cmd = '"%s" -s GPT_SoVITS/s2_train_v3_lora.py --config "%s"' % (python_exec, tmp_config_path) + cmd = [ + python_exec, + "-s", + "GPT_SoVITS/s2_train_v3_lora.py", + "--config", + tmp_config_path, + ] + console.print(" ".join(cmd)) + + p = Popen(cmd, env=env) + p_train_SoVITS = p + yield ( process_info(process_name_sovits, "opened"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - {"__type__": "update"}, - {"__type__": "update"}, + gr.update(visible=False), + gr.update(visible=True), + gr.skip(), + gr.skip(), ) - print(cmd) - p_train_SoVITS = Popen(cmd, shell=True) - p_train_SoVITS.wait() + + code = p.wait() p_train_SoVITS = None - 
SoVITS_dropdown_update, GPT_dropdown_update = change_choices() + + if code == 0: + yield ( + process_info(process_name_sovits, "finish"), + gr.update(visible=True), + gr.update(visible=False), + gr.skip(), + gr.skip(), + ) + else: + yield ( + process_info(process_name_sovits, "failed"), + gr.update(visible=True), + gr.update(visible=False), + gr.skip(), + gr.skip(), + ) + return (gr.skip() for i in range(5)) + + SoVITS_dropdown_update, GPT_dropdown_update = change_choice() + yield ( - process_info(process_name_sovits, "finish"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.skip(), + gr.skip(), + gr.skip(), SoVITS_dropdown_update, GPT_dropdown_update, ) else: yield ( process_info(process_name_sovits, "occupy"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - {"__type__": "update"}, - {"__type__": "update"}, + gr.update(visible=False), + gr.update(visible=True), + gr.skip(), + gr.skip(), ) def close1Ba(): global p_train_SoVITS - if p_train_SoVITS is not None: + if p_train_SoVITS: kill_process(p_train_SoVITS.pid, process_name_sovits) p_train_SoVITS = None return ( process_info(process_name_sovits, "closed"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=True), + gr.update(visible=False), ) -p_train_GPT = None +p_train_GPT: Popen | None = None process_name_gpt = i18n("GPT训练") @@ -600,18 +576,19 @@ def open1Bb( pretrained_s1, ): global p_train_GPT - if p_train_GPT == None: + if p_train_GPT is None: exp_name = exp_name.rstrip(" ") with open( "GPT_SoVITS/configs/s1longer.yaml" if version == "v1" else "GPT_SoVITS/configs/s1longer-v2.yaml" ) as f: - data = f.read() - data = yaml.load(data, Loader=yaml.FullLoader) - s1_dir = "%s/%s" % (exp_root, exp_name) - os.makedirs("%s/logs_s1" % (s1_dir), exist_ok=True) + config = f.read() + data: dict = yaml.load(config, Loader=yaml.FullLoader) + s1_dir = f"{exp_root}/{exp_name}" + os.makedirs(f"{s1_dir}/logs_s1", exist_ok=True) if check_for_existance([s1_dir], is_train=True): check_details([s1_dir], is_train=True) - if is_half == False: + + if is_half is False or torch.mps.is_available(): data["train"]["precision"] = "32" batch_size = max(1, batch_size // 2) data["train"]["batch_size"] = batch_size @@ -623,44 +600,69 @@ def open1Bb( data["train"]["if_dpo"] = if_dpo data["train"]["half_weights_save_dir"] = GPT_weight_version2root[version] data["train"]["exp_name"] = exp_name - data["train_semantic_path"] = "%s/6-name2semantic.tsv" % s1_dir - data["train_phoneme_path"] = "%s/2-name2text.txt" % s1_dir - data["output_dir"] = "%s/logs_s1_%s" % (s1_dir, version) - # data["version"]=version + data["train_semantic_path"] = f"{s1_dir}/6-name2semantic.tsv" + data["train_phoneme_path"] = f"{s1_dir}/2-name2text.txt" + data["output_dir"] = f"{s1_dir}/logs_s1_{version}" - os.environ["_CUDA_VISIBLE_DEVICES"] = fix_gpu_numbers(gpu_numbers.replace("-", ",")) - os.environ["hz"] = "25hz" - tmp_config_path = "%s/tmp_s1.yaml" % tmp + env = os.environ.copy() + env["CUDA_VISIBLE_DEVICES"] = str(gpu_numbers).strip("[]").replace(" ", "") + + tmp_config_path = f"{tmp}/tmp_s1.yaml" with open(tmp_config_path, "w") as f: f.write(yaml.dump(data, default_flow_style=False)) - # cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" --train_semantic_path "%s/6-name2semantic.tsv" --train_phoneme_path "%s/2-name2text.txt" --output_dir "%s/logs_s1"'%(python_exec,tmp_config_path,s1_dir,s1_dir,s1_dir) - cmd = '"%s" -s GPT_SoVITS/s1_train.py 
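A compact illustration of the handler style used throughout this rewrite: a generator callback that streams status text and flips the open/close buttons with gr.update(), branching on the subprocess exit code (the patch additionally uses gr.skip() for outputs it leaves untouched). This is a toy demo assuming a recent Gradio 4/5 API, not part of the patch.

import time
import gradio as gr

def run_job():
    # first yield: report start, hide "open", show "close"
    yield "running...", gr.update(visible=False), gr.update(visible=True)
    time.sleep(1)  # stand-in for waiting on a training subprocess
    # final yield: report the result and restore the buttons
    yield "finished", gr.update(visible=True), gr.update(visible=False)

with gr.Blocks() as demo:
    info = gr.Textbox(label="status")
    open_btn = gr.Button("open", variant="primary", visible=True)
    close_btn = gr.Button("close", variant="primary", visible=False)
    open_btn.click(run_job, [], [info, open_btn, close_btn])

if __name__ == "__main__":
    demo.launch()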
--config_file "%s" ' % (python_exec, tmp_config_path) + + cmd = [python_exec, "-s", "GPT_SoVITS/s1_train.py", "--config_file", tmp_config_path] + + console.print(" ".join(cmd)) + + p = Popen(cmd, env=env) + p_train_GPT = p + yield ( process_info(process_name_gpt, "opened"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - {"__type__": "update"}, - {"__type__": "update"}, + gr.update(visible=False), + gr.update(visible=True), + gr.skip(), + gr.skip(), ) - print(cmd) - p_train_GPT = Popen(cmd, shell=True) - p_train_GPT.wait() + + code = p.wait() p_train_GPT = None - SoVITS_dropdown_update, GPT_dropdown_update = change_choices() + + if code == 0: + yield ( + process_info(process_name_gpt, "finish"), + gr.update(visible=True), + gr.update(visible=False), + gr.skip(), + gr.skip(), + ) + else: + yield ( + process_info(process_name_gpt, "failed"), + gr.update(visible=True), + gr.update(visible=False), + gr.skip(), + gr.skip(), + ) + return (gr.skip() for i in range(5)) + + SoVITS_dropdown_update, GPT_dropdown_update = change_choice() + yield ( - process_info(process_name_gpt, "finish"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.skip(), + gr.skip(), + gr.skip(), SoVITS_dropdown_update, GPT_dropdown_update, ) else: yield ( process_info(process_name_gpt, "occupy"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - {"__type__": "update"}, - {"__type__": "update"}, + gr.update(visible=False), + gr.update(visible=True), + gr.skip(), + gr.skip(), ) @@ -671,8 +673,8 @@ def close1Bb(): p_train_GPT = None return ( process_info(process_name_gpt, "closed"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=True), + gr.update(visible=False), ) @@ -685,14 +687,13 @@ def open_slice(inp, opt_root, threshold, min_length, min_interval, hop_size, max inp = my_utils.clean_path(inp) opt_root = my_utils.clean_path(opt_root) check_for_existance([inp]) - if os.path.exists(inp) == False: + if os.path.exists(inp) is False: yield ( i18n("输入路径不存在"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, - {"__type__": "update"}, - {"__type__": "update"}, - {"__type__": "update"}, + gr.update(visible=True), + gr.update(visible=False), + gr.skip(), + gr.skip(), ) return if os.path.isfile(inp): @@ -702,11 +703,10 @@ def open_slice(inp, opt_root, threshold, min_length, min_interval, hop_size, max else: yield ( i18n("输入路径存在但不可用"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, - {"__type__": "update"}, - {"__type__": "update"}, - {"__type__": "update"}, + gr.update(visible=True), + gr.update(visible=False), + gr.skip(), + gr.skip(), ) return if ps_slice == []: @@ -725,36 +725,33 @@ def open_slice(inp, opt_root, threshold, min_length, min_interval, hop_size, max i_part, n_parts, ) - print(cmd) + console.print(cmd) p = Popen(cmd, shell=True) ps_slice.append(p) yield ( process_info(process_name_slice, "opened"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - {"__type__": "update"}, - {"__type__": "update"}, - {"__type__": "update"}, + gr.update(visible=False), + gr.update(visible=True), + gr.skip(), + gr.skip(), ) for p in ps_slice: p.wait() ps_slice = [] yield ( process_info(process_name_slice, "finish"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, - {"__type__": "update", "value": opt_root}, - 
{"__type__": "update", "value": opt_root}, - {"__type__": "update", "value": opt_root}, + gr.update(visible=True), + gr.update(visible=False), + gr.update(value=opt_root), + gr.update(value=opt_root), ) else: yield ( process_info(process_name_slice, "occupy"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - {"__type__": "update"}, - {"__type__": "update"}, - {"__type__": "update"}, + gr.update(visible=False), + gr.update(visible=True), + gr.skip(), + gr.skip(), ) @@ -764,297 +761,307 @@ def close_slice(): for p_slice in ps_slice: try: kill_process(p_slice.pid, process_name_slice) - except: + except Exception as _: traceback.print_exc() ps_slice = [] return ( process_info(process_name_slice, "closed"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=True), + gr.update(visible=False), ) -ps1a = [] +ps1a: None | Popen = None process_name_1a = i18n("文本分词与特征提取") -def open1a(inp_text, inp_wav_dir, exp_name, gpu_numbers, bert_pretrained_dir): +def open1a( + inp_text: str, + inp_wav_dir: str, + exp_name: str, + gpu_numbers: list[int], + bert_pretrained_dir: str, + version: str, + nproc: int = 1, +): global ps1a inp_text = my_utils.clean_path(inp_text) inp_wav_dir = my_utils.clean_path(inp_wav_dir) if check_for_existance([inp_text, inp_wav_dir], is_dataset_processing=True): check_details([inp_text, inp_wav_dir], is_dataset_processing=True) exp_name = exp_name.rstrip(" ") - if ps1a == []: - opt_dir = "%s/%s" % (exp_root, exp_name) - config = { - "inp_text": inp_text, - "inp_wav_dir": inp_wav_dir, - "exp_name": exp_name, - "opt_dir": opt_dir, - "bert_pretrained_dir": bert_pretrained_dir, - } - gpu_names = gpu_numbers.split("-") - all_parts = len(gpu_names) - for i_part in range(all_parts): - config.update( - { - "i_part": str(i_part), - "all_parts": str(all_parts), - "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]), - "is_half": str(is_half), - } - ) - os.environ.update(config) - cmd = '"%s" -s GPT_SoVITS/prepare_datasets/1-get-text.py' % python_exec - print(cmd) - p = Popen(cmd, shell=True) - ps1a.append(p) + if ps1a is None: + opt_dir = f"{exp_root}/{exp_name}" + + env = os.environ.copy() + env["PYTHONPATH"] = os.getcwd() + + # fmt: off + cmd = [ + python_exec, "-s", "GPT_SoVITS/prepare_datasets/1-get-text.py", + "--inp-list", inp_text, + "--opt", opt_dir, + "--bert", bert_pretrained_dir, + "--version", version, + "--device", infer_device.type, + "--device-id", str(gpu_numbers).strip("[]").replace(" ",""), + "--nproc", str(nproc), + ] + # fmt: on + + if is_half: + cmd.append("--fp16") + else: + cmd.append("--no-fp16") + + console.print(" ".join(cmd)) + p = Popen(cmd, env=env) + yield ( process_info(process_name_1a, "running"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, + gr.update(visible=False), + gr.update(visible=True), ) - for p in ps1a: - p.wait() - opt = [] - for i_part in range(all_parts): - txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part) - with open(txt_path, "r", encoding="utf8") as f: - opt += f.read().strip("\n").split("\n") - os.remove(txt_path) - path_text = "%s/2-name2text.txt" % opt_dir - with open(path_text, "w", encoding="utf8") as f: - f.write("\n".join(opt) + "\n") - ps1a = [] - if len("".join(opt)) > 0: + + code = p.wait() + ps1a = None + + if code == 0: yield ( process_info(process_name_1a, "finish"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=True), + 
gr.update(visible=False), ) else: yield ( process_info(process_name_1a, "failed"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=True), + gr.update(visible=False), ) else: yield ( process_info(process_name_1a, "occupy"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, + gr.update(visible=False), + gr.update(visible=True), ) def close1a(): global ps1a - if ps1a != []: - for p1a in ps1a: - try: - kill_process(p1a.pid, process_name_1a) - except: - traceback.print_exc() - ps1a = [] + if ps1a: + try: + kill_process(ps1a.pid, process_name_1a) + except Exception as _: + traceback.print_exc() + ps1a = None return ( process_info(process_name_1a, "closed"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=True), + gr.update(visible=False), ) -sv_path = "GPT_SoVITS/pretrained_models/sv/pretrained_eres2netv2w24s4ep4.ckpt" -ps1b = [] +ps1b: None | Popen = None process_name_1b = i18n("语音自监督特征提取") -def open1b(version, inp_text, inp_wav_dir, exp_name, gpu_numbers, ssl_pretrained_dir): +def open1b( + version: str, + inp_text: str, + inp_wav_dir: str, + exp_name: str, + gpu_numbers: list[int], + ssl_pretrained_dir: str, + nproc: int = 1, +): global ps1b inp_text = my_utils.clean_path(inp_text) inp_wav_dir = my_utils.clean_path(inp_wav_dir) if check_for_existance([inp_text, inp_wav_dir], is_dataset_processing=True): check_details([inp_text, inp_wav_dir], is_dataset_processing=True) exp_name = exp_name.rstrip(" ") - if ps1b == []: - config = { - "inp_text": inp_text, - "inp_wav_dir": inp_wav_dir, - "exp_name": exp_name, - "opt_dir": "%s/%s" % (exp_root, exp_name), - "cnhubert_base_dir": ssl_pretrained_dir, - "sv_path": sv_path, - "is_half": str(is_half), - } - gpu_names = gpu_numbers.split("-") - all_parts = len(gpu_names) - for i_part in range(all_parts): - config.update( - { - "i_part": str(i_part), - "all_parts": str(all_parts), - "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]), - } - ) - os.environ.update(config) - cmd = '"%s" -s GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py' % python_exec - print(cmd) - p = Popen(cmd, shell=True) - ps1b.append(p) + if ps1b is None: + opt_dir = f"{exp_root}/{exp_name}" + + env = os.environ.copy() + env["PYTHONPATH"] = os.getcwd() + + # fmt: off + cmd = [ + python_exec, "-s", "GPT_SoVITS/prepare_datasets/2-get-hubert-sv-wav32k.py", + "--inp-list", inp_text, + "--opt", opt_dir, + "--cnhubert", ssl_pretrained_dir, + "--device", infer_device.type, + "--device-id", str(gpu_numbers).strip("[]").replace(" ",""), + "--nproc", str(nproc), + ] + # fmt: on + + if inp_wav_dir: + cmd.extend(["--wav-dir", inp_wav_dir]) + + if "Pro" in version: + cmd.extend(["--sv", sv_path]) + + if is_half: + cmd.append("--fp16") + else: + cmd.append("--no-fp16") + + console.print(" ".join(cmd)) + p = Popen(cmd, env=env) + + ps1b = p + yield ( process_info(process_name_1b, "running"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - ) - for p in ps1b: - p.wait() - ps1b = [] - if "Pro" in version: - for i_part in range(all_parts): - config.update( - { - "i_part": str(i_part), - "all_parts": str(all_parts), - "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]), - } - ) - os.environ.update(config) - cmd = '"%s" -s GPT_SoVITS/prepare_datasets/2-get-sv.py' % python_exec - print(cmd) - p = Popen(cmd, shell=True) - ps1b.append(p) - for p in ps1b: - p.wait() - ps1b = [] - yield ( - 
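One detail of the preprocessing commands above worth spelling out: the multiselect GPU dropdown hands the handler a Python list of indices, which is flattened into the comma-separated --device-id value. A tiny sketch of that conversion; the join() form shown second is an equivalent, arguably clearer, alternative rather than what the patch uses.

gpu_numbers = [0, 1, 3]

# the idiom used in the handlers above
device_id = str(gpu_numbers).strip("[]").replace(" ", "")
assert device_id == "0,1,3"

# equivalent and a bit more explicit
device_id_alt = ",".join(str(g) for g in gpu_numbers)
assert device_id_alt == device_id

print(["--device-id", device_id, "--nproc", "2"])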
process_info(process_name_1b, "finish"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=False), + gr.update(visible=True), ) + + code = p.wait() + ps1b = None + + if code == 0: + yield ( + process_info(process_name_1b, "finish"), + gr.update(visible=True), + gr.update(visible=False), + ) + else: + yield ( + process_info(process_name_1b, "failed"), + gr.update(visible=True), + gr.update(visible=False), + ) else: yield ( process_info(process_name_1b, "occupy"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, + gr.update(visible=False), + gr.update(visible=True), ) def close1b(): global ps1b - if ps1b != []: - for p1b in ps1b: - try: - kill_process(p1b.pid, process_name_1b) - except: - traceback.print_exc() - ps1b = [] + if ps1b: + try: + kill_process(ps1b.pid, process_name_1b) + except Exception as _: + traceback.print_exc() + ps1b = None return ( process_info(process_name_1b, "closed"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=True), + gr.update(visible=False), ) -ps1c = [] +ps1c: None | Popen = None process_name_1c = i18n("语义Token提取") -def open1c(version, inp_text, inp_wav_dir, exp_name, gpu_numbers, pretrained_s2G_path): +def open1c( + inp_text: str, + exp_name: str, + gpu_numbers: list[int], + pretrained_s2G_path: str, + nproc: int = 1, +): global ps1c inp_text = my_utils.clean_path(inp_text) - if check_for_existance([inp_text, inp_wav_dir], is_dataset_processing=True): - check_details([inp_text, inp_wav_dir], is_dataset_processing=True) + check_for_existance([inp_text], is_dataset_processing=True) exp_name = exp_name.rstrip(" ") - if ps1c == []: - opt_dir = "%s/%s" % (exp_root, exp_name) - config_file = ( - "GPT_SoVITS/configs/s2.json" - if version not in {"v2Pro", "v2ProPlus"} - else f"GPT_SoVITS/configs/s2{version}.json" - ) - config = { - "inp_text": inp_text, - "exp_name": exp_name, - "opt_dir": opt_dir, - "pretrained_s2G": pretrained_s2G_path, - "s2config_path": config_file, - "is_half": str(is_half), - } - gpu_names = gpu_numbers.split("-") - all_parts = len(gpu_names) - for i_part in range(all_parts): - config.update( - { - "i_part": str(i_part), - "all_parts": str(all_parts), - "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]), - } - ) - os.environ.update(config) - cmd = '"%s" -s GPT_SoVITS/prepare_datasets/3-get-semantic.py' % python_exec - print(cmd) - p = Popen(cmd, shell=True) - ps1c.append(p) + if ps1c is None: + opt_dir = f"{exp_root}/{exp_name}" + + env = os.environ.copy() + env["PYTHONPATH"] = os.getcwd() + + # fmt: off + cmd = [ + python_exec, "-s", "GPT_SoVITS/prepare_datasets/3-get-semantic.py", + "--inp-list", inp_text, + "--opt", opt_dir, + "--pretrained-s2g", pretrained_s2G_path, + "--device", infer_device.type, + "--device-id", str(gpu_numbers).strip("[]").replace(" ",""), + "--nproc", str(nproc), + ] + # fmt: on + + if is_half: + cmd.append("--fp16") + else: + cmd.append("--no-fp16") + + console.print(" ".join(cmd)) + p = Popen(cmd, env=env) + + ps1c = p + yield ( process_info(process_name_1c, "running"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - ) - for p in ps1c: - p.wait() - opt = ["item_name\tsemantic_audio"] - path_semantic = "%s/6-name2semantic.tsv" % opt_dir - for i_part in range(all_parts): - semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part) - with open(semantic_path, "r", encoding="utf8") as f: - opt += 
f.read().strip("\n").split("\n") - os.remove(semantic_path) - with open(path_semantic, "w", encoding="utf8") as f: - f.write("\n".join(opt) + "\n") - ps1c = [] - yield ( - process_info(process_name_1c, "finish"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=False), + gr.update(visible=True), ) + + code = p.wait() + ps1c = None + + if code == 0: + yield ( + process_info(process_name_1c, "finish"), + gr.update(visible=True), + gr.update(visible=False), + ) + else: + yield ( + process_info(process_name_1c, "failed"), + gr.update(visible=True), + gr.update(visible=False), + ) + else: yield ( process_info(process_name_1c, "occupy"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, + gr.update(visible=False), + gr.update(visible=True), ) def close1c(): global ps1c - if ps1c != []: - for p1c in ps1c: - try: - kill_process(p1c.pid, process_name_1c) - except: - traceback.print_exc() - ps1c = [] + if ps1c: + try: + kill_process(ps1c.pid, process_name_1c) + except Exception as _: + traceback.print_exc() + ps1c = None return ( process_info(process_name_1c, "closed"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=True), + gr.update(visible=False), ) -ps1abc = [] +ps1abc: list[None | Popen] = [None] * 3 process_name_1abc = i18n("训练集格式化一键三连") def open1abc( - version, - inp_text, - inp_wav_dir, - exp_name, - gpu_numbers1a, - gpu_numbers1Ba, - gpu_numbers1c, - bert_pretrained_dir, - ssl_pretrained_dir, - pretrained_s2G_path, + version: str, + inp_text: str, + inp_wav_dir: str, + exp_name: str, + gpu_numbers_1: list[int], + gpu_numbers_2: list[int], + gpu_numbers_3: list[int], + bert_pretrained_dir: str, + ssl_pretrained_dir: str, + pretrained_s2G_path: str, + nproc: int = 1, ): global ps1abc inp_text = my_utils.clean_path(inp_text) @@ -1062,203 +1069,177 @@ def open1abc( if check_for_existance([inp_text, inp_wav_dir], is_dataset_processing=True): check_details([inp_text, inp_wav_dir], is_dataset_processing=True) exp_name = exp_name.rstrip(" ") - if ps1abc == []: - opt_dir = "%s/%s" % (exp_root, exp_name) - try: - #############################1a - path_text = "%s/2-name2text.txt" % opt_dir - if os.path.exists(path_text) == False or ( - os.path.exists(path_text) == True - and len(open(path_text, "r", encoding="utf8").read().strip("\n").split("\n")) < 2 - ): - config = { - "inp_text": inp_text, - "inp_wav_dir": inp_wav_dir, - "exp_name": exp_name, - "opt_dir": opt_dir, - "bert_pretrained_dir": bert_pretrained_dir, - "is_half": str(is_half), - } - gpu_names = gpu_numbers1a.split("-") - all_parts = len(gpu_names) - for i_part in range(all_parts): - config.update( - { - "i_part": str(i_part), - "all_parts": str(all_parts), - "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]), - } - ) - os.environ.update(config) - cmd = '"%s" -s GPT_SoVITS/prepare_datasets/1-get-text.py' % python_exec - print(cmd) - p = Popen(cmd, shell=True) - ps1abc.append(p) - yield ( - i18n("进度") + ": 1A-Doing", - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - ) - for p in ps1abc: - p.wait() + if not all(ps1abc): + opt_dir = f"{exp_root}/{exp_name}" - opt = [] - for i_part in range(all_parts): # txt_path="%s/2-name2text-%s.txt"%(opt_dir,i_part) - txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part) - with open(txt_path, "r", encoding="utf8") as f: - opt += f.read().strip("\n").split("\n") - os.remove(txt_path) - with open(path_text, 
"w", encoding="utf8") as f: - f.write("\n".join(opt) + "\n") - assert len("".join(opt)) > 0, process_info(process_name_1a, "failed") + env = os.environ.copy() + env["PYTHONPATH"] = os.getcwd() + + # Step 1 + # fmt: off + cmd_1 = [ + python_exec, "-s", "GPT_SoVITS/prepare_datasets/1-get-text.py", + "--inp-list", inp_text, + "--opt", opt_dir, + "--bert", bert_pretrained_dir, + "--version", version, + "--device", infer_device.type, + "--device-id", str(gpu_numbers_1).strip("[]").replace(" ",""), + "--nproc", str(nproc), + ] + # fmt: on + + if is_half: + cmd_1.append("--fp16") + else: + cmd_1.append("--no-fp16") + + console.print(" ".join(cmd_1)) + p = Popen(cmd_1, env=env) + ps1abc[0] = p + + yield ( + i18n("进度") + ": 1A-Doing", + gr.update(visible=False), + gr.update(visible=True), + ) + + code = p.wait() + ps1abc[0] = None + + if code == 0: yield ( i18n("进度") + ": 1A-Done", - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, + gr.update(visible=False), + gr.update(visible=True), ) - ps1abc = [] - #############################1b - config = { - "inp_text": inp_text, - "inp_wav_dir": inp_wav_dir, - "exp_name": exp_name, - "opt_dir": opt_dir, - "cnhubert_base_dir": ssl_pretrained_dir, - "sv_path": sv_path, - } - gpu_names = gpu_numbers1Ba.split("-") - all_parts = len(gpu_names) - for i_part in range(all_parts): - config.update( - { - "i_part": str(i_part), - "all_parts": str(all_parts), - "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]), - } - ) - os.environ.update(config) - cmd = '"%s" -s GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py' % python_exec - print(cmd) - p = Popen(cmd, shell=True) - ps1abc.append(p) + else: yield ( - i18n("进度") + ": 1A-Done, 1B-Doing", - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, + i18n("进度") + ": 1A-Failed", + gr.update(visible=True), + gr.update(visible=False), ) - for p in ps1abc: - p.wait() - ps1abc = [] - if "Pro" in version: - for i_part in range(all_parts): - config.update( - { - "i_part": str(i_part), - "all_parts": str(all_parts), - "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]), - } - ) - os.environ.update(config) - cmd = '"%s" -s GPT_SoVITS/prepare_datasets/2-get-sv.py' % python_exec - print(cmd) - p = Popen(cmd, shell=True) - ps1abc.append(p) - for p in ps1abc: - p.wait() - ps1abc = [] + return (gr.skip() for i in range(3)) + + # Step 2 + # fmt: off + cmd_2 = [ + python_exec, "-s", "GPT_SoVITS/prepare_datasets/2-get-hubert-sv-wav32k.py", + "--inp-list", inp_text, + "--opt", opt_dir, + "--cnhubert", ssl_pretrained_dir, + "--device", infer_device.type, + "--device-id", str(gpu_numbers_2).strip("[]").replace(" ",""), + "--nproc", str(nproc), + ] + # fmt: on + + if inp_wav_dir: + cmd_2.extend(["--wav-dir", inp_wav_dir]) + + if "Pro" in version: + cmd_2.extend(["--sv", sv_path]) + + if is_half: + cmd_2.append("--fp16") + else: + cmd_2.append("--no-fp16") + + console.print(" ".join(cmd_2)) + p = Popen(cmd_2, env=env) + ps1abc[1] = p + + yield ( + i18n("进度") + ": 1A-Done, 1B-Doing", + gr.update(visible=False), + gr.update(visible=True), + ) + + code = p.wait() + ps1abc[1] = None + + if code == 0: yield ( i18n("进度") + ": 1A-Done, 1B-Done", - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, + gr.update(visible=False), + gr.update(visible=True), ) - #############################1c - path_semantic = "%s/6-name2semantic.tsv" % opt_dir - if os.path.exists(path_semantic) == False or ( - os.path.exists(path_semantic) == True and 
os.path.getsize(path_semantic) < 31 - ): - config_file = ( - "GPT_SoVITS/configs/s2.json" - if version not in {"v2Pro", "v2ProPlus"} - else f"GPT_SoVITS/configs/s2{version}.json" - ) - config = { - "inp_text": inp_text, - "exp_name": exp_name, - "opt_dir": opt_dir, - "pretrained_s2G": pretrained_s2G_path, - "s2config_path": config_file, - } - gpu_names = gpu_numbers1c.split("-") - all_parts = len(gpu_names) - for i_part in range(all_parts): - config.update( - { - "i_part": str(i_part), - "all_parts": str(all_parts), - "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]), - } - ) - os.environ.update(config) - cmd = '"%s" -s GPT_SoVITS/prepare_datasets/3-get-semantic.py' % python_exec - print(cmd) - p = Popen(cmd, shell=True) - ps1abc.append(p) - yield ( - i18n("进度") + ": 1A-Done, 1B-Done, 1C-Doing", - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - ) - for p in ps1abc: - p.wait() + else: + yield ( + i18n("进度") + ": 1A-Done, 1B-Failed", + gr.update(visible=True), + gr.update(visible=False), + ) + return (gr.skip() for i in range(3)) - opt = ["item_name\tsemantic_audio"] - for i_part in range(all_parts): - semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part) - with open(semantic_path, "r", encoding="utf8") as f: - opt += f.read().strip("\n").split("\n") - os.remove(semantic_path) - with open(path_semantic, "w", encoding="utf8") as f: - f.write("\n".join(opt) + "\n") - yield ( - i18n("进度") + ": 1A-Done, 1B-Done, 1C-Done", - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, - ) - ps1abc = [] + # Step 3 + # fmt: off + cmd_3 = [ + python_exec, "-s", "GPT_SoVITS/prepare_datasets/3-get-semantic.py", + "--inp-list", inp_text, + "--opt", opt_dir, + "--pretrained-s2g", pretrained_s2G_path, + "--device", infer_device.type, + "--device-id", str(gpu_numbers_3).strip("[]").replace(" ",""), + "--nproc", str(nproc), + ] + # fmt: on + + if is_half: + cmd_3.append("--fp16") + else: + cmd_3.append("--no-fp16") + + console.print(" ".join(cmd_3)) + p = Popen(cmd_3, env=env) + ps1abc[2] = p + + yield ( + i18n("进度") + ": 1A-Done, 1B-Done, 1C-Doing", + gr.update(visible=False), + gr.update(visible=True), + ) + + code = p.wait() + ps1abc[2] = None + + if code == 0: yield ( process_info(process_name_1abc, "finish"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=False), + gr.update(visible=True), ) - except: - traceback.print_exc() - close1abc() + else: yield ( - process_info(process_name_1abc, "failed"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + i18n("进度") + ": 1A-Done, 1B-Done, 1C-Failed", + gr.update(visible=True), + gr.update(visible=False), ) + return (gr.skip() for i in range(3)) + else: yield ( process_info(process_name_1abc, "occupy"), - {"__type__": "update", "visible": False}, - {"__type__": "update", "visible": True}, + gr.update(visible=False), + gr.update(visible=True), ) def close1abc(): global ps1abc - if ps1abc != []: + if any(ps1abc): for p1abc in ps1abc: + if p1abc is None: + continue try: kill_process(p1abc.pid, process_name_1abc) - except: + except Exception as _: traceback.print_exc() - ps1abc = [] + ps1abc = [None] * 3 return ( process_info(process_name_1abc, "closed"), - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, + gr.update(visible=True), + gr.update(visible=False), ) @@ -1272,37 +1253,39 @@ def switch_version(version_): gr.Warning(i18n("未下载模型") + ": " + 
version.upper()) set_default() return ( - {"__type__": "update", "value": pretrained_sovits_name[version]}, - {"__type__": "update", "value": pretrained_sovits_name[version].replace("s2G", "s2D")}, - {"__type__": "update", "value": pretrained_gpt_name[version]}, - {"__type__": "update", "value": pretrained_gpt_name[version]}, - {"__type__": "update", "value": pretrained_sovits_name[version]}, - {"__type__": "update", "value": default_batch_size, "maximum": default_max_batch_size}, - {"__type__": "update", "value": default_sovits_epoch, "maximum": max_sovits_epoch}, - {"__type__": "update", "value": default_sovits_save_every_epoch, "maximum": max_sovits_save_every_epoch}, - {"__type__": "update", "visible": True if version not in v3v4set else False}, - { - "__type__": "update", - "value": False if not if_force_ckpt else True, - "interactive": True if not if_force_ckpt else False, - }, - {"__type__": "update", "interactive": True, "value": False}, - {"__type__": "update", "visible": True if version in v3v4set else False}, - ) # {'__type__': 'update', "interactive": False if version in v3v4set else True, "value": False}, \ ####batch infer - - -if os.path.exists("GPT_SoVITS/text/G2PWModel"): - ... -else: - cmd = '"%s" -s GPT_SoVITS/download.py' % python_exec - p = Popen(cmd, shell=True) - p.wait() + gr.update(value=pretrained_sovits_name[version]), + gr.update(value=pretrained_sovits_name[version].replace("s2G", "s2D")), + gr.update(value=pretrained_gpt_name[version]), + gr.update(value=pretrained_gpt_name[version]), + gr.update(value=pretrained_sovits_name[version]), + gr.update(value=default_batch_size, maximum=default_max_batch_size), + gr.update(value=default_sovits_epoch, maximum=max_sovits_epoch), + gr.update(value=default_sovits_save_every_epoch, maximum=max_sovits_save_every_epoch), + gr.update(visible=False if version in v3v4set else True), + gr.update( + visible=False if version not in v3v4set else True, + value=False if not if_force_ckpt else True, + interactive=True if not if_force_ckpt else False, + ), + gr.update(value=False, interactive=True), + gr.update(visible=True if version in v3v4set else False), + ) def sync(text): - return {"__type__": "update", "value": text} + return gr.update(value=text) +def changeBackend(flag: bool): + if flag: + return gr.update(choices=["Torch Varlen"], value="Torch Varlen") + else: + return gr.update(choices=backends_gradio, value=backends_gradio[-1][-1]) + + +GPU_INDEX.add(0) +GPU_INDEX_LIST = list(GPU_INDEX) +GPU_INDEX_LIST.sort() with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css) as app: gr.HTML( top_html.format( @@ -1313,11 +1296,11 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css ) with gr.Tabs(): - with gr.TabItem("0-" + i18n("前置数据集获取工具")): # 提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标 + with gr.TabItem("0-" + i18n("前置数据集获取工具")): with gr.Accordion(label="0a-" + i18n("UVR5人声伴奏分离&去混响去延迟工具")): - with gr.Row(): + with gr.Row(equal_height=True): with gr.Column(scale=3): - with gr.Row(): + with gr.Row(equal_height=True): uvr5_info = gr.Textbox(label=process_info(process_name_uvr5, "info")) open_uvr5 = gr.Button( value=process_info(process_name_uvr5, "open"), variant="primary", visible=True @@ -1327,14 +1310,19 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css ) with gr.Accordion(label="0b-" + i18n("语音切分工具")): - with gr.Row(): + with gr.Row(equal_height=True): with gr.Column(scale=3): - with gr.Row(): - slice_inp_path = 
gr.Textbox(label=i18n("音频自动切分输入路径,可文件可文件夹"), value="") + with gr.Row(equal_height=True): + slice_inp_path = gr.Textbox( + label=i18n("音频自动切分输入路径,可文件可文件夹"), + placeholder="D:/InputAudioFolder" + if platform.system() == "Windows" + else "~/InputAudioFolder", + ) slice_opt_root = gr.Textbox( label=i18n("切分后的子音频的输出根目录"), value="output/slicer_opt" ) - with gr.Row(): + with gr.Row(equal_height=True): threshold = gr.Textbox( label=i18n("threshold:音量小于这个值视作静音的备选切割点"), value="-34" ) @@ -1348,7 +1336,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css value="10", ) max_sil_kept = gr.Textbox(label=i18n("max_sil_kept:切完后静音最多留多长"), value="500") - with gr.Row(): + with gr.Row(equal_height=True): _max = gr.Slider( minimum=0, maximum=1, @@ -1365,7 +1353,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css value=0.25, interactive=True, ) - with gr.Row(): + with gr.Row(equal_height=True): n_process = gr.Slider( minimum=1, maximum=n_cpu, @@ -1382,32 +1370,19 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css value=process_info(process_name_slice, "close"), variant="primary", visible=False ) - # gr.Markdown(value="0bb-" + i18n("语音降噪工具")+i18n("(不稳定,先别用,可能劣化模型效果!)")) - with gr.Row(visible=False): - with gr.Column(scale=3): - with gr.Row(): - denoise_input_dir = gr.Textbox(label=i18n("输入文件夹路径"), value="") - denoise_output_dir = gr.Textbox(label=i18n("输出文件夹路径"), value="output/denoise_opt") - with gr.Row(): - denoise_info = gr.Textbox(label=process_info(process_name_denoise, "info")) - open_denoise_button = gr.Button( - value=process_info(process_name_denoise, "open"), variant="primary", visible=True - ) - close_denoise_button = gr.Button( - value=process_info(process_name_denoise, "close"), variant="primary", visible=False - ) - with gr.Accordion(label="0c-" + i18n("语音识别工具")): - with gr.Row(): + with gr.Row(equal_height=True): with gr.Column(scale=3): - with gr.Row(): + with gr.Row(equal_height=True): asr_inp_dir = gr.Textbox( - label=i18n("输入文件夹路径"), value="D:\\GPT-SoVITS\\raw\\xxx", interactive=True + label=i18n("输入文件夹路径"), + value="output/silcer_opt", + interactive=True, ) asr_opt_dir = gr.Textbox( label=i18n("输出文件夹路径"), value="output/asr_opt", interactive=True ) - with gr.Row(): + with gr.Row(equal_height=True): asr_model = gr.Dropdown( label=i18n("ASR 模型"), choices=list(asr_dict.keys()), @@ -1423,7 +1398,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css asr_precision = gr.Dropdown( label=i18n("数据类型精度"), choices=["float32"], interactive=True, value="float32" ) - with gr.Row(): + with gr.Row(equal_height=True): asr_info = gr.Textbox(label=process_info(process_name_asr, "info")) open_asr_button = gr.Button( value=process_info(process_name_asr, "open"), variant="primary", visible=True @@ -1433,10 +1408,10 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css ) def change_lang_choices(key): # 根据选择的模型修改可选的语言 - return {"__type__": "update", "choices": asr_dict[key]["lang"], "value": asr_dict[key]["lang"][0]} + return gr.update(value=asr_dict[key]["lang"][0], choices=asr_dict[key]["lang"]) def change_size_choices(key): # 根据选择的模型修改可选的模型尺寸 - return {"__type__": "update", "choices": asr_dict[key]["size"], "value": asr_dict[key]["size"][-1]} + return gr.update(value=asr_dict[key]["size"][-1], choices=asr_dict[key]["size"]) def change_precision_choices(key): # 根据选择的模型修改可选的语言 if key == "Faster Whisper (多语种)": @@ -1448,19 +1423,19 @@ with 
gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css precision = "float32" else: precision = "float32" - return {"__type__": "update", "choices": asr_dict[key]["precision"], "value": precision} + return gr.update(value=precision, choices=asr_dict[key]["precision"]) asr_model.change(change_lang_choices, [asr_model], [asr_lang]) asr_model.change(change_size_choices, [asr_model], [asr_size]) asr_model.change(change_precision_choices, [asr_model], [asr_precision]) with gr.Accordion(label="0d-" + i18n("语音文本校对标注工具")): - with gr.Row(): + with gr.Row(equal_height=True): with gr.Column(scale=3): - with gr.Row(): + with gr.Row(equal_height=True): path_list = gr.Textbox( label=i18n("标注文件路径 (含文件后缀 *.list)"), - value="D:\\RVC1006\\GPT-SoVITS\\raw\\xxx.list", + value="output/asr_opt/slicer_opt.list", interactive=True, ) label_info = gr.Textbox(label=process_info(process_name_subfix, "info")) @@ -1478,14 +1453,15 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css with gr.TabItem(i18n("1-GPT-SoVITS-TTS")): with gr.Accordion(i18n("微调模型信息")): - with gr.Row(): - with gr.Row(equal_height=True): + with gr.Row(equal_height=True): + with gr.Column(): exp_name = gr.Textbox( - label=i18n("*实验/模型名"), + label=i18n("实验/模型名"), value="xxx", interactive=True, scale=3, ) + with gr.Column(): gpu_info_box = gr.Textbox( label=i18n("显卡信息"), value=gpu_info, @@ -1493,14 +1469,24 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css interactive=False, scale=5, ) - version_checkbox = gr.Radio( + with gr.Column(): + version_checkbox = gr.Dropdown( label=i18n("训练模型的版本"), value=version, - choices=["v1", "v2", "v4", "v2Pro", "v2ProPlus"], + choices=[ + ("V1", "v1"), + ("V2", "v2"), + ("V4", "v4"), + ("V2 Pro", "v2Pro"), + ("V2 Pro Plus", "v2ProPlus"), + ], scale=5, ) + with gr.Column(): + n_processes = gr.Slider(0, 6, 2, step=1, label=i18n("每卡预处理进程数")) + with gr.Accordion(label=i18n("预训练模型路径"), open=False): - with gr.Row(): + with gr.Row(equal_height=True): with gr.Row(equal_height=True): pretrained_s1 = gr.Textbox( label=i18n("预训练GPT模型路径"), @@ -1529,15 +1515,15 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css with gr.TabItem("1A-" + i18n("训练集格式化工具")): with gr.Accordion(label=i18n("输出logs/实验名目录下应有23456开头的文件和文件夹")): - with gr.Row(): - with gr.Row(): + with gr.Row(equal_height=True): + with gr.Row(equal_height=True): inp_text = gr.Textbox( label=i18n("*文本标注文件"), - value=r"D:\RVC1006\GPT-SoVITS\raw\xxx.list", + value=r"output/asr_opt/slicer_opt.list", interactive=True, scale=10, ) - with gr.Row(): + with gr.Row(equal_height=True): inp_wav_dir = gr.Textbox( label=i18n("*训练集音频文件目录"), # value=r"D:\RVC1006\GPT-SoVITS\raw\xxx", @@ -1549,90 +1535,99 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css ) with gr.Accordion(label="1Aa-" + process_name_1a): - with gr.Row(): - with gr.Row(): - gpu_numbers1a = gr.Textbox( - label=i18n("GPU卡号以-分割,每个卡号一个进程"), - value="%s-%s" % (gpus, gpus), + with gr.Row(equal_height=True): + with gr.Row(equal_height=True): + gpu_numbers1a = gr.Dropdown( + label=i18n("GPU卡号"), + choices=GPU_INDEX_LIST, + value=GPU_INDEX_LIST, interactive=True, + multiselect=True, + allow_custom_value=False, ) - with gr.Row(): + with gr.Row(equal_height=True): bert_pretrained_dir = gr.Textbox( label=i18n("预训练中文BERT模型路径"), value="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large", interactive=False, lines=2, ) - with gr.Row(): + with gr.Row(equal_height=True): button1a_open = 
gr.Button( value=process_info(process_name_1a, "open"), variant="primary", visible=True ) button1a_close = gr.Button( value=process_info(process_name_1a, "close"), variant="primary", visible=False ) - with gr.Row(): + with gr.Row(equal_height=True): info1a = gr.Textbox(label=process_info(process_name_1a, "info")) with gr.Accordion(label="1Ab-" + process_name_1b): - with gr.Row(): - with gr.Row(): - gpu_numbers1Ba = gr.Textbox( - label=i18n("GPU卡号以-分割,每个卡号一个进程"), - value="%s-%s" % (gpus, gpus), + with gr.Row(equal_height=True): + with gr.Row(equal_height=True): + gpu_numbers1b = gr.Dropdown( + label=i18n("GPU卡号"), + choices=GPU_INDEX_LIST, + value=GPU_INDEX_LIST, interactive=True, + multiselect=True, + allow_custom_value=False, ) - with gr.Row(): + with gr.Row(equal_height=True): cnhubert_base_dir = gr.Textbox( label=i18n("预训练SSL模型路径"), value="GPT_SoVITS/pretrained_models/chinese-hubert-base", interactive=False, lines=2, ) - with gr.Row(): + with gr.Row(equal_height=True): button1b_open = gr.Button( value=process_info(process_name_1b, "open"), variant="primary", visible=True ) button1b_close = gr.Button( value=process_info(process_name_1b, "close"), variant="primary", visible=False ) - with gr.Row(): + with gr.Row(equal_height=True): info1b = gr.Textbox(label=process_info(process_name_1b, "info")) with gr.Accordion(label="1Ac-" + process_name_1c): - with gr.Row(): - with gr.Row(): - gpu_numbers1c = gr.Textbox( - label=i18n("GPU卡号以-分割,每个卡号一个进程"), - value="%s-%s" % (gpus, gpus), + with gr.Row(equal_height=True): + with gr.Row(equal_height=True): + gpu_numbers1c = gr.Dropdown( + label=i18n("GPU卡号"), + choices=GPU_INDEX_LIST, + value=GPU_INDEX_LIST, interactive=True, + multiselect=True, + allow_custom_value=False, ) - with gr.Row(): + with gr.Row(equal_height=True): pretrained_s2G_ = gr.Textbox( label=i18n("预训练SoVITS-G模型路径"), value=pretrained_sovits_name[version], interactive=False, lines=2, ) - with gr.Row(): + with gr.Row(equal_height=True): button1c_open = gr.Button( value=process_info(process_name_1c, "open"), variant="primary", visible=True ) button1c_close = gr.Button( value=process_info(process_name_1c, "close"), variant="primary", visible=False ) - with gr.Row(): + with gr.Row(equal_height=True): info1c = gr.Textbox(label=process_info(process_name_1c, "info")) with gr.Accordion(label="1Aabc-" + process_name_1abc): - with gr.Row(): - with gr.Row(): + with gr.Row(equal_height=True): + with gr.Row(equal_height=True): button1abc_open = gr.Button( value=process_info(process_name_1abc, "open"), variant="primary", visible=True ) button1abc_close = gr.Button( value=process_info(process_name_1abc, "close"), variant="primary", visible=False ) - with gr.Row(): + with gr.Row(equal_height=True): info1abc = gr.Textbox(label=process_info(process_name_1abc, "info")) pretrained_s2G.change(sync, [pretrained_s2G], [pretrained_s2G_]) @@ -1656,31 +1651,25 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css alpha, n_process, ], - [slicer_info, open_slicer_button, close_slicer_button, asr_inp_dir, denoise_input_dir, inp_wav_dir], + [slicer_info, open_slicer_button, close_slicer_button, asr_inp_dir, inp_wav_dir], ) close_slicer_button.click(close_slice, [], [slicer_info, open_slicer_button, close_slicer_button]) - open_denoise_button.click( - open_denoise, - [denoise_input_dir, denoise_output_dir], - [denoise_info, open_denoise_button, close_denoise_button, asr_inp_dir, inp_wav_dir], - ) - close_denoise_button.click(close_denoise, [], [denoise_info, open_denoise_button, 
close_denoise_button]) button1a_open.click( open1a, - [inp_text, inp_wav_dir, exp_name, gpu_numbers1a, bert_pretrained_dir], + [inp_text, inp_wav_dir, exp_name, gpu_numbers1a, bert_pretrained_dir, version_checkbox, n_processes], [info1a, button1a_open, button1a_close], ) button1a_close.click(close1a, [], [info1a, button1a_open, button1a_close]) button1b_open.click( open1b, - [version_checkbox, inp_text, inp_wav_dir, exp_name, gpu_numbers1Ba, cnhubert_base_dir], + [version_checkbox, inp_text, inp_wav_dir, exp_name, gpu_numbers1b, cnhubert_base_dir, n_processes], [info1b, button1b_open, button1b_close], ) button1b_close.click(close1b, [], [info1b, button1b_open, button1b_close]) button1c_open.click( open1c, - [version_checkbox, inp_text, inp_wav_dir, exp_name, gpu_numbers1c, pretrained_s2G], + [inp_text, exp_name, gpu_numbers1c, pretrained_s2G, n_processes], [info1c, button1c_open, button1c_close], ) button1c_close.click(close1c, [], [info1c, button1c_open, button1c_close]) @@ -1692,11 +1681,12 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css inp_wav_dir, exp_name, gpu_numbers1a, - gpu_numbers1Ba, + gpu_numbers1b, gpu_numbers1c, bert_pretrained_dir, cnhubert_base_dir, pretrained_s2G, + n_processes, ], [info1abc, button1abc_open, button1abc_close], ) @@ -1704,149 +1694,152 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css with gr.TabItem("1B-" + i18n("微调训练")): with gr.Accordion(label="1Ba-" + i18n("SoVITS 训练: 模型权重文件在 SoVITS_weights/")): - with gr.Row(): + with gr.Row(equal_height=True): + batch_size = gr.Slider( + minimum=1, + maximum=default_max_batch_size, + step=1, + label=i18n("每张显卡的batch_size"), + value=default_batch_size, + interactive=True, + ) + total_epoch = gr.Slider( + minimum=1, + maximum=max_sovits_epoch, + step=1, + label=i18n("总训练轮数total_epoch,不建议太高"), + value=default_sovits_epoch, + interactive=True, + ) + with gr.Column(scale=2): + if_save_latest = gr.Checkbox( + label=i18n("是否仅保存最新的权重文件以节省硬盘空间"), + value=True, + interactive=True, + show_label=True, + ) + if_save_every_weights = gr.Checkbox( + label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), + value=True, + interactive=True, + show_label=True, + ) + if_grad_ckpt = gr.Checkbox( + label="v3是否开启梯度检查点节省显存占用", + value=False, + interactive=True if version in v3v4set else False, + show_label=True, + visible=False, + ) # 只有V3s2可以用 + with gr.Row(equal_height=True): + text_low_lr_rate = gr.Slider( + minimum=0.2, + maximum=0.6, + step=0.05, + label=i18n("文本模块学习率权重"), + value=0.4, + visible=True if version not in v3v4set else False, + ) # v3v4 not need + lora_rank = gr.Radio( + label=i18n("LoRA秩"), + value="32", + choices=["16", "32", "64", "128"], + visible=True if version in v3v4set else False, + ) # v1v2 not need + save_every_epoch = gr.Slider( + minimum=1, + maximum=max_sovits_save_every_epoch, + step=1, + label=i18n("保存频率save_every_epoch"), + value=default_sovits_save_every_epoch, + interactive=True, + ) + with gr.Column(scale=3): + gpu_numbers1Ba = gr.Dropdown( + label=i18n("GPU卡号"), + choices=GPU_INDEX_LIST, + value=GPU_INDEX_LIST, + interactive=True, + multiselect=True, + allow_custom_value=False, + ) + with gr.Row(equal_height=True): with gr.Column(): - with gr.Row(): - batch_size = gr.Slider( - minimum=1, - maximum=default_max_batch_size, - step=1, - label=i18n("每张显卡的batch_size"), - value=default_batch_size, - interactive=True, - ) - total_epoch = gr.Slider( - minimum=1, - maximum=max_sovits_epoch, - step=1, - label=i18n("总训练轮数total_epoch,不建议太高"), - 
value=default_sovits_epoch, - interactive=True, - ) - with gr.Row(): - text_low_lr_rate = gr.Slider( - minimum=0.2, - maximum=0.6, - step=0.05, - label=i18n("文本模块学习率权重"), - value=0.4, - visible=True if version not in v3v4set else False, - ) # v3v4 not need - lora_rank = gr.Radio( - label=i18n("LoRA秩"), - value="32", - choices=["16", "32", "64", "128"], - visible=True if version in v3v4set else False, - ) # v1v2 not need - save_every_epoch = gr.Slider( - minimum=1, - maximum=max_sovits_save_every_epoch, - step=1, - label=i18n("保存频率save_every_epoch"), - value=default_sovits_save_every_epoch, - interactive=True, - ) - with gr.Column(): - with gr.Column(): - if_save_latest = gr.Checkbox( - label=i18n("是否仅保存最新的权重文件以节省硬盘空间"), - value=True, - interactive=True, - show_label=True, - ) - if_save_every_weights = gr.Checkbox( - label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), - value=True, - interactive=True, - show_label=True, - ) - if_grad_ckpt = gr.Checkbox( - label="v3是否开启梯度检查点节省显存占用", - value=False, - interactive=True if version in v3v4set else False, - show_label=True, - visible=False, - ) # 只有V3s2可以用 - with gr.Row(): - gpu_numbers1Ba = gr.Textbox( - label=i18n("GPU卡号以-分割,每个卡号一个进程"), - value="%s" % (gpus), - interactive=True, - ) - with gr.Row(): - with gr.Row(): button1Ba_open = gr.Button( value=process_info(process_name_sovits, "open"), variant="primary", visible=True ) button1Ba_close = gr.Button( value=process_info(process_name_sovits, "close"), variant="primary", visible=False ) - with gr.Row(): + with gr.Column(): info1Ba = gr.Textbox(label=process_info(process_name_sovits, "info")) with gr.Accordion(label="1Bb-" + i18n("GPT 训练: 模型权重文件在 GPT_weights/")): - with gr.Row(): + with gr.Row(equal_height=True): + batch_size1Bb = gr.Slider( + minimum=1, + maximum=40, + step=1, + label=i18n("每张显卡的batch_size"), + value=default_batch_size_s1, + interactive=True, + ) + total_epoch1Bb = gr.Slider( + minimum=2, + maximum=50, + step=1, + label=i18n("总训练轮数total_epoch"), + value=15, + interactive=True, + ) + with gr.Column(scale=2): + if_save_latest1Bb = gr.Checkbox( + label=i18n("是否仅保存最新的权重文件以节省硬盘空间"), + value=True, + interactive=True, + show_label=True, + ) + if_save_every_weights1Bb = gr.Checkbox( + label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), + value=True, + interactive=True, + show_label=True, + ) + with gr.Row(equal_height=True): + # with gr.Column(): + save_every_epoch1Bb = gr.Slider( + minimum=1, + maximum=50, + step=1, + label=i18n("保存频率save_every_epoch"), + value=5, + interactive=True, + ) + # with gr.Column(): + if_dpo = gr.Checkbox( + label=i18n("是否开启DPO训练选项(实验性)"), + value=False, + interactive=True, + show_label=True, + ) + with gr.Column(scale=2): + gpu_numbers1Bb = gr.Dropdown( + label=i18n("GPU卡号"), + choices=GPU_INDEX_LIST, + value=GPU_INDEX_LIST, + interactive=True, + multiselect=True, + allow_custom_value=False, + ) + with gr.Row(equal_height=True): with gr.Column(): - with gr.Row(): - batch_size1Bb = gr.Slider( - minimum=1, - maximum=40, - step=1, - label=i18n("每张显卡的batch_size"), - value=default_batch_size_s1, - interactive=True, + with gr.Row(equal_height=True): + button1Bb_open = gr.Button( + value=process_info(process_name_gpt, "open"), variant="primary", visible=True ) - total_epoch1Bb = gr.Slider( - minimum=2, - maximum=50, - step=1, - label=i18n("总训练轮数total_epoch"), - value=15, - interactive=True, - ) - with gr.Row(): - save_every_epoch1Bb = gr.Slider( - minimum=1, - maximum=50, - step=1, - label=i18n("保存频率save_every_epoch"), - value=5, - interactive=True, - ) - if_dpo = gr.Checkbox( - 
label=i18n("是否开启DPO训练选项(实验性)"), - value=False, - interactive=True, - show_label=True, + button1Bb_close = gr.Button( + value=process_info(process_name_gpt, "close"), variant="primary", visible=False ) with gr.Column(): - with gr.Column(): - if_save_latest1Bb = gr.Checkbox( - label=i18n("是否仅保存最新的权重文件以节省硬盘空间"), - value=True, - interactive=True, - show_label=True, - ) - if_save_every_weights1Bb = gr.Checkbox( - label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), - value=True, - interactive=True, - show_label=True, - ) - with gr.Row(): - gpu_numbers1Bb = gr.Textbox( - label=i18n("GPU卡号以-分割,每个卡号一个进程"), - value="%s" % (gpus), - interactive=True, - ) - with gr.Row(): - with gr.Row(): - button1Bb_open = gr.Button( - value=process_info(process_name_gpt, "open"), variant="primary", visible=True - ) - button1Bb_close = gr.Button( - value=process_info(process_name_gpt, "close"), variant="primary", visible=False - ) - with gr.Row(): info1Bb = gr.Textbox(label=process_info(process_name_gpt, "info")) button1Ba_close.click(close1Ba, [], [info1Ba, button1Ba_open, button1Ba_close]) @@ -1858,62 +1851,81 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的几个是底模,体验5秒Zero Shot TTS不训练推理用。" ) ) - with gr.Row(): + with gr.Row(equal_height=True): with gr.Column(scale=2): - with gr.Row(): - GPT_dropdown = gr.Dropdown( - label=i18n("GPT模型列表"), - choices=GPT_names, - value=GPT_names[-1], - interactive=True, - ) - SoVITS_dropdown = gr.Dropdown( - label=i18n("SoVITS模型列表"), - choices=SoVITS_names, - value=SoVITS_names[0], - interactive=True, - ) + with gr.Row(equal_height=True): + with gr.Column(): + GPT_dropdown = gr.Dropdown( + label=i18n("GPT模型列表"), + choices=GPT_names, + value=GPT_names[0][-1], + interactive=True, + ) + with gr.Column(): + SoVITS_dropdown = gr.Dropdown( + label=i18n("SoVITS模型列表"), + choices=SoVITS_names, + value=SoVITS_names[0][-1], + interactive=True, + ) with gr.Column(scale=2): - with gr.Row(): - gpu_number_1C = gr.Textbox( - label=i18n("GPU卡号,只能填1个整数"), value=gpus, interactive=True + with gr.Row(equal_height=True): + gpu_number_1C = gr.Dropdown( + label=i18n("GPU卡号"), + choices=GPU_INDEX_LIST, + value=infer_device.index, + interactive=True, + multiselect=False, + allow_custom_value=False, ) refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary") - refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown]) + refresh_button.click(fn=change_choice, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown]) with gr.Row(equal_height=True): - with gr.Row(): - batched_infer_enabled = gr.Checkbox( - label=i18n("启用并行推理版本"), value=False, interactive=True, show_label=True - ) + with gr.Row(equal_height=True): + with gr.Column(): + batched_infer_enabled = gr.Checkbox( + label=i18n("启用并行推理版本"), value=False, interactive=True, show_label=True + ) + with gr.Column(): + backends_dropdown = gr.Dropdown( + choices=backends_gradio, + label=i18n("推理后端"), + value=backends_gradio[-1][-1], + interactive=True, + ) + with gr.Row(equal_height=True): + tts_info = gr.Textbox(label=process_info(process_name_tts, "info")) open_tts = gr.Button( value=process_info(process_name_tts, "open"), variant="primary", visible=True ) close_tts = gr.Button( value=process_info(process_name_tts, "close"), variant="primary", visible=False ) - with gr.Column(): - tts_info = gr.Textbox(label=process_info(process_name_tts, "info"), scale=2) + + batched_infer_enabled.change( + changeBackend, + [batched_infer_enabled], + [backends_dropdown], + ) 
open_tts.click( change_tts_inference, [ - bert_pretrained_dir, - cnhubert_base_dir, gpu_number_1C, GPT_dropdown, SoVITS_dropdown, batched_infer_enabled, + backends_dropdown, ], [tts_info, open_tts, close_tts], ) close_tts.click( change_tts_inference, [ - bert_pretrained_dir, - cnhubert_base_dir, gpu_number_1C, GPT_dropdown, SoVITS_dropdown, batched_infer_enabled, + backends_dropdown, ], [tts_info, open_tts, close_tts], ) From 26d5eaf1b4903199f5206eb990f1ee4ffcdcf901 Mon Sep 17 00:00:00 2001 From: XXXXRT666 <157766680+XXXXRT666@users.noreply.github.com> Date: Mon, 8 Sep 2025 19:30:35 +0800 Subject: [PATCH 2/2] . --- .gitignore | 2 +- GPT_SoVITS/AR/data/dataset.py | 4 ++-- .../Accelerate/MLX/backends/mlx_quantized.py | 2 +- .../Accelerate/MLX/backends/mlx_static.py | 2 +- .../Accelerate/MLX/backends/mlx_varlen.py | 2 +- GPT_SoVITS/Accelerate/MLX/t2s_engine_mlx.py | 2 +- GPT_SoVITS/Accelerate/MLX/t2s_model_abc.py | 6 +++--- .../backends/flash_attn_varlen_cuda_graph.py | 2 +- .../PyTorch/backends/mps_flash_attn_varlen.py | 2 +- .../backends/sage_attn_varlen_cuda_graph.py | 2 +- .../backends/torch_static_cuda_graph.py | 2 +- .../PyTorch/backends/torch_varlen.py | 2 +- GPT_SoVITS/Accelerate/PyTorch/t2s_engine.py | 2 +- .../Accelerate/PyTorch/t2s_model_abc.py | 11 +++++----- GPT_SoVITS/inference_webui.py | 11 +++++++++- .../{1-get-text.py => 1_get_text.py} | 15 +++----------- ...sv-wav32k.py => 2_get_hubert_sv_wav32k.py} | 13 ++---------- .../{3-get-semantic.py => 3_get_semantic.py} | 17 ++++------------ README.md | 4 ++-- docs/cn/README.md | 2 +- docs/ja/README.md | 4 ++-- docs/ko/README.md | 2 +- docs/tr/README.md | 2 +- go-webui.bat | 1 - go-webui.ps1 | 1 - webui.py | 20 ++++++++----------- 26 files changed, 56 insertions(+), 79 deletions(-) rename GPT_SoVITS/prepare_datasets/{1-get-text.py => 1_get_text.py} (92%) rename GPT_SoVITS/prepare_datasets/{2-get-hubert-sv-wav32k.py => 2_get_hubert_sv_wav32k.py} (93%) rename GPT_SoVITS/prepare_datasets/{3-get-semantic.py => 3_get_semantic.py} (90%) diff --git a/.gitignore b/.gitignore index 37fcdd5a..ae31dad9 100644 --- a/.gitignore +++ b/.gitignore @@ -18,7 +18,7 @@ speakers.json ref_audios tools/AP_BWE/24kto48k/* !tools/AP_BWE/24kto48k/readme.txt -onnx +onnx_export # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/GPT_SoVITS/AR/data/dataset.py b/GPT_SoVITS/AR/data/dataset.py index e3164865..35f669f0 100644 --- a/GPT_SoVITS/AR/data/dataset.py +++ b/GPT_SoVITS/AR/data/dataset.py @@ -11,10 +11,10 @@ import pandas as pd import torch from torch.utils.data import DataLoader, Dataset -version = os.environ.get("version", None) - from GPT_SoVITS.text import cleaned_text_to_sequence +version = os.environ.get("version", None) + # from config import exp_dir diff --git a/GPT_SoVITS/Accelerate/MLX/backends/mlx_quantized.py b/GPT_SoVITS/Accelerate/MLX/backends/mlx_quantized.py index a624b4a5..36ef9295 100644 --- a/GPT_SoVITS/Accelerate/MLX/backends/mlx_quantized.py +++ b/GPT_SoVITS/Accelerate/MLX/backends/mlx_quantized.py @@ -152,7 +152,7 @@ class T2SDecoder(T2SDecoderABC): def __init__( self, config: dict, - max_seq_length: int = 1800, + max_seq_length: int = 2000, max_batch_size: int = 10, ) -> None: super().__init__(config, max_seq_length, max_batch_size) diff --git a/GPT_SoVITS/Accelerate/MLX/backends/mlx_static.py b/GPT_SoVITS/Accelerate/MLX/backends/mlx_static.py index 6716fb32..24300693 100644 --- a/GPT_SoVITS/Accelerate/MLX/backends/mlx_static.py +++ b/GPT_SoVITS/Accelerate/MLX/backends/mlx_static.py @@ -87,7 +87,7 @@ class 
T2SDecoder(T2SDecoderABC): def __init__( self, config: dict, - max_seq_length: int = 1800, + max_seq_length: int = 2000, max_batch_size: int = 10, ) -> None: super().__init__(config, max_seq_length, max_batch_size) diff --git a/GPT_SoVITS/Accelerate/MLX/backends/mlx_varlen.py b/GPT_SoVITS/Accelerate/MLX/backends/mlx_varlen.py index 3f07f6e2..d33f349c 100644 --- a/GPT_SoVITS/Accelerate/MLX/backends/mlx_varlen.py +++ b/GPT_SoVITS/Accelerate/MLX/backends/mlx_varlen.py @@ -91,7 +91,7 @@ class T2SDecoder(T2SDecoderABC): def __init__( self, config: dict, - max_seq_length: int = 1800, + max_seq_length: int = 2000, max_batch_size: int = 10, ) -> None: super().__init__(config, max_seq_length, max_batch_size) diff --git a/GPT_SoVITS/Accelerate/MLX/t2s_engine_mlx.py b/GPT_SoVITS/Accelerate/MLX/t2s_engine_mlx.py index 390c57cb..0fc000f8 100644 --- a/GPT_SoVITS/Accelerate/MLX/t2s_engine_mlx.py +++ b/GPT_SoVITS/Accelerate/MLX/t2s_engine_mlx.py @@ -75,7 +75,7 @@ class T2SEngine(T2SEngineProtocol): transient=True, ) as progress, ): - max_token = min(1800 - int(session.input_pos.max()), 1500) + max_token = min(2000 - int(session.input_pos.max()), 1500) task = progress.add_task("T2S Decoding", total=max_token) for idx in range(1500): diff --git a/GPT_SoVITS/Accelerate/MLX/t2s_model_abc.py b/GPT_SoVITS/Accelerate/MLX/t2s_model_abc.py index fb295179..04fc04cf 100644 --- a/GPT_SoVITS/Accelerate/MLX/t2s_model_abc.py +++ b/GPT_SoVITS/Accelerate/MLX/t2s_model_abc.py @@ -43,7 +43,7 @@ class SinePositionalEmbedding(nn.Module): embedding_dim: int, scale: bool = False, max_batch_size: int = 10, - max_seq_len: int = 1800, + max_seq_len: int = 2000, ): super().__init__() self.embedding_dim = embedding_dim @@ -278,7 +278,7 @@ class AttentionABC(ABC, nn.Module): def prefill(self, x: Array, kv_cache: KVCache | KVCacheQ, attn_mask: Array): bsz, seqlen, _ = cast(tuple[int, ...], x.shape) - q, k, v = self.in_proj(mx.expand_dims(x, 0)).split(3, axis=-1) + q, k, v = self.in_proj(x).split(3, axis=-1) q, k, v = map(lambda x: x.reshape(bsz, seqlen, self.n_head, self.head_dim), (q, k, v)) @@ -413,7 +413,7 @@ class T2SDecoderABC(nn.Module, T2SDecoderProtocol): def __init__( self, config: dict, - max_seq_length: int = 1800, + max_seq_length: int = 2000, max_batch_size: int = 10, ) -> None: super().__init__() diff --git a/GPT_SoVITS/Accelerate/PyTorch/backends/flash_attn_varlen_cuda_graph.py b/GPT_SoVITS/Accelerate/PyTorch/backends/flash_attn_varlen_cuda_graph.py index 62a187da..3343e948 100644 --- a/GPT_SoVITS/Accelerate/PyTorch/backends/flash_attn_varlen_cuda_graph.py +++ b/GPT_SoVITS/Accelerate/PyTorch/backends/flash_attn_varlen_cuda_graph.py @@ -100,7 +100,7 @@ class T2SDecoder(T2SDecoderABC): def __init__( self, config, - max_seq_length=1800, + max_seq_length=2000, max_batch_size=10, ) -> None: assert torch.cuda.is_available() diff --git a/GPT_SoVITS/Accelerate/PyTorch/backends/mps_flash_attn_varlen.py b/GPT_SoVITS/Accelerate/PyTorch/backends/mps_flash_attn_varlen.py index 7d50dae0..f8b5d0a1 100644 --- a/GPT_SoVITS/Accelerate/PyTorch/backends/mps_flash_attn_varlen.py +++ b/GPT_SoVITS/Accelerate/PyTorch/backends/mps_flash_attn_varlen.py @@ -78,7 +78,7 @@ class T2SDecoder(T2SDecoderABC): def __init__( self, config, - max_seq_length=1800, + max_seq_length=2000, max_batch_size=10, ) -> None: super().__init__(config, max_seq_length, max_batch_size) diff --git a/GPT_SoVITS/Accelerate/PyTorch/backends/sage_attn_varlen_cuda_graph.py b/GPT_SoVITS/Accelerate/PyTorch/backends/sage_attn_varlen_cuda_graph.py index 05db87ee..6ff762ed 100644 
--- a/GPT_SoVITS/Accelerate/PyTorch/backends/sage_attn_varlen_cuda_graph.py +++ b/GPT_SoVITS/Accelerate/PyTorch/backends/sage_attn_varlen_cuda_graph.py @@ -94,7 +94,7 @@ class T2SDecoder(T2SDecoderABC): def __init__( self, config, - max_seq_length=1800, + max_seq_length=2000, max_batch_size=10, ) -> None: super().__init__(config, max_seq_length, max_batch_size) diff --git a/GPT_SoVITS/Accelerate/PyTorch/backends/torch_static_cuda_graph.py b/GPT_SoVITS/Accelerate/PyTorch/backends/torch_static_cuda_graph.py index f9ac2cd5..7bd1bd70 100644 --- a/GPT_SoVITS/Accelerate/PyTorch/backends/torch_static_cuda_graph.py +++ b/GPT_SoVITS/Accelerate/PyTorch/backends/torch_static_cuda_graph.py @@ -78,7 +78,7 @@ class T2SDecoder(T2SDecoderABC): def __init__( self, config, - max_seq_length=1800, + max_seq_length=2000, max_batch_size=10, ) -> None: super().__init__(config, max_seq_length, max_batch_size) diff --git a/GPT_SoVITS/Accelerate/PyTorch/backends/torch_varlen.py b/GPT_SoVITS/Accelerate/PyTorch/backends/torch_varlen.py index a0d1be61..3618376e 100644 --- a/GPT_SoVITS/Accelerate/PyTorch/backends/torch_varlen.py +++ b/GPT_SoVITS/Accelerate/PyTorch/backends/torch_varlen.py @@ -86,7 +86,7 @@ class T2SDecoder(T2SDecoderABC): def __init__( self, config, - max_seq_length=1800, + max_seq_length=2000, max_batch_size=10, ) -> None: super().__init__(config, max_seq_length, max_batch_size) diff --git a/GPT_SoVITS/Accelerate/PyTorch/t2s_engine.py b/GPT_SoVITS/Accelerate/PyTorch/t2s_engine.py index 47961c05..423f4953 100644 --- a/GPT_SoVITS/Accelerate/PyTorch/t2s_engine.py +++ b/GPT_SoVITS/Accelerate/PyTorch/t2s_engine.py @@ -57,7 +57,7 @@ class T2SEngine(T2SEngineProtocol): transient=True, ) as progress, ): - max_token = int(min(1800 - session.input_pos.max(), 1500)) + max_token = int(min(2000 - session.input_pos.max(), 1500)) task = progress.add_task("T2S Decoding", total=max_token) for idx in range(max_token): diff --git a/GPT_SoVITS/Accelerate/PyTorch/t2s_model_abc.py b/GPT_SoVITS/Accelerate/PyTorch/t2s_model_abc.py index 56032753..fe8bf75e 100644 --- a/GPT_SoVITS/Accelerate/PyTorch/t2s_model_abc.py +++ b/GPT_SoVITS/Accelerate/PyTorch/t2s_model_abc.py @@ -55,7 +55,7 @@ class SinePositionalEmbedding(nn.Module): scale: bool = False, alpha: bool = False, max_batch_size: int = 10, - max_seq_len: int = 1800, + max_seq_len: int = 2000, ): super().__init__() self.embedding_dim = embedding_dim @@ -106,8 +106,9 @@ class SinePositionalEmbedding(nn.Module): embedded_x (Tensor): [batch_size, seq_len, embed_dim] """ - pe_values = self.pe[:, : x.shape[-2]] - return x * self.x_scale + self.alpha.item() * pe_values + batch_size = x.shape[0] + pe_values = self.pe[:batch_size, : x.shape[-2]] + return x * self.x_scale + self.alpha * pe_values class KVCacheABC(nn.Module, ABC, KVCacheProtocol): @@ -290,7 +291,7 @@ class AttentionABC(nn.Module, ABC): def prefill(self, x: Tensor, kv_cache: KVCacheProtocol, attn_mask: Tensor) -> Tensor: bsz, seqlen, _ = x.shape - q, k, v = self.in_proj(x.unsqueeze(0)).chunk(3, dim=-1) + q, k, v = self.in_proj(x).chunk(3, dim=-1) q, k, v = map(lambda x: x.contiguous().view(bsz, seqlen, self.n_head, self.head_dim), (q, k, v)) @@ -416,7 +417,7 @@ class T2SDecoderABC(nn.Module, ABC, T2SDecoderProtocol): def __init__( self, config: dict, - max_seq_length: int = 1800, + max_seq_length: int = 2000, max_batch_size: int = 10, ) -> None: super().__init__() diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index e3e44c24..3ff001d2 100644 --- a/GPT_SoVITS/inference_webui.py +++ 
b/GPT_SoVITS/inference_webui.py @@ -60,6 +60,7 @@ logging.getLogger("torchaudio._extension").setLevel(logging.ERROR) logging.getLogger("multipart.multipart").setLevel(logging.ERROR) os.environ["TOKENIZERS_PARALLELISM"] = "false" +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" def set_high_priority(): @@ -90,7 +91,7 @@ def lang_type(text: str) -> str: def build_parser() -> argparse.ArgumentParser: p = argparse.ArgumentParser( prog="inference_webui", - description=f"PYTHONPATH=. python -s GPT_SoVITS/inference_webui.py zh_CN -b {backends[-1]}", + description=f"python -s -m GPT_SoVITS.inference_webui zh_CN -b {backends[-1]}", ) p.add_argument( "language", @@ -691,6 +692,8 @@ def get_tts_wav( pause_second=0.3, ): torch.set_grad_enabled(False) + ttfb_time = ttime() + if ref_wav_path: pass else: @@ -918,6 +921,8 @@ def get_tts_wav( with torch.inference_mode(): wav_gen = vocoder_model(cfm_res) # type: ignore audio = wav_gen[0][0] + if i_text == 0: + ttfb_time = ttime() - ttfb_time max_audio = torch.abs(audio).max() # 简单防止16bit爆音 if max_audio > 1: audio = audio / max_audio @@ -954,6 +959,10 @@ def get_tts_wav( console.print(f">> Time Stamps: {t0:.3f}\t{t1:.3f}\t{t2:.3f}\t{t3:.3f}") console.print(f">> Infer Speed: {infer_speed_avg:.2f} Token/s") console.print(f">> RTF: {rtf_value:.2f}") + if ttfb_time > 2: + console.print(f">> TTFB: {ttfb_time:.3f} s") + else: + console.print(f">> TTFB: {ttfb_time * 1000:.3f} ms") gr.Info(f"{infer_speed_avg:.2f} Token/s", title="Infer Speed") gr.Info(f"{rtf_value:.2f}", title="RTF") diff --git a/GPT_SoVITS/prepare_datasets/1-get-text.py b/GPT_SoVITS/prepare_datasets/1_get_text.py similarity index 92% rename from GPT_SoVITS/prepare_datasets/1-get-text.py rename to GPT_SoVITS/prepare_datasets/1_get_text.py index 9d4ac605..26b7bece 100644 --- a/GPT_SoVITS/prepare_datasets/1-get-text.py +++ b/GPT_SoVITS/prepare_datasets/1_get_text.py @@ -1,7 +1,6 @@ import enum import os import os.path as osp -import platform import queue import sys import time @@ -16,7 +15,7 @@ from rich.progress import BarColumn, Progress, TextColumn, TimeRemainingColumn from torch.multiprocessing.spawn import spawn from transformers import BertForMaskedLM, BertTokenizerFast -from GPT_SoVITS.Accelerate.logger import console, logger, SpeedColumnIteration +from GPT_SoVITS.Accelerate.logger import SpeedColumnIteration, console, logger from GPT_SoVITS.text.cleaner import clean_text from tools.my_utils import clean_path @@ -302,16 +301,8 @@ def is_powershell_env(env: dict) -> bool: def get_prog_name() -> str: - system = platform.system() - env = os.environ.copy() - script_rel = osp.join("GPT_SoVITS", "prepare_datasets", osp.basename(__file__)) - if system == "Windows": - if is_powershell_env(env): - return rf"$env:PYTHONPATH='.'; python -s {script_rel}" - else: - return rf"set PYTHONPATH=. && python -s {script_rel}" - else: - return f"PYTHONPATH=. 
python -s {script_rel}" + script_rel = ".".join(["GPT_SoVITS", "prepare_datasets", osp.basename(__file__)]).strip(".py") + return f"python -s -m {script_rel}" if __name__ == "__main__": diff --git a/GPT_SoVITS/prepare_datasets/2-get-hubert-sv-wav32k.py b/GPT_SoVITS/prepare_datasets/2_get_hubert_sv_wav32k.py similarity index 93% rename from GPT_SoVITS/prepare_datasets/2-get-hubert-sv-wav32k.py rename to GPT_SoVITS/prepare_datasets/2_get_hubert_sv_wav32k.py index 4cd4f5ef..8f839f9e 100644 --- a/GPT_SoVITS/prepare_datasets/2-get-hubert-sv-wav32k.py +++ b/GPT_SoVITS/prepare_datasets/2_get_hubert_sv_wav32k.py @@ -1,7 +1,6 @@ import enum import os import os.path as osp -import platform import queue import sys import time @@ -405,16 +404,8 @@ def is_powershell_env(env: dict) -> bool: def get_prog_name() -> str: - system = platform.system() - env = os.environ.copy() - script_rel = os.path.join("GPT_SoVITS", "prepare_datasets", os.path.basename(__file__)) - if system == "Windows": - if is_powershell_env(env): - return rf"$env:PYTHONPATH='.'; python -s {script_rel}" - else: - return rf"set PYTHONPATH=. && python -s {script_rel}" - else: - return f"PYTHONPATH=. python -s {script_rel}" + script_rel = ".".join(["GPT_SoVITS", "prepare_datasets", osp.basename(__file__)]).strip(".py") + return f"python -s -m {script_rel}" if __name__ == "__main__": diff --git a/GPT_SoVITS/prepare_datasets/3-get-semantic.py b/GPT_SoVITS/prepare_datasets/3_get_semantic.py similarity index 90% rename from GPT_SoVITS/prepare_datasets/3-get-semantic.py rename to GPT_SoVITS/prepare_datasets/3_get_semantic.py index 27ca5f6d..698f3f97 100644 --- a/GPT_SoVITS/prepare_datasets/3-get-semantic.py +++ b/GPT_SoVITS/prepare_datasets/3_get_semantic.py @@ -2,7 +2,6 @@ import enum import gc import os import os.path as osp -import platform import queue import sys import time @@ -12,10 +11,10 @@ from typing import List, Tuple import torch import torch.multiprocessing as tmp import typer -from rich.progress import BarColumn, Progress, TimeRemainingColumn, TextColumn +from rich.progress import BarColumn, Progress, TextColumn, TimeRemainingColumn from torch.multiprocessing.spawn import spawn -from GPT_SoVITS.Accelerate.logger import console, logger, SpeedColumnIteration +from GPT_SoVITS.Accelerate.logger import SpeedColumnIteration, console, logger from GPT_SoVITS.module.models import SynthesizerTrn, SynthesizerTrnV3 from GPT_SoVITS.process_ckpt import inspect_version from tools.my_utils import DictToAttrRecursive, clean_path @@ -295,16 +294,8 @@ def is_powershell_env(env: dict) -> bool: def get_prog_name() -> str: - system = platform.system() - env = os.environ.copy() - script_rel = osp.join("GPT_SoVITS", "prepare_datasets", osp.basename(__file__)) - if system == "Windows": - if is_powershell_env(env): - return rf"$env:PYTHONPATH='.'; python -s {script_rel}" - else: - return rf"set PYTHONPATH=. && python -s {script_rel}" - else: - return f"PYTHONPATH=. python -s {script_rel}" + script_rel = ".".join(["GPT_SoVITS", "prepare_datasets", osp.basename(__file__)]).strip(".py") + return f"python -s -m {script_rel}" if __name__ == "__main__": diff --git a/README.md b/README.md index f85556b2..3fdecf4d 100644 --- a/README.md +++ b/README.md @@ -273,13 +273,13 @@ Double-click `go-webui-v2.bat` or use `go-webui-v2.ps1` ,then open the inference #### Others ```bash -PYTHONPATH=. python GPT_SoVITS/inference_webui.py -b -p +python -m GPT_SoVITS.inference_webui -b -p ``` OR ```bash -PYTHONPATH=. 
python webui.py +python webui.py ``` then open the inference webui at `1-GPT-SoVITS-TTS/1C-inference` diff --git a/docs/cn/README.md b/docs/cn/README.md index afc18369..1f3f82c5 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -265,7 +265,7 @@ python webui.py #### 其他 ```bash -PYTHONPATH=. python GPT_SoVITS/inference_webui.py -b -p +python -m GPT_SoVITS.inference_webui -b -p ``` 或者 diff --git a/docs/ja/README.md b/docs/ja/README.md index a37b817d..59ed6489 100644 --- a/docs/ja/README.md +++ b/docs/ja/README.md @@ -251,13 +251,13 @@ python webui.py <言語(オプション)> #### その他 ```bash -PYTHONPATH=. python GPT_SoVITS/inference_webui.py -b -p +python -m GPT_SoVITS.inference_webui -b -p ``` または ```bash -PYTHONPATH=. python webui.py +python webui.py ``` その後、`1-GPT-SoVITS-TTS/1C-inference`で推論 webui を開きます. diff --git a/docs/ko/README.md b/docs/ko/README.md index afd85d4d..1afbcbee 100644 --- a/docs/ko/README.md +++ b/docs/ko/README.md @@ -259,7 +259,7 @@ python webui.py <언어(옵션)> #### 기타 ```bash -PYTHONPATH=. python GPT_SoVITS/inference_webui.py -b -p +python -m GPT_SoVITS.inference_webui -b -p ``` 또는 diff --git a/docs/tr/README.md b/docs/tr/README.md index 51a090cd..e56110a6 100644 --- a/docs/tr/README.md +++ b/docs/tr/README.md @@ -259,7 +259,7 @@ python webui.py #### Diğerleri ```text -PYTHONPATH=. python GPT_SoVITS/inference_webui.py -b -p +python -m GPT_SoVITS.inference_webui -b -p ``` VEYA diff --git a/go-webui.bat b/go-webui.bat index 6918cc5f..d2e3c10a 100644 --- a/go-webui.bat +++ b/go-webui.bat @@ -2,6 +2,5 @@ set "SCRIPT_DIR=%~dp0" set "SCRIPT_DIR=%SCRIPT_DIR:~0,-1%" cd /d "%SCRIPT_DIR%" set "PATH=%SCRIPT_DIR%\runtime" -set "PYTHONPATH=%SCRIPT_DIR%" runtime\python.exe -I webui.py zh_CN pause diff --git a/go-webui.ps1 b/go-webui.ps1 index 42d336e3..30206849 100644 --- a/go-webui.ps1 +++ b/go-webui.ps1 @@ -3,6 +3,5 @@ chcp 65001 Set-Location $PSScriptRoot $runtimePath = Join-Path $PSScriptRoot "runtime" $env:PATH = "$runtimePath" -$env:PYTHONPATH = "$runtimePath" & "$runtimePath\python.exe" -I "$PSScriptRoot\webui.py" zh_CN pause diff --git a/webui.py b/webui.py index f81ac802..12939dd6 100644 --- a/webui.py +++ b/webui.py @@ -44,11 +44,11 @@ from tools.assets import css, js, top_html from tools.i18n.i18n import I18nAuto, scan_language_list from tools.my_utils import check_details, check_for_existance -os.environ["PYTHONPATH"] = now_dir = os.getcwd() os.environ["version"] = version = "v2Pro" os.environ["TORCH_DISTRIBUTED_DEBUG"] = "INFO" os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" os.environ["all_proxy"] = "" +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" backends_gradio = [(b.replace("-", " "), b) for b in backends] @@ -86,7 +86,7 @@ def build_parser() -> argparse.ArgumentParser: args = build_parser().parse_args() -tmp = os.path.join(now_dir, "TEMP") +tmp = "TEMP" os.makedirs(tmp, exist_ok=True) os.environ["TEMP"] = tmp if os.path.exists(tmp): @@ -794,11 +794,10 @@ def open1a( opt_dir = f"{exp_root}/{exp_name}" env = os.environ.copy() - env["PYTHONPATH"] = os.getcwd() # fmt: off cmd = [ - python_exec, "-s", "GPT_SoVITS/prepare_datasets/1-get-text.py", + python_exec, "-s", "-m", "GPT_SoVITS.prepare_datasets.1_get_text", "--inp-list", inp_text, "--opt", opt_dir, "--bert", bert_pretrained_dir, @@ -884,11 +883,10 @@ def open1b( opt_dir = f"{exp_root}/{exp_name}" env = os.environ.copy() - env["PYTHONPATH"] = os.getcwd() # fmt: off cmd = [ - python_exec, "-s", "GPT_SoVITS/prepare_datasets/2-get-hubert-sv-wav32k.py", + python_exec, "-s", 
"GPT_SoVITS/prepare_datasets/2_get_hubert_sv_wav32k.py", "--inp-list", inp_text, "--opt", opt_dir, "--cnhubert", ssl_pretrained_dir, @@ -977,11 +975,10 @@ def open1c( opt_dir = f"{exp_root}/{exp_name}" env = os.environ.copy() - env["PYTHONPATH"] = os.getcwd() # fmt: off cmd = [ - python_exec, "-s", "GPT_SoVITS/prepare_datasets/3-get-semantic.py", + python_exec, "-s", "GPT_SoVITS/prepare_datasets/3_get_semantic.py", "--inp-list", inp_text, "--opt", opt_dir, "--pretrained-s2g", pretrained_s2G_path, @@ -1073,12 +1070,11 @@ def open1abc( opt_dir = f"{exp_root}/{exp_name}" env = os.environ.copy() - env["PYTHONPATH"] = os.getcwd() # Step 1 # fmt: off cmd_1 = [ - python_exec, "-s", "GPT_SoVITS/prepare_datasets/1-get-text.py", + python_exec, "-s", "GPT_SoVITS/prepare_datasets/1_get_text.py", "--inp-list", inp_text, "--opt", opt_dir, "--bert", bert_pretrained_dir, @@ -1124,7 +1120,7 @@ def open1abc( # Step 2 # fmt: off cmd_2 = [ - python_exec, "-s", "GPT_SoVITS/prepare_datasets/2-get-hubert-sv-wav32k.py", + python_exec, "-s", "GPT_SoVITS/prepare_datasets/2_get_hubert_sv_wav32k.py", "--inp-list", inp_text, "--opt", opt_dir, "--cnhubert", ssl_pretrained_dir, @@ -1175,7 +1171,7 @@ def open1abc( # Step 3 # fmt: off cmd_3 = [ - python_exec, "-s", "GPT_SoVITS/prepare_datasets/3-get-semantic.py", + python_exec, "-s", "GPT_SoVITS/prepare_datasets/3_get_semantic.py", "--inp-list", inp_text, "--opt", opt_dir, "--pretrained-s2g", pretrained_s2G_path,