From 37f5abfcb4a6553652235909db2e124b6f8ff3a5 Mon Sep 17 00:00:00 2001
From: XXXXRT666 <157766680+XXXXRT666@users.noreply.github.com>
Date: Wed, 25 Jun 2025 14:52:27 +0800
Subject: [PATCH 1/4] Fix Issues with libstdcxx and conda sysroot (#2482)

---
 install.sh | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/install.sh b/install.sh
index c3d20852..ea8d2e2d 100644
--- a/install.sh
+++ b/install.sh
@@ -170,7 +170,13 @@ if ! $USE_HF && ! $USE_HF_MIRROR && ! $USE_MODELSCOPE; then
     exit 1
 fi
 
-# 安装构建工具
+case "$(uname -m)" in
+    x86_64|amd64) SYSROOT_PKG="sysroot_linux-64>=2.28" ;;
+    aarch64|arm64) SYSROOT_PKG="sysroot_linux-aarch64>=2.28" ;;
+    ppc64le) SYSROOT_PKG="sysroot_linux-ppc64le>=2.28" ;;
+    *) echo "Unsupported architecture: $(uname -m)"; exit 1 ;;
+esac
+
 # Install build tools
 echo -e "${INFO}Detected system: $(uname -s) $(uname -r) $(uname -m)"
 if [ "$(uname)" != "Darwin" ]; then
@@ -178,10 +184,14 @@ if [ "$(uname)" != "Darwin" ]; then
     if [ "$gcc_major_version" -lt 11 ]; then
         echo -e "${INFO}Installing GCC & G++..."
         run_conda_quiet gcc=11 gxx=11
+        run_conda_quiet "$SYSROOT_PKG"
         echo -e "${SUCCESS}GCC & G++ Installed..."
     else
         echo -e "${INFO}Detected GCC Version: $gcc_major_version"
         echo -e "${INFO}Skip Installing GCC & G++ From Conda-Forge"
+        echo -e "${INFO}Installing libstdcxx-ng From Conda-Forge"
+        run_conda_quiet "libstdcxx-ng>=$gcc_major_version"
+        echo -e "${SUCCESS}libstdcxx-ng=$gcc_major_version Installed..."
     fi
 else
     if ! xcode-select -p &>/dev/null; then

From 4839e8214862808bea45b86c7c26ff643b0175ee Mon Sep 17 00:00:00 2001
From: XXXXRT666 <157766680+XXXXRT666@users.noreply.github.com>
Date: Fri, 27 Jun 2025 01:04:18 +0800
Subject: [PATCH 2/4] Add Windows Install Powershell Scripts (#2487)

---
 README.md         |   8 ++
 docs/cn/README.md |   6 ++
 docs/ko/README.md |   6 ++
 docs/tr/README.md |   6 ++
 install.ps1       | 241 ++++++++++++++++++++++++++++++++++++++++++++++
 install.sh        |  28 +++---
 6 files changed, 283 insertions(+), 12 deletions(-)
 create mode 100644 install.ps1

diff --git a/README.md b/README.md
index 978bb3ca..e67288ea 100644
--- a/README.md
+++ b/README.md
@@ -64,6 +64,14 @@ If you are a Windows user (tested with win>=10), you can [download the integrate
 
 **Users in China can [download the package here](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e/dkxgpiy9zb96hob4#KTvnO).**
 
+Install the program by running the following commands:
+
+```pwsh
+conda create -n GPTSoVits python=3.10
+conda activate GPTSoVits
+pwsh -F install.ps1 -Device <CU126|CU128|CPU> -Source <HF|HF-Mirror|ModelScope> [-DownloadUVR5]
+```
+
 ### Linux
 
 ```bash
diff --git a/docs/cn/README.md b/docs/cn/README.md
index 70ce4285..77841b99 100644
--- a/docs/cn/README.md
+++ b/docs/cn/README.md
@@ -62,6 +62,12 @@
 
 **中国地区的用户可以[在此处下载整合包](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e/dkxgpiy9zb96hob4#KTvnO).**
 
+```pwsh
+conda create -n GPTSoVits python=3.10
+conda activate GPTSoVits
+pwsh -F install.ps1 -Device <CU126|CU128|CPU> -Source <HF|HF-Mirror|ModelScope> [-DownloadUVR5]
+```
+
 ### Linux
 
 ```bash
diff --git a/docs/ko/README.md b/docs/ko/README.md
index bfc70395..1028c00a 100644
--- a/docs/ko/README.md
+++ b/docs/ko/README.md
@@ -58,6 +58,12 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-
 
 Windows 사용자라면 (win>=10에서 테스트됨), [통합 패키지를 다운로드](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-v3lora-20250228.7z?download=true)한 후 압축을 풀고 _go-webui.bat_ 파일을 더블 클릭하면 GPT-SoVITS-WebUI를 시작할 수 있습니다.
 
+```pwsh
+conda create -n GPTSoVits python=3.10
+conda activate GPTSoVits
+pwsh -F install.ps1 -Device <CU126|CU128|CPU> -Source <HF|HF-Mirror|ModelScope> [-DownloadUVR5]
+```
+
 ### Linux
 
 ```bash
diff --git a/docs/tr/README.md b/docs/tr/README.md
index 2ce02b60..dd5d79b3 100644
--- a/docs/tr/README.md
+++ b/docs/tr/README.md
@@ -58,6 +58,12 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-
 
 Eğer bir Windows kullanıcısıysanız (win>=10 ile test edilmiştir), [entegre paketi indirin](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-v3lora-20250228.7z?download=true) ve _go-webui.bat_ dosyasına çift tıklayarak GPT-SoVITS-WebUI'yi başlatın.
 
+```pwsh
+conda create -n GPTSoVits python=3.10
+conda activate GPTSoVits
+pwsh -F install.ps1 -Device <CU126|CU128|CPU> -Source <HF|HF-Mirror|ModelScope> [-DownloadUVR5]
+```
+
 ### Linux
 
 ```bash
diff --git a/install.ps1 b/install.ps1
new file mode 100644
index 00000000..9c33ace8
--- /dev/null
+++ b/install.ps1
@@ -0,0 +1,241 @@
+Param (
+    [Parameter(Mandatory=$true)][ValidateSet("CU126", "CU128", "CPU")][string]$Device,
+    [Parameter(Mandatory=$true)][ValidateSet("HF", "HF-Mirror", "ModelScope")][string]$Source,
+    [switch]$DownloadUVR5
+)
+
+$global:ErrorActionPreference = 'Stop'
+
+trap {
+    Write-ErrorLog $_
+}
+
+function Write-ErrorLog {
+    param (
+        [System.Management.Automation.ErrorRecord]$ErrorRecord
+    )
+
+    Write-Host "`n[ERROR] Command failed:" -ForegroundColor Red
+    if (-not $ErrorRecord.Exception.Message){
+    } else {
+        Write-Host "Message:" -ForegroundColor Red
+        $ErrorRecord.Exception.Message -split "`n" | ForEach-Object {
+            Write-Host " $_"
+        }
+    }
+
+    Write-Host "Command:" -ForegroundColor Red -NoNewline
+    Write-Host " $($ErrorRecord.InvocationInfo.Line)".Replace("`r", "").Replace("`n", "")
+    Write-Host "Location:" -ForegroundColor Red -NoNewline
+    Write-Host " $($ErrorRecord.InvocationInfo.ScriptName):$($ErrorRecord.InvocationInfo.ScriptLineNumber)"
+    Write-Host "Call Stack:" -ForegroundColor DarkRed
+    $ErrorRecord.ScriptStackTrace -split "`n" | ForEach-Object {
+        Write-Host " $_" -ForegroundColor DarkRed
+    }
+
+    exit 1
+}
+
+function Write-Info($msg) {
+    Write-Host "[INFO]:" -ForegroundColor Green -NoNewline
+    Write-Host " $msg"
+}
+function Write-Success($msg) {
+    Write-Host "[SUCCESS]:" -ForegroundColor Blue -NoNewline
+    Write-Host " $msg"
+}
+
+
+function Invoke-Conda {
+    param (
+        [Parameter(ValueFromRemainingArguments = $true)]
+        [string[]]$Args
+    )
+
+    $output = & conda install -y -q -c conda-forge @Args 2>&1
+    $exitCode = $LASTEXITCODE
+
+    if ($exitCode -ne 0) {
+        Write-Host "Conda Install $Args Failed" -ForegroundColor Red
+        $errorMessages = @()
+        foreach ($item in $output) {
+            if ($item -is [System.Management.Automation.ErrorRecord]) {
+                $msg = $item.Exception.Message
+                Write-Host "$msg" -ForegroundColor Red
+                $errorMessages += $msg
+            }
+            else {
+                Write-Host $item
+                $errorMessages += $item
+            }
+        }
+        throw [System.Exception]::new(($errorMessages -join "`n"))
+    }
+}
+
+function Invoke-Pip {
+    param (
+        [Parameter(ValueFromRemainingArguments = $true)]
+        [string[]]$Args
+    )
+
+    $output = & pip install @Args 2>&1
+    $exitCode = $LASTEXITCODE
+
+    if ($exitCode -ne 0) {
+        $errorMessages = @()
+        Write-Host "Pip Install $Args Failed" -ForegroundColor Red
+        foreach ($item in $output) {
+            if ($item -is [System.Management.Automation.ErrorRecord]) {
+                $msg = $item.Exception.Message
+                Write-Host "$msg" -ForegroundColor Red
+                $errorMessages += $msg
+            }
+            else {
+                Write-Host $item
+                $errorMessages += $item
+            }
+        }
+        throw [System.Exception]::new(($errorMessages -join "`n"))
+    }
+}
+
+function Invoke-Download {
+    param (
+        [Parameter(Mandatory = $true)]
+        [string]$Uri,
+
+        [Parameter()]
+        [string]$OutFile
+    )
+
+    try {
+        $params = @{
+            Uri = $Uri
+        }
+
+        if ($OutFile) {
+            $params["OutFile"] = $OutFile
+        }
+
+        $null = Invoke-WebRequest @params -ErrorAction Stop
+
+    } catch {
+        Write-Host "Failed to download:" -ForegroundColor Red
+        Write-Host " $Uri"
+        throw
+    }
+}
+
+function Invoke-Unzip {
+    param($ZipPath, $DestPath)
+    Expand-Archive -Path $ZipPath -DestinationPath $DestPath -Force
+    Remove-Item $ZipPath -Force
+}
+
+chcp 65001
+Set-Location $PSScriptRoot
+
+Write-Info "Installing FFmpeg & CMake..."
+Invoke-Conda ffmpeg cmake
+Write-Success "FFmpeg & CMake Installed"
+
+$PretrainedURL = ""
+$G2PWURL = ""
+$UVR5URL = ""
+$NLTKURL = ""
+$OpenJTalkURL = ""
+
+switch ($Source) {
+    "HF" {
+        Write-Info "Download Model From HuggingFace"
+        $PretrainedURL = "https://huggingface.co/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/pretrained_models.zip"
+        $G2PWURL = "https://huggingface.co/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/G2PWModel.zip"
+        $UVR5URL = "https://huggingface.co/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/uvr5_weights.zip"
+        $NLTKURL = "https://huggingface.co/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/nltk_data.zip"
+        $OpenJTalkURL = "https://huggingface.co/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/open_jtalk_dic_utf_8-1.11.tar.gz"
+    }
+    "HF-Mirror" {
+        Write-Info "Download Model From HuggingFace-Mirror"
+        $PretrainedURL = "https://hf-mirror.com/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/pretrained_models.zip"
+        $G2PWURL = "https://hf-mirror.com/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/G2PWModel.zip"
+        $UVR5URL = "https://hf-mirror.com/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/uvr5_weights.zip"
+        $NLTKURL = "https://hf-mirror.com/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/nltk_data.zip"
+        $OpenJTalkURL = "https://hf-mirror.com/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/open_jtalk_dic_utf_8-1.11.tar.gz"
+    }
+    "ModelScope" {
+        Write-Info "Download Model From ModelScope"
+        $PretrainedURL = "https://www.modelscope.cn/models/XXXXRT/GPT-SoVITS-Pretrained/resolve/master/pretrained_models.zip"
+        $G2PWURL = "https://www.modelscope.cn/models/XXXXRT/GPT-SoVITS-Pretrained/resolve/master/G2PWModel.zip"
+        $UVR5URL = "https://www.modelscope.cn/models/XXXXRT/GPT-SoVITS-Pretrained/resolve/master/uvr5_weights.zip"
+        $NLTKURL = "https://www.modelscope.cn/models/XXXXRT/GPT-SoVITS-Pretrained/resolve/master/nltk_data.zip"
+        $OpenJTalkURL = "https://www.modelscope.cn/models/XXXXRT/GPT-SoVITS-Pretrained/resolve/master/open_jtalk_dic_utf_8-1.11.tar.gz"
+    }
+}
+
+if (-not (Test-Path "GPT_SoVITS/pretrained_models/sv")) {
+    Write-Info "Downloading Pretrained Models..."
+    Invoke-Download -Uri $PretrainedURL -OutFile "pretrained_models.zip"
+    Invoke-Unzip "pretrained_models.zip" "GPT_SoVITS"
+    Write-Success "Pretrained Models Downloaded"
+} else {
+    Write-Info "Pretrained Model Exists"
+    Write-Info "Skip Downloading Pretrained Models"
+}
+
+
+if (-not (Test-Path "GPT_SoVITS/text/G2PWModel")) {
+    Write-Info "Downloading G2PWModel..."
+    Invoke-Download -Uri $G2PWURL -OutFile "G2PWModel.zip"
+    Invoke-Unzip "G2PWModel.zip" "GPT_SoVITS/text"
+    Write-Success "G2PWModel Downloaded"
+} else {
+    Write-Info "G2PWModel Exists"
+    Write-Info "Skip Downloading G2PWModel"
+}
+
+if ($DownloadUVR5) {
+    if (-not (Test-Path "tools/uvr5/uvr5_weights")) {
+        Write-Info "Downloading UVR5 Models..."
+        Invoke-Download -Uri $UVR5URL -OutFile "uvr5_weights.zip"
+        Invoke-Unzip "uvr5_weights.zip" "tools/uvr5"
+        Write-Success "UVR5 Models Downloaded"
+    } else {
+        Write-Info "UVR5 Models Exists"
+        Write-Info "Skip Downloading UVR5 Models"
+    }
+}
+
+switch ($Device) {
+    "CU128" {
+        Write-Info "Installing PyTorch For CUDA 12.8..."
+        Invoke-Pip torch torchaudio --index-url "https://download.pytorch.org/whl/cu128"
+    }
+    "CU126" {
+        Write-Info "Installing PyTorch For CUDA 12.6..."
+        Invoke-Pip torch torchaudio --index-url "https://download.pytorch.org/whl/cu126"
+    }
+    "CPU" {
+        Write-Info "Installing PyTorch For CPU..."
+        Invoke-Pip torch torchaudio --index-url "https://download.pytorch.org/whl/cpu"
+    }
+}
+Write-Success "PyTorch Installed"
+
+Write-Info "Installing Python Dependencies From requirements.txt..."
+Invoke-Pip -r extra-req.txt --no-deps
+Invoke-Pip -r requirements.txt
+Write-Success "Python Dependencies Installed"
+
+Write-Info "Downloading NLTK Data..."
+Invoke-Download -Uri $NLTKURL -OutFile "nltk_data.zip"
+Invoke-Unzip "nltk_data.zip" (python -c "import sys; print(sys.prefix)").Trim()
+
+Write-Info "Downloading Open JTalk Dict..."
+Invoke-Download -Uri $OpenJTalkURL -OutFile "open_jtalk_dic_utf_8-1.11.tar.gz"
+$target = (python -c "import os, pyopenjtalk; print(os.path.dirname(pyopenjtalk.__file__))").Trim()
+tar -xzf open_jtalk_dic_utf_8-1.11.tar.gz -C $target
+Remove-Item "open_jtalk_dic_utf_8-1.11.tar.gz" -Force
+Write-Success "Open JTalk Dic Downloaded"
+
+Write-Success "Installation Completed"
diff --git a/install.sh b/install.sh
index ea8d2e2d..a2fa751e 100644
--- a/install.sh
+++ b/install.sh
@@ -48,11 +48,12 @@ run_pip_quiet() {
 }
 
 run_wget_quiet() {
-    local output
-    output=$(wget --tries=25 --wait=5 --read-timeout=40 --retry-on-http-error=404 "$@" 2>&1) || {
-        echo -e "${ERROR} Wget failed:\n$output"
+    if wget --tries=25 --wait=5 --read-timeout=40 -q --show-progress "$@" 2>&1; then
+        tput cuu1 && tput el
+    else
+        echo -e "${ERROR} Wget failed"
         exit 1
-    }
+    fi
 }
 
 if ! command -v conda &>/dev/null; then
@@ -171,10 +172,13 @@ if ! $USE_HF && ! $USE_HF_MIRROR && ! $USE_MODELSCOPE; then
 fi
 
 case "$(uname -m)" in
-    x86_64|amd64) SYSROOT_PKG="sysroot_linux-64>=2.28" ;;
-    aarch64|arm64) SYSROOT_PKG="sysroot_linux-aarch64>=2.28" ;;
-    ppc64le) SYSROOT_PKG="sysroot_linux-ppc64le>=2.28" ;;
-    *) echo "Unsupported architecture: $(uname -m)"; exit 1 ;;
+x86_64 | amd64) SYSROOT_PKG="sysroot_linux-64>=2.28" ;;
+aarch64 | arm64) SYSROOT_PKG="sysroot_linux-aarch64>=2.28" ;;
+ppc64le) SYSROOT_PKG="sysroot_linux-ppc64le>=2.28" ;;
+*)
+    echo "Unsupported architecture: $(uname -m)"
+    exit 1
+    ;;
 esac
 
 # Install build tools
@@ -248,10 +252,7 @@ elif [ "$USE_MODELSCOPE" = "true" ]; then
     PYOPENJTALK_URL="https://www.modelscope.cn/models/XXXXRT/GPT-SoVITS-Pretrained/resolve/master/open_jtalk_dic_utf_8-1.11.tar.gz"
 fi
 
-if find -L "GPT_SoVITS/pretrained_models" -mindepth 1 ! -name '.gitignore' | grep -q .; then
-    echo -e "${INFO}Pretrained Model Exists"
-    echo -e "${INFO}Skip Downloading Pretrained Models"
-else
+if [ ! -d "GPT_SoVITS/pretrained_models/sv" ]; then
     echo -e "${INFO}Downloading Pretrained Models..."
     rm -rf pretrained_models.zip
     run_wget_quiet "$PRETRINED_URL"
@@ -259,6 +260,9 @@ else
     unzip -q -o pretrained_models.zip -d GPT_SoVITS
     rm -rf pretrained_models.zip
     echo -e "${SUCCESS}Pretrained Models Downloaded"
+else
+    echo -e "${INFO}Pretrained Model Exists"
+    echo -e "${INFO}Skip Downloading Pretrained Models"
 fi
 
 if [ ! -d "GPT_SoVITS/text/G2PWModel" ]; then

From 90ebefa78fd544da36eebe0b2003620879c921b0 Mon Sep 17 00:00:00 2001
From: KamioRinn <63162909+KamioRinn@users.noreply.github.com>
Date: Fri, 27 Jun 2025 10:41:52 +0800
Subject: [PATCH 3/4] make sure ort providers available (#2489)

---
 GPT_SoVITS/text/g2pw/onnx_api.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/GPT_SoVITS/text/g2pw/onnx_api.py b/GPT_SoVITS/text/g2pw/onnx_api.py
index 52eed443..1d5e4231 100644
--- a/GPT_SoVITS/text/g2pw/onnx_api.py
+++ b/GPT_SoVITS/text/g2pw/onnx_api.py
@@ -93,13 +93,13 @@ class G2PWOnnxConverter:
         sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
         sess_options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
         sess_options.intra_op_num_threads = 2 if torch.cuda.is_available() else 0
-        try:
+        if "CUDAExecutionProvider" in onnxruntime.get_available_providers():
             self.session_g2pW = onnxruntime.InferenceSession(
                 os.path.join(uncompress_path, "g2pW.onnx"),
                 sess_options=sess_options,
                 providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
             )
-        except:
+        else:
             self.session_g2pW = onnxruntime.InferenceSession(
                 os.path.join(uncompress_path, "g2pW.onnx"),
                 sess_options=sess_options,

From 6df61f58e4d18d4c2ad9d1eddd6a1bd690034c23 Mon Sep 17 00:00:00 2001
From: KamioRinn <63162909+KamioRinn@users.noreply.github.com>
Date: Fri, 27 Jun 2025 11:58:41 +0800
Subject: [PATCH 4/4] Language segmentation and formatting optimization
 (#2488)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* better LangSegmenter
* add version num2str
* better version num2str
* sync fast infer
* sync api
* remove duplicate spaces
* remove unnecessary code

---------

Co-authored-by: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
---
 GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py | 126 +++++------
 GPT_SoVITS/inference_webui.py                 | 121 +++++-----
 .../text/LangSegmenter/langsegmenter.py       | 209 +++++++++++-------
 GPT_SoVITS/text/chinese.py                    |  14 --
 GPT_SoVITS/text/chinese2.py                   |  14 --
 GPT_SoVITS/text/zh_normalization/num.py       |  24 +-
 .../zh_normalization/text_normlization.py     |   3 +
 api.py                                        | 117 +++++-----
 8 files changed, 332 insertions(+), 296 deletions(-)

diff --git a/GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py b/GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py
index 9a478d43..283e91c3 100644
--- a/GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py
+++ b/GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py
@@ -121,71 +121,67 @@ class TextPreprocessor:
 
     def get_phones_and_bert(self, text: str, language: str, version: str, final: bool = False):
         with self.bert_lock:
-            if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
-                # language = language.replace("all_","")
-                formattext = text
-                while "  " in formattext:
-                    formattext = formattext.replace("  ", " ")
-                if language == "all_zh":
-                    if re.search(r"[A-Za-z]", formattext):
-                        formattext = re.sub(r"[a-z]", lambda x: x.group(0).upper(), formattext)
-                        formattext = chinese.mix_text_normalize(formattext)
-                        return self.get_phones_and_bert(formattext, "zh", version)
+            text = re.sub(r' {2,}', ' ', text)
+            textlist = []
+            langlist = []
+            if language == "all_zh":
+                for tmp in LangSegmenter.getTexts(text,"zh"):
+                    langlist.append(tmp["lang"])
+                    textlist.append(tmp["text"])
+            elif language == "all_yue":
+                for tmp in LangSegmenter.getTexts(text,"zh"):
+                    if tmp["lang"] == "zh":
+                        tmp["lang"] = "yue"
+                    langlist.append(tmp["lang"])
+                    textlist.append(tmp["text"])
+            elif language == "all_ja":
+                for tmp in LangSegmenter.getTexts(text,"ja"):
+                    langlist.append(tmp["lang"])
+                    textlist.append(tmp["text"])
+            elif language == "all_ko":
+                for tmp in LangSegmenter.getTexts(text,"ko"):
+                    langlist.append(tmp["lang"])
+                    textlist.append(tmp["text"])
+            elif language == "en":
+                langlist.append("en")
+                textlist.append(text)
+            elif language == "auto":
+                for tmp in LangSegmenter.getTexts(text):
+                    langlist.append(tmp["lang"])
+                    textlist.append(tmp["text"])
+            elif language == "auto_yue":
+                for tmp in LangSegmenter.getTexts(text):
+                    if tmp["lang"] == "zh":
+                        tmp["lang"] = "yue"
+                    langlist.append(tmp["lang"])
+                    textlist.append(tmp["text"])
+            else:
+                for tmp in LangSegmenter.getTexts(text):
+                    if langlist:
+                        if (tmp["lang"] == "en" and langlist[-1] == "en") or (tmp["lang"] != "en" and langlist[-1] != "en"):
+                            textlist[-1] += tmp["text"]
+                            continue
+                    if tmp["lang"] == "en":
+                        langlist.append(tmp["lang"])
                     else:
-                        phones, word2ph, norm_text = self.clean_text_inf(formattext, language, version)
-                        bert = self.get_bert_feature(norm_text, word2ph).to(self.device)
-                elif language == "all_yue" and re.search(r"[A-Za-z]", formattext):
-                    formattext = re.sub(r"[a-z]", lambda x: x.group(0).upper(), formattext)
-                    formattext = chinese.mix_text_normalize(formattext)
-                    return self.get_phones_and_bert(formattext, "yue", version)
-                else:
-                    phones, word2ph, norm_text = self.clean_text_inf(formattext, language, version)
-                    bert = torch.zeros(
-                        (1024, len(phones)),
-                        dtype=torch.float32,
-                    ).to(self.device)
-            elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
-                textlist = []
-                langlist = []
-                if language == "auto":
-                    for tmp in LangSegmenter.getTexts(text):
-                        langlist.append(tmp["lang"])
-                        textlist.append(tmp["text"])
-                elif language == "auto_yue":
-                    for tmp in LangSegmenter.getTexts(text):
-                        if tmp["lang"] == "zh":
-                            tmp["lang"] = "yue"
-                        langlist.append(tmp["lang"])
-                        textlist.append(tmp["text"])
-                else:
-                    for tmp in LangSegmenter.getTexts(text):
-                        if langlist:
-                            if (tmp["lang"] == "en" and langlist[-1] == "en") or (
-                                tmp["lang"] != "en" and langlist[-1] != "en"
-                            ):
-                                textlist[-1] += tmp["text"]
-                                continue
-                        if tmp["lang"] == "en":
-                            langlist.append(tmp["lang"])
-                        else:
-                            # 因无法区别中日韩文汉字,以用户输入为准
-                            langlist.append(language)
-                        textlist.append(tmp["text"])
-                # print(textlist)
-                # print(langlist)
-                phones_list = []
-                bert_list = []
-                norm_text_list = []
-                for i in range(len(textlist)):
-                    lang = langlist[i]
-                    phones, word2ph, norm_text = self.clean_text_inf(textlist[i], lang, version)
-                    bert = self.get_bert_inf(phones, word2ph, norm_text, lang)
-                    phones_list.append(phones)
-                    norm_text_list.append(norm_text)
-                    bert_list.append(bert)
-                bert = torch.cat(bert_list, dim=1)
-                phones = sum(phones_list, [])
-                norm_text = "".join(norm_text_list)
+                        # 因无法区别中日韩文汉字,以用户输入为准
+                        langlist.append(language)
+                    textlist.append(tmp["text"])
+            # print(textlist)
+            # print(langlist)
+            phones_list = []
+            bert_list = []
+            norm_text_list = []
+            for i in range(len(textlist)):
+                lang = langlist[i]
+                phones, word2ph, norm_text = self.clean_text_inf(textlist[i], lang, version)
+                bert = self.get_bert_inf(phones, word2ph, norm_text, lang)
+                phones_list.append(phones)
+                norm_text_list.append(norm_text)
+                bert_list.append(bert)
+            bert = torch.cat(bert_list, dim=1)
+            phones = sum(phones_list, [])
+            norm_text = "".join(norm_text_list)
 
             if not final and len(phones) < 6:
                 return self.get_phones_and_bert("." + text, language, version, final=True)
@@ -240,4 +236,4 @@ class TextPreprocessor:
         punctuations = "".join(re.escape(p) for p in punctuation)
         pattern = f"([{punctuations}])([{punctuations}])+"
         result = re.sub(pattern, r"\1", text)
-        return result
+        return result
\ No newline at end of file
diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py
index 5c7d0103..643e4054 100644
--- a/GPT_SoVITS/inference_webui.py
+++ b/GPT_SoVITS/inference_webui.py
@@ -586,68 +586,67 @@ from text import chinese
 
 
 def get_phones_and_bert(text, language, version, final=False):
-    if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
-        formattext = text
-        while "  " in formattext:
-            formattext = formattext.replace("  ", " ")
-        if language == "all_zh":
-            if re.search(r"[A-Za-z]", formattext):
-                formattext = re.sub(r"[a-z]", lambda x: x.group(0).upper(), formattext)
-                formattext = chinese.mix_text_normalize(formattext)
-                return get_phones_and_bert(formattext, "zh", version)
+    text = re.sub(r' {2,}', ' ', text)
+    textlist = []
+    langlist = []
+    if language == "all_zh":
+        for tmp in LangSegmenter.getTexts(text,"zh"):
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "all_yue":
+        for tmp in LangSegmenter.getTexts(text,"zh"):
+            if tmp["lang"] == "zh":
+                tmp["lang"] = "yue"
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "all_ja":
+        for tmp in LangSegmenter.getTexts(text,"ja"):
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "all_ko":
+        for tmp in LangSegmenter.getTexts(text,"ko"):
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "en":
+        langlist.append("en")
+        textlist.append(text)
+    elif language == "auto":
+        for tmp in LangSegmenter.getTexts(text):
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "auto_yue":
+        for tmp in LangSegmenter.getTexts(text):
+            if tmp["lang"] == "zh":
+                tmp["lang"] = "yue"
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    else:
+        for tmp in LangSegmenter.getTexts(text):
+            if langlist:
+                if (tmp["lang"] == "en" and langlist[-1] == "en") or (tmp["lang"] != "en" and langlist[-1] != "en"):
+                    textlist[-1] += tmp["text"]
+                    continue
+            if tmp["lang"] == "en":
+                langlist.append(tmp["lang"])
             else:
-                phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
-                bert = get_bert_feature(norm_text, word2ph).to(device)
-        elif language == "all_yue" and re.search(r"[A-Za-z]", formattext):
-            formattext = re.sub(r"[a-z]", lambda x: x.group(0).upper(), formattext)
-            formattext = chinese.mix_text_normalize(formattext)
-            return get_phones_and_bert(formattext, "yue", version)
-        else:
-            phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
-            bert = torch.zeros(
-                (1024, len(phones)),
-                dtype=torch.float16 if is_half == True else torch.float32,
-            ).to(device)
-    elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
-        textlist = []
-        langlist = []
-        if language == "auto":
-            for tmp in LangSegmenter.getTexts(text):
-                langlist.append(tmp["lang"])
-                textlist.append(tmp["text"])
-        elif language == "auto_yue":
-            for tmp in LangSegmenter.getTexts(text):
-                if tmp["lang"] == "zh":
-                    tmp["lang"] = "yue"
-                langlist.append(tmp["lang"])
-                textlist.append(tmp["text"])
-        else:
-            for tmp in LangSegmenter.getTexts(text):
-                if langlist:
-                    if (tmp["lang"] == "en" and langlist[-1] == "en") or (tmp["lang"] != "en" and langlist[-1] != "en"):
-                        textlist[-1] += tmp["text"]
-                        continue
-                if tmp["lang"] == "en":
-                    langlist.append(tmp["lang"])
-                else:
-                    # 因无法区别中日韩文汉字,以用户输入为准
-                    langlist.append(language)
-                textlist.append(tmp["text"])
-        print(textlist)
-        print(langlist)
-        phones_list = []
-        bert_list = []
-        norm_text_list = []
-        for i in range(len(textlist)):
-            lang = langlist[i]
-            phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
-            bert = get_bert_inf(phones, word2ph, norm_text, lang)
-            phones_list.append(phones)
-            norm_text_list.append(norm_text)
-            bert_list.append(bert)
-        bert = torch.cat(bert_list, dim=1)
-        phones = sum(phones_list, [])
-        norm_text = "".join(norm_text_list)
+                # 因无法区别中日韩文汉字,以用户输入为准
+                langlist.append(language)
+            textlist.append(tmp["text"])
+    print(textlist)
+    print(langlist)
+    phones_list = []
+    bert_list = []
+    norm_text_list = []
+    for i in range(len(textlist)):
+        lang = langlist[i]
+        phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
+        bert = get_bert_inf(phones, word2ph, norm_text, lang)
+        phones_list.append(phones)
+        norm_text_list.append(norm_text)
+        bert_list.append(bert)
+    bert = torch.cat(bert_list, dim=1)
+    phones = sum(phones_list, [])
+    norm_text = "".join(norm_text_list)
 
     if not final and len(phones) < 6:
         return get_phones_and_bert("." + text, language, version, final=True)
diff --git a/GPT_SoVITS/text/LangSegmenter/langsegmenter.py b/GPT_SoVITS/text/LangSegmenter/langsegmenter.py
index 0187ea69..99b3a422 100644
--- a/GPT_SoVITS/text/LangSegmenter/langsegmenter.py
+++ b/GPT_SoVITS/text/LangSegmenter/langsegmenter.py
@@ -3,44 +3,38 @@ import re
 
 # jieba静音
 import jieba
-
 jieba.setLogLevel(logging.CRITICAL)
 
 # 更改fast_langdetect大模型位置
 from pathlib import Path
 import fast_langdetect
-
-fast_langdetect.infer._default_detector = fast_langdetect.infer.LangDetector(
-    fast_langdetect.infer.LangDetectConfig(
-        cache_dir=Path(__file__).parent.parent.parent / "pretrained_models" / "fast_langdetect"
-    )
-)
+fast_langdetect.infer._default_detector = fast_langdetect.infer.LangDetector(fast_langdetect.infer.LangDetectConfig(cache_dir=Path(__file__).parent.parent.parent / "pretrained_models" / "fast_langdetect"))
 
 from split_lang import LangSplitter
 
 
 def full_en(text):
-    pattern = r"^(?=.*[A-Za-z])[A-Za-z0-9\s\u0020-\u007E\u2000-\u206F\u3000-\u303F\uFF00-\uFFEF]+$"
+    pattern = r'^(?=.*[A-Za-z])[A-Za-z0-9\s\u0020-\u007E\u2000-\u206F\u3000-\u303F\uFF00-\uFFEF]+$'
     return bool(re.match(pattern, text))
 
 
 def full_cjk(text):
     # 来自wiki
     cjk_ranges = [
-        (0x4E00, 0x9FFF),  # CJK Unified Ideographs
-        (0x3400, 0x4DB5),  # CJK Extension A
-        (0x20000, 0x2A6DD),  # CJK Extension B
-        (0x2A700, 0x2B73F),  # CJK Extension C
-        (0x2B740, 0x2B81F),  # CJK Extension D
-        (0x2B820, 0x2CEAF),  # CJK Extension E
-        (0x2CEB0, 0x2EBEF),  # CJK Extension F
-        (0x30000, 0x3134A),  # CJK Extension G
-        (0x31350, 0x323AF),  # CJK Extension H
-        (0x2EBF0, 0x2EE5D),  # CJK Extension H
+        (0x4E00, 0x9FFF),      # CJK Unified Ideographs
+        (0x3400, 0x4DB5),      # CJK Extension A
+        (0x20000, 0x2A6DD),    # CJK Extension B
+        (0x2A700, 0x2B73F),    # CJK Extension C
+        (0x2B740, 0x2B81F),    # CJK Extension D
+        (0x2B820, 0x2CEAF),    # CJK Extension E
+        (0x2CEB0, 0x2EBEF),    # CJK Extension F
+        (0x30000, 0x3134A),    # CJK Extension G
+        (0x31350, 0x323AF),    # CJK Extension H
+        (0x2EBF0, 0x2EE5D),    # CJK Extension H
     ]
 
-    pattern = r"[0-9、-〜。!?.!?… /]+$"
+    pattern = r'[0-9、-〜。!?.!?… /]+$'
 
     cjk_text = ""
     for char in text:
@@ -51,7 +45,7 @@ def full_cjk(text):
     return cjk_text
 
 
-def split_jako(tag_lang, item):
+def split_jako(tag_lang,item):
     if tag_lang == "ja":
         pattern = r"([\u3041-\u3096\u3099\u309A\u30A1-\u30FA\u30FC]+(?:[0-9、-〜。!?.!?… ]+[\u3041-\u3096\u3099\u309A\u30A1-\u30FA\u30FC]*)*)"
     else:
@@ -59,118 +53,165 @@ def split_jako(tag_lang, item):
 
     lang_list: list[dict] = []
     tag = 0
-    for match in re.finditer(pattern, item["text"]):
+    for match in re.finditer(pattern, item['text']):
         if match.start() > tag:
-            lang_list.append({"lang": item["lang"], "text": item["text"][tag : match.start()]})
+            lang_list.append({'lang':item['lang'],'text':item['text'][tag:match.start()]})
         tag = match.end()
-        lang_list.append({"lang": tag_lang, "text": item["text"][match.start() : match.end()]})
+        lang_list.append({'lang':tag_lang,'text':item['text'][match.start():match.end()]})
 
-    if tag < len(item["text"]):
-        lang_list.append({"lang": item["lang"], "text": item["text"][tag : len(item["text"])]})
+    if tag < len(item['text']):
+        lang_list.append({'lang':item['lang'],'text':item['text'][tag:len(item['text'])]})
     return lang_list
 
 
 def merge_lang(lang_list, item):
-    if lang_list and item["lang"] == lang_list[-1]["lang"]:
-        lang_list[-1]["text"] += item["text"]
+    if lang_list and item['lang'] == lang_list[-1]['lang']:
+        lang_list[-1]['text'] += item['text']
     else:
         lang_list.append(item)
     return lang_list
 
 
-class LangSegmenter:
+class LangSegmenter():
     # 默认过滤器, 基于gsv目前四种语言
     DEFAULT_LANG_MAP = {
         "zh": "zh",
         "yue": "zh",  # 粤语
         "wuu": "zh",  # 吴语
         "zh-cn": "zh",
-        "zh-tw": "x",  # 繁体设置为x
+        "zh-tw": "x", # 繁体设置为x
         "ko": "ko",
         "ja": "ja",
         "en": "en",
     }
 
-    def getTexts(text):
+    def getTexts(text,default_lang = ""):
         lang_splitter = LangSplitter(lang_map=LangSegmenter.DEFAULT_LANG_MAP)
+        lang_splitter.merge_across_digit = False
         substr = lang_splitter.split_by_lang(text=text)
 
         lang_list: list[dict] = []
 
-        for _, item in enumerate(substr):
-            dict_item = {"lang": item.lang, "text": item.text}
+        have_num = False
 
-            # 处理短英文被识别为其他语言的问题
-            if full_en(dict_item["text"]):
-                dict_item["lang"] = "en"
-                lang_list = merge_lang(lang_list, dict_item)
+        for _, item in enumerate(substr):
+            dict_item = {'lang':item.lang,'text':item.text}
+
+            if dict_item['lang'] == 'digit':
+                if default_lang != "":
+                    dict_item['lang'] = default_lang
+                else:
+                    have_num = True
+                lang_list = merge_lang(lang_list,dict_item)
                 continue
 
-            # 处理非日语夹日文的问题(不包含CJK)
-            ja_list: list[dict] = []
-            if dict_item["lang"] != "ja":
-                ja_list = split_jako("ja", dict_item)
+            # 处理短英文被识别为其他语言的问题
+            if full_en(dict_item['text']):
+                dict_item['lang'] = 'en'
+                lang_list = merge_lang(lang_list,dict_item)
+                continue
 
-            if not ja_list:
-                ja_list.append(dict_item)
+            if default_lang != "":
+                dict_item['lang'] = default_lang
+                lang_list = merge_lang(lang_list,dict_item)
+                continue
+            else:
+                # 处理非日语夹日文的问题(不包含CJK)
+                ja_list: list[dict] = []
+                if dict_item['lang'] != 'ja':
+                    ja_list = split_jako('ja',dict_item)
 
-            # 处理非韩语夹韩语的问题(不包含CJK)
-            ko_list: list[dict] = []
-            temp_list: list[dict] = []
-            for _, ko_item in enumerate(ja_list):
-                if ko_item["lang"] != "ko":
-                    ko_list = split_jako("ko", ko_item)
+                if not ja_list:
+                    ja_list.append(dict_item)
 
-                if ko_list:
-                    temp_list.extend(ko_list)
-                else:
-                    temp_list.append(ko_item)
+                # 处理非韩语夹韩语的问题(不包含CJK)
+                ko_list: list[dict] = []
+                temp_list: list[dict] = []
+                for _, ko_item in enumerate(ja_list):
+                    if ko_item["lang"] != 'ko':
+                        ko_list = split_jako('ko',ko_item)
 
-            # 未存在非日韩文夹日韩文
-            if len(temp_list) == 1:
-                # 未知语言检查是否为CJK
-                if dict_item["lang"] == "x":
-                    cjk_text = full_cjk(dict_item["text"])
-                    if cjk_text:
-                        dict_item = {"lang": "zh", "text": cjk_text}
-                        lang_list = merge_lang(lang_list, dict_item)
+                    if ko_list:
+                        temp_list.extend(ko_list)
                     else:
-                        lang_list = merge_lang(lang_list, dict_item)
-                    continue
-                else:
-                    lang_list = merge_lang(lang_list, dict_item)
-                    continue
+                        temp_list.append(ko_item)
 
-            # 存在非日韩文夹日韩文
-            for _, temp_item in enumerate(temp_list):
-                # 未知语言检查是否为CJK
-                if temp_item["lang"] == "x":
-                    cjk_text = full_cjk(dict_item["text"])
-                    if cjk_text:
-                        dict_item = {"lang": "zh", "text": cjk_text}
-                        lang_list = merge_lang(lang_list, dict_item)
+                # 未存在非日韩文夹日韩文
+                if len(temp_list) == 1:
+                    # 未知语言检查是否为CJK
+                    if dict_item['lang'] == 'x':
+                        cjk_text = full_cjk(dict_item['text'])
+                        if cjk_text:
+                            dict_item = {'lang':'zh','text':cjk_text}
+                            lang_list = merge_lang(lang_list,dict_item)
+                        else:
+                            lang_list = merge_lang(lang_list,dict_item)
+                        continue
                     else:
-                        lang_list = merge_lang(lang_list, dict_item)
-                else:
-                    lang_list = merge_lang(lang_list, temp_item)
+                        lang_list = merge_lang(lang_list,dict_item)
+                        continue
 
+                # 存在非日韩文夹日韩文
+                for _, temp_item in enumerate(temp_list):
+                    # 未知语言检查是否为CJK
+                    if temp_item['lang'] == 'x':
+                        cjk_text = full_cjk(temp_item['text'])
+                        if cjk_text:
+                            lang_list = merge_lang(lang_list,{'lang':'zh','text':cjk_text})
+                        else:
+                            lang_list = merge_lang(lang_list,temp_item)
+                    else:
+                        lang_list = merge_lang(lang_list,temp_item)
+
+        # 有数字
+        if have_num:
+            temp_list = lang_list
+            lang_list = []
+            for i, temp_item in enumerate(temp_list):
+                if temp_item['lang'] == 'digit':
+                    if default_lang:
+                        temp_item['lang'] = default_lang
+                    elif lang_list and i == len(temp_list) - 1:
+                        temp_item['lang'] = lang_list[-1]['lang']
+                    elif not lang_list and i < len(temp_list) - 1:
+                        temp_item['lang'] = temp_list[1]['lang']
+                    elif lang_list and i < len(temp_list) - 1:
+                        if lang_list[-1]['lang'] == temp_list[i + 1]['lang']:
+                            temp_item['lang'] = lang_list[-1]['lang']
+                        elif lang_list[-1]['text'][-1] in [",",".","!","?",",","。","!","?"]:
+                            temp_item['lang'] = temp_list[i + 1]['lang']
+                        elif temp_list[i + 1]['text'][0] in [",",".","!","?",",","。","!","?"]:
+                            temp_item['lang'] = lang_list[-1]['lang']
+                        elif temp_item['text'][-1] in ["。","."]:
+                            temp_item['lang'] = lang_list[-1]['lang']
+                        elif len(lang_list[-1]['text']) >= len(temp_list[i + 1]['text']):
+                            temp_item['lang'] = lang_list[-1]['lang']
+                        else:
+                            temp_item['lang'] = temp_list[i + 1]['lang']
+                    else:
+                        temp_item['lang'] = 'zh'
+
+                lang_list = merge_lang(lang_list,temp_item)
+
+
+        # 筛X
         temp_list = lang_list
         lang_list = []
         for _, temp_item in enumerate(temp_list):
-            if temp_item["lang"] == "x":
+            if temp_item['lang'] == 'x':
                 if lang_list:
-                    temp_item["lang"] = lang_list[-1]["lang"]
+                    temp_item['lang'] = lang_list[-1]['lang']
                 elif len(temp_list) > 1:
-                    temp_item["lang"] = temp_list[1]["lang"]
+                    temp_item['lang'] = temp_list[1]['lang']
                 else:
-                    temp_item["lang"] = "zh"
+                    temp_item['lang'] = 'zh'
 
-            lang_list = merge_lang(lang_list, temp_item)
+            lang_list = merge_lang(lang_list,temp_item)
         return lang_list
-
+    
 
 if __name__ == "__main__":
     text = "MyGO?,你也喜欢まいご吗?"
@@ -178,3 +219,7 @@ if __name__ == "__main__":
 
     text = "ねえ、知ってる?最近、僕は天文学を勉強してるんだ。君の瞳が星空みたいにキラキラしてるからさ。"
     print(LangSegmenter.getTexts(text))
+
+    text = "当时ThinkPad T60刚刚发布,一同推出的还有一款名为Advanced Dock的扩展坞配件。这款扩展坞通过连接T60底部的插槽,扩展出包括PCIe在内的一大堆接口,并且自带电源,让T60可以安装桌面显卡来提升性能。"
+    print(LangSegmenter.getTexts(text,"zh"))
+    print(LangSegmenter.getTexts(text))
\ No newline at end of file
diff --git a/GPT_SoVITS/text/chinese.py b/GPT_SoVITS/text/chinese.py
index ce44215f..944c9cb7 100644
--- a/GPT_SoVITS/text/chinese.py
+++ b/GPT_SoVITS/text/chinese.py
@@ -181,20 +181,6 @@ def text_normalize(text):
     return dest_text
 
 
-# 不排除英文的文本格式化
-def mix_text_normalize(text):
-    # https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/zh_normalization
-    tx = TextNormalizer()
-    sentences = tx.normalize(text)
-    dest_text = ""
-    for sentence in sentences:
-        dest_text += replace_punctuation_with_en(sentence)
-
-    # 避免重复标点引起的参考泄露
-    dest_text = replace_consecutive_punctuation(dest_text)
-    return dest_text
-
-
 if __name__ == "__main__":
     text = "啊——但是《原神》是由,米哈\游自主,研发的一款全.新开放世界.冒险游戏"
     text = "呣呣呣~就是…大人的鼹鼠党吧?"
diff --git a/GPT_SoVITS/text/chinese2.py b/GPT_SoVITS/text/chinese2.py
index 612aa3a5..dcce0d96 100644
--- a/GPT_SoVITS/text/chinese2.py
+++ b/GPT_SoVITS/text/chinese2.py
@@ -326,20 +326,6 @@ def text_normalize(text):
     return dest_text
 
 
-# 不排除英文的文本格式化
-def mix_text_normalize(text):
-    # https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/zh_normalization
-    tx = TextNormalizer()
-    sentences = tx.normalize(text)
-    dest_text = ""
-    for sentence in sentences:
-        dest_text += replace_punctuation_with_en(sentence)
-
-    # 避免重复标点引起的参考泄露
-    dest_text = replace_consecutive_punctuation(dest_text)
-    return dest_text
-
-
 if __name__ == "__main__":
     text = "啊——但是《原神》是由,米哈\游自主,研发的一款全.新开放世界.冒险游戏"
     text = "呣呣呣~就是…大人的鼹鼠党吧?"
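[Editor's note] The num.py hunk that follows verbalizes dotted version strings digit by digit instead of as cardinals. A minimal standalone sketch of the same idea, using only the regex from the patch; the digit table here is hardcoded for illustration, whereas the patch itself reuses `num2str()` from num.py:

```python
import re

# Pattern from the patch: a number followed by two or more ".digits" groups.
RE_VERSION_NUM = re.compile(r"((\d+)(\.\d+)(\.\d+)?(\.\d+)+)")

# Hardcoded digit names for illustration; the real patch calls num2str() per character.
DIGITS = {str(i): c for i, c in enumerate("零一二三四五六七八九")}


def replace_version_num(match: re.Match) -> str:
    # Each character is voiced on its own: "2.5.0.1" -> "二点五点零点一".
    return "".join("点" if ch == "." else DIGITS[ch] for ch in match.group(1))


print(RE_VERSION_NUM.sub(replace_version_num, "升级到2.5.0.1版本"))  # -> 升级到二点五点零点一版本
```

Reading the match character by character means a group like "12" in "12.3.4" is voiced as 一二 rather than the cardinal 十二, which matches how version strings are normally spoken.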
diff --git a/GPT_SoVITS/text/zh_normalization/num.py b/GPT_SoVITS/text/zh_normalization/num.py
index c3af4d6a..14d602b0 100644
--- a/GPT_SoVITS/text/zh_normalization/num.py
+++ b/GPT_SoVITS/text/zh_normalization/num.py
@@ -256,6 +256,24 @@ def replace_to_range(match) -> str:
     return result
 
 
+RE_VERSION_NUM = re.compile(r"((\d+)(\.\d+)(\.\d+)?(\.\d+)+)")
+def replace_version_num(match) -> str:
+    """
+    Args:
+        match (re.Match)
+    Returns:
+        str
+    """
+    result = ""
+    for c in match.group(1):
+        if c == ".":
+            result += "点"
+        else:
+            result += num2str(c)
+    return result
+
+
+
 def _get_value(value_string: str, use_zero: bool = True) -> List[str]:
     stripped = value_string.lstrip("0")
     if len(stripped) == 0:
@@ -308,7 +326,11 @@ def num2str(value_string: str) -> str:
 
     result = verbalize_cardinal(integer)
 
-    decimal = decimal.rstrip("0")
+    if decimal.endswith("0"):
+        decimal = decimal.rstrip("0") + "0"
+    else:
+        decimal = decimal.rstrip("0")
+
     if decimal:
         # '.22' is verbalized as '零点二二'
         # '3.20' is verbalized as '三点二
diff --git a/GPT_SoVITS/text/zh_normalization/text_normlization.py b/GPT_SoVITS/text/zh_normalization/text_normlization.py
index 099b01bd..1a6cdd0f 100644
--- a/GPT_SoVITS/text/zh_normalization/text_normlization.py
+++ b/GPT_SoVITS/text/zh_normalization/text_normlization.py
@@ -25,6 +25,7 @@ from .chronology import replace_time
 from .constants import F2H_ASCII_LETTERS
 from .constants import F2H_DIGITS
 from .constants import F2H_SPACE
+from .num import RE_VERSION_NUM
 from .num import RE_DECIMAL_NUM
 from .num import RE_DEFAULT_NUM
 from .num import RE_FRAC
@@ -36,6 +37,7 @@ from .num import RE_RANGE
 from .num import RE_TO_RANGE
 from .num import RE_ASMD
 from .num import RE_POWER
+from .num import replace_version_num
 from .num import replace_default_num
 from .num import replace_frac
 from .num import replace_negative_num
@@ -158,6 +160,7 @@ class TextNormalizer:
 
         sentence = RE_RANGE.sub(replace_range, sentence)
         sentence = RE_INTEGER.sub(replace_negative_num, sentence)
+        sentence = RE_VERSION_NUM.sub(replace_version_num, sentence)
         sentence = RE_DECIMAL_NUM.sub(replace_number, sentence)
         sentence = RE_POSITIVE_QUANTIFIERS.sub(replace_positive_quantifier, sentence)
         sentence = RE_DEFAULT_NUM.sub(replace_default_num, sentence)
diff --git a/api.py b/api.py
index dc2e8826..cc0896a2 100644
--- a/api.py
+++ b/api.py
@@ -543,66 +543,65 @@ from text import chinese
 
 
 def get_phones_and_bert(text, language, version, final=False):
-    if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
-        formattext = text
-        while "  " in formattext:
-            formattext = formattext.replace("  ", " ")
-        if language == "all_zh":
-            if re.search(r"[A-Za-z]", formattext):
-                formattext = re.sub(r"[a-z]", lambda x: x.group(0).upper(), formattext)
-                formattext = chinese.mix_text_normalize(formattext)
-                return get_phones_and_bert(formattext, "zh", version)
+    text = re.sub(r' {2,}', ' ', text)
+    textlist = []
+    langlist = []
+    if language == "all_zh":
+        for tmp in LangSegmenter.getTexts(text,"zh"):
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "all_yue":
+        for tmp in LangSegmenter.getTexts(text,"zh"):
+            if tmp["lang"] == "zh":
+                tmp["lang"] = "yue"
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "all_ja":
+        for tmp in LangSegmenter.getTexts(text,"ja"):
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "all_ko":
+        for tmp in LangSegmenter.getTexts(text,"ko"):
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "en":
+        langlist.append("en")
+        textlist.append(text)
+    elif language == "auto":
+        for tmp in LangSegmenter.getTexts(text):
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "auto_yue":
+        for tmp in LangSegmenter.getTexts(text):
+            if tmp["lang"] == "zh":
+                tmp["lang"] = "yue"
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    else:
+        for tmp in LangSegmenter.getTexts(text):
+            if langlist:
+                if (tmp["lang"] == "en" and langlist[-1] == "en") or (tmp["lang"] != "en" and langlist[-1] != "en"):
+                    textlist[-1] += tmp["text"]
+                    continue
+            if tmp["lang"] == "en":
+                langlist.append(tmp["lang"])
             else:
-                phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
-                bert = get_bert_feature(norm_text, word2ph).to(device)
-        elif language == "all_yue" and re.search(r"[A-Za-z]", formattext):
-            formattext = re.sub(r"[a-z]", lambda x: x.group(0).upper(), formattext)
-            formattext = chinese.mix_text_normalize(formattext)
-            return get_phones_and_bert(formattext, "yue", version)
-        else:
-            phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
-            bert = torch.zeros(
-                (1024, len(phones)),
-                dtype=torch.float16 if is_half == True else torch.float32,
-            ).to(device)
-    elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
-        textlist = []
-        langlist = []
-        if language == "auto":
-            for tmp in LangSegmenter.getTexts(text):
-                langlist.append(tmp["lang"])
-                textlist.append(tmp["text"])
-        elif language == "auto_yue":
-            for tmp in LangSegmenter.getTexts(text):
-                if tmp["lang"] == "zh":
-                    tmp["lang"] = "yue"
-                langlist.append(tmp["lang"])
-                textlist.append(tmp["text"])
-        else:
-            for tmp in LangSegmenter.getTexts(text):
-                if langlist:
-                    if (tmp["lang"] == "en" and langlist[-1] == "en") or (tmp["lang"] != "en" and langlist[-1] != "en"):
-                        textlist[-1] += tmp["text"]
-                        continue
-                if tmp["lang"] == "en":
-                    langlist.append(tmp["lang"])
-                else:
-                    # 因无法区别中日韩文汉字,以用户输入为准
-                    langlist.append(language)
-                textlist.append(tmp["text"])
-        phones_list = []
-        bert_list = []
-        norm_text_list = []
-        for i in range(len(textlist)):
-            lang = langlist[i]
-            phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
-            bert = get_bert_inf(phones, word2ph, norm_text, lang)
-            phones_list.append(phones)
-            norm_text_list.append(norm_text)
-            bert_list.append(bert)
-        bert = torch.cat(bert_list, dim=1)
-        phones = sum(phones_list, [])
-        norm_text = "".join(norm_text_list)
+                # 因无法区别中日韩文汉字,以用户输入为准
+                langlist.append(language)
+            textlist.append(tmp["text"])
+    phones_list = []
+    bert_list = []
+    norm_text_list = []
+    for i in range(len(textlist)):
+        lang = langlist[i]
+        phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
+        bert = get_bert_inf(phones, word2ph, norm_text, lang)
+        phones_list.append(phones)
+        norm_text_list.append(norm_text)
+        bert_list.append(bert)
+    bert = torch.cat(bert_list, dim=1)
+    phones = sum(phones_list, [])
+    norm_text = "".join(norm_text_list)
 
     if not final and len(phones) < 6:
         return get_phones_and_bert("." + text, language, version, final=True)
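[Editor's note] The three `get_phones_and_bert` rewrites in this series (TextPreprocessor.py, inference_webui.py, api.py) share one folding rule: adjacent segments from `LangSegmenter.getTexts` are merged whenever both sides are English or both are non-English, and non-English segments inherit the user-selected language because CJK ideographs alone cannot be attributed to zh/ja/ko reliably. A hedged standalone sketch of just that rule; the segment dicts mirror `getTexts()` output, the sample data is invented, and the BERT/phoneme extraction that follows this step in the real functions is omitted:

```python
def fold_segments(segments: list[dict], language: str) -> tuple[list[str], list[str]]:
    """Fold getTexts()-style segments into parallel lang/text lists."""
    langlist: list[str] = []
    textlist: list[str] = []
    for seg in segments:
        # Non-English segments take the user-selected language.
        lang = "en" if seg["lang"] == "en" else language
        if langlist and (lang == "en") == (langlist[-1] == "en"):
            textlist[-1] += seg["text"]  # both en or both non-en: merge
        else:
            langlist.append(lang)
            textlist.append(seg["text"])
    return langlist, textlist


segs = [
    {"lang": "zh", "text": "你好"},
    {"lang": "ja", "text": "こんにちは"},
    {"lang": "en", "text": "hello"},
]
print(fold_segments(segs, "zh"))  # (['zh', 'en'], ['你好こんにちは', 'hello'])
```

Keeping the en/non-en split coarse means each merged run is cleaned and BERT-featurized once per language, which is why the old per-language `all_*` fast paths could be deleted in favor of the single dispatch above.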