Add New LangSegmenter

KamioRinn 2025-02-14 02:38:25 +08:00
parent c2b3298bed
commit 6976f77572
4 changed files with 11 additions and 10 deletions

View File

@@ -18,9 +18,10 @@ logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
 logging.getLogger("multipart.multipart").setLevel(logging.ERROR)
 warnings.simplefilter(action='ignore', category=FutureWarning)
-import LangSegment, os, re, sys, json
+import os, re, sys, json
 import pdb
 import torch
+from text.LangSegmenter import LangSegmenter
 try:
     import gradio.analytics as analytics
@@ -379,8 +380,7 @@ def get_phones_and_bert(text,language,version,final=False):
     if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
         language = language.replace("all_","")
         if language == "en":
-            LangSegment.setfilters(["en"])
-            formattext = " ".join(tmp["text"] for tmp in LangSegment.getTexts(text))
+            formattext = text
         else:
             # Chinese, Japanese, and Korean Han characters cannot be told apart, so follow the user's input
             formattext = text
@@ -407,19 +407,18 @@
     elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
         textlist=[]
         langlist=[]
-        LangSegment.setfilters(["zh","ja","en","ko"])
         if language == "auto":
-            for tmp in LangSegment.getTexts(text):
+            for tmp in LangSegmenter.getTexts(text):
                 langlist.append(tmp["lang"])
                 textlist.append(tmp["text"])
         elif language == "auto_yue":
-            for tmp in LangSegment.getTexts(text):
+            for tmp in LangSegmenter.getTexts(text):
                 if tmp["lang"] == "zh":
                     tmp["lang"] = "yue"
                 langlist.append(tmp["lang"])
                 textlist.append(tmp["text"])
         else:
-            for tmp in LangSegment.getTexts(text):
+            for tmp in LangSegmenter.getTexts(text):
                 if tmp["lang"] == "en":
                     langlist.append(tmp["lang"])
                 else:
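The call sites above assume the new LangSegmenter exposes the same getTexts interface the old LangSegment package did: an iterable of dicts carrying "lang" and "text" keys. A minimal, self-contained sketch of what the auto_yue branch does with that output (the segments list is a hypothetical stand-in for LangSegmenter.getTexts(text), not real library output):

# Stand-in for LangSegmenter.getTexts(text); sample segments are hypothetical.
segments = [{"lang": "zh", "text": "你好"}, {"lang": "en", "text": "hello"}]

textlist, langlist = [], []
for tmp in segments:
    if tmp["lang"] == "zh":  # auto_yue remaps Chinese segments to Cantonese
        tmp["lang"] = "yue"
    langlist.append(tmp["lang"])
    textlist.append(tmp["text"])

print(langlist)  # ['yue', 'en']
print(textlist)  # ['你好', 'hello']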

View File

@@ -112,7 +112,7 @@ def replace_phs(phs):
 def replace_consecutive_punctuation(text):
     punctuations = ''.join(re.escape(p) for p in punctuation)
-    pattern = f'([{punctuations}])([{punctuations}])+'
+    pattern = f'([{punctuations}\s])([{punctuations}])+'
     result = re.sub(pattern, r'\1', text)
     return result
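Adding \s to the first character class means a punctuation run that follows whitespace now collapses into the whitespace itself rather than surviving as a single mark. A quick check, assuming an illustrative punctuation set (the real set is defined elsewhere in the text module):

import re

punctuation = {'!', '?', ',', '.', '-'}  # illustrative; the real set lives in the text module
punctuations = ''.join(re.escape(p) for p in punctuation)
pattern = f'([{punctuations}\\s])([{punctuations}])+'

print(re.sub(pattern, r'\1', "wait!!!"))   # "wait!"  (old behaviour, unchanged)
print(re.sub(pattern, r'\1', "wait ..."))  # "wait "  (new: the space swallows the run)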
@@ -233,6 +233,7 @@ def text_normalize(text):
     # Text formatting handling from g2p_en
     # Added uppercase compatibility
+    # Added splitting of all-uppercase words
     text = unicode(text)
     text = normalize_numbers(text)
     text = ''.join(char for char in unicodedata.normalize('NFD', text)
@@ -240,6 +241,7 @@ def text_normalize(text):
     text = re.sub("[^ A-Za-z'.,?!\-]", "", text)
     text = re.sub(r"(?i)i\.e\.", "that is", text)
     text = re.sub(r"(?i)e\.g\.", "for example", text)
+    text = re.sub(r'(?<!^)(?<![\s])([A-Z])', r' \1', text)
     # Avoid reference-audio leakage caused by repeated punctuation
     text = replace_consecutive_punctuation(text)
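The added lookbehind rule inserts a space before any capital letter that is neither at the start of the string nor already preceded by whitespace, which is what splits all-uppercase words into single letters for g2p:

import re

def split_caps(text):
    # Space out capitals that don't start the string or follow whitespace
    return re.sub(r'(?<!^)(?<![\s])([A-Z])', r' \1', text)

print(split_caps("GPT"))          # "G P T"
print(split_caps("use GPT now"))  # "use G P T now"
print(split_caps("Hello"))        # "Hello" (leading capital untouched)

Note the same rule also splits mixed-case words, e.g. "SoVITS" becomes "So V I T S".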

View File

@@ -329,7 +329,7 @@ Special thanks to the following projects and contributors:
 - [BigVGAN](https://github.com/NVIDIA/BigVGAN)
 ### Text Frontend for Inference
 - [paddlespeech zh_normalization](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/zh_normalization)
-- [LangSegment](https://github.com/juntaosun/LangSegment)
+- [split-lang](https://github.com/DoodleBears/split-lang)
 - [g2pW](https://github.com/GitYCC/g2pW)
 - [pypinyin-g2pW](https://github.com/mozillazg/pypinyin-g2pW)
 - [paddlespeech g2pw](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/g2pw)

View File

@@ -23,7 +23,7 @@ PyYAML
 psutil
 jieba_fast
 jieba
-LangSegment>=0.2.0
+split-lang
 Faster_Whisper
 wordsegment
 rotary_embedding_torch