From e0e6d333b5a8c98643047f0f6678011c90d1b14d Mon Sep 17 00:00:00 2001
From: KamioRinn <63162909+KamioRinn@users.noreply.github.com>
Date: Mon, 26 May 2025 11:20:18 +0800
Subject: [PATCH] optimize langdetect (#2408)

Co-authored-by: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
---
 .../text/LangSegmenter/langsegmenter.py | 111 ++++++++++--------
 1 file changed, 62 insertions(+), 49 deletions(-)

diff --git a/GPT_SoVITS/text/LangSegmenter/langsegmenter.py b/GPT_SoVITS/text/LangSegmenter/langsegmenter.py
index 1740a54..88b9379 100644
--- a/GPT_SoVITS/text/LangSegmenter/langsegmenter.py
+++ b/GPT_SoVITS/text/LangSegmenter/langsegmenter.py
@@ -3,44 +3,38 @@ import re
 
 # Silence jieba logging
 import jieba
-
 jieba.setLogLevel(logging.CRITICAL)
 
 # Relocate the fast_langdetect model cache
 from pathlib import Path
 import fast_langdetect
-
-fast_langdetect.infer._default_detector = fast_langdetect.infer.LangDetector(
-    fast_langdetect.infer.LangDetectConfig(
-        cache_dir=Path(__file__).parent.parent.parent / "pretrained_models" / "fast_langdetect"
-    )
-)
+fast_langdetect.infer._default_detector = fast_langdetect.infer.LangDetector(fast_langdetect.infer.LangDetectConfig(cache_dir=Path(__file__).parent.parent.parent / "pretrained_models" / "fast_langdetect"))
 
 
 from split_lang import LangSplitter
 
 
 def full_en(text):
-    pattern = r"^[A-Za-z0-9\s\u0020-\u007E\u2000-\u206F\u3000-\u303F\uFF00-\uFFEF]+$"
+    pattern = r'^(?=.*[A-Za-z])[A-Za-z0-9\s\u0020-\u007E\u2000-\u206F\u3000-\u303F\uFF00-\uFFEF]+$'
     return bool(re.match(pattern, text))
 
 
 def full_cjk(text):
     # From Wikipedia
     cjk_ranges = [
-        (0x4E00, 0x9FFF),  # CJK Unified Ideographs
-        (0x3400, 0x4DB5),  # CJK Extension A
-        (0x20000, 0x2A6DD),  # CJK Extension B
-        (0x2A700, 0x2B73F),  # CJK Extension C
-        (0x2B740, 0x2B81F),  # CJK Extension D
-        (0x2B820, 0x2CEAF),  # CJK Extension E
-        (0x2CEB0, 0x2EBEF),  # CJK Extension F
-        (0x30000, 0x3134A),  # CJK Extension G
-        (0x31350, 0x323AF),  # CJK Extension H
-        (0x2EBF0, 0x2EE5D),  # CJK Extension H
+        (0x4E00, 0x9FFF), # CJK Unified Ideographs
+        (0x3400, 0x4DB5), # CJK Extension A
+        (0x20000, 0x2A6DD), # CJK Extension B
+        (0x2A700, 0x2B73F), # CJK Extension C
+        (0x2B740, 0x2B81F), # CJK Extension D
+        (0x2B820, 0x2CEAF), # CJK Extension E
+        (0x2CEB0, 0x2EBEF), # CJK Extension F
+        (0x30000, 0x3134A), # CJK Extension G
+        (0x31350, 0x323AF), # CJK Extension H
+        (0x2EBF0, 0x2EE5D), # CJK Extension H
     ]
 
-    pattern = r"[0-9、-〜。!?.!?… ]+$"
+    pattern = r'[0-9、-〜。!?.!?… /]+$'
 
     cjk_text = ""
     for char in text:
@@ -51,7 +45,7 @@ def full_cjk(text):
     return cjk_text
 
 
-def split_jako(tag_lang, item):
+def split_jako(tag_lang,item):
     if tag_lang == "ja":
         pattern = r"([\u3041-\u3096\u3099\u309A\u30A1-\u30FA\u30FC]+(?:[0-9、-〜。!?.!?… ]+[\u3041-\u3096\u3099\u309A\u30A1-\u30FA\u30FC]*)*)"
     else:
@@ -59,40 +53,41 @@ def split_jako(tag_lang, item):
     lang_list: list[dict] = []
     tag = 0
-    for match in re.finditer(pattern, item["text"]):
+    for match in re.finditer(pattern, item['text']):
         if match.start() > tag:
-            lang_list.append({"lang": item["lang"], "text": item["text"][tag : match.start()]})
+            lang_list.append({'lang':item['lang'],'text':item['text'][tag:match.start()]})
         tag = match.end()
-        lang_list.append({"lang": tag_lang, "text": item["text"][match.start() : match.end()]})
+        lang_list.append({'lang':tag_lang,'text':item['text'][match.start():match.end()]})
 
-    if tag < len(item["text"]):
-        lang_list.append({"lang": item["lang"], "text": item["text"][tag : len(item["text"])]})
+    if tag < len(item['text']):
+        lang_list.append({'lang':item['lang'],'text':item['text'][tag:len(item['text'])]})
 
     return lang_list
 
 
 def merge_lang(lang_list, item):
-    if lang_list and item["lang"] == lang_list[-1]["lang"]:
-        lang_list[-1]["text"] += item["text"]
+    if lang_list and item['lang'] == lang_list[-1]['lang']:
+        lang_list[-1]['text'] += item['text']
     else:
         lang_list.append(item)
     return lang_list
 
 
-class LangSegmenter:
+class LangSegmenter():
     # Default filter, based on the four languages GSV currently supports
     DEFAULT_LANG_MAP = {
         "zh": "zh",
         "yue": "zh",  # Cantonese
         "wuu": "zh",  # Wu Chinese
         "zh-cn": "zh",
-        "zh-tw": "x",  # Traditional Chinese is mapped to x
+        "zh-tw": "x", # Traditional Chinese is mapped to x
         "ko": "ko",
         "ja": "ja",
         "en": "en",
     }
 
+
     def getTexts(text):
         lang_splitter = LangSplitter(lang_map=LangSegmenter.DEFAULT_LANG_MAP)
         substr = lang_splitter.split_by_lang(text=text)
@@ -100,18 +95,18 @@ class LangSegmenter:
         lang_list: list[dict] = []
         for _, item in enumerate(substr):
-            dict_item = {"lang": item.lang, "text": item.text}
+            dict_item = {'lang':item.lang,'text':item.text}
 
             # Handle short English text misidentified as another language
-            if full_en(dict_item["text"]):
-                dict_item["lang"] = "en"
-                lang_list = merge_lang(lang_list, dict_item)
+            if full_en(dict_item['text']):
+                dict_item['lang'] = 'en'
+                lang_list = merge_lang(lang_list,dict_item)
                 continue
 
             # Handle Japanese embedded in non-Japanese text (CJK excluded)
             ja_list: list[dict] = []
-            if dict_item["lang"] != "ja":
-                ja_list = split_jako("ja", dict_item)
+            if dict_item['lang'] != 'ja':
+                ja_list = split_jako('ja',dict_item)
 
             if not ja_list:
                 ja_list.append(dict_item)
@@ -120,8 +115,8 @@ class LangSegmenter:
             ko_list: list[dict] = []
             temp_list: list[dict] = []
             for _, ko_item in enumerate(ja_list):
-                if ko_item["lang"] != "ko":
-                    ko_list = split_jako("ko", ko_item)
+                if ko_item["lang"] != 'ko':
+                    ko_list = split_jako('ko',ko_item)
 
                 if ko_list:
                     temp_list.extend(ko_list)
@@ -131,32 +126,50 @@ class LangSegmenter:
             # No Japanese/Korean embedded in other-language text
             if len(temp_list) == 1:
                 # Unknown language: check whether it is CJK
-                if dict_item["lang"] == "x":
-                    cjk_text = full_cjk(dict_item["text"])
+                if dict_item['lang'] == 'x':
+                    cjk_text = full_cjk(dict_item['text'])
                     if cjk_text:
-                        dict_item = {"lang": "zh", "text": cjk_text}
-                        lang_list = merge_lang(lang_list, dict_item)
+                        dict_item = {'lang':'zh','text':cjk_text}
+                        lang_list = merge_lang(lang_list,dict_item)
+                    else:
+                        lang_list = merge_lang(lang_list,dict_item)
                     continue
                 else:
-                    lang_list = merge_lang(lang_list, dict_item)
+                    lang_list = merge_lang(lang_list,dict_item)
                     continue
 
             # Japanese/Korean embedded in other-language text
             for _, temp_item in enumerate(temp_list):
                 # Unknown language: check whether it is CJK
-                if temp_item["lang"] == "x":
-                    cjk_text = full_cjk(dict_item["text"])
+                if temp_item['lang'] == 'x':
+                    cjk_text = full_cjk(dict_item['text'])
                     if cjk_text:
-                        dict_item = {"lang": "zh", "text": cjk_text}
-                        lang_list = merge_lang(lang_list, dict_item)
+                        dict_item = {'lang':'zh','text':cjk_text}
+                        lang_list = merge_lang(lang_list,dict_item)
+                    else:
+                        lang_list = merge_lang(lang_list,dict_item)
                 else:
-                    lang_list = merge_lang(lang_list, temp_item)
-        return lang_list
+                    lang_list = merge_lang(lang_list,temp_item)
 
+        temp_list = lang_list
+        lang_list = []
+        for _, temp_item in enumerate(temp_list):
+            if temp_item['lang'] == 'x':
+                if lang_list:
+                    temp_item['lang'] = lang_list[-1]['lang']
+                elif len(temp_list) > 1:
+                    temp_item['lang'] = temp_list[1]['lang']
+                else:
+                    temp_item['lang'] = 'zh'
+
+            lang_list = merge_lang(lang_list,temp_item)
+
+        return lang_list
+
 
 if __name__ == "__main__":
     text = "MyGO?,你也喜欢まいご吗?"
     print(LangSegmenter.getTexts(text))
 
     text = "ねえ、知ってる?最近、僕は天文学を勉強してるんだ。君の瞳が星空みたいにキラキラしてるからさ。"
-    print(LangSegmenter.getTexts(text))
+    print(LangSegmenter.getTexts(text))
\ No newline at end of file
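
Reviewer note: the behavioral core of this patch is the new final pass at the end of getTexts, which re-tags segments still marked 'x' (unknown) instead of leaving them unresolved. Below is a minimal sketch of that pass in isolation; merge_lang is copied from the patched module, while resolve_unknown and the sample input are hypothetical names and data used purely for illustration, not part of the patch.

# Minimal sketch of the patch's unknown-language fallback pass.
# merge_lang is copied from the patched module; resolve_unknown and
# the sample input are hypothetical, for illustration only.

def merge_lang(lang_list, item):
    # Merge consecutive segments that share the same language tag.
    if lang_list and item['lang'] == lang_list[-1]['lang']:
        lang_list[-1]['text'] += item['text']
    else:
        lang_list.append(item)
    return lang_list


def resolve_unknown(segments):
    # Mirrors the final pass added to getTexts: re-tag 'x' segments.
    resolved = []
    for seg in segments:
        if seg['lang'] == 'x':
            if resolved:
                # Inherit the language of the preceding resolved segment.
                seg['lang'] = resolved[-1]['lang']
            elif len(segments) > 1:
                # Leading unknown segment: borrow the next segment's language.
                seg['lang'] = segments[1]['lang']
            else:
                # A single all-unknown segment defaults to Chinese.
                seg['lang'] = 'zh'
        resolved = merge_lang(resolved, seg)
    return resolved


if __name__ == "__main__":
    sample = [{'lang': 'x', 'text': '繁體字、'}, {'lang': 'zh', 'text': '简体字'}]
    print(resolve_unknown(sample))
    # Expected: [{'lang': 'zh', 'text': '繁體字、简体字'}]

With this pass in place, traditional-Chinese segments (mapped to 'x' by DEFAULT_LANG_MAP) are absorbed into a neighboring segment's language rather than falling through, which is why the zh-tw mapping can stay as a sentinel value.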