Mirror of https://github.com/RVC-Boss/GPT-SoVITS.git, synced 2025-06-28 08:41:29 +08:00
Language segmentation and formatting optimization (#2488)
* better LangSegmenter
* add version num2str
* better version num2str
* sync fast infer
* sync api
* remove duplicate spaces
* remove unnecessary code

---------

Co-authored-by: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
parent 90ebefa78f
commit 6df61f58e4
@@ -121,33 +121,31 @@ class TextPreprocessor:
     def get_phones_and_bert(self, text: str, language: str, version: str, final: bool = False):
         with self.bert_lock:
-            if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
-                # language = language.replace("all_","")
-                formattext = text
-                while "  " in formattext:
-                    formattext = formattext.replace("  ", " ")
-                if language == "all_zh":
-                    if re.search(r"[A-Za-z]", formattext):
-                        formattext = re.sub(r"[a-z]", lambda x: x.group(0).upper(), formattext)
-                        formattext = chinese.mix_text_normalize(formattext)
-                        return self.get_phones_and_bert(formattext, "zh", version)
-                    else:
-                        phones, word2ph, norm_text = self.clean_text_inf(formattext, language, version)
-                        bert = self.get_bert_feature(norm_text, word2ph).to(self.device)
-                elif language == "all_yue" and re.search(r"[A-Za-z]", formattext):
-                    formattext = re.sub(r"[a-z]", lambda x: x.group(0).upper(), formattext)
-                    formattext = chinese.mix_text_normalize(formattext)
-                    return self.get_phones_and_bert(formattext, "yue", version)
-                else:
-                    phones, word2ph, norm_text = self.clean_text_inf(formattext, language, version)
-                    bert = torch.zeros(
-                        (1024, len(phones)),
-                        dtype=torch.float32,
-                    ).to(self.device)
-            elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
+            text = re.sub(r' {2,}', ' ', text)
             textlist = []
             langlist = []
-                if language == "auto":
+            if language == "all_zh":
+                for tmp in LangSegmenter.getTexts(text,"zh"):
+                    langlist.append(tmp["lang"])
+                    textlist.append(tmp["text"])
+            elif language == "all_yue":
+                for tmp in LangSegmenter.getTexts(text,"zh"):
+                    if tmp["lang"] == "zh":
+                        tmp["lang"] = "yue"
+                    langlist.append(tmp["lang"])
+                    textlist.append(tmp["text"])
+            elif language == "all_ja":
+                for tmp in LangSegmenter.getTexts(text,"ja"):
+                    langlist.append(tmp["lang"])
+                    textlist.append(tmp["text"])
+            elif language == "all_ko":
+                for tmp in LangSegmenter.getTexts(text,"ko"):
+                    langlist.append(tmp["lang"])
+                    textlist.append(tmp["text"])
+            elif language == "en":
+                langlist.append("en")
+                textlist.append(text)
+            elif language == "auto":
                 for tmp in LangSegmenter.getTexts(text):
                     langlist.append(tmp["lang"])
                     textlist.append(tmp["text"])
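What this hunk amounts to: every `all_*` mode now flows through LangSegmenter with a pinned default language, instead of taking the old formattext/mix_text_normalize path. A minimal sketch of the new dispatch, with the import path and the `segment` helper name assumed for illustration (this is a distillation, not the repo's API):

    from text.LangSegmenter import LangSegmenter  # import path assumed

    DEFAULT_LANG = {"all_zh": "zh", "all_yue": "zh", "all_ja": "ja", "all_ko": "ko"}

    def segment(text, language):
        textlist, langlist = [], []
        if language in DEFAULT_LANG:
            for tmp in LangSegmenter.getTexts(text, DEFAULT_LANG[language]):
                lang = tmp["lang"]
                if language == "all_yue" and lang == "zh":
                    lang = "yue"  # Cantonese mode relabels zh segments as yue
                langlist.append(lang)
                textlist.append(tmp["text"])
        elif language == "en":
            langlist.append("en")  # English passes through as one segment
            textlist.append(text)
        else:  # "auto": free-running detection, no default language
            for tmp in LangSegmenter.getTexts(text):
                langlist.append(tmp["lang"])
                textlist.append(tmp["text"])
        return textlist, langlist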
@@ -160,9 +158,7 @@ class TextPreprocessor:
             else:
                 for tmp in LangSegmenter.getTexts(text):
                     if langlist:
-                        if (tmp["lang"] == "en" and langlist[-1] == "en") or (
-                            tmp["lang"] != "en" and langlist[-1] != "en"
-                        ):
+                        if (tmp["lang"] == "en" and langlist[-1] == "en") or (tmp["lang"] != "en" and langlist[-1] != "en"):
                             textlist[-1] += tmp["text"]
                             continue
                     if tmp["lang"] == "en":
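The collapsed conditional keeps the same merging rule: a segment is folded into its predecessor whenever both are English or both are non-English. A made-up helper isolating just that condition (note the or-expression is equivalent to comparing the two "is English" booleans):

    def merge_segments(segments):
        textlist, langlist = [], []
        for tmp in segments:
            if langlist and ((tmp["lang"] == "en") == (langlist[-1] == "en")):
                textlist[-1] += tmp["text"]  # same class: extend the previous chunk
                continue
            langlist.append(tmp["lang"])
            textlist.append(tmp["text"])
        return textlist, langlist

    # merge_segments([{"lang": "ja", "text": "こんにちは"}, {"lang": "zh", "text": "你好"}])
    # -> (["こんにちは你好"], ["ja"])  # two adjacent non-en segments become one chunk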
@@ -586,32 +586,31 @@ from text import chinese

 def get_phones_and_bert(text, language, version, final=False):
-    if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
-        formattext = text
-        while "  " in formattext:
-            formattext = formattext.replace("  ", " ")
-        if language == "all_zh":
-            if re.search(r"[A-Za-z]", formattext):
-                formattext = re.sub(r"[a-z]", lambda x: x.group(0).upper(), formattext)
-                formattext = chinese.mix_text_normalize(formattext)
-                return get_phones_and_bert(formattext, "zh", version)
-            else:
-                phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
-                bert = get_bert_feature(norm_text, word2ph).to(device)
-        elif language == "all_yue" and re.search(r"[A-Za-z]", formattext):
-            formattext = re.sub(r"[a-z]", lambda x: x.group(0).upper(), formattext)
-            formattext = chinese.mix_text_normalize(formattext)
-            return get_phones_and_bert(formattext, "yue", version)
-        else:
-            phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
-            bert = torch.zeros(
-                (1024, len(phones)),
-                dtype=torch.float16 if is_half == True else torch.float32,
-            ).to(device)
-    elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
+    text = re.sub(r' {2,}', ' ', text)
     textlist = []
     langlist = []
-        if language == "auto":
+    if language == "all_zh":
+        for tmp in LangSegmenter.getTexts(text,"zh"):
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "all_yue":
+        for tmp in LangSegmenter.getTexts(text,"zh"):
+            if tmp["lang"] == "zh":
+                tmp["lang"] = "yue"
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "all_ja":
+        for tmp in LangSegmenter.getTexts(text,"ja"):
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "all_ko":
+        for tmp in LangSegmenter.getTexts(text,"ko"):
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "en":
+        langlist.append("en")
+        textlist.append(text)
+    elif language == "auto":
         for tmp in LangSegmenter.getTexts(text):
             langlist.append(tmp["lang"])
             textlist.append(tmp["text"])
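Both copies of the function share one contract: `bert` holds one 1024-dimensional feature column per phone, and a zero tensor stands in when no Chinese BERT features apply. A quick shape check (the phone list here is a stand-in):

    import torch

    phones = list(range(12))  # stand-in for a phone-ID sequence
    bert = torch.zeros((1024, len(phones)), dtype=torch.float32)
    assert bert.shape == (1024, 12)  # one zero column per phone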
@@ -3,25 +3,19 @@ import re

 # silence jieba's logger
 import jieba

 jieba.setLogLevel(logging.CRITICAL)

 # relocate the fast_langdetect model cache
 from pathlib import Path
 import fast_langdetect

-fast_langdetect.infer._default_detector = fast_langdetect.infer.LangDetector(
-    fast_langdetect.infer.LangDetectConfig(
-        cache_dir=Path(__file__).parent.parent.parent / "pretrained_models" / "fast_langdetect"
-    )
-)
+fast_langdetect.infer._default_detector = fast_langdetect.infer.LangDetector(fast_langdetect.infer.LangDetectConfig(cache_dir=Path(__file__).parent.parent.parent / "pretrained_models" / "fast_langdetect"))

 from split_lang import LangSplitter

 def full_en(text):
-    pattern = r"^(?=.*[A-Za-z])[A-Za-z0-9\s\u0020-\u007E\u2000-\u206F\u3000-\u303F\uFF00-\uFFEF]+$"
+    pattern = r'^(?=.*[A-Za-z])[A-Za-z0-9\s\u0020-\u007E\u2000-\u206F\u3000-\u303F\uFF00-\uFFEF]+$'
     return bool(re.match(pattern, text))
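full_en asks "is this span effectively English?": the lookahead requires at least one Latin letter, and the character class then admits only ASCII, general punctuation, CJK symbols and punctuation, and full-width forms. A few probes of the pattern itself:

    import re

    pattern = r'^(?=.*[A-Za-z])[A-Za-z0-9\s\u0020-\u007E\u2000-\u206F\u3000-\u303F\uFF00-\uFFEF]+$'

    assert re.match(pattern, "ThinkPad T60!")  # letters, digits, ASCII punctuation
    assert not re.match(pattern, "12345")      # no letter: the lookahead fails
    assert not re.match(pattern, "T60发布")     # CJK ideographs fall outside the class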
@@ -40,7 +34,7 @@ def full_cjk(text):
         (0x2EBF0, 0x2EE5D),  # CJK Extension H
     ]

-    pattern = r"[0-9、-〜。!?.!?… /]+$"
+    pattern = r'[0-9、-〜。!?.!?… /]+$'

     cjk_text = ""
     for char in text:
@@ -59,28 +53,28 @@ def split_jako(tag_lang, item):

     lang_list: list[dict] = []
     tag = 0
-    for match in re.finditer(pattern, item["text"]):
+    for match in re.finditer(pattern, item['text']):
         if match.start() > tag:
-            lang_list.append({"lang": item["lang"], "text": item["text"][tag : match.start()]})
+            lang_list.append({'lang':item['lang'],'text':item['text'][tag:match.start()]})

         tag = match.end()
-        lang_list.append({"lang": tag_lang, "text": item["text"][match.start() : match.end()]})
+        lang_list.append({'lang':tag_lang,'text':item['text'][match.start():match.end()]})

-    if tag < len(item["text"]):
-        lang_list.append({"lang": item["lang"], "text": item["text"][tag : len(item["text"])]})
+    if tag < len(item['text']):
+        lang_list.append({'lang':item['lang'],'text':item['text'][tag:len(item['text'])]})

     return lang_list


 def merge_lang(lang_list, item):
-    if lang_list and item["lang"] == lang_list[-1]["lang"]:
-        lang_list[-1]["text"] += item["text"]
+    if lang_list and item['lang'] == lang_list[-1]['lang']:
+        lang_list[-1]['text'] += item['text']
     else:
         lang_list.append(item)
     return lang_list


-class LangSegmenter:
+class LangSegmenter():
     # default filter, based on the four languages gsv currently supports
     DEFAULT_LANG_MAP = {
         "zh": "zh",
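merge_lang is the accumulator used throughout getTexts below: it appends a segment, or concatenates the text onto the previous entry when the language repeats. For example:

    segs = []
    segs = merge_lang(segs, {'lang': 'zh', 'text': '当时'})
    segs = merge_lang(segs, {'lang': 'zh', 'text': '刚刚发布'})  # same lang: folded in
    segs = merge_lang(segs, {'lang': 'en', 'text': 'ThinkPad T60'})
    # segs == [{'lang': 'zh', 'text': '当时刚刚发布'}, {'lang': 'en', 'text': 'ThinkPad T60'}]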
@@ -93,25 +87,41 @@ class LangSegmenter:
         "en": "en",
     }

-    def getTexts(text):
+    def getTexts(text,default_lang = ""):
         lang_splitter = LangSplitter(lang_map=LangSegmenter.DEFAULT_LANG_MAP)
+        lang_splitter.merge_across_digit = False
         substr = lang_splitter.split_by_lang(text=text)

         lang_list: list[dict] = []

-        for _, item in enumerate(substr):
-            dict_item = {"lang": item.lang, "text": item.text}
-
-            # handle short English runs misdetected as another language
-            if full_en(dict_item["text"]):
-                dict_item["lang"] = "en"
+        have_num = False
+
+        for _, item in enumerate(substr):
+            dict_item = {'lang':item.lang,'text':item.text}
+
+            if dict_item['lang'] == 'digit':
+                if default_lang != "":
+                    dict_item['lang'] = default_lang
+                else:
+                    have_num = True
+                lang_list = merge_lang(lang_list,dict_item)
+                continue
+
+            # handle short English runs misdetected as another language
+            if full_en(dict_item['text']):
+                dict_item['lang'] = 'en'
+                lang_list = merge_lang(lang_list,dict_item)
+                continue
+
+            if default_lang != "":
+                dict_item['lang'] = default_lang
+                lang_list = merge_lang(lang_list,dict_item)
+                continue
+            else:
+                # handle Japanese embedded in non-Japanese text (CJK excluded)
                 ja_list: list[dict] = []
-                if dict_item["lang"] != "ja":
-                    ja_list = split_jako("ja", dict_item)
+                if dict_item['lang'] != 'ja':
+                    ja_list = split_jako('ja',dict_item)

                 if not ja_list:
                     ja_list.append(dict_item)
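Note the ordering inside the loop: the digit check runs first, the full_en rescue second, and only then does a non-empty default_lang relabel everything else, so English rescued by full_en survives even in all_zh mode. A hedged usage sketch, with the result shape inferred from the branches above rather than captured from a run:

    segs = LangSegmenter.getTexts("T60有4个接口", "zh")
    # plausible shape: [{'lang': 'en', 'text': 'T60'}, {'lang': 'zh', 'text': '有4个接口'}]
    # the digit run '4' is labeled 'zh' immediately because default_lang is set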
@@ -120,8 +130,8 @@ class LangSegmenter:
                 ko_list: list[dict] = []
                 temp_list: list[dict] = []
                 for _, ko_item in enumerate(ja_list):
-                    if ko_item["lang"] != "ko":
-                        ko_list = split_jako("ko", ko_item)
+                    if ko_item["lang"] != 'ko':
+                        ko_list = split_jako('ko',ko_item)

                     if ko_list:
                         temp_list.extend(ko_list)
@@ -131,10 +141,10 @@ class LangSegmenter:
                 # no Japanese/Korean embedded in other-language text
                 if len(temp_list) == 1:
                     # unknown language: check whether it is CJK
-                    if dict_item["lang"] == "x":
-                        cjk_text = full_cjk(dict_item["text"])
+                    if dict_item['lang'] == 'x':
+                        cjk_text = full_cjk(dict_item['text'])
                         if cjk_text:
-                            dict_item = {"lang": "zh", "text": cjk_text}
+                            dict_item = {'lang':'zh','text':cjk_text}
                             lang_list = merge_lang(lang_list,dict_item)
                         else:
                             lang_list = merge_lang(lang_list,dict_item)
@@ -146,26 +156,57 @@ class LangSegmenter:
                     # Japanese/Korean embedded in other-language text
                     for _, temp_item in enumerate(temp_list):
                         # unknown language: check whether it is CJK
-                        if temp_item["lang"] == "x":
-                            cjk_text = full_cjk(dict_item["text"])
+                        if temp_item['lang'] == 'x':
+                            cjk_text = full_cjk(temp_item['text'])
                             if cjk_text:
-                                dict_item = {"lang": "zh", "text": cjk_text}
-                                lang_list = merge_lang(lang_list, dict_item)
+                                lang_list = merge_lang(lang_list,{'lang':'zh','text':cjk_text})
                             else:
-                                lang_list = merge_lang(lang_list, dict_item)
+                                lang_list = merge_lang(lang_list,temp_item)
                         else:
                             lang_list = merge_lang(lang_list,temp_item)

+        # digits present
+        if have_num:
+            temp_list = lang_list
+            lang_list = []
+            for i, temp_item in enumerate(temp_list):
+                if temp_item['lang'] == 'digit':
+                    if default_lang:
+                        temp_item['lang'] = default_lang
+                    elif lang_list and i == len(temp_list) - 1:
+                        temp_item['lang'] = lang_list[-1]['lang']
+                    elif not lang_list and i < len(temp_list) - 1:
+                        temp_item['lang'] = temp_list[1]['lang']
+                    elif lang_list and i < len(temp_list) - 1:
+                        if lang_list[-1]['lang'] == temp_list[i + 1]['lang']:
+                            temp_item['lang'] = lang_list[-1]['lang']
+                        elif lang_list[-1]['text'][-1] in [",",".","!","?",",","。","!","?"]:
+                            temp_item['lang'] = temp_list[i + 1]['lang']
+                        elif temp_list[i + 1]['text'][0] in [",",".","!","?",",","。","!","?"]:
+                            temp_item['lang'] = lang_list[-1]['lang']
+                        elif temp_item['text'][-1] in ["。","."]:
+                            temp_item['lang'] = lang_list[-1]['lang']
+                        elif len(lang_list[-1]['text']) >= len(temp_list[i + 1]['text']):
+                            temp_item['lang'] = lang_list[-1]['lang']
+                        else:
+                            temp_item['lang'] = temp_list[i + 1]['lang']
+                    else:
+                        temp_item['lang'] = 'zh'
+
+                lang_list = merge_lang(lang_list,temp_item)

         # filter out unknown ('x') segments
         temp_list = lang_list
         lang_list = []
         for _, temp_item in enumerate(temp_list):
-            if temp_item["lang"] == "x":
+            if temp_item['lang'] == 'x':
                 if lang_list:
-                    temp_item["lang"] = lang_list[-1]["lang"]
+                    temp_item['lang'] = lang_list[-1]['lang']
                 elif len(temp_list) > 1:
-                    temp_item["lang"] = temp_list[1]["lang"]
+                    temp_item['lang'] = temp_list[1]['lang']
                 else:
-                    temp_item["lang"] = "zh"
+                    temp_item['lang'] = 'zh'

             lang_list = merge_lang(lang_list,temp_item)
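The digit pass only runs when no default language was pinned. Reading the branches in order: a digit run at the end of the list inherits the previous segment's language; one at the start inherits the following segment's; in the middle, agreement between both neighbors wins, then punctuation boundaries decide (trailing punctuation on the left pushes the digits toward the right neighbor and vice versa; a digit run ending in '。' or '.' binds left), then the longer neighbor text wins; a digit-only input falls back to 'zh'. Hand-traced outcomes of those branches, not captured output:

    # [zh '共'] [digit '42'] [zh '个']   -> neighbors agree, '42' is labeled zh
    # [en 'Act'] [digit '3。'] [zh '开始'] -> '3。' ends with '。', so it binds left (en)
    # [digit '42'] alone                 -> no neighbors at all, falls back to zh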
@@ -178,3 +219,7 @@ if __name__ == "__main__":

     text = "ねえ、知ってる?最近、僕は天文学を勉強してるんだ。君の瞳が星空みたいにキラキラしてるからさ。"
     print(LangSegmenter.getTexts(text))
+
+    text = "当时ThinkPad T60刚刚发布,一同推出的还有一款名为Advanced Dock的扩展坞配件。这款扩展坞通过连接T60底部的插槽,扩展出包括PCIe在内的一大堆接口,并且自带电源,让T60可以安装桌面显卡来提升性能。"
+    print(LangSegmenter.getTexts(text,"zh"))
+    print(LangSegmenter.getTexts(text))
@@ -181,20 +181,6 @@ def text_normalize(text):
     return dest_text


-# text normalization that does not strip English
-def mix_text_normalize(text):
-    # https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/zh_normalization
-    tx = TextNormalizer()
-    sentences = tx.normalize(text)
-    dest_text = ""
-    for sentence in sentences:
-        dest_text += replace_punctuation_with_en(sentence)
-
-    # avoid reference leakage caused by repeated punctuation
-    dest_text = replace_consecutive_punctuation(dest_text)
-    return dest_text
-
-
 if __name__ == "__main__":
     text = "啊——但是《原神》是由,米哈\游自主,研发的一款全.新开放世界.冒险游戏"
     text = "呣呣呣~就是…大人的鼹鼠党吧?"
@@ -326,20 +326,6 @@ def text_normalize(text):
     return dest_text


-# text normalization that does not strip English
-def mix_text_normalize(text):
-    # https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/zh_normalization
-    tx = TextNormalizer()
-    sentences = tx.normalize(text)
-    dest_text = ""
-    for sentence in sentences:
-        dest_text += replace_punctuation_with_en(sentence)
-
-    # avoid reference leakage caused by repeated punctuation
-    dest_text = replace_consecutive_punctuation(dest_text)
-    return dest_text
-
-
 if __name__ == "__main__":
     text = "啊——但是《原神》是由,米哈\游自主,研发的一款全.新开放世界.冒险游戏"
     text = "呣呣呣~就是…大人的鼹鼠党吧?"
@@ -256,6 +256,24 @@ def replace_to_range(match) -> str:
     return result


+RE_VERSION_NUM = re.compile(r"((\d+)(\.\d+)(\.\d+)?(\.\d+)+)")
+def replace_vrsion_num(match) -> str:
+    """
+    Args:
+        match (re.Match)
+    Returns:
+        str
+    """
+    result = ""
+    for c in match.group(1):
+        if c == ".":
+            result += "点"
+        else:
+            result += num2str(c)
+    return result
+
+
 def _get_value(value_string: str, use_zero: bool = True) -> List[str]:
     stripped = value_string.lstrip("0")
     if len(stripped) == 0:
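replace_vrsion_num walks the matched version string character by character, so multi-dot versions are read digit by digit rather than as decimals. A self-contained re-creation with a local digit table standing in for num2str (the names here are illustrative, not the module's):

    import re

    RE_VERSION_NUM = re.compile(r"((\d+)(\.\d+)(\.\d+)?(\.\d+)+)")
    DIGITS = dict(zip("0123456789", "零一二三四五六七八九"))  # stand-in for num2str

    def verbalize_version(s):
        # same per-character walk as replace_vrsion_num above
        return "".join("点" if c == "." else DIGITS[c] for c in s)

    m = RE_VERSION_NUM.search("安装包版本为1.2.3.4。")
    print(verbalize_version(m.group(1)))  # -> 一点二点三点四

Note the pattern requires at least two dots, so plain decimals like 3.20 are left to the decimal rule.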
@@ -308,7 +326,11 @@ def num2str(value_string: str) -> str:

     result = verbalize_cardinal(integer)

-    decimal = decimal.rstrip("0")
+    if decimal.endswith("0"):
+        decimal = decimal.rstrip("0") + "0"
+    else:
+        decimal = decimal.rstrip("0")

     if decimal:
         # '.22' is verbalized as '零点二二'
         # '3.20' is verbalized as '三点二零'
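The new branch preserves exactly one trailing zero when the fractional part ends in zeros, so '3.20' reads as 三点二零 instead of collapsing to 三点二. A quick trace of the string handling:

    for decimal in ("2", "20", "200"):
        if decimal.endswith("0"):
            out = decimal.rstrip("0") + "0"
        else:
            out = decimal.rstrip("0")
        print(decimal, "->", out)  # 2 -> 2, 20 -> 20, 200 -> 20

    # hence 3.2 -> 三点二, 3.20 -> 三点二零, 3.200 -> 三点二零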
@@ -25,6 +25,7 @@ from .chronology import replace_time
 from .constants import F2H_ASCII_LETTERS
 from .constants import F2H_DIGITS
 from .constants import F2H_SPACE
+from .num import RE_VERSION_NUM
 from .num import RE_DECIMAL_NUM
 from .num import RE_DEFAULT_NUM
 from .num import RE_FRAC
@@ -36,6 +37,7 @@ from .num import RE_RANGE
 from .num import RE_TO_RANGE
 from .num import RE_ASMD
 from .num import RE_POWER
+from .num import replace_vrsion_num
 from .num import replace_default_num
 from .num import replace_frac
 from .num import replace_negative_num
@@ -158,6 +160,7 @@ class TextNormalizer:
         sentence = RE_RANGE.sub(replace_range, sentence)

         sentence = RE_INTEGER.sub(replace_negative_num, sentence)
+        sentence = RE_VERSION_NUM.sub(replace_vrsion_num, sentence)
         sentence = RE_DECIMAL_NUM.sub(replace_number, sentence)
         sentence = RE_POSITIVE_QUANTIFIERS.sub(replace_positive_quantifier, sentence)
         sentence = RE_DEFAULT_NUM.sub(replace_default_num, sentence)
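The placement is deliberate: the version rule has to fire before RE_DECIMAL_NUM, otherwise '1.2.3' would be consumed as the decimal '1.2' followed by a stray '.3'. A small check of that interaction (the '[ver]' stand-in replaces the real verbalizer):

    import re

    RE_VERSION_NUM = re.compile(r"((\d+)(\.\d+)(\.\d+)?(\.\d+)+)")

    s = "固件1.2.3已发布"
    s = RE_VERSION_NUM.sub("[ver]", s)  # stand-in for replace_vrsion_num
    print(s)  # -> 固件[ver]已发布: nothing left for the decimal rule to misparse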
api.py (47 changed lines)
@@ -543,32 +543,31 @@ from text import chinese

 def get_phones_and_bert(text, language, version, final=False):
-    if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
-        formattext = text
-        while "  " in formattext:
-            formattext = formattext.replace("  ", " ")
-        if language == "all_zh":
-            if re.search(r"[A-Za-z]", formattext):
-                formattext = re.sub(r"[a-z]", lambda x: x.group(0).upper(), formattext)
-                formattext = chinese.mix_text_normalize(formattext)
-                return get_phones_and_bert(formattext, "zh", version)
-            else:
-                phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
-                bert = get_bert_feature(norm_text, word2ph).to(device)
-        elif language == "all_yue" and re.search(r"[A-Za-z]", formattext):
-            formattext = re.sub(r"[a-z]", lambda x: x.group(0).upper(), formattext)
-            formattext = chinese.mix_text_normalize(formattext)
-            return get_phones_and_bert(formattext, "yue", version)
-        else:
-            phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
-            bert = torch.zeros(
-                (1024, len(phones)),
-                dtype=torch.float16 if is_half == True else torch.float32,
-            ).to(device)
-    elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
+    text = re.sub(r' {2,}', ' ', text)
     textlist = []
     langlist = []
-        if language == "auto":
+    if language == "all_zh":
+        for tmp in LangSegmenter.getTexts(text,"zh"):
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "all_yue":
+        for tmp in LangSegmenter.getTexts(text,"zh"):
+            if tmp["lang"] == "zh":
+                tmp["lang"] = "yue"
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "all_ja":
+        for tmp in LangSegmenter.getTexts(text,"ja"):
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "all_ko":
+        for tmp in LangSegmenter.getTexts(text,"ko"):
+            langlist.append(tmp["lang"])
+            textlist.append(tmp["text"])
+    elif language == "en":
+        langlist.append("en")
+        textlist.append(text)
+    elif language == "auto":
        for tmp in LangSegmenter.getTexts(text):
             langlist.append(tmp["lang"])
             textlist.append(tmp["text"])