Mirror of https://github.com/RVC-Boss/GPT-SoVITS.git (synced 2025-04-26 19:46:37 +08:00)
Add files via upload
commit 5addea5695, parent eb7e864118
tools/i18n/i18n.py (new file, +27)
@@ -0,0 +1,27 @@
import json
import locale
import os


def load_language_list(language):
    with open(f"./i18n/locale/{language}.json", "r", encoding="utf-8") as f:
        language_list = json.load(f)
    return language_list


class I18nAuto:
    def __init__(self, language=None):
        if language in ["Auto", None]:
            # locale.getlocale() can fail to identify the system language and
            # return (None, None), so use getdefaultlocale() instead
            language = locale.getdefaultlocale()[0]
        if not os.path.exists(f"./i18n/locale/{language}.json"):
            language = "en_US"
        self.language = language
        self.language_map = load_language_list(language)

    def __call__(self, key):
        return self.language_map.get(key, key)

    def __repr__(self):
        return "Use Language: " + self.language
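A minimal usage sketch of the class above; the import path and the locale file are assumptions, and the process must run from a directory containing i18n/locale/ because load_language_list opens a relative path.

# minimal usage sketch; assumes an i18n/locale/en_US.json exists relative to
# the working directory, and that i18n.py is importable from sys.path
from i18n import I18nAuto  # hypothetical import path

i18n = I18nAuto()        # "Auto"/None: detect the system locale, fall back to en_US
print(i18n)              # e.g. Use Language: en_US
print(i18n("some key"))  # the mapped translation, or the key itself when unmapped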
tools/i18n/locale_diff.py (new file, +47)
@@ -0,0 +1,47 @@
import json
import os
from collections import OrderedDict

# Define the standard (reference) file
standard_file = "locale/zh_CN.json"

# Find all other JSON files in the directory
dir_path = "locale/"
languages = [
    os.path.join(dir_path, f)
    for f in os.listdir(dir_path)
    # compare against the basename; os.listdir yields bare file names, so
    # comparing against the full standard_file path would never exclude it
    if f.endswith(".json") and f != os.path.basename(standard_file)
]

# Load the standard file
with open(standard_file, "r", encoding="utf-8") as f:
    standard_data = json.load(f, object_pairs_hook=OrderedDict)

# Loop through each language file
for lang_file in languages:
    # Load the language file
    with open(lang_file, "r", encoding="utf-8") as f:
        lang_data = json.load(f, object_pairs_hook=OrderedDict)

    # Keys in the standard file that are missing from the language file
    diff = set(standard_data.keys()) - set(lang_data.keys())

    # Keys in the language file that are absent from the standard file
    miss = set(lang_data.keys()) - set(standard_data.keys())

    # Add any missing keys to the language file (untranslated: value = key)
    for key in diff:
        lang_data[key] = key

    # Delete any extra keys from the language file
    for key in miss:
        del lang_data[key]

    # Sort the keys of the language file to match the order of the standard file
    lang_data = OrderedDict(
        sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0]))
    )

    # Save the updated language file; sort_keys=True would re-sort the keys
    # alphabetically and discard the ordering established above, so omit it
    with open(lang_file, "w", encoding="utf-8") as f:
        json.dump(lang_data, f, ensure_ascii=False, indent=4)
        f.write("\n")
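To make the key synchronization concrete, here is a small self-contained sketch of the same set operations on in-memory dicts; the keys and translations are hypothetical, not taken from the actual locale files:

from collections import OrderedDict

standard = OrderedDict([("OK", "好"), ("Cancel", "取消"), ("Open", "打开")])
lang = OrderedDict([("Cancel", "Abbrechen"), ("Quit", "Beenden")])

lang.update({k: k for k in standard.keys() - lang.keys()})  # add missing keys as-is
for k in lang.keys() - standard.keys():                     # drop keys not in standard
    del lang[k]
order = list(standard)                                      # re-order to match standard
lang = OrderedDict(sorted(lang.items(), key=lambda kv: order.index(kv[0])))
print(lang)  # OrderedDict([('OK', 'OK'), ('Cancel', 'Abbrechen'), ('Open', 'Open')])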
tools/i18n/scan_i18n.py (new file, +75)
@@ -0,0 +1,75 @@
import ast
import glob
import json
from collections import OrderedDict


def extract_i18n_strings(node):
    i18n_strings = []

    # collect string literals passed to calls of the form i18n("...")
    if (
        isinstance(node, ast.Call)
        and isinstance(node.func, ast.Name)
        and node.func.id == "i18n"
    ):
        for arg in node.args:
            # ast.Str is deprecated since Python 3.8; ast.Constant covers it
            if isinstance(arg, ast.Constant) and isinstance(arg.value, str):
                i18n_strings.append(arg.value)

    for child_node in ast.iter_child_nodes(node):
        i18n_strings.extend(extract_i18n_strings(child_node))

    return i18n_strings


# scan the directory for all .py files (recursively),
# parse each file into an AST, and extract its i18n strings
strings = []
for filename in glob.iglob("**/*.py", recursive=True):
    with open(filename, "r", encoding="utf-8") as f:
        code = f.read()
        if "I18nAuto" in code:
            tree = ast.parse(code)
            i18n_strings = extract_i18n_strings(tree)
            print(filename, len(i18n_strings))
            strings.extend(i18n_strings)
code_keys = set(strings)
# sample per-file counts from a previous scan:
"""
n_i18n.py
gui_v1.py 26
app.py 16
infer-web.py 147
scan_i18n.py 0
i18n.py 0
lib/train/process_ckpt.py 1
"""
print()
print("Total unique:", len(code_keys))


# Compare against the standard locale file
standard_file = "i18n/locale/zh_CN.json"
with open(standard_file, "r", encoding="utf-8") as f:
    standard_data = json.load(f, object_pairs_hook=OrderedDict)
standard_keys = set(standard_data.keys())

# Keys in the standard file that no longer appear in the code
unused_keys = standard_keys - code_keys
print("Unused keys:", len(unused_keys))
for unused_key in unused_keys:
    print("\t", unused_key)

# Keys used in the code but missing from the standard file
missing_keys = code_keys - standard_keys
print("Missing keys:", len(missing_keys))
for missing_key in missing_keys:
    print("\t", missing_key)

# Rebuild the standard file from the keys found in the code
code_keys_dict = OrderedDict()
for s in strings:
    code_keys_dict[s] = s

# write back
with open(standard_file, "w", encoding="utf-8") as f:
    json.dump(code_keys_dict, f, ensure_ascii=False, indent=4, sort_keys=True)
    f.write("\n")
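As a quick illustration of the AST walk, this feeds a hypothetical source snippet through extract_i18n_strings as defined above; non-literal arguments are skipped:

import ast

# hypothetical input: two i18n() calls with string literals, one with a variable
sample = '''
print(i18n("加载模型"))
msg = i18n("转换中")
other = i18n(some_variable)
'''
tree = ast.parse(sample)
print(extract_i18n_strings(tree))  # ['加载模型', '转换中']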