From aed4935fcea3b36da6d2080246cba71524b6bf6b Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Wed, 24 Jan 2024 16:41:23 +0800 Subject: [PATCH 01/25] mps support --- webui.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 4461056b..59eb0ffc 100644 --- a/webui.py +++ b/webui.py @@ -45,14 +45,17 @@ i18n = I18nAuto() from scipy.io import wavfile from tools.my_utils import load_audio from multiprocessing import cpu_count + +os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 当遇到mps不支持的步骤时使用cpu + n_cpu=cpu_count() -# 判断是否有能用来训练和加速推理的N卡 ngpu = torch.cuda.device_count() gpu_infos = [] mem = [] if_gpu_ok = False +# 判断是否有能用来训练和加速推理的N卡 if torch.cuda.is_available() or ngpu != 0: for i in range(ngpu): gpu_name = torch.cuda.get_device_name(i) @@ -61,6 +64,12 @@ if torch.cuda.is_available() or ngpu != 0: if_gpu_ok = True # 至少有一张能用的N卡 gpu_infos.append("%s\t%s" % (i, gpu_name)) mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4)) +# 判断是否支持mps加速 +if torch.backends.mps.is_available(): + if_gpu_ok = True + gpu_infos.append("%s\t%s" % ("0", "Apple GPU")) + mem.append(psutil.virtual_memory().total/ 1024 / 1024 / 1024) # 实测使用系统内存作为显存不会爆显存 + if if_gpu_ok and len(gpu_infos) > 0: gpu_info = "\n".join(gpu_infos) default_batch_size = min(mem) // 2 From 382102c9d03807062d2a85601ca8ea65cd13db1c Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Wed, 24 Jan 2024 16:45:13 +0800 Subject: [PATCH 02/25] mps support, optimized device selection --- config.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/config.py b/config.py index 8b5f378f..c9124bf3 100644 --- a/config.py +++ b/config.py @@ -1,5 +1,6 @@ import sys,os +import torch # 推理用的指定模型 sovits_path = "" @@ -14,7 +15,12 @@ pretrained_gpt_path = "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch= exp_root = "logs" python_exec = sys.executable or "python" -infer_device = "cuda" +if torch.cuda.is_available(): + infer_device = "cuda" +elif torch.mps.is_available(): + infer_device = "mps" +else: + infer_device = "cpu" webui_port_main = 9874 webui_port_uvr5 = 9873 From cb9d8fe8a5dfa4bc4c3aa144f1fb8772e3e0c065 Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Wed, 24 Jan 2024 16:47:52 +0800 Subject: [PATCH 03/25] mps support --- api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/api.py b/api.py index 376b0bcf..0070a160 100644 --- a/api.py +++ b/api.py @@ -35,7 +35,7 @@ parser.add_argument("-dr", "--default_refer_path", type=str, default="", parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="默认参考音频文本") parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="默认参考音频语种") -parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu") +parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu / mps") parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880") parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 127.0.0.1") parser.add_argument("-fp", "--full_precision", action="store_true", default=False, help="覆盖config.is_half为False, 使用全精度") @@ -290,6 +290,7 @@ def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_lan wav.seek(0) torch.cuda.empty_cache() + torch.mps.empty_cache() return StreamingResponse(wav, media_type="audio/wav") From a8e603445fdefbbce53c833987b8df32dfa0749e Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Wed, 
24 Jan 2024 17:27:58 +0800 Subject: [PATCH 04/25] support mps, optimized device selection --- GPT_SoVITS/inference_webui.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index fd04ac8f..1d417b15 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -35,7 +35,13 @@ from my_utils import load_audio from tools.i18n.i18n import I18nAuto i18n = I18nAuto() -device = "cuda" +if torch.cuda.is_available(): + device = "cuda" +elif torch.mps.is_available(): + device = "mps" +else: + device = "cpu" + tokenizer = AutoTokenizer.from_pretrained(bert_path) bert_model = AutoModelForMaskedLM.from_pretrained(bert_path) if is_half == True: From 8069264e642f6e9a9a37f88a78ae1dd788e7865c Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Wed, 24 Jan 2024 17:30:49 +0800 Subject: [PATCH 05/25] mps support --- GPT_SoVITS/s1_train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/GPT_SoVITS/s1_train.py b/GPT_SoVITS/s1_train.py index 4a770062..db7b9a31 100644 --- a/GPT_SoVITS/s1_train.py +++ b/GPT_SoVITS/s1_train.py @@ -116,9 +116,9 @@ def main(args): devices=-1, benchmark=False, fast_dev_run=False, - strategy=DDPStrategy( + strategy = "auto" if torch.mps.is_available() else DDPStrategy( process_group_backend="nccl" if platform.system() != "Windows" else "gloo" - ), + ), # mps 不支持多节点训练 precision=config["train"]["precision"], logger=logger, num_sanity_val_steps=0, From 07a5339691e786299d5a96364297be3ddedd3148 Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Wed, 24 Jan 2024 19:37:47 +0800 Subject: [PATCH 06/25] mps support --- GPT_SoVITS/AR/data/bucket_sampler.py | 7 +- GPT_SoVITS/inference_webui.py | 4 +- GPT_SoVITS/prepare_datasets/1-get-text.py | 2 +- .../prepare_datasets/2-get-hubert-wav32k.py | 2 +- GPT_SoVITS/prepare_datasets/3-get-semantic.py | 2 +- GPT_SoVITS/s1_train.py | 2 +- GPT_SoVITS/s2_train.py | 82 +++++++++++++------ config.py | 2 +- 8 files changed, 70 insertions(+), 33 deletions(-) diff --git a/GPT_SoVITS/AR/data/bucket_sampler.py b/GPT_SoVITS/AR/data/bucket_sampler.py index 7d752db5..647491f7 100644 --- a/GPT_SoVITS/AR/data/bucket_sampler.py +++ b/GPT_SoVITS/AR/data/bucket_sampler.py @@ -41,12 +41,13 @@ class DistributedBucketSampler(Sampler[T_co]): if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") - num_replicas = dist.get_world_size() + num_replicas = dist.get_world_size() if torch.cuda.is_available() else 1 if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") - rank = dist.get_rank() - torch.cuda.set_device(rank) + rank = dist.get_rank() if torch.cuda.is_available() else 0 + if torch.cuda.is_available(): + torch.cuda.set_device(rank) if rank >= num_replicas or rank < 0: raise ValueError( "Invalid rank {}, rank should be in the interval" diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 1d417b15..79e4a82d 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -35,9 +35,11 @@ from my_utils import load_audio from tools.i18n.i18n import I18nAuto i18n = I18nAuto() +os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 确保直接启动推理UI时也能够设置。 + if torch.cuda.is_available(): device = "cuda" -elif torch.mps.is_available(): +elif torch.backends.mps.is_available(): device = "mps" else: device = "cpu" diff --git a/GPT_SoVITS/prepare_datasets/1-get-text.py b/GPT_SoVITS/prepare_datasets/1-get-text.py 
index 85796931..b4a145cb 100644 --- a/GPT_SoVITS/prepare_datasets/1-get-text.py +++ b/GPT_SoVITS/prepare_datasets/1-get-text.py @@ -46,7 +46,7 @@ if os.path.exists(txt_path) == False: bert_dir = "%s/3-bert" % (opt_dir) os.makedirs(opt_dir, exist_ok=True) os.makedirs(bert_dir, exist_ok=True) - device = "cuda:0" + device = "cuda:0" if torch.cuda.is_available() else "mps" tokenizer = AutoTokenizer.from_pretrained(bert_pretrained_dir) bert_model = AutoModelForMaskedLM.from_pretrained(bert_pretrained_dir) if is_half == True: diff --git a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py index 71b48a96..31e80681 100644 --- a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py +++ b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py @@ -47,7 +47,7 @@ os.makedirs(wav32dir,exist_ok=True) maxx=0.95 alpha=0.5 -device="cuda:0" +device="cuda:0" if torch.cuda.is_available() else "mps" model=cnhubert.get_model() # is_half=False if(is_half==True): diff --git a/GPT_SoVITS/prepare_datasets/3-get-semantic.py b/GPT_SoVITS/prepare_datasets/3-get-semantic.py index 7cee6e4d..69eea07a 100644 --- a/GPT_SoVITS/prepare_datasets/3-get-semantic.py +++ b/GPT_SoVITS/prepare_datasets/3-get-semantic.py @@ -38,7 +38,7 @@ semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part) if os.path.exists(semantic_path) == False: os.makedirs(opt_dir, exist_ok=True) - device = "cuda:0" + device = "cuda:0" if torch.cuda.is_available() else "mps" hps = utils.get_hparams_from_file(s2config_path) vq_model = SynthesizerTrn( hps.data.filter_length // 2 + 1, diff --git a/GPT_SoVITS/s1_train.py b/GPT_SoVITS/s1_train.py index db7b9a31..30c167e5 100644 --- a/GPT_SoVITS/s1_train.py +++ b/GPT_SoVITS/s1_train.py @@ -116,7 +116,7 @@ def main(args): devices=-1, benchmark=False, fast_dev_run=False, - strategy = "auto" if torch.mps.is_available() else DDPStrategy( + strategy = "auto" if torch.backends.mps.is_available() else DDPStrategy( process_group_backend="nccl" if platform.system() != "Windows" else "gloo" ), # mps 不支持多节点训练 precision=config["train"]["precision"], diff --git a/GPT_SoVITS/s2_train.py b/GPT_SoVITS/s2_train.py index d2ec262f..e6b64f6b 100644 --- a/GPT_SoVITS/s2_train.py +++ b/GPT_SoVITS/s2_train.py @@ -44,9 +44,12 @@ global_step = 0 def main(): """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." + assert torch.cuda.is_available() or torch.backends.mps.is_available(), "Only GPU training is allowed." 
- n_gpus = torch.cuda.device_count() + if torch.backends.mps.is_available(): + n_gpus = 1 + else: + n_gpus = torch.cuda.device_count() os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = str(randint(20000, 55555)) @@ -70,13 +73,14 @@ def run(rank, n_gpus, hps): writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval")) dist.init_process_group( - backend="gloo" if os.name == "nt" else "nccl", + backend = "gloo" if os.name == "nt" or torch.backends.mps.is_available() else "nccl", init_method="env://", world_size=n_gpus, rank=rank, ) torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) + if torch.cuda.is_available(): + torch.cuda.set_device(rank) train_dataset = TextAudioSpeakerLoader(hps.data) ######## train_sampler = DistributedBucketSampler( @@ -128,9 +132,14 @@ def run(rank, n_gpus, hps): hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **hps.model, - ).cuda(rank) + ).cuda(rank) if torch.cuda.is_available() else SynthesizerTrn( + hps.data.filter_length // 2 + 1, + hps.train.segment_size // hps.data.hop_length, + n_speakers=hps.data.n_speakers, + **hps.model, + ).to("mps") - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) + net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) if torch.cuda.is_available() else MultiPeriodDiscriminator(hps.model.use_spectral_norm).to("mps") for name, param in net_g.named_parameters(): if not param.requires_grad: print(name, "not requires_grad") @@ -174,8 +183,12 @@ def run(rank, n_gpus, hps): betas=hps.train.betas, eps=hps.train.eps, ) - net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) + if torch.cuda.is_available(): + net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) + net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) + else: + net_g = net_g.to("mps") + net_d = net_d.to("mps") try: # 如果能加载自动resume _, _, _, epoch_str = utils.load_checkpoint( @@ -205,6 +218,9 @@ def run(rank, n_gpus, hps): net_g.module.load_state_dict( torch.load(hps.train.pretrained_s2G, map_location="cpu")["weight"], strict=False, + ) if torch.cuda.is_available() else net_g.load_state_dict( + torch.load(hps.train.pretrained_s2G, map_location="cpu")["weight"], + strict=False, ) ) ##测试不加载优化器 if hps.train.pretrained_s2D != "": @@ -213,6 +229,8 @@ def run(rank, n_gpus, hps): print( net_d.module.load_state_dict( torch.load(hps.train.pretrained_s2D, map_location="cpu")["weight"] + ) if torch.cuda.is_available() else net_d.load_state_dict( + torch.load(hps.train.pretrained_s2D, map_location="cpu")["weight"] ) ) @@ -288,18 +306,26 @@ def train_and_evaluate( text, text_lengths, ) in tqdm(enumerate(train_loader)): - spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( - rank, non_blocking=True - ) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( - rank, non_blocking=True - ) - ssl = ssl.cuda(rank, non_blocking=True) - ssl.requires_grad = False - # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True) - text, text_lengths = text.cuda(rank, non_blocking=True), text_lengths.cuda( - rank, non_blocking=True - ) + if torch.cuda.is_available(): + spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( + rank, non_blocking=True + ) + y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( + rank, non_blocking=True + ) + ssl = ssl.cuda(rank, non_blocking=True) + ssl.requires_grad = False + # ssl_lengths = 
ssl_lengths.cuda(rank, non_blocking=True) + text, text_lengths = text.cuda(rank, non_blocking=True), text_lengths.cuda( + rank, non_blocking=True + ) + else: + spec, spec_lengths = spec.to("mps"), spec_lengths.to("mps") + y, y_lengths = y.to("mps"), y_lengths.to("mps") + ssl = ssl.to("mps") + ssl.requires_grad = False + # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True) + text, text_lengths = text.to("mps"), text_lengths.to("mps") with autocast(enabled=hps.train.fp16_run): ( @@ -500,13 +526,21 @@ def evaluate(hps, generator, eval_loader, writer_eval): text_lengths, ) in enumerate(eval_loader): print(111) - spec, spec_lengths = spec.cuda(), spec_lengths.cuda() - y, y_lengths = y.cuda(), y_lengths.cuda() - ssl = ssl.cuda() - text, text_lengths = text.cuda(), text_lengths.cuda() + if torch.cuda.is_available(): + spec, spec_lengths = spec.cuda(), spec_lengths.cuda() + y, y_lengths = y.cuda(), y_lengths.cuda() + ssl = ssl.cuda() + text, text_lengths = text.cuda(), text_lengths.cuda() + else: + spec, spec_lengths = spec.to("mps"), spec_lengths.to("mps") + y, y_lengths = y.to("mps"), y_lengths.to("mps") + ssl = ssl.to("mps") + text, text_lengths = text.to("mps"), text_lengths.to("mps") for test in [0, 1]: y_hat, mask, *_ = generator.module.infer( ssl, spec, spec_lengths, text, text_lengths, test=test + ) if torch.cuda.is_available() else generator.infer( + ssl, spec, spec_lengths, text, text_lengths, test=test ) y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length diff --git a/config.py b/config.py index c9124bf3..897f53c1 100644 --- a/config.py +++ b/config.py @@ -17,7 +17,7 @@ exp_root = "logs" python_exec = sys.executable or "python" if torch.cuda.is_available(): infer_device = "cuda" -elif torch.mps.is_available(): +elif torch.backends.mps.is_available(): infer_device = "mps" else: infer_device = "cpu" From 5111713ed7e82f6f32d65c3c2a5be9962211c85c Mon Sep 17 00:00:00 2001 From: Miuzarte <982809597@qq.com> Date: Wed, 24 Jan 2024 20:16:39 +0800 Subject: [PATCH 07/25] feat: api.py change refer --- api.py | 92 +++++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 75 insertions(+), 17 deletions(-) diff --git a/api.py b/api.py index 725b12da..60d59190 100644 --- a/api.py +++ b/api.py @@ -7,7 +7,7 @@ import torch import librosa import soundfile as sf from fastapi import FastAPI, Request, HTTPException -from fastapi.responses import StreamingResponse +from fastapi.responses import StreamingResponse, JSONResponse import uvicorn from transformers import AutoModelForMaskedLM, AutoTokenizer import numpy as np @@ -51,10 +51,18 @@ args = parser.parse_args() sovits_path = args.sovits_path gpt_path = args.gpt_path -default_refer_path = args.default_refer_path -default_refer_text = args.default_refer_text -default_refer_language = args.default_refer_language -has_preset = False + +class DefaultRefer: + def __init__(self, path, text, language): + self.path = args.default_refer_path + self.text = args.default_refer_text + self.language = args.default_refer_language + + def is_ready(self) -> bool: + return is_full(self.path, self.text, self.language) + + +default_refer = DefaultRefer(args.default_refer_path, args.default_refer_text, args.default_refer_language) device = args.device port = args.port @@ -68,15 +76,13 @@ if gpt_path == "": print(f"[WARN] 未指定GPT模型路径, fallback后当前值: {gpt_path}") # 指定默认参考音频, 调用方 未提供/未给全 参考音频参数时使用 -if default_refer_path == "" or default_refer_text == "" or default_refer_language == "": - default_refer_path, default_refer_text, default_refer_language 
= "", "", "" +if default_refer.path == "" or default_refer.text == "" or default_refer.language == "": + default_refer.path, default_refer.text, default_refer.language = "", "", "" print("[INFO] 未指定默认参考音频") - has_preset = False else: - print(f"[INFO] 默认参考音频路径: {default_refer_path}") - print(f"[INFO] 默认参考音频文本: {default_refer_text}") - print(f"[INFO] 默认参考音频语种: {default_refer_language}") - has_preset = True + print(f"[INFO] 默认参考音频路径: {default_refer.path}") + print(f"[INFO] 默认参考音频文本: {default_refer.text}") + print(f"[INFO] 默认参考音频语种: {default_refer.language}") is_half = g_config.is_half if args.full_precision: @@ -100,6 +106,20 @@ else: bert_model = bert_model.to(device) +def is_empty(*items): # 任意一项不为空返回False + for item in items: + if item is not None and item != "": + return False + return True + + +def is_full(*items): # 任意一项为空返回False + for item in items: + if item is None or item == "": + return False + return True + + def get_bert_feature(text, word2ph): with torch.no_grad(): inputs = tokenizer(text, return_tensors="pt") @@ -203,7 +223,7 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) else: wav16k = wav16k.to(device) zero_wav_torch = zero_wav_torch.to(device) - wav16k=torch.cat([wav16k,zero_wav_torch]) + wav16k = torch.cat([wav16k, zero_wav_torch]) ssl_content = ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2) # .float() codes = vq_model.extract_latent(ssl_content) prompt_semantic = codes[0, 0] @@ -264,6 +284,25 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(np.int16) +def handle_change(path, text, language): + if is_empty(path, text, language): + raise HTTPException(status_code=400, detail='缺少任意一项以下参数: "path", "text", "language"') + + if path != "" or path is not None: + default_refer.path = path + if text != "" or text is not None: + default_refer.text = text + if language != "" or language is not None: + default_refer.language = language + + print(f"[INFO] 当前默认参考音频路径: {default_refer.path}") + print(f"[INFO] 当前默认参考音频文本: {default_refer.text}") + print(f"[INFO] 当前默认参考音频语种: {default_refer.language}") + print(f"[INFO] is_ready: {default_refer.is_ready()}") + + return JSONResponse({"code": 0, "message": "Success"}, status_code=200) + + def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_language): if command == "/restart": os.execl(g_config.python_exec, g_config.python_exec, *sys.argv) @@ -277,11 +316,11 @@ def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_lan or prompt_language == "" or prompt_language is None ): refer_wav_path, prompt_text, prompt_language = ( - default_refer_path, - default_refer_text, - default_refer_language, + default_refer.path, + default_refer.text, + default_refer.language, ) - if not has_preset: + if not default_refer.is_ready(): raise HTTPException(status_code=400, detail="未指定参考音频且接口无预设") with torch.no_grad(): @@ -301,6 +340,25 @@ def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_lan app = FastAPI() +@app.post("/change_refer") +async def change_refer(request: Request): + json_post_raw = await request.json() + return handle_change( + json_post_raw.get("path"), + json_post_raw.get("text"), + json_post_raw.get("language") + ) + + +@app.get("/change_refer") +async def change_refer( + path: str = None, + text: str = None, + language: str = None +): + return handle_change(path, text, language) + + @app.post("/") async def 
tts_endpoint(request: Request): json_post_raw = await request.json() From 9092ac6d77aff76db72ca6bc09f3426e8adf61c6 Mon Sep 17 00:00:00 2001 From: Yuan-Man <68322456+Yuan-ManX@users.noreply.github.com> Date: Wed, 24 Jan 2024 21:40:36 +0800 Subject: [PATCH 08/25] Update ko_KR.json --- i18n/locale/ko_KR.json | 408 +++++++++++++++++++++++++++-------------- 1 file changed, 274 insertions(+), 134 deletions(-) diff --git a/i18n/locale/ko_KR.json b/i18n/locale/ko_KR.json index 816ed3f7..fa530603 100644 --- a/i18n/locale/ko_KR.json +++ b/i18n/locale/ko_KR.json @@ -1,135 +1,275 @@ { - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3이면 harvest 음높이 인식 결과에 중간값 필터를 사용합니다. 이 수치는 필터 반경이며, 사용하면 불명확한 음성을 어느정도 배제할 수 있습니다.", - "A模型权重": "A 모델 가중치", - "A模型路径": "A 모델 경로", - "B模型路径": "B 모델 경로", - "E:\\语音音频+标注\\米津玄师\\src": "E:\\음성 오디오+주석\\요네즈 켄시\\src", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 곡선 파일, 선택 사항, 한 줄에 하나의 음높이, 기본 F0 및 음높이 변화를 대체함", - "Index Rate": "인덱스 비율", - "Onnx导出": "Onnx 내보내기", - "Onnx输出路径": "Onnx 출력 경로", - "RVC模型路径": "RVC 모델 경로", - "ckpt处理": "ckpt 처리", - "harvest进程数": "harvest 프로세스 수", - "index文件路径不可包含中文": "인덱스 파일 경로에는 중국어를 포함할 수 없습니다.", - "pth文件路径不可包含中文": "pth 파일 경로에는 중국어를 포함할 수 없습니다.", - "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe 카드 번호 구성: '-'로 구분하여 입력된 다른 프로세스 카드 번호, 예를 들어 0-0-1은 카드 0에서 2개의 프로세스를 실행하고 카드 1에서 1개의 프로세스를 실행", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1: 실험 설정을 작성합니다. 실험 데이터는 logs 아래에 있으며, 각 실험마다 하나의 폴더가 있습니다. 실험 이름 경로를 수동으로 입력해야 하며, 이 안에는 실험 설정, 로그, 훈련으로 얻은 모델 파일이 포함되어 있습니다.", - "step1:正在处理数据": "step1: 데이터 처리 중", - "step2:正在提取音高&正在提取特征": "step2: 음높이 추출 및 특성 추출 중", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a: 훈련 폴더 아래 모든 오디오로 디코딩 가능한 파일을 자동으로 순회하고 슬라이스 정규화를 진행하여, 실험 디렉토리 아래에 2개의 wav 폴더를 생성합니다; 현재는 단일 사용자 훈련만 지원합니다.", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "step2b: CPU를 사용해 음높이를 추출합니다(모델이 음높이를 포함하는 경우), GPU를 사용해 특성을 추출합니다(카드 번호 선택)", - "step3: 填写训练设置, 开始训练模型和索引": "step3: 훈련 설정을 작성하고, 모델 및 인덱스 훈련을 시작합니다", - "step3a:正在训练模型": "step3a: 모델 훈련 중", - "一键训练": "원키 트레이닝", - "也可批量输入音频文件, 二选一, 优先读文件夹": "대량으로 오디오 파일 입력도 가능, 둘 중 하나 선택, 폴더 우선 읽기", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "인간 목소리와 반주 분리 대량 처리, UVR5 모델 사용.
올바른 폴더 경로 예: E:\\codes\\py39\\vits_vc_gpu\\백로서화 테스트 케이스(파일 탐색기 주소창에서 복사하면 됨).
모델은 세 가지 유형으로 나뉩니다:
1. 인간 목소리 보존: 하모니가 없는 오디오를 선택, 주요 인간 목소리를 HP5보다 더 잘 보존. 내장된 HP2와 HP3 모델, HP3는 약간의 반주를 놓칠 수 있지만 HP2보다는 인간 목소리를 조금 더 잘 보존합니다.
2. 오직 주요 인간 목소리 보존: 하모니가 있는 오디오를 선택, 주요 인간 목소리가 약간 약해질 수 있음. 내장된 HP5 모델 하나;
3. 울림 제거, 지연 제거 모델(by FoxJoy):
  (1)MDX-Net(onnx_dereverb): 양채널 울림에 대해서는 최선의 선택, 단채널 울림 제거 불가능;
 (234)DeEcho: 지연 효과 제거. Aggressive가 Normal보다 더 철저하게 제거하며, DeReverb는 추가로 울림 제거, 단일 채널 울림 제거 가능하지만 고주파 중심의 판형 울림은 완전히 제거하지 못함.
울림/지연 제거 시 참고:
1. DeEcho-DeReverb 모델의 처리 시간은 다른 두 DeEcho 모델의 거의 2배임;
2. MDX-Net-Dereverb 모델은 상당히 느림;
3. 개인적으로 추천하는 가장 깨끗한 구성은 MDX-Net 다음에 DeEcho-Aggressive 사용.", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "-로 구분하여 입력한 카드 번호, 예를 들어 0-1-2는 카드0, 카드1, 카드2 사용", - "伴奏人声分离&去混响&去回声": "반주 및 인간 목소리 분리 & 울림 제거 & 에코 제거", - "使用模型采样率": "모델 샘플링 레이트 사용", - "使用设备采样率": "장치 샘플링 레이트 사용", - "保存名": "저장 이름", - "保存的文件名, 默认空为和源文件同名": "저장된 파일 이름, 기본값은 원본 파일과 동일", - "保存的模型名不带后缀": "저장된 모델 이름은 접미사 없음", - "保存频率save_every_epoch": "저장 빈도 save_every_epoch", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "청결한 자음과 숨소리를 보호하고, 전자음의 찢어짐과 같은 아티팩트를 방지하며, 0.5까지 끌어올리면 보호가 활성화되지 않으며, 낮추면 보호 강도는 증가하지만 인덱싱 효과는 감소할 수 있음", - "修改": "수정", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "모델 정보 수정(오직 weights 폴더에서 추출된 소형 모델 파일만 지원)", - "停止音频转换": "오디오 변환 중지", - "全流程结束!": "전체 과정 완료!", - "刷新音色列表和索引路径": "음색 목록 및 인덱스 경로 새로고침", - "加载模型": "모델 로드", - "加载预训练底模D路径": "사전 훈련된 베이스 모델 D 경로 로드", - "加载预训练底模G路径": "사전 훈련된 베이스 모델 G 경로 로드", - "单次推理": "단일 추론", - "卸载音色省显存": "음색 언로드로 메모리 절약", - "变调(整数, 半音数量, 升八度12降八度-12)": "변조(정수, 반음 수, 옥타브 상승 12, 옥타브 하강 -12)", - "后处理重采样至最终采样率,0为不进行重采样": "후처리로 최종 샘플링 레이트까지 리샘플링, 0은 리샘플링하지 않음", - "否": "아니오", - "启用相位声码器": "위상 보코더 활성화", - "响应阈值": "응답 임계값", - "响度因子": "소리 크기 인자", - "处理数据": "데이터 처리", - "导出Onnx模型": "Onnx 모델 내보내기", - "导出文件格式": "파일 형식 내보내기", - "常见问题解答": "자주 묻는 질문 답변", - "常规设置": "일반 설정", - "开始音频转换": "오디오 변환 시작", - "很遗憾您这没有能用的显卡来支持您训练": "유감스럽게도 훈련을 지원할 수 있는 그래픽 카드가 없습니다", - "性能设置": "성능 설정", - "总训练轮数total_epoch": "총 훈련 회차 total_epoch", - "批量推理": "대량 추론", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "대량 변환, 변환할 오디오 폴더 입력, 또는 여러 오디오 파일 업로드, 지정된 폴더(기본값 opt)에 변환된 오디오 출력.", - "指定输出主人声文件夹": "주인공 목소리 출력 폴더 지정", - "指定输出文件夹": "출력 파일 폴더 지정", - "指定输出非主人声文件夹": "비주인공 목소리 출력 폴더 지정", - "推理时间(ms):": "추론 시간(ms):", - "推理音色": "추론 음색", - "提取": "추출", - "提取音高和处理数据使用的CPU进程数": "음높이 추출 및 데이터 처리에 사용되는 CPU 프로세스 수", - "是": "예", - "是否仅保存最新的ckpt文件以节省硬盘空间": "디스크 공간을 절약하기 위해 가장 최신의 ckpt 파일만 저장할지 여부", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "매 저장 시점마다 최종 작은 모델을 weights 폴더에 저장할지 여부", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "모든 훈련 세트를 VRAM에 캐시할지 여부. 10분 미만의 작은 데이터는 훈련 속도를 높이기 위해 캐시할 수 있으나, 큰 데이터는 VRAM을 초과하여 큰 속도 향상을 기대할 수 없음.", - "显卡信息": "그래픽 카드 정보", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "이 소프트웨어는 MIT 라이선스로 오픈 소스이며, 작성자는 소프트웨어에 대한 어떠한 제어도 가지지 않으며, 소프트웨어 사용자 및 소프트웨어에서 내보낸 소리를 전파하는 사용자는 모든 책임을 져야 함.
이 조항을 인정하지 않는 경우, 소프트웨어 패키지 내의 어떠한 코드나 파일도 사용하거나 인용할 수 없음. 자세한 내용은 루트 디렉토리의 LICENSE를 참조.", - "查看": "보기", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "모델 정보 보기(오직 weights 폴더에서 추출된 작은 모델 파일만 지원)", - "检索特征占比": "특징 검색 비율", - "模型": "모델", - "模型推理": "모델 추론", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "모델 추출(로그 폴더 아래 대용량 모델 경로 입력), 중간에 훈련을 중단하고 싶은 경우나 작은 파일 모델을 자동으로 저장하지 않은 경우, 또는 중간 모델을 테스트하고 싶은 경우에 적합", - "模型是否带音高指导": "모델이 음높이 지도를 포함하는지 여부", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "모델이 음높이 지도를 포함하는지 여부(노래에는 필수, 말하기에는 선택적)", - "模型是否带音高指导,1是0否": "모델이 음높이 지도를 포함하는지 여부, 1은 '예', 0은 '아니오'", - "模型版本型号": "모델 버전 및 모델", - "模型融合, 可用于测试音色融合": "모델 통합, 음색 통합 테스트에 사용 가능", - "模型路径": "모델 경로", - "每张显卡的batch_size": "각 GPU의 batch_size", - "淡入淡出长度": "페이드 인/아웃 길이", - "版本": "버전", - "特征提取": "특징 추출", - "特征检索库文件路径,为空则使用下拉的选择结果": "특징 검색 라이브러리 파일 경로, 비어 있으면 드롭다운 선택 결과 사용", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "남성에서 여성으로 전환 시 +12키 추천, 여성에서 남성으로 전환 시 -12키 추천, 음역대 폭발로 음색 왜곡이 발생할 경우 적절한 음역대로 조정 가능.", - "目标采样率": "목표 샘플링 비율", - "算法延迟(ms):": "알고리즘 지연(ms):", - "自动检测index路径,下拉式选择(dropdown)": "index 경로 자동 감지, 드롭다운 선택", - "融合": "통합", - "要改的模型信息": "수정할 모델 정보", - "要置入的模型信息": "삽입할 모델 정보", - "训练": "훈련", - "训练模型": "모델 훈련", - "训练特征索引": "특징 인덱스 훈련", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "훈련이 완료되었습니다. 콘솔 훈련 로그나 실험 폴더 내의 train.log를 확인하세요.", - "请指定说话人id": "화자 id를 지정해주세요.", - "请选择index文件": "index 파일을 선택해주세요.", - "请选择pth文件": "pth 파일을 선택해주세요.", - "请选择说话人id": "화자 id를 선택해주세요.", - "转换": "변환", - "输入实验名": "실험명을 입력하세요.", - "输入待处理音频文件夹路径": "처리할 오디오 파일 폴더 경로를 입력하세요.", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "처리할 오디오 파일 폴더 경로를 입력하세요(파일 관리자의 주소 표시줄에서 복사하세요).", - "输入待处理音频文件路径(默认是正确格式示例)": "처리할 오디오 파일 경로를 입력하세요(기본값은 올바른 형식의 예시입니다).", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "원본 볼륨 엔벨로프와 출력 볼륨 엔벨로프의 혼합 비율을 입력하세요. 1에 가까울수록 출력 엔벨로프를 더 많이 사용합니다.", - "输入监听": "모니터링 입력", - "输入训练文件夹路径": "학습시킬 파일 폴더의 경로를 입력하세요.", - "输入设备": "입력 장치", - "输入降噪": "입력 노이즈 감소", - "输出信息": "출력 정보", - "输出变声": "음성 변환 출력", - "输出设备": "출력 장치", - "输出降噪": "출력 노이즈 감소", - "输出音频(右下角三个点,点了可以下载)": "오디오 출력(오른쪽 하단 세 개의 점, 클릭하면 다운로드 가능)", - "选择.index文件": ".index 파일 선택", - "选择.pth文件": ".pth 파일 선택", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "음고 추출 알고리즘을 선택하세요. 노래 입력 시 pm으로 속도를 높일 수 있으며, harvest는 저음이 좋지만 매우 느리고, crepe는 효과가 좋지만 GPU를 많이 사용합니다.", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "음고 추출 알고리즘을 선택하세요. 노래 입력 시 pm으로 속도를 높일 수 있고, harvest는 저음이 좋지만 매우 느리며, crepe는 효과가 좋지만 GPU를 많이 사용하고, rmvpe는 가장 좋은 효과를 내면서 GPU를 적게 사용합니다.", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "음고 추출 알고리즘 선택: 노래 입력 시 pm으로 속도를 높일 수 있으며, 고품질 음성이지만 CPU가 낮을 때는 dio로 속도를 높일 수 있고, harvest는 품질이 더 좋지만 느리며, rmvpe는 최고의 효과를 내면서 CPU/GPU를 적게 사용합니다.", - "采样率:": "샘플링 레이트:", - "采样长度": "샘플링 길이", - "重载设备列表": "장치 목록 리로드", - "音调设置": "음조 설정", - "音频设备(请使用同种类驱动)": "오디오 장치(동일한 유형의 드라이버를 사용해주세요)", - "音高算法": "음고 알고리즘", - "额外推理时长": "추가적인 추론 시간" -} + "很遗憾您这没有能用的显卡来支持您训练": "아쉽게도 훈련을 지원할 수 있는 사용 가능한 그래픽 카드가 없습니다", + "UVR5已开启": "UVR5가 활성화되었습니다", + "UVR5已关闭": "UVR5가 비활성화되었습니다", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "본 소프트웨어는 MIT 라이선스로 오픈 소스로 제공되며, 제작자는 소프트웨어에 대해 어떠한 제어력도 가지지 않습니다. 소프트웨어 사용자 및 소프트웨어에서 내보낸 소리를 전파하는 자는 전적으로 책임져야 합니다.
이 조항을 인정하지 않으면 소프트웨어의 코드 및 파일을 사용하거나 인용할 수 없습니다. 루트 디렉터리의 LICENSE를 참조하십시오.", + "0-前置数据集获取工具": "0-전방 데이터 세트 수집 도구", + "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 보컬 및 반주 분리 및 에코 및 지연 제거 도구", + "是否开启UVR5-WebUI": "UVR5-WebUI를 열까요?", + "UVR5进程输出信息": "UVR5 프로세스 출력 정보", + "0b-语音切分工具": "0b-음성 분리 도구", + "音频自动切分输入路径,可文件可文件夹": "오디오 자동 분리 입력 경로, 파일 또는 폴더 가능", + "切分后的子音频的输出根目录": "분리된 하위 오디오의 출력 기본 디렉터리", + "threshold:音量小于这个值视作静音的备选切割点": "임계 값: 이 값보다 작은 볼륨은 대체 분리 지점으로 간주됩니다.", + "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "최소 길이: 각 세그먼트의 최소 길이. 첫 번째 세그먼트가 너무 짧으면 계속해서 뒷부분과 연결하여 이 값 이상이 될 때까지", + "min_interval:最短切割间隔": "최소 분리 간격", + "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop 크기: 볼륨 곡선을 계산하는 방법. 작을수록 정확도가 높아지지만 계산량이 높아집니다 (정확도가 높다고 효과가 좋아지지 않음)", + "max_sil_kept:切完后静音最多留多长": "최대 유지되는 정적 길이 (분리 후)", + "开启语音切割": "음성 분리 활성화", + "终止语音切割": "음성 분리 종료", + "max:归一化后最大值多少": "최대 값 (정규화 후)", + "alpha_mix:混多少比例归一化后音频进来": "알파 믹스: 정규화된 오디오가 들어오는 비율", + "切割使用的进程数": "사용되는 프로세스 수로 자르기", + "语音切割进程输出信息": "음성 분리 프로세스 출력 정보", + "0c-中文批量离线ASR工具": "0c-중국어 대량 오프라인 ASR 도구", + "开启离线批量ASR": "오프라인 대량 ASR 활성화", + "终止ASR进程": "ASR 프로세스 종료", + "批量ASR(中文only)输入文件夹路径": "대량 ASR (중국어 전용) 입력 폴더 경로", + "ASR进程输出信息": "ASR 프로세스 출력 정보", + "0d-语音文本校对标注工具": "0d-음성 텍스트 교정 주석 도구", + "是否开启打标WebUI": "웹 기반 주석 활성화 여부", + "打标数据标注文件路径": "주석 데이터 주석 파일 경로", + "打标工具进程输出信息": "주석 도구 프로세스 출력 정보", + "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", + "*实验/模型名": "*실험/모델 이름", + "显卡信息": "그래픽 카드 정보", + "预训练的SoVITS-G模型路径": "사전 훈련된 SoVITS-G 모델 경로", + "预训练的SoVITS-D模型路径": "사전 훈련된 SoVITS-D 모델 경로", + "预训练的GPT模型路径": "사전 훈련된 GPT 모델 경로", + "1A-训练集格式化工具": "1A-훈련 세트 형식 지정 도구", + "输出logs/实验名目录下应有23456开头的文件和文件夹": "logs/실험 이름 디렉터리에는 23456으로 시작하는 파일과 폴더가 있어야 함", + "*文本标注文件": "*텍스트 주석 파일", + "*训练集音频文件目录": "*훈련 세트 오디오 파일 디렉터리", + "训练集音频文件目录 拼接 list文件里波形对应的文件名。": "훈련 세트 오디오 파일 디렉터리 - 목록 파일에 해당하는 원형 이름 연결", + "1Aa-文本内容": "1Aa-텍스트 내용", + "GPU卡号以-分割,每个卡号一个进程": "GPU 카드 번호는 -로 구분되며 각 카드 번호에 하나의 프로세스가 있어야 함", + "预训练的中文BERT模型路径": "사전 훈련된 중국어 BERT 모델 경로", + "开启文本获取": "텍스트 추출 활성화", + "终止文本获取进程": "텍스트 추출 프로세스 종료", + "文本进程输出信息": "텍스트 프로세스 출력 정보", + "1Ab-SSL自监督特征提取": "1Ab-SSL 자기 지도 특징 추출", + "预训练的SSL模型路径": "사전 훈련된 SSL 모델 경로", + "开启SSL提取": "SSL 추출 활성화", + "终止SSL提取进程": "SSL 추출 프로세스 종료", + "SSL进程输出信息": "SSL 프로세스 출력 정보", + "1Ac-语义token提取": "1Ac-의미 토큰 추출", + "开启语义token提取": "의미 토큰 추출 활성화", + "终止语义token提取进程": "의미 토큰 추출 프로세스 종료", + "语义token提取进程输出信息": "의미 토큰 추출 프로세스 출력 정보", + "1Aabc-训练集格式化一键三连": "1Aabc-훈련 세트 형식 지정 일괄 처리", + "开启一键三连": "일괄 처리 활성화", + "终止一键三连": "일괄 처리 종료", + "一键三连进程输出信息": "일괄 처리 프로세스 출력 정보", + "1B-微调训练": "1B-미세 조정 훈련", + "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS 훈련. 공유 용 모델 파일은 SoVITS_weights 하위에 출력됩니다.", + "每张显卡的batch_size": "각 그래픽 카드의 배치 크기", + "总训练轮数total_epoch,不建议太高": "총 훈련 라운드 수 (total_epoch), 너무 높지 않게 권장됨", + "文本模块学习率权重": "텍스트 모듈 학습률 가중치", + "保存频率save_every_epoch": "저장 빈도 (각 라운드마다)", + "是否仅保存最新的ckpt文件以节省硬盘空间": "디스크 공간을 절약하기 위해 최신 ckpt 파일만 저장할지 여부", + "是否在每次保存时间点将最终小模型保存至weights文件夹": "각 저장 시간에 최종 작은 모델을 weights 폴더에 저장할지 여부", + "开启SoVITS训练": "SoVITS 훈련 활성화", + "终止SoVITS训练": "SoVITS 훈련 종료", + "SoVITS训练进程输出信息": "SoVITS 훈련 프로세스 출력 정보", + "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT 훈련. 공유 용 모델 파일은 GPT_weights 하위에 출력됩니다.", + "总训练轮数total_epoch": "총 훈련 라운드 수 (total_epoch)", + "开启GPT训练": "GPT 훈련 활성화", + "终止GPT训练": "GPT 훈련 종료", + "GPT训练进程输出信息": "GPT 훈련 프로세스 출력 정보", + "1C-推理": "1C-추론", + "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "SoVITS_weights 및 GPT_weights에 저장된 훈련 완료된 모델 중 선택. 
기본적으로 하나는 기본 모델이며 5초 Zero Shot TTS를 체험할 수 있습니다.", + "*GPT模型列表": "*GPT 모델 목록", + "*SoVITS模型列表": "*SoVITS 모델 목록", + "GPU卡号,只能填1个整数": "GPU 카드 번호, 1개의 정수만 입력 가능", + "刷新模型路径": "모델 경로 새로 고침", + "是否开启TTS推理WebUI": "TTS 추론 WebUI 활성화 여부", + "TTS推理WebUI进程输出信息": "TTS 추론 WebUI 프로세스 출력 정보", + "2-GPT-SoVITS-变声": "2-GPT-SoVITS-음성 변환", + "施工中,请静候佳音": "공사 중입니다. 기다려주십시오.", + "TTS推理进程已开启": "TTS 추론 프로세스가 열렸습니다", + "TTS推理进程已关闭": "TTS 추론 프로세스가 닫혔습니다", + "打标工具WebUI已开启": "주석 도구 WebUI가 열렸습니다", + "打标工具WebUI已关闭": "주석 도구 WebUI가 닫혔습니다", + "*请上传并填写参考信息": "*참고 정보를 업로드하고 입력하십시오", + "*请填写需要合成的目标文本": "*합성할 대상 텍스트를 입력하십시오", + "ASR任务开启:%s": "ASR 작업 시작: %s", + "GPT训练完成": "GPT 훈련 완료", + "GPT训练开始:%s": "GPT 훈련 시작: %s", + "SSL提取进程执行中": "SSL 추출 프로세스 실행 중", + "SSL提取进程结束": "SSL 추출 프로세스 종료", + "SoVITS训练完成": "SoVITS 훈련 완료", + "SoVITS训练开始:%s": "SoVITS 훈련 시작: %s", + "一键三连中途报错": "일괄 처리 중 오류 발생", + "一键三连进程结束": "일괄 처리 프로세스 종료", + "中文": "중국어", + "凑50字一切": "50자를 채우십시오", + "凑五句一切": "다섯 문장을 채우십시오", + "切分后文本": "분리된 텍스트", + "切割执行中": "분리 진행 중", + "切割结束": "분리 종료", + "参考音频的文本": "참고 오디오의 텍스트", + "参考音频的语种": "참고 오디오의 언어", + "合成语音": "합성 음성", + "后续将支持混合语种编码文本输入。": "향후 혼합 언어 코딩 텍스트 입력을 지원할 예정입니다.", + "已有正在进行的ASR任务,需先终止才能开启下一次任务": "이미 진행 중인 ASR 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "이미 진행 중인 GPT 훈련 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "이미 진행 중인 SSL 추출 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "이미 진행 중인 SoVITS 훈련 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已有正在进行的一键三连任务,需先终止才能开启下一次任务": "이미 진행 중인 일괄 처리 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已有正在进行的切割任务,需先终止才能开启下一次任务": "이미 진행 중인 분리 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已有正在进行的文本任务,需先终止才能开启下一次任务": "이미 진행 중인 텍스트 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "이미 진행 중인 의미 토큰 추출 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已终止ASR进程": "ASR 프로세스 종료됨", + "已终止GPT训练": "GPT 훈련 종료됨", + "已终止SoVITS训练": "SoVITS 훈련 종료됨", + "已终止所有1a进程": "모든 1a 프로세스 종료됨", + "已终止所有1b进程": "모든 1b 프로세스 종료됨", + "已终止所有一键三连进程": "모든 일괄 처리 프로세스 종료됨", + "已终止所有切割进程": "모든 분리 프로세스 종료됨", + "已终止所有语义token进程": "모든 의미 토큰 프로세스 종료됨", + "按中文句号。切": "중국어 문장으로 분리하십시오.", + "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "텍스트 분리 도구. 너무 긴 텍스트는 합성 결과가 항상 좋지 않을 수 있으므로 너무 길면 먼저 분리하는 것이 좋습니다. 
합성은 텍스트 줄 바꿈을 기준으로 분리되어 다시 조합됩니다.", + "文本进程执行中": "텍스트 프로세스 실행 중", + "文本进程结束": "텍스트 프로세스 종료", + "日文": "일본어", + "英文": "영어", + "语义token提取进程执行中": "의미 토큰 추출 프로세스 실행 중", + "语义token提取进程结束": "의미 토큰 추출 프로세스 종료", + "请上传参考音频": "참고 오디오를 업로드하십시오", + "输入路径不存在": "입력 경로가 존재하지 않습니다", + "输入路径存在但既不是文件也不是文件夹": "입력 경로가 파일이나 폴더가 아닙니다", + "输出的语音": "출력 음성", + "进度:1a-done": "진행: 1a-done", + "进度:1a-done, 1b-ing": "진행: 1a-done, 1b-ing", + "进度:1a-ing": "진행: 1a-ing", + "进度:1a1b-done": "진행: 1a1b-done", + "进度:1a1b-done, 1cing": "진행: 1a1b-done, 1cing", + "进度:all-done": "진행: all-done", + "需要合成的切分前文本": "합성해야 할 분할 전 텍스트", + "需要合成的文本": "합성해야 할 텍스트", + "需要合成的语种": "합성해야 할 언어", + ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3이면 harvest 음고 인식 결과에 중앙값 필터를 사용하며, 값은 필터 반경이며 사용하면 소리를 약하게 할 수 있습니다", + "A模型权重": "A 모델 가중치", + "A模型路径": "A 모델 경로", + "B模型路径": "B 모델 경로", + "E:\\语音音频+标注\\米津玄师\\src": "E:\\음성 오디오 + 주석\\Miyuki Kenshi\\src", + "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 곡선 파일, 선택 사항, 한 줄에 하나의 음고, 기본 F0 및 음조 대신 사용", + "Index Rate": "인덱스 비율", + "Onnx导出": "Onnx 내보내기", + "Onnx输出路径": "Onnx 출력 경로", + "RVC模型路径": "RVC 모델 경로", + "ckpt处理": "ckpt 처리", + "harvest进程数": "harvest 프로세스 수", + "index文件路径不可包含中文": "인덱스 파일 경로에는 중국어를 포함할 수 없습니다", + "pth文件路径不可包含中文": "pth 파일 경로에는 중국어를 포함할 수 없습니다", + "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe 카드 번호 구성: 각 입력에 사용되는 다른 프로세스 카드를 -로 구분하여 입력하십시오. 예: 0-0-1은 카드 0에서 2개의 프로세스를 실행하고 카드 1에서 1개의 프로세스를 실행합니다", + "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1: 실험 구성 입력. 실험 데이터는 logs 하위에 있으며 각 실험에 대한 폴더가 있어야합니다. 실험 이름 경로를 수동으로 입력해야하며 실험 구성, 로그, 훈련된 모델 파일이 포함되어 있습니다.", + "step1:正在处理数据": "step1: 데이터 처리 중", + "step2:正在提取音高&正在提取特征": "step2: 음고 추출 및 특징 추출 중", + "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a: 자동으로 훈련 폴더에서 오디오로 디코딩할 수 있는 모든 파일을 반복하고 슬라이스 정규화를 수행하여 실험 디렉토리에 2 개의 wav 폴더를 생성합니다. 현재 단일 훈련만 지원됩니다.", + "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "step2b: CPU로 음고 추출(모델이 음고를 지원하는 경우), GPU로 특징 추출(카드 번호 선택)", + "step3: 填写训练设置, 开始训练模型和索引": "step3: 훈련 설정 입력, 모델 및 인덱스 훈련 시작", + "step3a:正在训练模型": "step3a: 모델 훈련 중", + "一键训练": "일괄 훈련", + "也可批量输入音频文件, 二选一, 优先读文件夹": "오디오 파일을 일괄로 입력할 수도 있습니다. 둘 중 하나를 선택하고 폴더를 읽기를 우선합니다.", + "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "-로 구분하여 입력에 사용되는 카드 번호를 지정하십시오. 예 : 0-1-2는 카드 0, 1 및 2를 사용합니다", + "伴奏人声分离&去混响&去回声": "반주 및 보컬 분리 & 리버브 제거 & 에코 제거", + "使用模型采样率": "모델 샘플링 속도 사용", + "使用设备采样率": "기기 샘플링 속도 사용", + "保存名": "저장 이름", + "保存的文件名, 默认空为和源文件同名": "저장할 파일 이름, 기본적으로 공백은 원본 파일과 동일한 이름입니다", + "保存的模型名不带后缀": "저장할 모델 이름에는 확장자가 없습니다", + "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "클리어 자음 및 숨소를 보호하여 전자 음향 찢김과 같은 아티팩트를 방지하려면 0.5로 설정하되, 보호 강도를 높이려면 0.5로 당기지 않고 낮추면 인덱스 효과가 감소할 수 있습니다", + "修改": "수정", + "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "모델 정보 수정 (weights 폴더에서 추출된 작은 모델 파일만 지원됨)", + "停止音频转换": "오디오 변환 중지", + "全流程结束!": "전체 프로세스 완료!", + "刷新音色列表和索引路径": "음색 목록 및 인덱스 경로 새로 고침", + "加载模型": "모델 로드", + "加载预训练底模D路径": "사전 훈련된 기본 모델 D 경로 로드", + "加载预训练底模G路径": "사전 훈련된 기본 모델 G 경로 로드", + "单次推理": "단일 추론", + "卸载音色省显存": "음색 언로드 및 GPU 메모리 절약", + "变调(整数, 半音数量, 升八度12降八度-12)": "음높이 변경(정수, 반음 수, 올림 높이 12 내림 높이 -12)", + "后处理重采样至最终采样率,0为不进行重采样": "후 처리를 통한 최종 샘플링률 재샘플링, 0은 재샘플링 미실행", + "否": "아니오", + "启用相位声码器": "페이즈 보코더 사용", + "响应阈值": "응답 임계값", + "响度因子": "음량 요소", + "处理数据": "데이터 처리", + "导出Onnx模型": "Onnx 모델 내보내기", + "导出文件格式": "내보내기 파일 형식", + "常见问题解答": "자주 묻는 질문 해결", + "常规设置": "일반 설정", + "开始音频转换": "오디오 변환 시작", + "性能设置": "성능 설정", + "批量推理": "일괄 추론", + "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. 
": "일괄 변환, 변환 대기 중인 오디오 폴더를 입력하거나 여러 오디오 파일을 업로드하고 지정된 폴더(opt 기본값)에 변환된 오디오를 출력합니다.", + "指定输出主人声文件夹": "지정된 주인 목소리 출력 폴더", + "指定输出文件夹": "지정된 출력 폴더", + "指定输出非主人声文件夹": "지정된 비주인 목소리 출력 폴더", + "推理时间(ms):": "추론 시간(ms):", + "推理音色": "추론 음색", + "提取": "추출", + "提取音高和处理数据使用的CPU进程数": "음높이 추출 및 데이터 처리에 사용되는 CPU 프로세스 수 추출", + "是": "예", + "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "모든 훈련 세트를 GPU 메모리에 캐시할지 여부. 10분 미만의 소량 데이터는 훈련 속도를 높이기 위해 캐시할 수 있지만, 대량 데이터를 캐시하면 메모리가 터지고 속도가 크게 향상되지 않을 수 있습니다.", + "查看": "보기", + "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "모델 정보보기(작은 모델 파일로 추출된 weights 폴더에서만 지원)", + "检索特征占比": "특징 비율 검색", + "模型": "모델", + "模型推理": "모델 추론", + "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "모델 추출(로그 폴더에 대형 파일 모델 경로 입력), 반 훈련하고 싶지 않거나 모델이 자동으로 작은 파일 모델로 추출되지 않았거나 중간 모델을 테스트하려는 경우에 사용", + "模型是否带音高指导": "모델에 음높이 안내가 있는지 여부", + "模型是否带音高指导(唱歌一定要, 语音可以不要)": "모델에 음높이 안내가 있는지 여부(노래에는 필수, 음성은 선택 사항)", + "模型是否带音高指导,1是0否": "모델에 음높이 안내가 있는지 여부, 1이면 있음 0이면 없음", + "模型版本型号": "모델 버전 및 모델 번호", + "模型融合, 可用于测试音色融合": "모델 통합, 음색 통합 테스트에 사용 가능", + "模型路径": "모델 경로", + "淡入淡出长度": "페이드 인/아웃 길이", + "版本": "버전", + "特征提取": "특성 추출", + "特征检索库文件路径,为空则使用下拉的选择结果": "특성 검색 라이브러리 파일 경로, 비어 있으면 드롭다운 선택 결과 사용", + "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "남성을 여성으로 추천 +12키, 여성을 남성으로 추천 -12키, 음역 폭발로 음색이 왜곡되면 적절한 음역으로 직접 조절 가능", + "目标采样率": "목표 샘플링률", + "算法延迟(ms):": "알고리즘 지연 시간(ms):", + "自动检测index路径,下拉式选择(dropdown)": "자동으로 index 경로 감지, 드롭다운 선택", + "融合": "융합", + "要改的模型信息": "수정할 모델 정보", + "要置入的模型信息": "삽입할 모델 정보", + "训练": "훈련", + "训练模型": "모델 훈련", + "训练特征索引": "특성 인덱스 훈련", + "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "훈련 종료, 콘솔 훈련 로그 또는 실험 폴더의 train.log를 확인할 수 있습니다", + "请指定说话人id": "화자 ID 지정", + "请选择index文件": "index 파일 선택", + "请选择pth文件": "pth 파일 선택", + "请选择说话人id": "화자 ID 선택", + "转换": "변환", + "输入实验名": "실험명 입력", + "输入待处理音频文件夹路径": "처리 대기 중인 오디오 폴더 경로 입력", + "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "처리 대기 중인 오디오 폴더 경로 입력(파일 관리자 주소 표시 줄에서 복사하면 됨)", + "输入待处理音频文件路径(默认是正确格式示例)": "처리 대기 중인 오디오 파일 경로 입력(기본적으로 올바른 형식의 예제)", + "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "소스 음량 에너벌롭을 입력하여 출력 음량 에너벌롭 합성 비율을 대체하면 1에 가까울수록 출력 에너벌롭 사용", + "输入监听": "입력 모니터링", + "输入训练文件夹路径": "훈련 폴더 경로 입력", + "输入设备": "입력 장치", + "输入降噪": "노이즈 감소 입력", + "输出信息": "출력 정보", + "输出变声": "음성 출력", + "输出设备": "출력 장치", + "输出降噪": "노이즈 감소 출력", + "输出音频(右下角三个点,点了可以下载)": "출력 오디오(우하단 세 점, 클릭하면 다운로드 가능)", + "选择.index文件": "index 파일 선택", + "选择.pth文件": "pth 파일 선택", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "음높이 추출 알고리즘 선택, 노래 입력에 pm 사용 가능, harvest는 저음이 좋지만 매우 느림, crepe 효과는 좋지만 GPU 사용", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "음높이 추출 알고리즘 선택, 노래 입력에 pm 사용 가능, harvest는 저음이 좋지만 매우 느림, crepe 효과는 좋지만 GPU 사용, rmvpe 효과가 가장 좋으며 약간의 GPU 사용", + "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "음높이 추출 알고리즘 선택: 노래 입력에 pm 사용 가능, 고품질 음성이지만 CPU가 낮음, dio 사용 가능, harvest 품질이 더 좋지만 느림, rmvpe 효과가 최고이며 CPU/GPU 약간 사용", + "采样率:": "샘플링률:", + "采样长度": "샘플링 길이", + "重载设备列表": "장치 목록 다시로드", + "音调设置": "음조 설정", + "音频设备(请使用同种类驱动)": "오디오 장치(동일한 유형의 드라이버 사용 권장)", + "音高算法": "음높이 알고리즘", + "额外推理时长": "추가 추론 시간" + } From c24687f620b959972fdd47580b9c0fb14a04cb9e Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 25 Jan 2024 00:16:20 +0800 Subject: [PATCH 09/25] Update requirements.txt --- requirements.txt | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index a8e72ea9..1bafeefe 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,20 +4,21 @@ tensorboard 
librosa==0.9.2 numba==0.56.4 pytorch-lightning -gradio==3.14.0 +gradio==3.38.0 ffmpeg-python onnxruntime tqdm -funasr +funasr==0.8.7 cn2an pypinyin pyopenjtalk g2p_en torchaudio -modelscope +modelscope==1.10.0 sentencepiece transformers chardet PyYAML psutil jieba_fast +jieba From 249561e5a18576010df6587c274d38cbd9e18b4b Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 25 Jan 2024 00:30:39 +0800 Subject: [PATCH 10/25] Add files via upload --- GPT_SoVITS/inference_webui.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index fd04ac8f..98dab284 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -1,4 +1,9 @@ -import os,re +import os,re,logging +logging.getLogger("markdown_it").setLevel(logging.ERROR) +logging.getLogger("urllib3").setLevel(logging.ERROR) +logging.getLogger("httpcore").setLevel(logging.ERROR) +logging.getLogger("httpx").setLevel(logging.ERROR) +logging.getLogger("asyncio").setLevel(logging.ERROR) import pdb gpt_path = os.environ.get( From 80c9acc43b9f287c363c9992e7fc5262cd3994c6 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 25 Jan 2024 00:51:36 +0800 Subject: [PATCH 11/25] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 4bf65815..4a89637c 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,8 @@ A Powerful Few-shot Voice Conversion and Text-to-Speech WebUI.

https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb +AutoDL Cloud Docker Training (for users in China region): https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official + ## Features: 1. **Zero-shot TTS:** Input a 5-second vocal sample and experience instant text-to-speech conversion. From 2bdbfbccec3a807b6d08c922a8fb11a92f07fc95 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 25 Jan 2024 00:52:20 +0800 Subject: [PATCH 12/25] Update README.md --- docs/cn/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/cn/README.md b/docs/cn/README.md index 072dc0d0..2c63814e 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -23,6 +23,8 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb +中国地区用户可使用AutoDL云端镜像进行体验:https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official + ## 功能: 1. **零样本文本到语音(TTS):** 输入5秒的声音样本,即刻体验文本到语音转换。 From 658d0dad6376973b5176e89a2fc2016802ff4fd5 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 25 Jan 2024 00:53:28 +0800 Subject: [PATCH 13/25] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4a89637c..83538a95 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ A Powerful Few-shot Voice Conversion and Text-to-Speech WebUI.

https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb -AutoDL Cloud Docker Training (for users in China region): https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official +For users in China region, you can use AutoDL Cloud Docker to experience the full functionality online: https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official ## Features: 1. **Zero-shot TTS:** Input a 5-second vocal sample and experience instant text-to-speech conversion. From 02da15c996dca916c3ff29327ef5ac9a466b92dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=9D=CE=B1=CF=81=CE=BF=CF=85=CF=83=CE=AD=C2=B7=CE=BC?= =?UTF-8?q?=C2=B7=CE=B3=CE=B9=CE=BF=CF=85=CE=BC=CE=B5=CE=BC=CE=AF=C2=B7?= =?UTF-8?q?=CE=A7=CE=B9=CE=BD=CE=B1=CE=BA=CE=AC=CE=BD=CE=BD=CE=B1?= <40709280+NaruseMioShirakana@users.noreply.github.com> Date: Thu, 25 Jan 2024 02:30:08 +0800 Subject: [PATCH 14/25] Add Onnx Export --- GPT_SoVITS/onnx_export.py | 314 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 314 insertions(+) create mode 100644 GPT_SoVITS/onnx_export.py diff --git a/GPT_SoVITS/onnx_export.py b/GPT_SoVITS/onnx_export.py new file mode 100644 index 00000000..f08679f9 --- /dev/null +++ b/GPT_SoVITS/onnx_export.py @@ -0,0 +1,314 @@ +from module.models_onnx import SynthesizerTrn, symbols +from AR.models.t2s_lightning_module_onnx import Text2SemanticLightningModule +import torch +import torchaudio +from torch import nn +from feature_extractor import cnhubert +cnhubert_base_path = "pretrained_models/chinese-hubert-base" +cnhubert.cnhubert_base_path=cnhubert_base_path +ssl_model = cnhubert.get_model() +from text import cleaned_text_to_sequence +import soundfile +from my_utils import load_audio +import os +import json + +def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): + hann_window = torch.hann_window(win_size).to( + dtype=y.dtype, device=y.device + ) + y = torch.nn.functional.pad( + y.unsqueeze(1), + (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), + mode="reflect", + ) + y = y.squeeze(1) + spec = torch.stft( + y, + n_fft, + hop_length=hop_size, + win_length=win_size, + window=hann_window, + center=center, + pad_mode="reflect", + normalized=False, + onesided=True, + return_complex=False, + ) + spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) + return spec + + +class DictToAttrRecursive(dict): + def __init__(self, input_dict): + super().__init__(input_dict) + for key, value in input_dict.items(): + if isinstance(value, dict): + value = DictToAttrRecursive(value) + self[key] = value + setattr(self, key, value) + + def __getattr__(self, item): + try: + return self[item] + except KeyError: + raise AttributeError(f"Attribute {item} not found") + + def __setattr__(self, key, value): + if isinstance(value, dict): + value = DictToAttrRecursive(value) + super(DictToAttrRecursive, self).__setitem__(key, value) + super().__setattr__(key, value) + + def __delattr__(self, item): + try: + del self[item] + except KeyError: + raise AttributeError(f"Attribute {item} not found") + + +class T2SEncoder(nn.Module): + def __init__(self, t2s, vits): + super().__init__() + self.encoder = t2s.onnx_encoder + self.vits = vits + + def forward(self, ref_seq, text_seq, ref_bert, text_bert, ssl_content): + codes = self.vits.extract_latent(ssl_content) + prompt_semantic = codes[0, 0] + bert = torch.cat([ref_bert.transpose(0, 1), text_bert.transpose(0, 1)], 1) + all_phoneme_ids = torch.cat([ref_seq, text_seq], 1) + bert = bert.unsqueeze(0) + prompt = 
prompt_semantic.unsqueeze(0) + return self.encoder(all_phoneme_ids, bert), prompt + + +class T2SModel(nn.Module): + def __init__(self, t2s_path, vits_model): + super().__init__() + dict_s1 = torch.load(t2s_path, map_location="cpu") + self.config = dict_s1["config"] + self.t2s_model = Text2SemanticLightningModule(self.config, "ojbk", is_train=False) + self.t2s_model.load_state_dict(dict_s1["weight"]) + self.t2s_model.eval() + self.vits_model = vits_model.vq_model + self.hz = 50 + self.max_sec = self.config["data"]["max_sec"] + self.t2s_model.model.top_k = torch.LongTensor([self.config["inference"]["top_k"]]) + self.t2s_model.model.early_stop_num = torch.LongTensor([self.hz * self.max_sec]) + self.t2s_model = self.t2s_model.model + self.t2s_model.init_onnx() + self.onnx_encoder = T2SEncoder(self.t2s_model, self.vits_model) + self.first_stage_decoder = self.t2s_model.first_stage_decoder + self.stage_decoder = self.t2s_model.stage_decoder + #self.t2s_model = torch.jit.script(self.t2s_model) + + def forward(self, ref_seq, text_seq, ref_bert, text_bert, ssl_content): + early_stop_num = self.t2s_model.early_stop_num + + #[1,N] [1,N] [N, 1024] [N, 1024] [1, 768, N] + x, prompts = self.onnx_encoder(ref_seq, text_seq, ref_bert, text_bert, ssl_content) + + prefix_len = prompts.shape[1] + + #[1,N,512] [1,N] + y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts) + + stop = False + for idx in range(1, 1500): + #[1, N] [N_layer, N, 1, 512] [N_layer, N, 1, 512] [1, N, 512] [1] [1, N, 512] [1, N] + enco = self.stage_decoder(y, k, v, y_emb, x_example) + y, k, v, y_emb, logits, samples = enco + if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num: + stop = True + if torch.argmax(logits, dim=-1)[0] == self.t2s_model.EOS or samples[0, 0] == self.t2s_model.EOS: + stop = True + if stop: + break + y[0, -1] = 0 + + return y[:, -idx:].unsqueeze(0) + + def export(self, ref_seq, text_seq, ref_bert, text_bert, ssl_content, project_name, dynamo=False): + #self.onnx_encoder = torch.jit.script(self.onnx_encoder) + if dynamo: + export_options = torch.onnx.ExportOptions(dynamic_shapes=True) + onnx_encoder_export_output = torch.onnx.dynamo_export( + self.onnx_encoder, + (ref_seq, text_seq, ref_bert, text_bert, ssl_content), + export_options=export_options + ) + onnx_encoder_export_output.save(f"onnx/{project_name}/{project_name}_t2s_encoder.onnx") + return + torch.onnx.export( + self.onnx_encoder, + (ref_seq, text_seq, ref_bert, text_bert, ssl_content), + f"onnx/{project_name}/{project_name}_t2s_encoder.onnx", + input_names=["ref_seq", "text_seq", "ref_bert", "text_bert", "ssl_content"], + output_names=["x", "prompts"], + dynamic_axes={ + "ref_seq": [1], + "text_seq": [1], + "ref_bert": [0], + "text_bert": [0], + "ssl_content": [2], + }, + opset_version=16 + ) + x, prompts = self.onnx_encoder(ref_seq, text_seq, ref_bert, text_bert, ssl_content) + torch.exp + torch.onnx.export( + self.first_stage_decoder, + (x, prompts), + f"onnx/{project_name}/{project_name}_t2s_fsdec.onnx", + input_names=["x", "prompts"], + output_names=["y", "k", "v", "y_emb", "x_example"], + dynamic_axes={ + "x": [1], + "prompts": [1], + }, + verbose=True, + opset_version=16 + ) + y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts) + + torch.onnx.export( + self.stage_decoder, + (y, k, v, y_emb, x_example), + f"onnx/{project_name}/{project_name}_t2s_sdec.onnx", + input_names=["iy", "ik", "iv", "iy_emb", "ix_example"], + output_names=["y", "k", "v", "y_emb", "logits", "samples"], + dynamic_axes={ + "iy": [1], + 
"ik": [1], + "iv": [1], + "iy_emb": [1], + "ix_example": [1], + }, + verbose=True, + opset_version=16 + ) + + +class VitsModel(nn.Module): + def __init__(self, vits_path): + super().__init__() + dict_s2 = torch.load(vits_path,map_location="cpu") + self.hps = dict_s2["config"] + self.hps = DictToAttrRecursive(self.hps) + self.hps.model.semantic_frame_rate = "25hz" + self.vq_model = SynthesizerTrn( + self.hps.data.filter_length // 2 + 1, + self.hps.train.segment_size // self.hps.data.hop_length, + n_speakers=self.hps.data.n_speakers, + **self.hps.model + ) + self.vq_model.eval() + self.vq_model.load_state_dict(dict_s2["weight"], strict=False) + + def forward(self, text_seq, pred_semantic, ref_audio): + refer = spectrogram_torch( + ref_audio, + self.hps.data.filter_length, + self.hps.data.sampling_rate, + self.hps.data.hop_length, + self.hps.data.win_length, + center=False + ) + return self.vq_model(pred_semantic, text_seq, refer)[0, 0] + + +class GptSoVits(nn.Module): + def __init__(self, vits, t2s): + super().__init__() + self.vits = vits + self.t2s = t2s + + def forward(self, ref_seq, text_seq, ref_bert, text_bert, ref_audio, ssl_content): + pred_semantic = self.t2s(ref_seq, text_seq, ref_bert, text_bert, ssl_content) + return self.vits(text_seq, pred_semantic, ref_audio) + + def export(self, ref_seq, text_seq, ref_bert, text_bert, ref_audio, ssl_content, project_name): + self.t2s.export(ref_seq, text_seq, ref_bert, text_bert, ssl_content, project_name) + pred_semantic = self.t2s(ref_seq, text_seq, ref_bert, text_bert, ssl_content) + torch.onnx.export( + self.vits, + (text_seq, pred_semantic, ref_audio), + f"onnx/{project_name}/{project_name}_vits.onnx", + input_names=["text_seq", "pred_semantic", "ref_audio"], + output_names=["audio"], + dynamic_axes={ + "text_seq": [1], + "pred_semantic": [2], + "ref_audio": [1], + }, + opset_version=17 + ) + + +class SSLModel(nn.Module): + def __init__(self): + super().__init__() + self.ssl = ssl_model + + def forward(self, ref_audio_16k): + return self.ssl.model(ref_audio_16k)["last_hidden_state"].transpose(1, 2) + + +def export(vits_path, gpt_path, project_name): + vits = VitsModel(vits_path) + gpt = T2SModel(gpt_path, vits) + gpt_sovits = GptSoVits(vits, gpt) + ssl = SSLModel() + ref_seq = torch.LongTensor([cleaned_text_to_sequence(["n", "i2", "h", "ao3", ",", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4"])]) + text_seq = torch.LongTensor([cleaned_text_to_sequence(["w", "o3", "sh", "i4", "b", "ai2", "y", "e4"])]) + ref_bert = torch.randn((ref_seq.shape[1], 1024)).float() + text_bert = torch.randn((text_seq.shape[1], 1024)).float() + ref_audio = torch.randn((1, 48000 * 5)).float() + # ref_audio = torch.tensor([load_audio("rec.wav", 48000)]).float() + ref_audio_16k = torchaudio.functional.resample(ref_audio,48000,16000).float() + ref_audio_sr = torchaudio.functional.resample(ref_audio,48000,vits.hps.data.sampling_rate).float() + + try: + os.mkdir(f"onnx/{project_name}") + except: + pass + + ssl_content = ssl(ref_audio_16k).float() + + a = gpt_sovits(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content).detach().cpu().numpy() + + # soundfile.write("out.wav", a, vits.hps.data.sampling_rate) + + gpt_sovits.export(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content, project_name) + + MoeVSConf = { + "Folder" : f"{project_name}", + "Name" : f"{project_name}", + "Type" : "GPT-SoVits", + "Rate" : vits.hps.data.sampling_rate, + "NumLayers": gpt.t2s_model.num_layers, + "EmbeddingDim": gpt.t2s_model.embedding_dim, + "Dict": "BasicDict", 
+ "BertPath": "chinese-roberta-wwm-ext-large", + "Symbol": symbols, + "AddBlank": False + } + + MoeVSConfJson = json.dumps(MoeVSConf) + with open(f"onnx/{project_name}.json", 'w') as MoeVsConfFile: + json.dump(MoeVSConf, MoeVsConfFile, indent = 4) + + +if __name__ == "__main__": + try: + os.mkdir("onnx") + except: + pass + + gpt_path = "pt_model/koharu-e20.ckpt" + vits_path = "pt_model/koharu_e20_s4960.pth" + exp_path = "koharu" + export(vits_path, gpt_path, exp_path) + + # soundfile.write("out.wav", a, vits.hps.data.sampling_rate) \ No newline at end of file From bd68358c3f675300f028fe602733456e397d7f3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=9D=CE=B1=CF=81=CE=BF=CF=85=CF=83=CE=AD=C2=B7=CE=BC?= =?UTF-8?q?=C2=B7=CE=B3=CE=B9=CE=BF=CF=85=CE=BC=CE=B5=CE=BC=CE=AF=C2=B7?= =?UTF-8?q?=CE=A7=CE=B9=CE=BD=CE=B1=CE=BA=CE=AC=CE=BD=CE=BD=CE=B1?= <40709280+NaruseMioShirakana@users.noreply.github.com> Date: Thu, 25 Jan 2024 02:30:37 +0800 Subject: [PATCH 15/25] Add Vits Onnx Module --- GPT_SoVITS/module/attentions_onnx.py | 365 +++++++++++ GPT_SoVITS/module/models_onnx.py | 920 +++++++++++++++++++++++++++ 2 files changed, 1285 insertions(+) create mode 100644 GPT_SoVITS/module/attentions_onnx.py create mode 100644 GPT_SoVITS/module/models_onnx.py diff --git a/GPT_SoVITS/module/attentions_onnx.py b/GPT_SoVITS/module/attentions_onnx.py new file mode 100644 index 00000000..df0ae824 --- /dev/null +++ b/GPT_SoVITS/module/attentions_onnx.py @@ -0,0 +1,365 @@ +import math +import torch +from torch import nn +from torch.nn import functional as F + +from module import commons +from module.modules import LayerNorm + + +class LayerNorm(nn.Module): + def __init__(self, channels, eps=1e-5): + super().__init__() + self.channels = channels + self.eps = eps + + self.gamma = nn.Parameter(torch.ones(channels)) + self.beta = nn.Parameter(torch.zeros(channels)) + + def forward(self, x): + x = x.transpose(1, -1) + x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) + return x.transpose(1, -1) + + +@torch.jit.script +def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): + n_channels_int = n_channels[0] + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :n_channels_int, :]) + s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) + acts = t_act * s_act + return acts + + +class Encoder(nn.Module): + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0.0, + window_size=4, + isflow=True, + **kwargs + ): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + # if isflow: + # cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1) + # self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1) + # self.cond_layer = weight_norm(cond_layer, name='weight') + # self.gin_channels = 256 + self.cond_layer_idx = self.n_layers + if "gin_channels" in kwargs: + self.gin_channels = kwargs["gin_channels"] + if self.gin_channels != 0: + self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels) + # vits2 says 3rd block, so idx is 2 by default + self.cond_layer_idx = ( + kwargs["cond_layer_idx"] if "cond_layer_idx" in kwargs else 2 + ) + logging.debug(self.gin_channels, self.cond_layer_idx) + assert ( + self.cond_layer_idx < self.n_layers + ), "cond_layer_idx should be less than n_layers" + self.drop = 
nn.Dropout(p_dropout) + self.attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.attn_layers.append( + MultiHeadAttention( + hidden_channels, + hidden_channels, + n_heads, + p_dropout=p_dropout, + window_size=window_size, + ) + ) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN( + hidden_channels, + hidden_channels, + filter_channels, + kernel_size, + p_dropout=p_dropout, + ) + ) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask, g=None): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + if i == self.cond_layer_idx and g is not None: + g = self.spk_emb_linear(g.transpose(1, 2)) + g = g.transpose(1, 2) + x = x + g + x = x * x_mask + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class MultiHeadAttention(nn.Module): + def __init__( + self, + channels, + out_channels, + n_heads, + p_dropout=0.0, + window_size=None, + heads_share=True, + block_length=None, + proximal_bias=False, + proximal_init=False, + ): + super().__init__() + assert channels % n_heads == 0 + + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.p_dropout = p_dropout + self.window_size = window_size + self.heads_share = heads_share + self.block_length = block_length + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + self.attn = None + + self.k_channels = channels // n_heads + self.conv_q = nn.Conv1d(channels, channels, 1) + self.conv_k = nn.Conv1d(channels, channels, 1) + self.conv_v = nn.Conv1d(channels, channels, 1) + self.conv_o = nn.Conv1d(channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + if window_size is not None: + n_heads_rel = 1 if heads_share else n_heads + rel_stddev = self.k_channels**-0.5 + self.emb_rel_k = nn.Parameter( + torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) + * rel_stddev + ) + self.emb_rel_v = nn.Parameter( + torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) + * rel_stddev + ) + + nn.init.xavier_uniform_(self.conv_q.weight) + nn.init.xavier_uniform_(self.conv_k.weight) + nn.init.xavier_uniform_(self.conv_v.weight) + if proximal_init: + with torch.no_grad(): + self.conv_k.weight.copy_(self.conv_q.weight) + self.conv_k.bias.copy_(self.conv_q.bias) + + def forward(self, x, c, attn_mask=None): + q = self.conv_q(x) + k = self.conv_k(c) + v = self.conv_v(c) + + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + x = self.conv_o(x) + return x + + def attention(self, query, key, value, mask=None): + # reshape [b, d, t] -> [b, n_h, t, d_k] + b, d, t_s, _ = (*key.size(), query.size(2)) + query = query.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3) + key = key.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3) + value = value.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3) + + scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) + if self.window_size is not None: + key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) + rel_logits = self._matmul_with_relative_keys( + query / math.sqrt(self.k_channels), key_relative_embeddings + ) + scores_local = 
self._relative_position_to_absolute_position(rel_logits) + scores = scores + scores_local + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + if self.block_length is not None: + block_mask = ( + torch.ones_like(scores) + .triu(-self.block_length) + .tril(self.block_length) + ) + scores = scores.masked_fill(block_mask == 0, -1e4) + p_attn = F.softmax(scores, dim=-1) + p_attn = self.drop(p_attn) + output = torch.matmul(p_attn, value) + if self.window_size is not None: + relative_weights = self._absolute_position_to_relative_position(p_attn) + value_relative_embeddings = self._get_relative_embeddings( + self.emb_rel_v, t_s + ) + output = output + self._matmul_with_relative_values( + relative_weights, value_relative_embeddings + ) + output = ( + output.transpose(2, 3).contiguous().view(b, d, -1) + ) + return output, p_attn + + def _matmul_with_relative_values(self, x, y): + """ + x: [b, h, l, m] + y: [h or 1, m, d] + ret: [b, h, l, d] + """ + ret = torch.matmul(x, y.unsqueeze(0)) + return ret + + def _matmul_with_relative_keys(self, x, y): + """ + x: [b, h, l, d] + y: [h or 1, m, d] + ret: [b, h, l, m] + """ + ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) + return ret + + def _get_relative_embeddings(self, relative_embeddings, length): + max_relative_position = 2 * self.window_size + 1 + # Pad first before slice to avoid using cond ops. + pad_length = max(length - (self.window_size + 1), 0) + slice_start_position = max((self.window_size + 1) - length, 0) + slice_end_position = slice_start_position + 2 * length - 1 + if pad_length > 0: + padded_relative_embeddings = F.pad( + relative_embeddings, + commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), + ) + else: + padded_relative_embeddings = relative_embeddings + used_relative_embeddings = padded_relative_embeddings[ + :, slice_start_position:slice_end_position + ] + return used_relative_embeddings + + def _relative_position_to_absolute_position(self, x): + """ + x: [b, h, l, 2*l-1] + ret: [b, h, l, l] + """ + batch, heads, length, _ = x.size() + # Concat columns of pad to shift from relative to absolute indexing. + x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) + + # Concat extra elements so to add up to shape (len+1, 2*len-1). + x_flat = x.view([batch, heads, length * 2 * length]) + x_flat = F.pad( + x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) + ) + + # Reshape and slice out the padded elements. + x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ + :, :, :length, length - 1 : + ] + return x_final + + def _absolute_position_to_relative_position(self, x): + """ + x: [b, h, l, l] + ret: [b, h, l, 2*l-1] + """ + batch, heads, length, _ = x.size() + # padd along column + x = F.pad( + x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) + ) + x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) + # add 0's in the beginning that will skew the elements after reshape + x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] + return x_final + + def _attention_bias_proximal(self, length): + """Bias for self-attention to encourage attention to close positions. + Args: + length: an integer scalar. 
+ Returns: + a Tensor with shape [1, 1, length, length] + """ + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + + +class FFN(nn.Module): + def __init__( + self, + in_channels, + out_channels, + filter_channels, + kernel_size, + p_dropout=0.0, + activation=None, + causal=False, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.activation = activation + self.causal = causal + + if causal: + self.padding = self._causal_padding + else: + self.padding = self._same_padding + + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) + self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) + self.drop = nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(self.padding(x * x_mask)) + if self.activation == "gelu": + x = x * torch.sigmoid(1.702 * x) + else: + x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(self.padding(x * x_mask)) + return x * x_mask + + def _causal_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = self.kernel_size - 1 + pad_r = 0 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, commons.convert_pad_shape(padding)) + return x + + def _same_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = (self.kernel_size - 1) // 2 + pad_r = self.kernel_size // 2 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, commons.convert_pad_shape(padding)) + return x diff --git a/GPT_SoVITS/module/models_onnx.py b/GPT_SoVITS/module/models_onnx.py new file mode 100644 index 00000000..35fd291f --- /dev/null +++ b/GPT_SoVITS/module/models_onnx.py @@ -0,0 +1,920 @@ +import copy +import math +import torch +from torch import nn +from torch.nn import functional as F + +from module import commons +from module import modules +from module import attentions_onnx as attentions + +from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm +from module.commons import init_weights, get_padding +from module.mrte_model import MRTE +from module.quantize import ResidualVectorQuantizer +from text import symbols +from torch.cuda.amp import autocast + + +class StochasticDurationPredictor(nn.Module): + def __init__( + self, + in_channels, + filter_channels, + kernel_size, + p_dropout, + n_flows=4, + gin_channels=0, + ): + super().__init__() + filter_channels = in_channels # it needs to be removed from future version. 
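The windowed relative attention above relies on a pad-and-reshape trick (`_relative_position_to_absolute_position`) to turn per-offset scores of shape `[b, h, l, 2*l-1]` into an absolute `[b, h, l, l]` matrix without any gather. A minimal standalone sketch of that step, with made-up tensor sizes and plain `F.pad` standing in for `commons.convert_pad_shape`:

```python
import torch
import torch.nn.functional as F

def rel_to_abs(x: torch.Tensor) -> torch.Tensor:
    # x: [b, h, l, 2*l-1] relative-position scores -> [b, h, l, l] absolute scores.
    b, h, l, _ = x.size()
    x = F.pad(x, (0, 1))                 # append one column: [b, h, l, 2*l]
    x_flat = x.view(b, h, l * 2 * l)     # flatten the last two dims
    x_flat = F.pad(x_flat, (0, l - 1))   # pad so it reshapes to (l+1, 2*l-1)
    x = x_flat.view(b, h, l + 1, 2 * l - 1)
    return x[:, :, :l, l - 1:]           # drop the shifted row/columns

scores = torch.randn(1, 2, 4, 7)         # l = 4, so the window gives 2*l-1 = 7 offsets
print(rel_to_abs(scores).shape)          # torch.Size([1, 2, 4, 4])
```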
+ self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.log_flow = modules.Log() + self.flows = nn.ModuleList() + self.flows.append(modules.ElementwiseAffine(2)) + for i in range(n_flows): + self.flows.append( + modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3) + ) + self.flows.append(modules.Flip()) + + self.post_pre = nn.Conv1d(1, filter_channels, 1) + self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.post_convs = modules.DDSConv( + filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout + ) + self.post_flows = nn.ModuleList() + self.post_flows.append(modules.ElementwiseAffine(2)) + for i in range(4): + self.post_flows.append( + modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3) + ) + self.post_flows.append(modules.Flip()) + + self.pre = nn.Conv1d(in_channels, filter_channels, 1) + self.proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.convs = modules.DDSConv( + filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout + ) + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, filter_channels, 1) + + def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): + x = torch.detach(x) + x = self.pre(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.convs(x, x_mask) + x = self.proj(x) * x_mask + + if not reverse: + flows = self.flows + assert w is not None + + logdet_tot_q = 0 + h_w = self.post_pre(w) + h_w = self.post_convs(h_w, x_mask) + h_w = self.post_proj(h_w) * x_mask + e_q = ( + torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) + * x_mask + ) + z_q = e_q + for flow in self.post_flows: + z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) + logdet_tot_q += logdet_q + z_u, z1 = torch.split(z_q, [1, 1], 1) + u = torch.sigmoid(z_u) * x_mask + z0 = (w - u) * x_mask + logdet_tot_q += torch.sum( + (F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2] + ) + logq = ( + torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q**2)) * x_mask, [1, 2]) + - logdet_tot_q + ) + + logdet_tot = 0 + z0, logdet = self.log_flow(z0, x_mask) + logdet_tot += logdet + z = torch.cat([z0, z1], 1) + for flow in flows: + z, logdet = flow(z, x_mask, g=x, reverse=reverse) + logdet_tot = logdet_tot + logdet + nll = ( + torch.sum(0.5 * (math.log(2 * math.pi) + (z**2)) * x_mask, [1, 2]) + - logdet_tot + ) + return nll + logq # [b] + else: + flows = list(reversed(self.flows)) + flows = flows[:-2] + [flows[-1]] # remove a useless vflow + z = ( + torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) + * noise_scale + ) + for flow in flows: + z = flow(z, x_mask, g=x, reverse=reverse) + z0, z1 = torch.split(z, [1, 1], 1) + logw = z0 + return logw + + +class DurationPredictor(nn.Module): + def __init__( + self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0 + ): + super().__init__() + + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.gin_channels = gin_channels + + self.drop = nn.Dropout(p_dropout) + self.conv_1 = nn.Conv1d( + in_channels, filter_channels, kernel_size, padding=kernel_size // 2 + ) + self.norm_1 = modules.LayerNorm(filter_channels) + self.conv_2 = nn.Conv1d( + filter_channels, filter_channels, kernel_size, padding=kernel_size // 2 + ) + self.norm_2 = modules.LayerNorm(filter_channels) + self.proj = 
nn.Conv1d(filter_channels, 1, 1) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, in_channels, 1) + + def forward(self, x, x_mask, g=None): + x = torch.detach(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.conv_1(x * x_mask) + x = torch.relu(x) + x = self.norm_1(x) + x = self.drop(x) + x = self.conv_2(x * x_mask) + x = torch.relu(x) + x = self.norm_2(x) + x = self.drop(x) + x = self.proj(x * x_mask) + return x * x_mask + + +class TextEncoder(nn.Module): + def __init__( + self, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + latent_channels=192, + ): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.latent_channels = latent_channels + + self.ssl_proj = nn.Conv1d(768, hidden_channels, 1) + + self.encoder_ssl = attentions.Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers // 2, + kernel_size, + p_dropout, + ) + + self.encoder_text = attentions.Encoder( + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + self.text_embedding = nn.Embedding(len(symbols), hidden_channels) + + self.mrte = MRTE() + + self.encoder2 = attentions.Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers // 2, + kernel_size, + p_dropout, + ) + + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, y, text, ge): + y_mask = torch.ones_like(y[:1,:1,:]) + + y = self.ssl_proj(y * y_mask) * y_mask + y = self.encoder_ssl(y * y_mask, y_mask) + + text_mask = torch.ones_like(text).to(y.dtype).unsqueeze(0) + + text = self.text_embedding(text).transpose(1, 2) + text = self.encoder_text(text * text_mask, text_mask) + y = self.mrte(y, y_mask, text, text_mask, ge) + + y = self.encoder2(y * y_mask, y_mask) + + stats = self.proj(y) * y_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + return y, m, logs, y_mask + + def extract_latent(self, x): + x = self.ssl_proj(x) + quantized, codes, commit_loss, quantized_list = self.quantizer(x) + return codes.transpose(0, 1) + + def decode_latent(self, codes, y_mask, refer, refer_mask, ge): + quantized = self.quantizer.decode(codes) + + y = self.vq_proj(quantized) * y_mask + y = self.encoder_ssl(y * y_mask, y_mask) + + y = self.mrte(y, y_mask, refer, refer_mask, ge) + + y = self.encoder2(y * y_mask, y_mask) + + stats = self.proj(y) * y_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + return y, m, logs, y_mask, quantized + + +class ResidualCouplingBlock(nn.Module): + def __init__( + self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0, + ): + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = nn.ModuleList() + for i in range(n_flows): + self.flows.append( + modules.ResidualCouplingLayer( + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + mean_only=True, + ) + ) + self.flows.append(modules.Flip()) + + def forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: + x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for 
flow in reversed(self.flows): + x = flow(x, x_mask, g=g, reverse=reverse) + return x + + +class PosteriorEncoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN( + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + if g != None: + g = g.detach() + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + +class WNEncoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN( + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + ) + self.proj = nn.Conv1d(hidden_channels, out_channels, 1) + self.norm = modules.LayerNorm(out_channels) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + out = self.proj(x) * x_mask + out = self.norm(out) + return out + + +class Generator(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=0, + ): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d( + initial_channel, upsample_initial_channel, 7, 1, padding=3 + ) + resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate( + zip(resblock_kernel_sizes, resblock_dilation_sizes) + ): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): 
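Each `ConvTranspose1d` in `self.ups` stretches the time axis by its rate, so the `Generator` turns latent frames into waveform samples at a factor of `prod(upsample_rates)`. A quick sanity check of that relationship (the rates below are placeholders for illustration only; the real values come from the checkpoint's `hps.model`):

```python
import math

upsample_rates = [10, 8, 2, 2, 2]   # illustrative values, not the shipped config
frames = 125                        # latent frames entering conv_pre
hop = math.prod(upsample_rates)     # samples generated per latent frame
print(hop, frames * hop)            # 640 80000 -> 80000 audio samples out
```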
+ x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print("Removing weight norm...") + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f( + Conv2d( + 1, + 32, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 32, + 128, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 128, + 512, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 512, + 1024, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 1024, + 1024, + (kernel_size, 1), + 1, + padding=(get_padding(kernel_size, 1), 0), + ) + ), + ] + ) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ] + ) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2, 3, 5, 7, 11] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [ + DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods + ] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class 
ReferenceEncoder(nn.Module): + """ + inputs --- [N, Ty/r, n_mels*r] mels + outputs --- [N, ref_enc_gru_size] + """ + + def __init__(self, spec_channels, gin_channels=0): + super().__init__() + self.spec_channels = spec_channels + ref_enc_filters = [32, 32, 64, 64, 128, 128] + K = len(ref_enc_filters) + filters = [1] + ref_enc_filters + convs = [ + weight_norm( + nn.Conv2d( + in_channels=filters[i], + out_channels=filters[i + 1], + kernel_size=(3, 3), + stride=(2, 2), + padding=(1, 1), + ) + ) + for i in range(K) + ] + self.convs = nn.ModuleList(convs) + # self.wns = nn.ModuleList([weight_norm(num_features=ref_enc_filters[i]) for i in range(K)]) + + out_channels = self.calculate_channels(spec_channels, 3, 2, 1, K) + self.gru = nn.GRU( + input_size=ref_enc_filters[-1] * out_channels, + hidden_size=256 // 2, + batch_first=True, + ) + self.proj = nn.Linear(128, gin_channels) + + def forward(self, inputs): + N = inputs.size(0) + out = inputs.view(N, 1, -1, self.spec_channels) # [N, 1, Ty, n_freqs] + for conv in self.convs: + out = conv(out) + # out = wn(out) + out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K] + + out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K] + T = out.size(1) + N = out.size(0) + out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K] + + self.gru.flatten_parameters() + memory, out = self.gru(out) # out --- [1, N, 128] + + return self.proj(out.squeeze(0)).unsqueeze(-1) + + def calculate_channels(self, L, kernel_size, stride, pad, n_convs): + for i in range(n_convs): + L = (L - kernel_size + 2 * pad) // stride + 1 + return L + + +class Quantizer_module(torch.nn.Module): + def __init__(self, n_e, e_dim): + super(Quantizer_module, self).__init__() + self.embedding = nn.Embedding(n_e, e_dim) + self.embedding.weight.data.uniform_(-1.0 / n_e, 1.0 / n_e) + + def forward(self, x): + d = ( + torch.sum(x**2, 1, keepdim=True) + + torch.sum(self.embedding.weight**2, 1) + - 2 * torch.matmul(x, self.embedding.weight.T) + ) + min_indicies = torch.argmin(d, 1) + z_q = self.embedding(min_indicies) + return z_q, min_indicies + + +class Quantizer(torch.nn.Module): + def __init__(self, embed_dim=512, n_code_groups=4, n_codes=160): + super(Quantizer, self).__init__() + assert embed_dim % n_code_groups == 0 + self.quantizer_modules = nn.ModuleList( + [ + Quantizer_module(n_codes, embed_dim // n_code_groups) + for _ in range(n_code_groups) + ] + ) + self.n_code_groups = n_code_groups + self.embed_dim = embed_dim + + def forward(self, xin): + # B, C, T + B, C, T = xin.shape + xin = xin.transpose(1, 2) + x = xin.reshape(-1, self.embed_dim) + x = torch.split(x, self.embed_dim // self.n_code_groups, dim=-1) + min_indicies = [] + z_q = [] + for _x, m in zip(x, self.quantizer_modules): + _z_q, _min_indicies = m(_x) + z_q.append(_z_q) + min_indicies.append(_min_indicies) # B * T, + z_q = torch.cat(z_q, -1).reshape(xin.shape) + loss = 0.25 * torch.mean((z_q.detach() - xin) ** 2) + torch.mean( + (z_q - xin.detach()) ** 2 + ) + z_q = xin + (z_q - xin).detach() + z_q = z_q.transpose(1, 2) + codes = torch.stack(min_indicies, -1).reshape(B, T, self.n_code_groups) + return z_q, loss, codes.transpose(1, 2) + + def embed(self, x): + # idx: N, 4, T + x = x.transpose(1, 2) + x = torch.split(x, 1, 2) + ret = [] + for q, embed in zip(x, self.quantizer_modules): + q = embed.embedding(q.squeeze(-1)) + ret.append(q) + ret = torch.cat(ret, -1) + return ret.transpose(1, 2) # N, C, T + + +class CodePredictor(nn.Module): + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + 
n_layers, + kernel_size, + p_dropout, + n_q=8, + dims=1024, + ssl_dim=768, + ): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + + self.vq_proj = nn.Conv1d(ssl_dim, hidden_channels, 1) + self.ref_enc = modules.MelStyleEncoder( + ssl_dim, style_vector_dim=hidden_channels + ) + + self.encoder = attentions.Encoder( + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + + self.out_proj = nn.Conv1d(hidden_channels, (n_q - 1) * dims, 1) + self.n_q = n_q + self.dims = dims + + def forward(self, x, x_mask, refer, codes, infer=False): + x = x.detach() + x = self.vq_proj(x * x_mask) * x_mask + g = self.ref_enc(refer, x_mask) + x = x + g + x = self.encoder(x * x_mask, x_mask) + x = self.out_proj(x * x_mask) * x_mask + logits = x.reshape(x.shape[0], self.n_q - 1, self.dims, x.shape[-1]).transpose( + 2, 3 + ) + target = codes[1:].transpose(0, 1) + if not infer: + logits = logits.reshape(-1, self.dims) + target = target.reshape(-1) + loss = torch.nn.functional.cross_entropy(logits, target) + return loss + else: + _, top10_preds = torch.topk(logits, 10, dim=-1) + correct_top10 = torch.any(top10_preds == target.unsqueeze(-1), dim=-1) + top3_acc = 100 * torch.mean(correct_top10.float()).detach().cpu().item() + + print("Top-10 Accuracy:", top3_acc, "%") + + pred_codes = torch.argmax(logits, dim=-1) + acc = 100 * torch.mean((pred_codes == target).float()).detach().cpu().item() + print("Top-1 Accuracy:", acc, "%") + + return pred_codes.transpose(0, 1) + + +class SynthesizerTrn(nn.Module): + """ + Synthesizer for Training + """ + + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + n_speakers=0, + gin_channels=0, + use_sdp=True, + semantic_frame_rate=None, + freeze_quantizer=None, + **kwargs + ): + super().__init__() + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.n_speakers = n_speakers + self.gin_channels = gin_channels + + self.use_sdp = use_sdp + self.enc_p = TextEncoder( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + ) + self.dec = Generator( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + ) + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels + ) + + self.ref_enc = modules.MelStyleEncoder( + spec_channels, style_vector_dim=gin_channels + ) + 
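`Quantizer_module.forward` above is a plain nearest-neighbour codebook lookup: squared distances between each frame and every code, an `argmin`, then an embedding read-back. A self-contained sketch of that lookup with toy sizes (8 codes of dimension 4), using the same expansion of the squared distance:

```python
import torch

codebook = torch.randn(8, 4)               # stands in for embedding.weight
x = torch.randn(16, 4)                     # flattened frames, one row per frame
d = (x.pow(2).sum(1, keepdim=True)         # ||x||^2
     + codebook.pow(2).sum(1)              # ||e||^2
     - 2 * x @ codebook.T)                 # -2 x.e -> squared distances, [16, 8]
idx = d.argmin(dim=1)                      # nearest code id per frame
z_q = codebook[idx]                        # quantized vectors, [16, 4]
print(idx.shape, z_q.shape)                # torch.Size([16]) torch.Size([16, 4])
```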
+ ssl_dim = 768 + self.ssl_dim = ssl_dim + assert semantic_frame_rate in ["25hz", "50hz"] + self.semantic_frame_rate = semantic_frame_rate + if semantic_frame_rate == "25hz": + self.ssl_proj = nn.Conv1d(ssl_dim, ssl_dim, 2, stride=2) + else: + self.ssl_proj = nn.Conv1d(ssl_dim, ssl_dim, 1, stride=1) + + self.quantizer = ResidualVectorQuantizer(dimension=ssl_dim, n_q=1, bins=1024) + if freeze_quantizer: + self.ssl_proj.requires_grad_(False) + self.quantizer.requires_grad_(False) + # self.enc_p.text_embedding.requires_grad_(False) + # self.enc_p.encoder_text.requires_grad_(False) + # self.enc_p.mrte.requires_grad_(False) + + def forward(self, codes, text, refer): + refer_mask = torch.ones_like(refer[:1,:1,:]) + ge = self.ref_enc(refer * refer_mask, refer_mask) + + y_lengths = torch.LongTensor([codes.size(2) * 2]).to(codes.device) + text_lengths = torch.LongTensor([text.size(-1)]).to(text.device) + + quantized = self.quantizer.decode(codes) + if self.semantic_frame_rate == "25hz": + dquantized = torch.cat([quantized, quantized]).permute(1, 2, 0) + quantized = dquantized.contiguous().view(1, self.ssl_dim, -1) + + x, m_p, logs_p, y_mask = self.enc_p( + quantized, text, ge + ) + z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) + + z = self.flow(z_p, y_mask, g=ge, reverse=True) + + o = self.dec((z * y_mask)[:, :, :], g=ge) + return o + + def extract_latent(self, x): + ssl = self.ssl_proj(x) + quantized, codes, commit_loss, quantized_list = self.quantizer(ssl) + return codes.transpose(0, 1) \ No newline at end of file From 7d1e94c8b05e102e1914fd59171cb2b908fd8d6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=9D=CE=B1=CF=81=CE=BF=CF=85=CF=83=CE=AD=C2=B7=CE=BC?= =?UTF-8?q?=C2=B7=CE=B3=CE=B9=CE=BF=CF=85=CE=BC=CE=B5=CE=BC=CE=AF=C2=B7?= =?UTF-8?q?=CE=A7=CE=B9=CE=BD=CE=B1=CE=BA=CE=AC=CE=BD=CE=BD=CE=B1?= <40709280+NaruseMioShirakana@users.noreply.github.com> Date: Thu, 25 Jan 2024 02:31:08 +0800 Subject: [PATCH 16/25] Add AR Onnx Module --- .../AR/models/t2s_lightning_module_onnx.py | 106 ++++++ GPT_SoVITS/AR/models/t2s_model_onnx.py | 337 ++++++++++++++++++ GPT_SoVITS/AR/modules/activation_onnx.py | 178 +++++++++ GPT_SoVITS/AR/modules/embedding_onnx.py | 63 ++++ .../AR/modules/patched_mha_with_cache_onnx.py | 92 +++++ GPT_SoVITS/AR/modules/transformer_onnx.py | 292 +++++++++++++++ 6 files changed, 1068 insertions(+) create mode 100644 GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py create mode 100644 GPT_SoVITS/AR/models/t2s_model_onnx.py create mode 100644 GPT_SoVITS/AR/modules/activation_onnx.py create mode 100644 GPT_SoVITS/AR/modules/embedding_onnx.py create mode 100644 GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py create mode 100644 GPT_SoVITS/AR/modules/transformer_onnx.py diff --git a/GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py b/GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py new file mode 100644 index 00000000..bb9e30b9 --- /dev/null +++ b/GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py @@ -0,0 +1,106 @@ +# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/t2s_lightning_module.py +import os, sys + +now_dir = os.getcwd() +sys.path.append(now_dir) +from typing import Dict + +import torch +from pytorch_lightning import LightningModule +from AR.models.t2s_model_onnx import Text2SemanticDecoder +from AR.modules.lr_schedulers import WarmupCosineLRSchedule +from AR.modules.optim import ScaledAdam + + +class Text2SemanticLightningModule(LightningModule): + def __init__(self, config, output_dir, is_train=True): + super().__init__() + self.config = 
config + self.top_k = 3 + self.model = Text2SemanticDecoder(config=config, top_k=self.top_k) + pretrained_s1 = config.get("pretrained_s1") + if pretrained_s1 and is_train: + # print(self.load_state_dict(torch.load(pretrained_s1,map_location="cpu")["state_dict"])) + print( + self.load_state_dict( + torch.load(pretrained_s1, map_location="cpu")["weight"] + ) + ) + if is_train: + self.automatic_optimization = False + self.save_hyperparameters() + self.eval_dir = output_dir / "eval" + self.eval_dir.mkdir(parents=True, exist_ok=True) + + def training_step(self, batch: Dict, batch_idx: int): + opt = self.optimizers() + scheduler = self.lr_schedulers() + loss, acc = self.model.forward( + batch["phoneme_ids"], + batch["phoneme_ids_len"], + batch["semantic_ids"], + batch["semantic_ids_len"], + batch["bert_feature"], + ) + self.manual_backward(loss) + if batch_idx > 0 and batch_idx % 4 == 0: + opt.step() + opt.zero_grad() + scheduler.step() + + self.log( + "total_loss", + loss, + on_step=True, + on_epoch=True, + prog_bar=True, + sync_dist=True, + ) + self.log( + "lr", + scheduler.get_last_lr()[0], + on_epoch=True, + prog_bar=True, + sync_dist=True, + ) + self.log( + f"top_{self.top_k}_acc", + acc, + on_step=True, + on_epoch=True, + prog_bar=True, + sync_dist=True, + ) + + def validation_step(self, batch: Dict, batch_idx: int): + return + + def configure_optimizers(self): + model_parameters = self.model.parameters() + parameters_names = [] + parameters_names.append( + [name_param_pair[0] for name_param_pair in self.model.named_parameters()] + ) + lm_opt = ScaledAdam( + model_parameters, + lr=0.01, + betas=(0.9, 0.95), + clipping_scale=2.0, + parameters_names=parameters_names, + show_dominant_parameters=False, + clipping_update_period=1000, + ) + + return { + "optimizer": lm_opt, + "lr_scheduler": { + "scheduler": WarmupCosineLRSchedule( + lm_opt, + init_lr=self.config["optimizer"]["lr_init"], + peak_lr=self.config["optimizer"]["lr"], + end_lr=self.config["optimizer"]["lr_end"], + warmup_steps=self.config["optimizer"]["warmup_steps"], + total_steps=self.config["optimizer"]["decay_steps"], + ) + }, + } diff --git a/GPT_SoVITS/AR/models/t2s_model_onnx.py b/GPT_SoVITS/AR/models/t2s_model_onnx.py new file mode 100644 index 00000000..263b9337 --- /dev/null +++ b/GPT_SoVITS/AR/models/t2s_model_onnx.py @@ -0,0 +1,337 @@ +# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/t2s_model.py +import torch +from tqdm import tqdm + +from AR.modules.embedding_onnx import SinePositionalEmbedding +from AR.modules.embedding_onnx import TokenEmbedding +from AR.modules.transformer_onnx import LayerNorm +from AR.modules.transformer_onnx import TransformerEncoder +from AR.modules.transformer_onnx import TransformerEncoderLayer +from torch import nn +from torch.nn import functional as F +from torchmetrics.classification import MulticlassAccuracy + +default_config = { + "embedding_dim": 512, + "hidden_dim": 512, + "num_head": 8, + "num_layers": 12, + "num_codebook": 8, + "p_dropout": 0.0, + "vocab_size": 1024 + 1, + "phoneme_vocab_size": 512, + "EOS": 1024, +} + +inf_tensor_value = torch.FloatTensor([-float("Inf")]).float() + +def logits_to_probs( + logits, + previous_tokens = None, + temperature: float = 1.0, + top_k = None, + top_p = None, + repetition_penalty: float = 1.0, +): + previous_tokens = previous_tokens.squeeze() + if previous_tokens is not None and repetition_penalty != 1.0: + previous_tokens = previous_tokens.long() + score = torch.gather(logits, dim=0, index=previous_tokens) + 
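The `logits_to_probs` helper being defined here applies a repetition penalty before the top-p/top-k filtering: logits of already-emitted token ids are gathered, rescaled, and scattered back. A tiny standalone demo of that update with hand-picked numbers:

```python
import torch

logits = torch.tensor([2.0, -1.0, 0.5, 3.0])
previous_tokens = torch.tensor([0, 3])       # ids already generated
penalty = 1.35
score = torch.gather(logits, dim=0, index=previous_tokens)
score = torch.where(score < 0, score * penalty, score / penalty)
logits.scatter_(dim=0, index=previous_tokens, src=score)
print(logits)                                # tensor([1.4815, -1.0000, 0.5000, 2.2222])
```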
score = torch.where( + score < 0, score * repetition_penalty, score / repetition_penalty + ) + logits.scatter_(dim=0, index=previous_tokens, src=score) + + if top_p is not None and top_p < 1.0: + sorted_logits, sorted_indices = torch.sort(logits, descending=True) + cum_probs = torch.cumsum( + torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1 + ) + sorted_indices_to_remove = cum_probs > top_p + sorted_indices_to_remove[0] = False # keep at least one option + indices_to_remove = sorted_indices_to_remove.scatter( + dim=0, index=sorted_indices, src=sorted_indices_to_remove + ) + logits = logits.masked_fill(indices_to_remove, -float("Inf")) + + logits = logits / max(temperature, 1e-5) + + if top_k is not None: + v, _ = torch.topk(logits, min(top_k, logits.size(-1))) + pivot = v.select(-1, -1).unsqueeze(-1) + logits = torch.where(logits < pivot, inf_tensor_value, logits) + + probs = torch.nn.functional.softmax(logits, dim=-1) + return probs + + +def multinomial_sample_one_no_sync( + probs_sort +): # Does multinomial sampling without a cuda synchronization + q = torch.randn_like(probs_sort) + return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int) + + +def sample( + logits, + previous_tokens, + **sampling_kwargs, +): + probs = logits_to_probs( + logits=logits, previous_tokens=previous_tokens, **sampling_kwargs + ) + idx_next = multinomial_sample_one_no_sync(probs) + return idx_next, probs + + +class OnnxEncoder(nn.Module): + def __init__(self, ar_text_embedding, bert_proj, ar_text_position): + super().__init__() + self.ar_text_embedding = ar_text_embedding + self.bert_proj = bert_proj + self.ar_text_position = ar_text_position + + def forward(self, x, bert_feature): + x = self.ar_text_embedding(x) + x = x + self.bert_proj(bert_feature.transpose(1, 2)) + return self.ar_text_position(x) + + +class T2SFirstStageDecoder(nn.Module): + def __init__(self, ar_audio_embedding, ar_audio_position, h, ar_predict_layer, loss_fct, ar_accuracy_metric, + top_k, early_stop_num, num_layers): + super().__init__() + self.ar_audio_embedding = ar_audio_embedding + self.ar_audio_position = ar_audio_position + self.h = h + self.ar_predict_layer = ar_predict_layer + self.loss_fct = loss_fct + self.ar_accuracy_metric = ar_accuracy_metric + self.top_k = top_k + self.early_stop_num = early_stop_num + self.num_layers = num_layers + + def forward(self, x, prompt): + y = prompt + x_example = x[:,:,0] * 0.0 + #N, 1, 512 + cache = { + "all_stage": self.num_layers, + "k": None, + "v": None, + "y_emb": None, + "first_infer": 1, + "stage": 0, + } + + y_emb = self.ar_audio_embedding(y) + + cache["y_emb"] = y_emb + y_pos = self.ar_audio_position(y_emb) + + xy_pos = torch.concat([x, y_pos], dim=1) + + y_example = y_pos[:,:,0] * 0.0 + x_attn_mask = torch.matmul(x_example.transpose(0, 1) , x_example).bool() + y_attn_mask = torch.ones_like(torch.matmul(y_example.transpose(0, 1), y_example), dtype=torch.int64) + y_attn_mask = torch.cumsum(y_attn_mask, dim=1) - torch.cumsum( + torch.ones_like(y_example.transpose(0, 1), dtype=torch.int64), dim=0 + ) + y_attn_mask = y_attn_mask > 0 + + x_y_pad = torch.matmul(x_example.transpose(0, 1), y_example).bool() + y_x_pad = torch.matmul(y_example.transpose(0, 1), x_example).bool() + x_attn_mask_pad = torch.cat([x_attn_mask, torch.ones_like(x_y_pad)], dim=1) + y_attn_mask = torch.cat([y_x_pad, y_attn_mask], dim=1) + xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0) + cache["k"] = torch.matmul(x_attn_mask_pad[0].float().unsqueeze(-1), torch.zeros((1, 
512)))\ + .unsqueeze(1).repeat(self.num_layers, 1, 1, 1) + cache["v"] = torch.matmul(x_attn_mask_pad[0].float().unsqueeze(-1), torch.zeros((1, 512)))\ + .unsqueeze(1).repeat(self.num_layers, 1, 1, 1) + + xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache) + logits = self.ar_predict_layer(xy_dec[:, -1]) + samples = sample(logits[0], y, top_k=self.top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0) + + y = torch.concat([y, samples], dim=1) + + return y, cache["k"], cache["v"], cache["y_emb"], x_example + + +class T2SStageDecoder(nn.Module): + def __init__(self, ar_audio_embedding, ar_audio_position, h, ar_predict_layer, loss_fct, ar_accuracy_metric, + top_k, early_stop_num, num_layers): + super().__init__() + self.ar_audio_embedding = ar_audio_embedding + self.ar_audio_position = ar_audio_position + self.h = h + self.ar_predict_layer = ar_predict_layer + self.loss_fct = loss_fct + self.ar_accuracy_metric = ar_accuracy_metric + self.top_k = top_k + self.early_stop_num = early_stop_num + self.num_layers = num_layers + + def forward(self, y, k, v, y_emb, x_example): + cache = { + "all_stage": self.num_layers, + "k": torch.nn.functional.pad(k, (0, 0, 0, 0, 0, 1)), + "v": torch.nn.functional.pad(v, (0, 0, 0, 0, 0, 1)), + "y_emb": y_emb, + "first_infer": 0, + "stage": 0, + } + + y_emb = torch.cat( + [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1 + ) + cache["y_emb"] = y_emb + y_pos = self.ar_audio_position(y_emb) + + xy_pos = y_pos[:, -1:] + + y_example = y_pos[:,:,0] * 0.0 + + xy_attn_mask = torch.cat([x_example, y_example], dim=1) + xy_attn_mask = torch.zeros_like(xy_attn_mask, dtype=torch.bool) + + xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache) + logits = self.ar_predict_layer(xy_dec[:, -1]) + samples = sample(logits[0], y, top_k=self.top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0) + + y = torch.concat([y, samples], dim=1) + + return y, cache["k"], cache["v"], cache["y_emb"], logits, samples + + +class Text2SemanticDecoder(nn.Module): + def __init__(self, config, norm_first=False, top_k=3): + super(Text2SemanticDecoder, self).__init__() + self.model_dim = config["model"]["hidden_dim"] + self.embedding_dim = config["model"]["embedding_dim"] + self.num_head = config["model"]["head"] + self.num_layers = config["model"]["n_layer"] + self.norm_first = norm_first + self.vocab_size = config["model"]["vocab_size"] + self.phoneme_vocab_size = config["model"]["phoneme_vocab_size"] + self.p_dropout = float(config["model"]["dropout"]) + self.EOS = config["model"]["EOS"] + self.norm_first = norm_first + assert self.EOS == self.vocab_size - 1 + self.bert_proj = nn.Linear(1024, self.embedding_dim) + self.ar_text_embedding = TokenEmbedding(self.embedding_dim, self.phoneme_vocab_size, self.p_dropout) + self.ar_text_position = SinePositionalEmbedding(self.embedding_dim, dropout=0.1, scale=False, alpha=True) + self.ar_audio_embedding = TokenEmbedding(self.embedding_dim, self.vocab_size, self.p_dropout) + self.ar_audio_position = SinePositionalEmbedding(self.embedding_dim, dropout=0.1, scale=False, alpha=True) + self.h = TransformerEncoder( + TransformerEncoderLayer( + d_model=self.model_dim, + nhead=self.num_head, + dim_feedforward=self.model_dim * 4, + dropout=0.1, + batch_first=True, + norm_first=norm_first, + ), + num_layers=self.num_layers, + norm=LayerNorm(self.model_dim) if norm_first else None, + ) + self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False) + self.loss_fct = nn.CrossEntropyLoss(reduction="sum") + self.ar_accuracy_metric = 
MulticlassAccuracy( + self.vocab_size, + top_k=top_k, + average="micro", + multidim_average="global", + ignore_index=self.EOS, + ) + self.top_k = torch.LongTensor([1]) + self.early_stop_num = torch.LongTensor([-1]) + + def init_onnx(self): + self.onnx_encoder = OnnxEncoder(self.ar_text_embedding, self.bert_proj, self.ar_text_position) + self.first_stage_decoder = T2SFirstStageDecoder(self.ar_audio_embedding, self.ar_audio_position, self.h, + self.ar_predict_layer, self.loss_fct, self.ar_accuracy_metric, self.top_k, self.early_stop_num, + self.num_layers) + self.stage_decoder = T2SStageDecoder(self.ar_audio_embedding, self.ar_audio_position, self.h, + self.ar_predict_layer, self.loss_fct, self.ar_accuracy_metric, self.top_k, self.early_stop_num, + self.num_layers) + + def forward(self, x, prompts, bert_feature): + early_stop_num = self.early_stop_num + prefix_len = prompts.shape[1] + + x = self.onnx_encoder(x, bert_feature) + y, k, v, y_emb, stage, x_example = self.first_stage_decoder(x, prompts) + + stop = False + for idx in range(1, 1500): + enco = self.stage_decoder(y, k, v, y_emb, stage, x_example) + y, k, v, y_emb, stage, logits, samples = enco + if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num: + stop = True + if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS: + stop = True + if stop: + break + y[0, -1] = 0 + return y, idx + + def infer(self, x, prompts, bert_feature): + top_k = self.top_k + early_stop_num = self.early_stop_num + + x = self.onnx_encoder(x, bert_feature) + + y = prompts + prefix_len = y.shape[1] + x_len = x.shape[1] + x_example = x[:,:,0] * 0.0 + x_attn_mask = torch.matmul(x_example.transpose(0, 1), x_example) + x_attn_mask = torch.zeros_like(x_attn_mask, dtype=torch.bool) + + stop = False + cache = { + "all_stage": self.num_layers, + "k": [None] * self.num_layers, + "v": [None] * self.num_layers, + "y_emb": None, + "first_infer": 1, + "stage": 0, + } + for idx in range(1500): + if cache["first_infer"] == 1: + y_emb = self.ar_audio_embedding(y) + else: + y_emb = torch.cat( + [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1 + ) + cache["y_emb"] = y_emb + y_pos = self.ar_audio_position(y_emb) + if cache["first_infer"] == 1: + xy_pos = torch.concat([x, y_pos], dim=1) + else: + xy_pos = y_pos[:, -1:] + y_len = y_pos.shape[1] + if cache["first_infer"] == 1: + x_attn_mask_pad = F.pad(x_attn_mask, (0, y_len), value=True) + y_attn_mask = F.pad( + torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1), + (x_len, 0), value=False + ) + xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0) + else: + xy_attn_mask = torch.zeros((1, x_len + y_len), dtype=torch.bool) + xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache) + logits = self.ar_predict_layer(xy_dec[:, -1]) + samples = sample(logits[0], y, top_k=top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0) + if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num: + stop = True + if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS: + stop = True + if stop: + if prompts.shape[1] == y.shape[1]: + y = torch.concat([y, torch.zeros_like(samples)], dim=1) + break + y = torch.concat([y, samples], dim=1) + cache["first_infer"] = 0 + return y, idx \ No newline at end of file diff --git a/GPT_SoVITS/AR/modules/activation_onnx.py b/GPT_SoVITS/AR/modules/activation_onnx.py new file mode 100644 index 00000000..b54acd99 --- /dev/null +++ b/GPT_SoVITS/AR/modules/activation_onnx.py @@ -0,0 +1,178 @@ +# modified from 
https://github.com/lifeiteng/vall-e/blob/main/valle/modules/activation.py +from typing import Optional +from typing import Tuple +import torch +from torch import Tensor +from torch.nn import Linear +from torch.nn import Module +from torch.nn.init import constant_ +from torch.nn.init import xavier_normal_ +from torch.nn.init import xavier_uniform_ +from torch.nn.modules.linear import NonDynamicallyQuantizableLinear +from torch.nn.parameter import Parameter + +from torch.nn import functional as F +from AR.modules.patched_mha_with_cache_onnx import multi_head_attention_forward_patched + + +class MultiheadAttention(Module): + __constants__ = ["batch_first"] + bias_k: Optional[torch.Tensor] + bias_v: Optional[torch.Tensor] + + def __init__( + self, + embed_dim, + num_heads, + dropout=0.0, + bias=True, + add_bias_kv=False, + add_zero_attn=False, + kdim=None, + vdim=None, + batch_first=False, + linear1_cls=Linear, + linear2_cls=Linear, + device=None, + dtype=None, + ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.batch_first = batch_first + self.head_dim = embed_dim // num_heads + assert ( + self.head_dim * num_heads == self.embed_dim + ), "embed_dim must be divisible by num_heads" + + if add_bias_kv: + self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs)) + self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs)) + else: + self.bias_k = self.bias_v = None + + if linear1_cls == Linear: + if not self._qkv_same_embed_dim: + self.q_proj_weight = Parameter( + torch.empty((embed_dim, embed_dim), **factory_kwargs) + ) + self.k_proj_weight = Parameter( + torch.empty((embed_dim, self.kdim), **factory_kwargs) + ) + self.v_proj_weight = Parameter( + torch.empty((embed_dim, self.vdim), **factory_kwargs) + ) + self.register_parameter("in_proj_weight", None) + else: + self.in_proj_weight = Parameter( + torch.empty((3 * embed_dim, embed_dim), **factory_kwargs) + ) + self.register_parameter("q_proj_weight", None) + self.register_parameter("k_proj_weight", None) + self.register_parameter("v_proj_weight", None) + + if bias: + self.in_proj_bias = Parameter( + torch.empty(3 * embed_dim, **factory_kwargs) + ) + else: + self.register_parameter("in_proj_bias", None) + self.out_proj = NonDynamicallyQuantizableLinear( + embed_dim, embed_dim, bias=bias, **factory_kwargs + ) + + self._reset_parameters() + else: + if not self._qkv_same_embed_dim: + raise NotImplementedError + else: + self.in_proj_linear = linear1_cls( + embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs + ) + self.in_proj_weight = self.in_proj_linear.weight + + self.register_parameter("q_proj_weight", None) + self.register_parameter("k_proj_weight", None) + self.register_parameter("v_proj_weight", None) + + if bias: + self.in_proj_bias = self.in_proj_linear.bias + else: + self.register_parameter("in_proj_bias", None) + + self.out_proj = linear2_cls( + embed_dim, embed_dim, bias=bias, **factory_kwargs + ) + + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + self.add_zero_attn = add_zero_attn + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + 
xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.0) + constant_(self.out_proj.bias, 0.0) + + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def __setstate__(self, state): + # Support loading old MultiheadAttention checkpoints generated by v1.1.0 + if "_qkv_same_embed_dim" not in state: + state["_qkv_same_embed_dim"] = True + + super(MultiheadAttention, self).__setstate__(state) + + def forward( + self, + query: Tensor, + key: Tensor, + value: Tensor, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + average_attn_weights: bool = True, + cache=None, + ) -> Tuple[Tensor, Optional[Tensor]]: + any_nested = query.is_nested or key.is_nested or value.is_nested + query = key = value = query.transpose(1, 0) + attn_output = multi_head_attention_forward_patched( + query, + key, + value, + self.embed_dim, + self.num_heads, + self.in_proj_weight, + self.in_proj_bias, + self.bias_k, + self.bias_v, + self.add_zero_attn, + self.dropout, + self.out_proj.weight, + self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, + need_weights=need_weights, + attn_mask=attn_mask, + average_attn_weights=average_attn_weights, + cache=cache, + ) + return attn_output.transpose(1, 0) diff --git a/GPT_SoVITS/AR/modules/embedding_onnx.py b/GPT_SoVITS/AR/modules/embedding_onnx.py new file mode 100644 index 00000000..b93405b4 --- /dev/null +++ b/GPT_SoVITS/AR/modules/embedding_onnx.py @@ -0,0 +1,63 @@ +# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/embedding.py +import math + +import torch +from torch import nn + + +class TokenEmbedding(nn.Module): + def __init__( + self, + embedding_dim: int, + vocab_size: int, + dropout: float = 0.0, + ): + super().__init__() + + self.vocab_size = vocab_size + self.embedding_dim = embedding_dim + + self.dropout = torch.nn.Dropout(p=dropout) + self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim) + + @property + def weight(self) -> torch.Tensor: + return self.word_embeddings.weight + + def embedding(self, index: int) -> torch.Tensor: + return self.word_embeddings.weight[index : index + 1] + + def forward(self, x: torch.Tensor): + x = self.word_embeddings(x) + x = self.dropout(x) + return x + + +class SinePositionalEmbedding(nn.Module): + def __init__( + self, + embedding_dim: int, + dropout: float = 0.0, + scale: bool = False, + alpha: bool = False, + ): + super().__init__() + self.embedding_dim = embedding_dim + self.x_scale = math.sqrt(embedding_dim) if scale else 1.0 + self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha) + self.dropout = torch.nn.Dropout(p=dropout) + self.reverse = False + self.div_term = torch.exp(torch.arange(0, self.embedding_dim, 2) * -(math.log(10000.0) / self.embedding_dim)) + + def extend_pe(self, x): + position = torch.cumsum(torch.ones_like(x[:,:,0]), dim=1).transpose(0, 1) + scpe = (position * self.div_term).unsqueeze(0) + pe = torch.cat([torch.sin(scpe), torch.cos(scpe)]).permute(1, 2, 0) + pe = pe.contiguous().view(1, -1, self.embedding_dim) + return pe + + def forward(self, x: torch.Tensor) -> torch.Tensor: + pe = self.extend_pe(x) + output = x.unsqueeze(-1) if x.ndim == 2 else x + output = output * self.x_scale + self.alpha * pe + return self.dropout(output) diff --git 
a/GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py b/GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py new file mode 100644 index 00000000..14bdb550 --- /dev/null +++ b/GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py @@ -0,0 +1,92 @@ +from torch.nn.functional import * +from torch.nn.functional import ( + _mha_shape_check, + _canonical_mask, + _none_or_dtype, + _in_projection_packed, +) + +def multi_head_attention_forward_patched( + query, + key, + value, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight, + in_proj_bias: Optional[Tensor], + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Optional[Tensor], + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + average_attn_weights: bool = True, + is_causal: bool = False, + cache=None, +) -> Tuple[Tensor, Optional[Tensor]]: + + # set up shape vars + _, _, embed_dim = query.shape + attn_mask = _canonical_mask( + mask=attn_mask, + mask_name="attn_mask", + other_type=None, + other_name="", + target_type=query.dtype, + check_other=False, + ) + head_dim = embed_dim // num_heads + + proj_qkv = linear(query, in_proj_weight, in_proj_bias) + proj_qkv = proj_qkv.unflatten(-1, (3, query.size(-1))).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous() + q, k, v = proj_qkv[0], proj_qkv[1], proj_qkv[2] + + if cache["first_infer"] == 1: + cache["k"][cache["stage"]] = k + cache["v"][cache["stage"]] = v + else: + cache["k"][cache["stage"]] = torch.cat([cache["k"][cache["stage"]][:-1], k], 0) + cache["v"][cache["stage"]] = torch.cat([cache["v"][cache["stage"]][:-1], v], 0) + k = cache["k"][cache["stage"]] + v = cache["v"][cache["stage"]] + cache["stage"] = (cache["stage"] + 1) % cache["all_stage"] + + attn_mask = _canonical_mask( + mask=attn_mask, + mask_name="attn_mask", + other_type=None, + other_name="", + target_type=q.dtype, + check_other=False, + ) + attn_mask = attn_mask.unsqueeze(0) + + q = q.view(-1, num_heads, head_dim).transpose(0, 1) + k = k.view(-1, num_heads, head_dim).transpose(0, 1) + v = v.view(-1, num_heads, head_dim).transpose(0, 1) + + dropout_p = 0.0 + attn_mask = attn_mask.unsqueeze(0) + q = q.view(num_heads, -1, head_dim).unsqueeze(0) + k = k.view(num_heads, -1, head_dim).unsqueeze(0) + v = v.view(num_heads, -1, head_dim).unsqueeze(0) + attn_output = scaled_dot_product_attention( + q, k, v, attn_mask, dropout_p, is_causal + ) + attn_output = ( + attn_output.permute(2, 0, 1, 3).contiguous().view(-1, embed_dim) + ) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + attn_output = attn_output.view(-1, 1, attn_output.size(1)) + + return attn_output diff --git a/GPT_SoVITS/AR/modules/transformer_onnx.py b/GPT_SoVITS/AR/modules/transformer_onnx.py new file mode 100644 index 00000000..a3f68b43 --- /dev/null +++ b/GPT_SoVITS/AR/modules/transformer_onnx.py @@ -0,0 +1,292 @@ +# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/transformer.py +import copy +import numbers +from functools import partial +from typing import Any +from typing import Callable +from typing import List +from typing import Optional +from typing import Tuple +from typing import Union + +import 
torch +from AR.modules.activation_onnx import MultiheadAttention +from AR.modules.scaling import BalancedDoubleSwish +from torch import nn +from torch import Tensor +from torch.nn import functional as F + +_shape_t = Union[int, List[int], torch.Size] + + +class LayerNorm(nn.Module): + __constants__ = ["normalized_shape", "eps", "elementwise_affine"] + normalized_shape: Tuple[int, ...] + eps: float + elementwise_affine: bool + + def __init__( + self, + normalized_shape: _shape_t, + eps: float = 1e-5, + elementwise_affine: bool = True, + device=None, + dtype=None, + ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super(LayerNorm, self).__init__() + if isinstance(normalized_shape, numbers.Integral): + # mypy error: incompatible types in assignment + normalized_shape = (normalized_shape,) # type: ignore[assignment] + self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] + self.eps = eps + self.elementwise_affine = elementwise_affine + if self.elementwise_affine: + self.weight = nn.Parameter( + torch.empty(self.normalized_shape, **factory_kwargs) + ) + self.bias = nn.Parameter( + torch.empty(self.normalized_shape, **factory_kwargs) + ) + else: + self.register_parameter("weight", None) + self.register_parameter("bias", None) + + self.reset_parameters() + + def reset_parameters(self) -> None: + if self.elementwise_affine: + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, input: Tensor, embedding: Any = None) -> Tensor: + if isinstance(input, tuple): + input, embedding = input + return ( + F.layer_norm( + input, + self.normalized_shape, + self.weight, + self.bias, + self.eps, + ), + embedding, + ) + + assert embedding is None + return F.layer_norm( + input, self.normalized_shape, self.weight, self.bias, self.eps + ) + + def extra_repr(self) -> str: + return ( + "{normalized_shape}, eps={eps}, " + "elementwise_affine={elementwise_affine}".format(**self.__dict__) + ) + + +class IdentityNorm(nn.Module): + def __init__( + self, + d_model: int, + eps: float = 1e-5, + device=None, + dtype=None, + ) -> None: + super(IdentityNorm, self).__init__() + + def forward(self, input: Tensor, embedding: Any = None) -> Tensor: + if isinstance(input, tuple): + return input + + assert embedding is None + return input + + +class TransformerEncoder(nn.Module): + r"""TransformerEncoder is a stack of N encoder layers. Users can build the + BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters. + + Args: + encoder_layer: an instance of the TransformerEncoderLayer() class (required). + num_layers: the number of sub-encoder-layers in the encoder (required). + norm: the layer normalization component (optional). + enable_nested_tensor: if True, input will automatically convert to nested tensor + (and convert back on output). This will improve the overall performance of + TransformerEncoder when padding rate is high. Default: ``True`` (enabled). 
+ + Examples:: + >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8) + >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6) + >>> src = torch.rand(10, 32, 512) + >>> out = transformer_encoder(src) + """ + __constants__ = ["norm"] + + def __init__(self, encoder_layer, num_layers, norm=None): + super(TransformerEncoder, self).__init__() + self.layers = _get_clones(encoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + + def forward( + self, + src: Tensor, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + return_layer_states: bool = False, + cache=None, + ) -> Tensor: + output = src + for mod in self.layers: + output = mod( + output, + src_mask=mask, + src_key_padding_mask=src_key_padding_mask, + cache=cache, + ) + + if self.norm is not None: + output = self.norm(output) + + return output + + +class TransformerEncoderLayer(nn.Module): + __constants__ = ["batch_first", "norm_first"] + def __init__( + self, + d_model: int, + nhead: int, + dim_feedforward: int = 2048, + dropout: float = 0.1, + activation: Union[str, Callable[[Tensor], Tensor]] = F.relu, + batch_first: bool = False, + norm_first: bool = False, + device=None, + dtype=None, + linear1_self_attention_cls: nn.Module = nn.Linear, + linear2_self_attention_cls: nn.Module = nn.Linear, + linear1_feedforward_cls: nn.Module = nn.Linear, + linear2_feedforward_cls: nn.Module = nn.Linear, + layer_norm_cls: nn.Module = LayerNorm, + layer_norm_eps: float = 1e-5, + adaptive_layer_norm=False, + ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super(TransformerEncoderLayer, self).__init__() + self.self_attn = MultiheadAttention( + d_model, # 512 16 + nhead, + dropout=dropout, + batch_first=batch_first, + linear1_cls=linear1_self_attention_cls, + linear2_cls=linear2_self_attention_cls, + **factory_kwargs, + ) + self.linear1 = linear1_feedforward_cls( + d_model, dim_feedforward, **factory_kwargs + ) + self.dropout = nn.Dropout(dropout) + self.linear2 = linear2_feedforward_cls( + dim_feedforward, d_model, **factory_kwargs + ) + self.norm_first = norm_first + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + if isinstance(activation, str): + activation = _get_activation_fn(activation) + elif isinstance(activation, partial): + activation = activation(d_model) + elif activation == BalancedDoubleSwish: + activation = BalancedDoubleSwish(d_model) + self.activation = activation + + norm1 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs) + if layer_norm_cls == IdentityNorm: + norm2 = BalancedBasicNorm(d_model, eps=layer_norm_eps, **factory_kwargs) + else: + norm2 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs) + + if adaptive_layer_norm: + self.norm1 = AdaptiveLayerNorm(d_model, norm1) + self.norm2 = AdaptiveLayerNorm(d_model, norm2) + else: + self.norm1 = norm1 + self.norm2 = norm2 + + def __setstate__(self, state): + super(TransformerEncoderLayer, self).__setstate__(state) + if not hasattr(self, "activation"): + self.activation = F.relu + + def forward( + self, + src: Tensor, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + cache=None, + ) -> Tensor: + x = src + stage_embedding = None + x = self.norm1( + x + self._sa_block(x, src_mask, src_key_padding_mask, cache=cache), + stage_embedding, + ) + x = self.norm2(x + self._ff_block(x), stage_embedding) + + return x + + def _sa_block( + self, + x: Tensor, + attn_mask: Optional[Tensor], + key_padding_mask: 
Optional[Tensor], + cache=None, + ) -> Tensor: + x = self.self_attn( + x, + x, + x, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask, + need_weights=False, + cache=cache, + ) + return self.dropout1(x) + + def _ff_block(self, x: Tensor) -> Tensor: + x = self.linear2(self.dropout(self.activation(self.linear1(x)))) + return self.dropout2(x) + + +class AdaptiveLayerNorm(nn.Module): + r"""Adaptive Layer Normalization""" + + def __init__(self, d_model, norm) -> None: + super(AdaptiveLayerNorm, self).__init__() + self.project_layer = nn.Linear(d_model, 2 * d_model) + self.norm = norm + self.d_model = d_model + self.eps = self.norm.eps + + def forward(self, input: Tensor, embedding: Tensor = None) -> Tensor: + if isinstance(input, tuple): + input, embedding = input + weight, bias = torch.split( + self.project_layer(embedding), + split_size_or_sections=self.d_model, + dim=-1, + ) + return (weight * self.norm(input) + bias, embedding) + + weight, bias = torch.split( + self.project_layer(embedding), + split_size_or_sections=self.d_model, + dim=-1, + ) + return weight * self.norm(input) + bias + + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) From 9e9268d10dd27ab2b43951698d657b39484e2b9b Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Thu, 25 Jan 2024 19:40:03 +0800 Subject: [PATCH 17/25] Update README --- README.md | 24 ++++++++++++++++-------- docs/cn/README.md | 33 ++++++++++++++++++++++----------- docs/ja/README.md | 29 +++++++++++++++++++---------- 3 files changed, 57 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index 83538a95..166602f3 100644 --- a/README.md +++ b/README.md @@ -43,9 +43,24 @@ If you are a Windows user (tested with win>=10) you can install directly via the - Python 3.9, PyTorch 2.0.1, CUDA 11 - Python 3.10.13, PyTorch 2.1.2, CUDA 12.3 +- Python 3.9, PyTorch 2.3.0.dev20240122, macOS 14.3 (Apple Silicon, MPS) _Note: numba==0.56.4 require py<3.11_ +### For Mac Users +If you are a Mac user, please install by using the following commands: +#### Create Environment +```bash +conda create -n GPTSoVits python=3.9 +conda activate GPTSoVits +``` +#### Install Requirements +```bash +pip install -r requirements.txt +pip uninstall torch torchaudio +pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu +``` +_Note: For preprocessing with UVR5, it is recommended to [download the original project GUI](https://github.com/Anjok07/ultimatevocalremovergui) and select GPU for operation. 
Additionally, there may be memory leak issues when using Mac for inference, restarting the inference webUI can release the memory._ ### Quick Install with Conda ```bash @@ -58,16 +73,9 @@ bash install.sh #### Pip Packages ```bash -pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers jieba_fast +pip install -r requirements.txt ``` -#### Additional Requirements - -If you need Chinese ASR (supported by FunASR), install: - -```bash -pip install modelscope torchaudio sentencepiece funasr -``` #### FFmpeg diff --git a/docs/cn/README.md b/docs/cn/README.md index 2c63814e..445bf92b 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -38,10 +38,29 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350- 如果你是Windows用户(已在win>=10上测试),可以直接通过预打包文件安装。只需下载[预打包文件](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true),解压后双击go-webui.bat即可启动GPT-SoVITS-WebUI。 -### Python和PyTorch版本 -已在Python 3.9、PyTorch 2.0.1和CUDA 11上测试。 +### 测试通过的Python和PyTorch版本 +- Python 3.9、PyTorch 2.0.1和CUDA 11 +- Python 3.10.13, PyTorch 2.1.2和CUDA 12.3 +- Python 3.9、Pytorch 2.3.0.dev20240122和macOS 14.3(Apple 芯片,MPS) + +_注意: numba==0.56.4 需要 python<3.11_ + +### Mac 用户 +如果你是Mac用户,请使用以下命令安装: +#### 创建环境 +```bash +conda create -n GPTSoVits python=3.9 +conda activate GPTSoVits +``` +#### 安装依赖 +```bash +pip install -r requirements.txt +pip uninstall torch torchaudio +pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu +``` +_注意:如需使用UVR5进行预处理,建议[下载原项目GUI](https://github.com/Anjok07/ultimatevocalremovergui),勾选GPU运行。另外,使用Mac推理时可能存在内存泄漏问题,重启推理UI即可释放内存。_ ### 使用Conda快速安装 ```bash @@ -53,15 +72,7 @@ bash install.sh #### Pip包 ```bash -pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers -``` - -#### 额外要求 - -如果你需要中文自动语音识别(由FunASR支持),请安装: - -```bash -pip install modelscope torchaudio sentencepiece funasr +pip install -r requirements.txt ``` #### FFmpeg diff --git a/docs/ja/README.md b/docs/ja/README.md index 9d2e9adc..e962df20 100644 --- a/docs/ja/README.md +++ b/docs/ja/README.md @@ -37,9 +37,26 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350- Windows ユーザーであれば(win>=10 にてテスト済み)、prezip 経由で直接インストールできます。[prezip](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true) をダウンロードして解凍し、go-webui.bat をダブルクリックするだけで GPT-SoVITS-WebUI が起動します。 ### Python と PyTorch のバージョン +- Python 3.9, PyTorch 2.0.1, CUDA 11 +- Python 3.10.13, PyTorch 2.1.2, CUDA 12.3 +- Python 3.9, PyTorch 2.3.0.dev20240122, macOS 14.3 (Apple Silicon, MPS) -Python 3.9、PyTorch 2.0.1、CUDA 11でテスト済。 +_注記: numba==0.56.4 は py<3.11 が必要です_ +### Macユーザーへ +Macユーザーの方は、以下のコマンドを使用してインストールしてください。 +#### 環境作成 +```bash +conda create -n GPTSoVits python=3.9 +conda activate GPTSoVits +``` +#### Pip パッケージ +```bash +pip install -r requirements.txt +pip uninstall torch torchaudio +pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu +``` +_注記: UVR5を使用した前処理には、[元のプロジェクトGUIをダウンロード](https://github.com/Anjok07/ultimatevocalremovergui)して、操作にGPUを選択することを推奨します。さらに、Macを使用して推論する際にメモリリークの問題が発生する可能性がありますが、推論のwebUIを再起動することでメモリを解放できます。_ ### Conda によるクイックインストール ```bash @@ -52,15 +69,7 @@ bash install.sh #### Pip パッケージ ```bash -pip 
install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers -``` - -#### 追加要件 - -中国語の ASR(FunASR がサポート)が必要な場合は、以下をインストールしてください: - -```bash -pip install modelscope torchaudio sentencepiece funasr +pip install -r requirementx.txt ``` #### FFmpeg From e3a8c943873459bb2fd5df2c01434e5da9b20ec8 Mon Sep 17 00:00:00 2001 From: Miuzarte <982809597@qq.com> Date: Thu, 25 Jan 2024 19:54:43 +0800 Subject: [PATCH 18/25] =?UTF-8?q?=E5=9C=A8=E6=96=87=E4=BB=B6=E5=BC=80?= =?UTF-8?q?=E5=A4=B4=E5=8A=A0=E4=BA=86=E7=82=B9=E8=AF=B4=E6=98=8E?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- api.py | 158 +++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 136 insertions(+), 22 deletions(-) diff --git a/api.py b/api.py index 60d59190..32379c35 100644 --- a/api.py +++ b/api.py @@ -1,3 +1,107 @@ +""" +# api.py usage + +` python api.py -dr "123.wav" -dt "一二三。" -dl "zh" ` + +## 执行参数: + +`-s` - `SoVITS模型路径, 可在 config.py 中指定` +`-g` - `GPT模型路径, 可在 config.py 中指定` + +调用请求缺少参考音频时使用 +`-dr` - `默认参考音频路径` +`-dt` - `默认参考音频文本` +`-dl` - `默认参考音频语种, "中文","英文","日文","zh","en","ja"` + +`-d` - `推理设备, "cuda","cpu"` +`-a` - `绑定地址, 默认"127.0.0.1"` +`-p` - `绑定端口, 默认9880, 可在 config.py 中指定` +`-fp` - `覆盖 config.py 使用全精度` +`-hp` - `覆盖 config.py 使用半精度` + +`-hb` - `cnhubert路径` +`-b` - `bert路径` + +## 调用: + +### 推理 + +endpoint: `/` + +使用执行参数指定的参考音频: +GET: + `http://127.0.0.1:9880?text=你所热爱的,就是你的生活。&text_language=zh` +POST: +```json +{ + "text": "你所热爱的,就是你的生活。", + "text_language": "zh" +} +``` + +手动指定当次推理所使用的参考音频: +GET: + `http://127.0.0.1:9880?refer_wav_path=123.wav&prompt_text=一二三。&prompt_language=zh&text=你所热爱的,就是你的生活。&text_language=zh` +POST: +```json +{ + "refer_wav_path": "123.wav", + "prompt_text": "一二三。", + "prompt_language": "zh", + "text": "你所热爱的,就是你的生活。", + "text_language": "zh" +} +``` + +RESP: +成功: 直接返回 wav 音频流, http code 200 +失败: 返回包含错误信息的 json, http code 400 + + +### 更换默认参考音频 + +endpoint: `/change_refer` + +key与推理端一样 + +GET: + `http://127.0.0.1:9880/change_refer?refer_wav_path=123.wav&prompt_text=一二三。&prompt_language=zh` +POST: +```json +{ + "refer_wav_path": "123.wav", + "prompt_text": "一二三。", + "prompt_language": "zh" +} +``` + +RESP: +成功: json, http code 200 +失败: json, 400 + + +### 命令控制 + +endpoint: `/control` + +command: +"restart": 重新运行 +"exit": 结束运行 + +GET: + `http://127.0.0.1:9880/control?command=restart` +POST: +```json +{ + "command": "restart" +} +``` + +RESP: 无 + +""" + + import argparse import os import signal @@ -30,14 +134,13 @@ parser = argparse.ArgumentParser(description="GPT-SoVITS api") parser.add_argument("-s", "--sovits_path", type=str, default=g_config.sovits_path, help="SoVITS模型路径") parser.add_argument("-g", "--gpt_path", type=str, default=g_config.gpt_path, help="GPT模型路径") -parser.add_argument("-dr", "--default_refer_path", type=str, default="", - help="默认参考音频路径, 请求缺少参考音频时调用") +parser.add_argument("-dr", "--default_refer_path", type=str, default="", help="默认参考音频路径") parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="默认参考音频文本") parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="默认参考音频语种") parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu") -parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880") parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 
127.0.0.1") +parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880") parser.add_argument("-fp", "--full_precision", action="store_true", default=False, help="覆盖config.is_half为False, 使用全精度") parser.add_argument("-hp", "--half_precision", action="store_true", default=False, help="覆盖config.is_half为True, 使用半精度") # bool值的用法为 `python ./api.py -fp ...` @@ -284,9 +387,17 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(np.int16) +def handle_control(command): + if command == "restart": + os.execl(g_config.python_exec, g_config.python_exec, *sys.argv) + elif command == "exit": + os.kill(os.getpid(), signal.SIGTERM) + exit(0) + + def handle_change(path, text, language): if is_empty(path, text, language): - raise HTTPException(status_code=400, detail='缺少任意一项以下参数: "path", "text", "language"') + return JSONResponse({"code": 400, "message": '缺少任意一项以下参数: "path", "text", "language"'}, status_code=400) if path != "" or path is not None: default_refer.path = path @@ -303,13 +414,7 @@ def handle_change(path, text, language): return JSONResponse({"code": 0, "message": "Success"}, status_code=200) -def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_language): - if command == "/restart": - os.execl(g_config.python_exec, g_config.python_exec, *sys.argv) - elif command == "/exit": - os.kill(os.getpid(), signal.SIGTERM) - exit(0) - +def handle(refer_wav_path, prompt_text, prompt_language, text, text_language): if ( refer_wav_path == "" or refer_wav_path is None or prompt_text == "" or prompt_text is None @@ -321,7 +426,7 @@ def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_lan default_refer.language, ) if not default_refer.is_ready(): - raise HTTPException(status_code=400, detail="未指定参考音频且接口无预设") + return JSONResponse({"code": 400, "message": "未指定参考音频且接口无预设"}, status_code=400) with torch.no_grad(): gen = get_tts_wav( @@ -340,30 +445,40 @@ def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_lan app = FastAPI() +@app.post("/control") +async def control(request: Request): + json_post_raw = await request.json() + return handle_control(json_post_raw.get("command")) + + +@app.get("/control") +async def control(command: str = None): + return handle_control(command) + + @app.post("/change_refer") async def change_refer(request: Request): json_post_raw = await request.json() return handle_change( - json_post_raw.get("path"), - json_post_raw.get("text"), - json_post_raw.get("language") + json_post_raw.get("refer_wav_path"), + json_post_raw.get("prompt_text"), + json_post_raw.get("prompt_language") ) @app.get("/change_refer") async def change_refer( - path: str = None, - text: str = None, - language: str = None + refer_wav_path: str = None, + prompt_text: str = None, + prompt_language: str = None ): - return handle_change(path, text, language) + return handle_change(refer_wav_path, prompt_text, prompt_language) @app.post("/") async def tts_endpoint(request: Request): json_post_raw = await request.json() return handle( - json_post_raw.get("command"), json_post_raw.get("refer_wav_path"), json_post_raw.get("prompt_text"), json_post_raw.get("prompt_language"), @@ -374,14 +489,13 @@ async def tts_endpoint(request: Request): @app.get("/") async def tts_endpoint( - command: str = None, refer_wav_path: str = None, prompt_text: str = None, prompt_language: str = None, text: str = None, text_language: str = 
None, ): - return handle(command, refer_wav_path, prompt_text, prompt_language, text, text_language) + return handle(refer_wav_path, prompt_text, prompt_language, text, text_language) if __name__ == "__main__": From e77c315fbd66432a8c6223e9a480b4e198c333f0 Mon Sep 17 00:00:00 2001 From: Yuan-Man <68322456+Yuan-ManX@users.noreply.github.com> Date: Thu, 25 Jan 2024 22:28:39 +0800 Subject: [PATCH 19/25] Update fr_FR.json --- i18n/locale/fr_FR.json | 369 ++++++++++++++++++++++++++++------------- 1 file changed, 255 insertions(+), 114 deletions(-) diff --git a/i18n/locale/fr_FR.json b/i18n/locale/fr_FR.json index db93e9a3..4e0b9633 100644 --- a/i18n/locale/fr_FR.json +++ b/i18n/locale/fr_FR.json @@ -1,135 +1,276 @@ { - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Si >=3 : appliquer un filtrage médian aux résultats de la reconnaissance de la hauteur de récolte. La valeur représente le rayon du filtre et peut réduire la respiration.", - "A模型权重": "Poids (w) pour le modèle A :", - "A模型路径": "Chemin d'accès au modèle A :", - "B模型路径": "Chemin d'accès au modèle B :", + "很遗憾您这没有能用的显卡来支持您训练": "Malheureusement, votre carte graphique n'est pas compatible avec l'entraînement.", + "UVR5已开启": "UVR5 est activé", + "UVR5已关闭": "UVR5 est désactivé", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Ce logiciel est open source sous la licence MIT. L'auteur n'a aucun contrôle sur le logiciel. Les utilisateurs et les diffuseurs du son exporté par le logiciel en assument l'entière responsabilité.
Si vous n'acceptez pas ces termes, vous ne pouvez ni utiliser ni citer aucun code ou fichier à l'intérieur du package. Voir LICENSE dans le répertoire racine pour plus de détails.", + "0-前置数据集获取工具": "0-Outil de récupération de jeu de données préalable", + "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-Outil de séparation de la voix humaine et de l'accompagnement UVR5 & suppression de la réverbération et du retard", + "是否开启UVR5-WebUI": "Activer UVR5-WebUI", + "UVR5进程输出信息": "Informations de processus UVR5", + "0b-语音切分工具": "0b-Outil de découpage vocal", + "音频自动切分输入路径,可文件可文件夹": "Chemin d'entrée automatique de découpage audio, peut être un fichier ou un dossier", + "切分后的子音频的输出根目录": "Répertoire racine de sortie des sous-audios après découpage", + "threshold:音量小于这个值视作静音的备选切割点": "seuil: le volume inférieur à cette valeur est considéré comme un point de coupe silencieux alternatif", + "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: longueur minimale de chaque segment, si le premier segment est trop court, il est continué avec le segment suivant jusqu'à dépasser cette valeur", + "min_interval:最短切割间隔": "min_interval: intervalle de coupe minimum", + "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: comment calculer la courbe de volume, plus petit pour une précision plus élevée mais une charge de calcul plus élevée (ce n'est pas une meilleure précision)", + "max_sil_kept:切完后静音最多留多长": "max_sil_kept: durée maximale de silence après la coupe", + "开启语音切割": "Activer le découpage vocal", + "终止语音切割": "Arrêter le découpage vocal", + "max:归一化后最大值多少": "max: valeur maximale après normalisation", + "alpha_mix:混多少比例归一化后音频进来": "alpha_mix: proportion d'audio normalisé mélangé", + "切割使用的进程数": "Nombre de processus utilisés pour le découpage", + "语音切割进程输出信息": "Informations de processus de découpage vocal", + "0c-中文批量离线ASR工具": "0c-Outil chinois de transcription automatique hors ligne en masse", + "开启离线批量ASR": "Activer la transcription automatique hors ligne en masse", + "终止ASR进程": "Arrêter le processus ASR", + "批量ASR(中文only)输入文件夹路径": "Chemin du dossier d'entrée pour la transcription automatique hors ligne en masse (chinois uniquement)", + "ASR进程输出信息": "Informations de processus ASR", + "0d-语音文本校对标注工具": "0d-Outil de correction et d'annotation de texte vocal", + "是否开启打标WebUI": "Activer l'interface Web d'annotation", + "打标数据标注文件路径": "Chemin du fichier d'annotation des données annotées", + "打标工具进程输出信息": "Informations de processus de l'outil d'annotation", + "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", + "*实验/模型名": "*Nom de l'expérience/modèle", + "显卡信息": "Informations sur la carte graphique", + "预训练的SoVITS-G模型路径": "Chemin du modèle SoVITS-G pré-entraîné", + "预训练的SoVITS-D模型路径": "Chemin du modèle SoVITS-D pré-entraîné", + "预训练的GPT模型路径": "Chemin du modèle GPT pré-entraîné", + "1A-训练集格式化工具": "1A-Outil de formatage du jeu de données d'entraînement", + "输出logs/实验名目录下应有23456开头的文件和文件夹": "Les fichiers et dossiers commençant par 23456 devraient être présents dans le répertoire logs/nom de l'expérience", + "*文本标注文件": "*Fichier d'annotation de texte", + "*训练集音频文件目录": "*Répertoire des fichiers audio d'entraînement", + "训练集音频文件目录 拼接 list文件里波形对应的文件名。": "Répertoire des fichiers audio d'entraînement - concaténer avec les noms de fichiers correspondants dans le fichier de liste", + "1Aa-文本内容": "1Aa-Contenu du texte", + "GPU卡号以-分割,每个卡号一个进程": "Numéro de carte GPU séparé par des tirets, un processus par numéro de carte", + "预训练的中文BERT模型路径": "Chemin du modèle BERT chinois pré-entraîné", + "开启文本获取": "Activer l'extraction de texte", + "终止文本获取进程": "Arrêter 
le processus d'extraction de texte", + "文本进程输出信息": "Informations de processus de texte", + "1Ab-SSL自监督特征提取": "1Ab-Extraction de caractéristiques auto-supervisée SSL", + "预训练的SSL模型路径": "Chemin du modèle SSL pré-entraîné", + "开启SSL提取": "Activer l'extraction SSL", + "终止SSL提取进程": "Arrêter le processus d'extraction SSL", + "SSL进程输出信息": "Informations de processus SSL", + "1Ac-语义token提取": "1Ac-Extraction de jetons sémantiques", + "开启语义token提取": "Activer l'extraction de jetons sémantiques", + "终止语义token提取进程": "Arrêter le processus d'extraction de jetons sémantiques", + "语义token提取进程输出信息": "Informations de processus d'extraction de jetons sémantiques", + "1Aabc-训练集格式化一键三连": "1Aabc-Formatage en un clic du jeu de données d'entraînement", + "开启一键三连": "Activer l'un clic trois connexions", + "终止一键三连": "Arrêter l'un clic trois connexions", + "一键三连进程输出信息": "Informations de processus de l'un clic trois connexions", + "1B-微调训练": "1B-Entraînement fin", + "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-Entraînement SoVITS. Les fichiers de modèle destinés au partage sont enregistrés sous SoVITS_weights.", + "每张显卡的batch_size": "Taille de lot par carte graphique", + "总训练轮数total_epoch,不建议太高": "Nombre total d'époques d'entraînement, pas recommandé d'être trop élevé", + "文本模块学习率权重": "Poids du taux d'apprentissage du module de texte", + "保存频率save_every_epoch": "Fréquence de sauvegarde (sauvegarder à chaque époque)", + "是否仅保存最新的ckpt文件以节省硬盘空间": "Sauvegarder uniquement le dernier fichier ckpt pour économiser de l'espace disque", + "是否在每次保存时间点将最终小模型保存至weights文件夹": "Sauvegarder le petit modèle final dans le dossier weights à chaque point de sauvegarde", + "开启SoVITS训练": "Activer l'entraînement SoVITS", + "终止SoVITS训练": "Arrêter l'entraînement SoVITS", + "SoVITS训练进程输出信息": "Informations de processus d'entraînement SoVITS", + "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-Entraînement GPT. Les fichiers de modèle destinés au partage sont enregistrés sous GPT_weights.", + "总训练轮数total_epoch": "Nombre total d'époques d'entraînement", + "开启GPT训练": "Activer l'entraînement GPT", + "终止GPT训练": "Arrêter l'entraînement GPT", + "GPT训练进程输出信息": "Informations de processus d'entraînement GPT", + "1C-推理": "1C-Inférence", + "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Choisissez le modèle entraîné stocké sous SoVITS_weights et GPT_weights. Par défaut, l'un d'eux est un modèle de base pour l'expérience de TTS Zero Shot de 5 secondes.", + "*GPT模型列表": "*Liste des modèles GPT", + "*SoVITS模型列表": "*Liste des modèles SoVITS", + "GPU卡号,只能填1个整数": "Numéro de carte GPU, ne peut contenir qu'un seul entier", + "刷新模型路径": "Actualiser le chemin du modèle", + "是否开启TTS推理WebUI": "Activer l'interface Web d'inférence TTS", + "TTS推理WebUI进程输出信息": "Informations de processus de l'interface Web d'inférence TTS", + "2-GPT-SoVITS-变声": "2-GPT-SoVITS-Modification de la voix", + "施工中,请静候佳音": "En construction, veuillez attendre patiemment", + "TTS推理进程已开启": "Le processus d'inférence TTS est en cours", + "TTS推理进程已关闭": "Le processus d'inférence TTS est terminé", + "打标工具WebUI已开启": "L'interface Web de l'outil d'annotation est en cours", + "打标工具WebUI已关闭": "L'interface Web de l'outil d'annotation est terminée", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Ce logiciel est open source sous la licence MIT. L'auteur n'a aucun contrôle sur le logiciel. Les utilisateurs et les diffuseurs du son exporté par le logiciel en assument l'entière responsabilité. 
Si vous n'acceptez pas ces termes, vous ne pouvez ni utiliser ni citer aucun code ou fichier à l'intérieur du package. Voir LICENSE dans le répertoire racine pour plus de détails.", + "*请上传并填写参考信息": "*Veuillez télécharger et remplir les informations de référence", + "*请填写需要合成的目标文本": "*Veuillez remplir le texte cible à synthétiser", + "ASR任务开启:%s": "Tâche ASR activée : %s", + "GPT训练完成": "Entraînement GPT terminé", + "GPT训练开始:%s": "Entraînement GPT commencé : %s", + "SSL提取进程执行中": "Processus d'extraction SSL en cours", + "SSL提取进程结束": "Processus d'extraction SSL terminé", + "SoVITS训练完成": "Entraînement SoVITS terminé", + "SoVITS训练开始:%s": "Entraînement SoVITS commencé : %s", + "一键三连中途报错": "Erreur intermédiaire dans la séquence d'un clic trois connexions", + "一键三连进程结束": "Processus de séquence d'un clic trois connexions terminé", + "中文": "Chinois", + "凑50字一切": "Assembler 50 mots tout", + "凑五句一切": "Assembler cinq phrases tout", + "切分后文本": "Texte après découpage", + "切割执行中": "Découpage en cours", + "切割结束": "Découpage terminé", + "参考音频的文本": "Texte de l'audio de référence", + "参考音频的语种": "Langue de l'audio de référence", + "合成语音": "Synthèse vocale", + "后续将支持混合语种编码文本输入。": "Prise en charge ultérieure du codage de texte avec des langues mixtes.", + "已有正在进行的ASR任务,需先终止才能开启下一次任务": "Une tâche ASR est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "Une tâche d'entraînement GPT est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "Une tâche d'extraction SSL est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "Une tâche d'entraînement SoVITS est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已有正在进行的一键三连任务,需先终止才能开启下一次任务": "Une tâche d'une séquence d'un clic trois connexions est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已有正在进行的切割任务,需先终止才能开启下一次任务": "Une tâche de découpage est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已有正在进行的文本任务,需先终止才能开启下一次任务": "Une tâche de texte est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "Une tâche d'extraction de jetons sémantiques est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已终止ASR进程": "Processus ASR arrêté", + "已终止GPT训练": "Entraînement GPT arrêté", + "已终止SoVITS训练": "Entraînement SoVITS arrêté", + "已终止所有1a进程": "Tous les processus 1a ont été arrêtés", + "已终止所有1b进程": "Tous les processus 1b ont été arrêtés", + "已终止所有一键三连进程": "Tous les processus d'une séquence d'un clic trois connexions ont été arrêtés", + "已终止所有切割进程": "Tous les processus de découpage ont été arrêtés", + "已终止所有语义token进程": "Tous les processus de jetons sémantiques ont été arrêtés", + "按中文句号。切": "Couper selon les points en chinois.", + "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Outil de découpage de texte. Un texte trop long peut ne pas donner un bon résultat, donc il est recommandé de le couper d'abord s'il est trop long. 
La synthèse se fera en séparant le texte par les sauts de ligne puis en les assemblant.", + "文本进程执行中": "Processus de texte en cours", + "文本进程结束": "Processus de texte terminé", + "日文": "Japonais", + "英文": "Anglais", + "语义token提取进程执行中": "Processus d'extraction de jetons sémantiques en cours", + "语义token提取进程结束": "Processus d'extraction de jetons sémantiques terminé", + "请上传参考音频": "Veuillez télécharger l'audio de référence", + "输入路径不存在": "Le chemin d'entrée n'existe pas", + "输入路径存在但既不是文件也不是文件夹": "Le chemin d'entrée existe mais n'est ni un fichier ni un dossier", + "输出的语音": "Audio de sortie", + "进度:1a-done": "Progression : 1a-done", + "进度:1a-done, 1b-ing": "Progression : 1a-done, 1b-ing", + "进度:1a-ing": "Progression : 1a-ing", + "进度:1a1b-done": "Progression : 1a1b-done", + "进度:1a1b-done, 1cing": "Progression : 1a1b-done, 1cing", + "进度:all-done": "Progression : all-done", + "需要合成的切分前文本": "Texte préalable à la synthèse", + "需要合成的文本": "Texte à synthétiser", + "需要合成的语种": "Langue de synthèse requise", + ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Si >= 3, utilisez le résultat de la reconnaissance de hauteur de récolte avec un filtre médian, la valeur est le rayon du filtre, son utilisation peut atténuer les sons sourds", + "A模型权重": "Poids du modèle A", + "A模型路径": "Chemin du modèle A", + "B模型路径": "Chemin du modèle B", "E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Fichier de courbe F0 (facultatif). Une hauteur par ligne. Remplace la fréquence fondamentale par défaut et la modulation de la hauteur :", - "Index Rate": "Taux d'indexation", - "Onnx导出": "Exporter en ONNX", - "Onnx输出路径": "Chemin d'exportation ONNX :", - "RVC模型路径": "Chemin du modèle RVC :", - "ckpt处理": "Traitement des fichiers .ckpt", - "harvest进程数": "Nombre de processus CPU utilisés pour l'algorithme de reconnaissance de la hauteur (pitch) dans le cadre de la récolte (harvest).", - "index文件路径不可包含中文": "Le chemin du fichier d'index ne doit pas contenir de caractères chinois.", - "pth文件路径不可包含中文": "Le chemin du fichier .pth ne doit pas contenir de caractères chinois.", - "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Configuration des numéros de carte RMVPE : séparez les index GPU par des tirets \"-\", par exemple, 0-0-1 pour utiliser 2 processus sur GPU0 et 1 processus sur GPU1.", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Étape 1 : Remplissez la configuration expérimentale. Les données expérimentales sont stockées dans le dossier 'logs', avec chaque expérience ayant un dossier distinct. Entrez manuellement le chemin du nom de l'expérience, qui contient la configuration expérimentale, les journaux et les fichiers de modèle entraînés.", - "step1:正在处理数据": "Étape 1 : Traitement des données en cours.", - "step2:正在提取音高&正在提取特征": "Étape 2 : Extraction de la hauteur et extraction des caractéristiques en cours.", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Étape 2a : Parcours automatique de tous les fichiers du dossier d'entraînement qui peuvent être décodés en fichiers audio et réalisation d'une normalisation par tranches. Génère 2 dossiers wav dans le répertoire de l'expérience. 
Actuellement, seule la formation avec un seul chanteur/locuteur est prise en charge.", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Étape 2b : Utilisez le CPU pour extraire la hauteur (si le modèle le permet), utilisez le GPU pour extraire les caractéristiques (sélectionnez l'index du GPU) :", - "step3: 填写训练设置, 开始训练模型和索引": "Étape 3 : Remplissez les paramètres d'entraînement et démarrez l'entraînement du modèle ainsi que l'indexation.", - "step3a:正在训练模型": "Étape 3a : L'entraînement du modèle a commencé.", + "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Fichier de courbe F0, optionnel, une ligne par hauteur de ton, remplace F0 et la hauteur de ton par défaut", + "Index Rate": "Taux d'index", + "Onnx导出": "Exportation Onnx", + "Onnx输出路径": "Chemin d'exportation Onnx", + "RVC模型路径": "Chemin du modèle RVC", + "ckpt处理": "Traitement des points de contrôle", + "harvest进程数": "Nombre de processus de récolte", + "index文件路径不可包含中文": "Le chemin du fichier d'index ne peut pas contenir de caractères chinois", + "pth文件路径不可包含中文": "Le chemin du fichier pth ne peut pas contenir de caractères chinois", + "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Configuration des numéros de carte rmvpe : séparez les numéros de carte utilisés en entrée par des tirets, par exemple 0-0-1 signifie 2 processus sur la carte 0 et 1 processus sur la carte 1", + "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Étape 1 : Remplissez la configuration de l'expérience. Les données de l'expérience sont stockées dans le dossier logs, chaque expérience a son propre dossier. Vous devez entrer manuellement le chemin du nom de l'expérience, qui contient la configuration de l'expérience, les journaux et les fichiers de modèle entraînés.", + "step1:正在处理数据": "Étape 1 : Traitement des données en cours", + "step2:正在提取音高&正在提取特征": "Étape 2 : Extraction de la hauteur tonale et des caractéristiques en cours", + "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Étape 2a : Parcours automatique de tous les fichiers décodables en audio dans le dossier d'entraînement et normalisation par découpage. Deux dossiers wav sont générés dans le répertoire de l'expérience. Actuellement, seule la formation individuelle est prise en charge.", + "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Étape 2b : Extraction de la hauteur tonale avec le CPU (si le modèle a une hauteur tonale) et extraction des caractéristiques avec le GPU (choisissez le numéro de la carte)", + "step3: 填写训练设置, 开始训练模型和索引": "Étape 3 : Remplissez les paramètres d'entraînement et commencez l'entraînement du modèle et de l'index", + "step3a:正在训练模型": "Étape 3a : Entraînement du modèle en cours", "一键训练": "Entraînement en un clic", - "也可批量输入音频文件, 二选一, 优先读文件夹": "Il est également possible d'importer plusieurs fichiers audio. Si un chemin de dossier existe, cette entrée est ignorée.", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Traitement en lot pour la séparation de la voix et de l'accompagnement vocal à l'aide du modèle UVR5.
Exemple d'un format de chemin de dossier valide : D:\\chemin\\vers\\dossier\\d'entrée (copiez-le depuis la barre d'adresse du gestionnaire de fichiers).
Le modèle est divisé en trois catégories :
1. Préserver la voix : Choisissez cette option pour l'audio sans harmonies. Elle préserve la voix mieux que HP5. Il comprend deux modèles intégrés : HP2 et HP3. HP3 peut légèrement laisser passer l'accompagnement mais préserve légèrement mieux la voix que HP2.
2. Préserver uniquement la voix principale : Choisissez cette option pour l'audio avec harmonies. Cela peut affaiblir la voix principale. Il comprend un modèle intégré : HP5.
3. Modèles de suppression de la réverbération et du délai (par FoxJoy) :
  (1) MDX-Net : Le meilleur choix pour la suppression de la réverbération stéréo, mais ne peut pas supprimer la réverbération mono.
  (234) DeEcho : Supprime les effets de délai. Le mode Aggressive supprime plus efficacement que le mode Normal. DeReverb supprime également la réverbération et peut supprimer la réverbération mono, mais pas très efficacement pour les contenus à haute fréquence fortement réverbérés.
Notes sur la suppression de la réverbération et du délai :
1. Le temps de traitement pour le modèle DeEcho-DeReverb est environ deux fois plus long que pour les autres deux modèles DeEcho.
2. Le modèle MDX-Net-Dereverb est assez lent.
3. La configuration la plus propre recommandée est d'appliquer d'abord MDX-Net, puis DeEcho-Aggressive.", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Entrez le(s) index GPU séparé(s) par '-', par exemple, 0-1-2 pour utiliser les GPU 0, 1 et 2 :", - "伴奏人声分离&去混响&去回声": "Séparation des voix/accompagnement et suppression de la réverbération", - "使用模型采样率": "使用模型采样率", - "使用设备采样率": "使用设备采样率", - "保存名": "Nom de sauvegarde :", - "保存的文件名, 默认空为和源文件同名": "Nom du fichier de sauvegarde (par défaut : identique au nom du fichier source) :", - "保存的模型名不带后缀": "Nom du modèle enregistré (sans extension) :", - "保存频率save_every_epoch": "Fréquence de sauvegarde (save_every_epoch) :", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protéger les consonnes sourdes et les bruits de respiration pour éviter les artefacts tels que le déchirement dans la musique électronique. Réglez à 0,5 pour désactiver. Diminuez la valeur pour renforcer la protection, mais cela peut réduire la précision de l'indexation :", + "也可批量输入音频文件, 二选一, 优先读文件夹": "Également possible d'entrer en lot des fichiers audio, au choix, privilégiez la lecture du dossier", + "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Numéros de carte utilisés en entrée séparés par des tirets, par exemple 0-1-2 Utilisez les cartes 0, 1 et 2", + "伴奏人声分离&去混响&去回声": "Séparation de la voix et de l'accompagnement, suppression de la réverbération et de l'écho", + "使用模型采样率": "Taux d'échantillonnage du modèle", + "使用设备采样率": "Taux d'échantillonnage de l'appareil", + "保存名": "Nom de sauvegarde", + "保存的文件名, 默认空为和源文件同名": "Nom de fichier sauvegardé, par défaut vide pour avoir le même nom que le fichier source", + "保存的模型名不带后缀": "Nom du modèle sauvegardé sans suffixe", + "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protéger les consonnes claires et les sons de respiration, éviter les artefacts tels que le déchirement du son électronique, tirer à 0.5 pour désactiver, diminuer pour augmenter la protection mais cela peut réduire l'efficacité de l'indexation", "修改": "Modifier", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modifier les informations du modèle (uniquement pris en charge pour les petits fichiers de modèle extraits du dossier 'weights')", + "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modifier les informations du modèle (uniquement pour les petits fichiers de modèle extraits sous le dossier weights)", "停止音频转换": "Arrêter la conversion audio", - "全流程结束!": "Toutes les étapes ont été terminées !", - "刷新音色列表和索引路径": "Actualiser la liste des voix et le vers l'index.", - "加载模型": "Charger le modèle.", - "加载预训练底模D路径": "Charger le chemin du modèle de base pré-entraîné D :", - "加载预训练底模G路径": "Charger le chemin du modèle de base pré-entraîné G :", - "单次推理": "单次推理", - "卸载音色省显存": "Décharger la voix pour économiser la mémoire GPU.", - "变调(整数, 半音数量, 升八度12降八度-12)": "Transposer (entier, nombre de demi-tons, monter d'une octave : 12, descendre d'une octave : -12) :", - "后处理重采样至最终采样率,0为不进行重采样": "Rééchantillonner l'audio de sortie en post-traitement à la fréquence d'échantillonnage finale. 
Réglez sur 0 pour ne pas effectuer de rééchantillonnage :", + "全流程结束!": "Processus complet terminé !", + "刷新音色列表和索引路径": "Actualiser la liste des timbres et les chemins d'index", + "加载模型": "Charger le modèle", + "加载预训练底模D路径": "Charger le chemin du modèle de base pré-entraîné D", + "加载预训练底模G路径": "Charger le chemin du modèle de base pré-entraîné G", + "单次推理": "Inférence unique", + "卸载音色省显存": "Décharger le timbre pour économiser la mémoire vidéo", + "变调(整数, 半音数量, 升八度12降八度-12)": "Changer la tonalité (entier, quantité de demi-tons, monter d'une octave 12, descendre d'une octave -12)", + "后处理重采样至最终采样率,0为不进行重采样": "Re-échantillonnage en post-traitement à la fréquence d'échantillonnage finale, 0 pour ne pas effectuer de re-échantillonnage", "否": "Non", - "启用相位声码器": "启用相位声码器", + "启用相位声码器": "Activer le codeur de phase", "响应阈值": "Seuil de réponse", "响度因子": "Facteur de volume sonore", - "处理数据": "Traitement des données", - "导出Onnx模型": "Exporter le modèle au format ONNX.", - "导出文件格式": "Format de fichier d'exportation", - "常见问题解答": "FAQ (Foire Aux Questions)", + "处理数据": "Traiter les données", + "导出Onnx模型": "Exporter le modèle Onnx", + "导出文件格式": "Format d'exportation du fichier", + "常见问题解答": "Questions fréquemment posées", "常规设置": "Paramètres généraux", - "开始音频转换": "Démarrer la conversion audio.", - "很遗憾您这没有能用的显卡来支持您训练": "Malheureusement, il n'y a pas de GPU compatible disponible pour prendre en charge votre entrainement.", + "开始音频转换": "Démarrer la conversion audio", "性能设置": "Paramètres de performance", - "总训练轮数total_epoch": "Nombre total d'époques d'entraînement (total_epoch) :", - "批量推理": "批量推理", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversion en lot. Entrez le dossier contenant les fichiers audio à convertir ou téléchargez plusieurs fichiers audio. Les fichiers audio convertis seront enregistrés dans le dossier spécifié (par défaut : 'opt').", - "指定输出主人声文件夹": "Spécifiez le dossier de sortie pour les fichiers de voix :", - "指定输出文件夹": "Spécifiez le dossier de sortie :", - "指定输出非主人声文件夹": "Spécifiez le dossier de sortie pour l'accompagnement :", + "批量推理": "Inférence en lot", + "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversion en lot, entrez le dossier audio à convertir, ou téléchargez plusieurs fichiers audio, les fichiers convertis seront enregistrés dans le dossier spécifié (opt par défaut).", + "指定输出主人声文件夹": "Spécifier le dossier de sortie pour la voix principale", + "指定输出文件夹": "Spécifier le dossier de sortie", + "指定输出非主人声文件夹": "Spécifier le dossier de sortie pour la non-voix principale", "推理时间(ms):": "Temps d'inférence (ms) :", - "推理音色": "Voix pour l'inférence", + "推理音色": "Timbre d'inférence", "提取": "Extraire", - "提取音高和处理数据使用的CPU进程数": "Nombre de processus CPU utilisés pour l'extraction de la hauteur et le traitement des données :", + "提取音高和处理数据使用的CPU进程数": "Nombre de processus CPU utilisés pour extraire la hauteur tonale et traiter les données", "是": "Oui", - "是否仅保存最新的ckpt文件以节省硬盘空间": "Enregistrer uniquement le dernier fichier '.ckpt' pour économiser de l'espace disque :", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "Enregistrer un petit modèle final dans le dossier 'weights' à chaque point de sauvegarde :", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Mettre en cache tous les ensembles d'entrainement dans la mémoire GPU. 
Mettre en cache de petits ensembles de données (moins de 10 minutes) peut accélérer l'entrainement, mais mettre en cache de grands ensembles de données consommera beaucoup de mémoire GPU et peut ne pas apporter beaucoup d'amélioration de vitesse :", - "显卡信息": "Informations sur la carte graphique (GPU)", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Ce logiciel est open source sous la licence MIT. L'auteur n'a aucun contrôle sur le logiciel. Les utilisateurs qui utilisent le logiciel et distribuent les sons exportés par le logiciel en sont entièrement responsables.
Si vous n'acceptez pas cette clause, vous ne pouvez pas utiliser ou faire référence à aucun code ni fichier contenu dans le package logiciel. Consultez le fichier Agreement-LICENSE.txt dans le répertoire racine pour plus de détails.", + "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Mettre en cache ou non tous les ensembles d'entraînement dans la mémoire vidéo. Pour les petites données de moins de 10 minutes, la mise en cache peut accélérer l'entraînement, mais pour les grandes données, la mise en cache peut épuiser la mémoire vidéo sans améliorer considérablement la vitesse.", "查看": "Voir", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Afficher les informations sur le modèle (uniquement pour les petits fichiers de modèle extraits du dossier \"weights\")", - "检索特征占比": "Rapport de recherche de caractéristiques (contrôle l'intensité de l'accent, un rapport trop élevé provoque des artefacts) :", + "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Voir les informations du modèle (uniquement pour les petits fichiers de modèle extraits sous le dossier weights)", + "检索特征占比": "Pourcentage des caractéristiques extraites", "模型": "Modèle", "模型推理": "Inférence du modèle", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extraction du modèle (saisissez le chemin d'accès au modèle du grand fichier dans le dossier \"logs\"). Cette fonction est utile si vous souhaitez arrêter l'entrainement à mi-chemin et extraire et enregistrer manuellement un petit fichier de modèle, ou si vous souhaitez tester un modèle intermédiaire :", - "模型是否带音高指导": "Indique si le modèle dispose d'un guidage en hauteur :", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Indique si le modèle dispose d'un système de guidage de la hauteur (obligatoire pour le chant, facultatif pour la parole) :", - "模型是否带音高指导,1是0否": "Le modèle dispose-t-il d'un guide de hauteur (1 : oui, 0 : non) ?", - "模型版本型号": "Version de l'architecture du modèle :", - "模型融合, 可用于测试音色融合": "Fusion de modèles, peut être utilisée pour tester la fusion de timbres", - "模型路径": "Le chemin vers le modèle :", - "每张显卡的batch_size": "Taille du batch par GPU :", - "淡入淡出长度": "Longueur de la transition", + "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extraction du modèle (saisissez le chemin du modèle volumineux sous le dossier logs), utilisé lorsque l'entraînement est à mi-chemin, que vous ne voulez pas continuer l'entraînement, que le modèle n'a pas été automatiquement extrait et sauvegardé en tant que petit fichier, ou que vous souhaitez tester le modèle intermédiaire.", + "模型是否带音高指导": "Le modèle inclut-il un guidage en hauteur tonale", + "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Le modèle inclut-il un guidage en hauteur tonale (nécessaire pour le chant, facultatif pour la parole)", + "模型是否带音高指导,1是0否": "Le modèle inclut-il un guidage en hauteur tonale, 1 pour oui, 0 pour non", + "模型版本型号": "Numéro de version du modèle", + "模型融合, 可用于测试音色融合": "Fusion de modèles, utilisée pour tester la fusion des timbres", + "模型路径": "Chemin du modèle", + "淡入淡出长度": "Longueur du fondu enchaîné", "版本": "Version", "特征提取": "Extraction des caractéristiques", - "特征检索库文件路径,为空则使用下拉的选择结果": "Chemin d'accès au fichier d'index des caractéristiques. Laisser vide pour utiliser le résultat sélectionné dans la liste déroulante :", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Il est recommandé d'utiliser la clé +12 pour la conversion homme-femme et la clé -12 pour la conversion femme-homme. 
Si la plage sonore est trop large et que la voix est déformée, vous pouvez également l'ajuster vous-même à la plage appropriée.", - "目标采样率": "Taux d'échantillonnage cible :", - "算法延迟(ms):": "Délais algorithmiques (ms):", - "自动检测index路径,下拉式选择(dropdown)": "Détecter automatiquement le chemin d'accès à l'index et le sélectionner dans la liste déroulante :", + "特征检索库文件路径,为空则使用下拉的选择结果": "Chemin du fichier de bibliothèque de recherche de caractéristiques, laisser vide pour utiliser le résultat de la liste déroulante", + "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Recommandation pour la transformation homme vers femme +12 clés, femme vers homme -12 clés, ajustez vous-même si l'étendue du son explose et provoque une distorsion de la voix.", + "目标采样率": "Taux d'échantillonnage cible", + "算法延迟(ms):": "Retard de l'algorithme (ms):", + "自动检测index路径,下拉式选择(dropdown)": "Détection automatique du chemin de l'index, choix dans la liste déroulante", "融合": "Fusion", - "要改的模型信息": "Informations sur le modèle à modifier :", - "要置入的模型信息": "Informations sur le modèle à placer :", - "训练": "Entraîner", + "要改的模型信息": "Informations du modèle à modifier", + "要置入的模型信息": "Informations du modèle à insérer", + "训练": "Entraînement", "训练模型": "Entraîner le modèle", "训练特征索引": "Entraîner l'index des caractéristiques", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entraînement terminé. Vous pouvez consulter les rapports d'entraînement dans la console ou dans le fichier 'train.log' situé dans le dossier de l'expérience.", - "请指定说话人id": "Veuillez spécifier l'ID de l'orateur ou du chanteur :", - "请选择index文件": "Veuillez sélectionner le fichier d'index", - "请选择pth文件": "Veuillez sélectionner le fichier pth", - "请选择说话人id": "Sélectionner l'ID de l'orateur ou du chanteur :", - "转换": "Convertir", - "输入实验名": "Saisissez le nom de l'expérience :", - "输入待处理音频文件夹路径": "Entrez le chemin du dossier audio à traiter :", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Entrez le chemin du dossier audio à traiter (copiez-le depuis la barre d'adresse du gestionnaire de fichiers) :", - "输入待处理音频文件路径(默认是正确格式示例)": "Entrez le chemin d'accès du fichier audio à traiter (par défaut, l'exemple de format correct) :", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Ajustez l'échelle de l'enveloppe de volume. Plus il est proche de 0, plus il imite le volume des voix originales. Cela peut aider à masquer les bruits et à rendre le volume plus naturel lorsqu'il est réglé relativement bas. 
Plus le volume est proche de 1, plus le volume sera fort et constant :", - "输入监听": "Moniteur vocal d'entrée", - "输入训练文件夹路径": "Indiquez le chemin d'accès au dossier d'entraînement :", - "输入设备": "Dispositif d'entrée", - "输入降噪": "Réduction du bruit d'entrée", - "输出信息": "Informations sur la sortie", - "输出变声": "Sortie voix convertie", - "输出设备": "Dispositif de sortie", - "输出降噪": "Réduction du bruit de sortie", - "输出音频(右下角三个点,点了可以下载)": "Exporter l'audio (cliquer sur les trois points dans le coin inférieur droit pour télécharger)", - "选择.index文件": "Sélectionner le fichier .index", - "选择.pth文件": "Sélectionner le fichier .pth", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Sélection de l'algorithme d'extraction de la hauteur, les voix d'entrée peuvent être accélérées avec pm, harvest a de bonnes basses mais est très lent, crepe est bon mais consomme beaucoup de ressources GPU.", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Sélectionnez l'algorithme d'extraction de la hauteur de ton (\"pm\" : extraction plus rapide mais parole de moindre qualité ; \"harvest\" : meilleure basse mais extrêmement lente ; \"crepe\" : meilleure qualité mais utilisation intensive du GPU), \"rmvpe\" : meilleure qualité et peu d'utilisation du GPU.", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Sélection de l'algorithme d'extraction de la hauteur : la chanson d'entrée peut être traitée plus rapidement par pm, avec une voix de haute qualité mais un CPU médiocre, par dio, harvest est meilleur mais plus lent, rmvpe est le meilleur, mais consomme légèrement le CPU/GPU.", - "采样率:": "采样率:", - "采样长度": "Longueur de l'échantillon", - "重载设备列表": "Recharger la liste des dispositifs", - "音调设置": "Réglages de la hauteur", - "音频设备(请使用同种类驱动)": "Périphérique audio (veuillez utiliser le même type de pilote)", - "音高算法": "algorithme de détection de la hauteur", - "额外推理时长": "Temps d'inférence supplémentaire" -} + "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entraînement terminé, vous pouvez consulter les journaux d'entraînement de la console ou le fichier train.log dans le dossier d'expérience", + "请指定说话人id": "Veuillez spécifier l'ID du locuteur", + "请选择index文件": "Veuillez choisir le fichier d'index", + "请选择pth文件": "Veuillez choisir le fichier pth", + "请选择说话人id": "Veuillez choisir l'ID du locuteur", + "转换": "Conversion", + "输入实验名": "Nom de l'expérience d'entrée", + "输入待处理音频文件夹路径": "Entrez le chemin du dossier audio à traiter", + "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Entrez le chemin du dossier audio à traiter (copiez-le depuis la barre d'adresse du gestionnaire de fichiers)", + "输入待处理音频文件路径(默认是正确格式示例)": "Entrez le chemin du fichier audio à traiter (par défaut, c'est un exemple de format correct)", + "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Entrez le taux de fusion pour remplacer l'enveloppe de volume source par l'enveloppe de volume de sortie, plus proche de 1, plus l'enveloppe de sortie est utilisée", + "输入监听": "Entrée d'écoute", + "输入训练文件夹路径": "Entrez le chemin du dossier d'entraînement", + "输入设备": "Entrée de l'appareil", + "输入降噪": "Entrée de réduction du bruit", + "输出信息": "Sortie d'information", + "输出变声": "Sortie de la transformation de la voix", + "输出设备": "Sortie de l'appareil", + "输出降噪": "Sortie de réduction du bruit", + "输出音频(右下角三个点,点了可以下载)": "Sortie audio (trois points en bas à droite, cliquez pour télécharger)", + "选择.index文件": "Choisissez le fichier .index", + "选择.pth文件": "Choisissez le fichier .pth", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Choisissez l'algorithme 
d'extraction de hauteur tonale, vous pouvez utiliser pm pour accélérer l'entrée de la voix, harvest est bon pour les basses mais très lent, crepe a un bon effet mais utilise le GPU",
+    "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Choisissez l'algorithme d'extraction de hauteur tonale, vous pouvez utiliser pm pour accélérer l'entrée de la voix, harvest est bon pour les basses mais très lent, crepe a un bon effet mais utilise le GPU, rmvpe a le meilleur effet et utilise légèrement le GPU",
+    "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Choisissez l'algorithme d'extraction de hauteur tonale : utilisez pm pour accélérer l'entrée de la voix, une voix de haute qualité mais nécessite une meilleure CPU ; utilisez dio pour accélérer, harvest a une meilleure qualité mais est lent, rmvpe a le meilleur effet et utilise légèrement la CPU/GPU",
+    "采样率:": "Taux d'échantillonnage:",
+    "采样长度": "Longueur d'échantillonnage",
+    "重载设备列表": "Recharger la liste des appareils",
+    "音调设置": "Paramètres de tonalité",
+    "音频设备(请使用同种类驱动)": "Appareil audio (veuillez utiliser un pilote de même type)",
+    "音高算法": "Algorithme de hauteur tonale",
+    "额外推理时长": "Durée d'inférence supplémentaire"
+}

From 3a3174ad5aa3e9367f608bb810161e6803818b8e Mon Sep 17 00:00:00 2001
From: bfloat16 <38366253+bfloat16@users.noreply.github.com>
Date: Thu, 25 Jan 2024 23:10:27 +0800
Subject: [PATCH 20/25] Remove redundant judgments

---
 webui.py | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/webui.py b/webui.py
index 4461056b..4b896ff0 100644
--- a/webui.py
+++ b/webui.py
@@ -51,17 +51,13 @@ n_cpu=cpu_count()
 ngpu = torch.cuda.device_count()
 gpu_infos = []
 mem = []
-if_gpu_ok = False
 if torch.cuda.is_available() or ngpu != 0:
     for i in range(ngpu):
         gpu_name = torch.cuda.get_device_name(i)
-        if any(value in gpu_name.upper()for value in ["10","16","20","30","40","A2","A3","A4","P4","A50","500","A60","70","80","90","M4","T4","TITAN","L"]):
-            # A10#A100#V100#A40#P40#M40#K80#A4500
-            if_gpu_ok = True # 至少有一张能用的N卡
-            gpu_infos.append("%s\t%s" % (i, gpu_name))
-            mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4))
-if if_gpu_ok and len(gpu_infos) > 0:
+        gpu_infos.append("%s\t%s" % (i, gpu_name))
+        mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4))
+if len(gpu_infos) > 0:
     gpu_info = "\n".join(gpu_infos)
     default_batch_size = min(mem) // 2
 else:

From 08074e1fb3900deae7171b68661994012d555b55 Mon Sep 17 00:00:00 2001
From: bfloat16 <38366253+bfloat16@users.noreply.github.com>
Date: Thu, 25 Jan 2024 23:13:20 +0800
Subject: [PATCH 21/25] Update Windows Batch

---
 go-webui.bat | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/go-webui.bat b/go-webui.bat
index 968a25ce..be4135e4 100644
--- a/go-webui.bat
+++ b/go-webui.bat
@@ -1,2 +1,4 @@
-runtime\python.exe webui.py
+@echo off
+chcp 65001
+"%~dp0\runtime\python.exe" "%~dp0\webui.py"
 pause
\ No newline at end of file

From 18349b20fb3f74cf09c16c661ceec8c36640901f Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Thu, 25 Jan 2024 23:21:03 +0800
Subject: [PATCH 22/25] Update webui.py

---
 webui.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/webui.py b/webui.py
index 4b896ff0..70bf5f0e 100644
--- a/webui.py
+++ b/webui.py
@@ -51,13 +51,17 @@ n_cpu=cpu_count()
 ngpu = torch.cuda.device_count()
 gpu_infos = []
 mem = []
+if_gpu_ok = False
 if torch.cuda.is_available() or ngpu != 0:
     for i in range(ngpu):
         gpu_name = torch.cuda.get_device_name(i)
-        gpu_infos.append("%s\t%s" % (i, gpu_name))
-        mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4))
-if len(gpu_infos) > 0:
+        if any(value in gpu_name.upper()for value in ["10","16","20","30","40","A2","A3","A4","P4","A50","500","A60","70","80","90","M4","T4","TITAN","L","4060"]):
+            # A10#A100#V100#A40#P40#M40#K80#A4500
+            if_gpu_ok = True # 至少有一张能用的N卡
+            gpu_infos.append("%s\t%s" % (i, gpu_name))
+            mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4))
+if if_gpu_ok and len(gpu_infos) > 0:
     gpu_info = "\n".join(gpu_infos)
     default_batch_size = min(mem) // 2
 else:

From 56fef8a59c3b871936002c05cca678edaf5d39da Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Thu, 25 Jan 2024 23:27:47 +0800
Subject: [PATCH 23/25] Update 1-get-text.py

---
 GPT_SoVITS/prepare_datasets/1-get-text.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/GPT_SoVITS/prepare_datasets/1-get-text.py b/GPT_SoVITS/prepare_datasets/1-get-text.py
index b4a145cb..9499db4a 100644
--- a/GPT_SoVITS/prepare_datasets/1-get-text.py
+++ b/GPT_SoVITS/prepare_datasets/1-get-text.py
@@ -41,12 +41,18 @@ def my_save(fea, path): #####fix issue: torch.save doesn't support chinese path
     shutil.move(tmp_path, "%s/%s" % (dir, name))
+
 txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part)
 if os.path.exists(txt_path) == False:
     bert_dir = "%s/3-bert" % (opt_dir)
     os.makedirs(opt_dir, exist_ok=True)
     os.makedirs(bert_dir, exist_ok=True)
-    device = "cuda:0" if torch.cuda.is_available() else "mps"
+if torch.cuda.is_available():
+    device = "cuda:0"
+elif torch.backends.mps.is_available():
+    device = "mps"
+else:
+    device = "cpu"
     tokenizer = AutoTokenizer.from_pretrained(bert_pretrained_dir)
     bert_model = AutoModelForMaskedLM.from_pretrained(bert_pretrained_dir)
     if is_half == True:

From d796bd40b9e436eb374c08051680aeb1dc096f2f Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Thu, 25 Jan 2024 23:28:08 +0800
Subject: [PATCH 24/25] Update 2-get-hubert-wav32k.py

---
 GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py
index 31e80681..26c71b74 100644
--- a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py
+++ b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py
@@ -47,7 +47,12 @@ os.makedirs(wav32dir,exist_ok=True)
 maxx=0.95
 alpha=0.5
-device="cuda:0" if torch.cuda.is_available() else "mps"
+if torch.cuda.is_available():
+    device = "cuda:0"
+elif torch.backends.mps.is_available():
+    device = "mps"
+else:
+    device = "cpu"
 model=cnhubert.get_model()
 # is_half=False
 if(is_half==True):
@@ -106,4 +111,4 @@ if(len(nan_fails)>0 and is_half==True):
         try:
             name2go(wav_name)
         except:
-            print(wav_name,traceback.format_exc())
\ No newline at end of file
+            print(wav_name,traceback.format_exc())

From 8e54a36f2c084705ff49b516687990f3c797c02e Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Thu, 25 Jan 2024 23:28:16 +0800
Subject: [PATCH 25/25] Update 3-get-semantic.py

---
 GPT_SoVITS/prepare_datasets/3-get-semantic.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/GPT_SoVITS/prepare_datasets/3-get-semantic.py b/GPT_SoVITS/prepare_datasets/3-get-semantic.py
index 69eea07a..a3cf0a3d 100644
--- a/GPT_SoVITS/prepare_datasets/3-get-semantic.py
+++ b/GPT_SoVITS/prepare_datasets/3-get-semantic.py
@@ -38,7 +38,12 @@ semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part)
 if os.path.exists(semantic_path) == False:
     os.makedirs(opt_dir, exist_ok=True)
-    device = "cuda:0" if torch.cuda.is_available() else "mps"
+if torch.cuda.is_available():
+    device = "cuda"
+elif torch.backends.mps.is_available():
+    device = "mps"
+else:
+    device = "cpu"
 hps = utils.get_hparams_from_file(s2config_path)
 vq_model = SynthesizerTrn(
     hps.data.filter_length // 2 + 1,
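
Patches 23-25 above inline the same cuda -> mps -> cpu device fallback in each of the three prepare_datasets scripts. As a minimal sketch only, the helper below shows that selection written once; the function name pick_device is hypothetical and not part of these patches, and it assumes a PyTorch build new enough to expose torch.backends.mps.

# Editorial sketch, not part of the patch series: one helper for the
# cuda -> mps -> cpu priority that patches 23-25 repeat per script.
import torch


def pick_device(cuda_index: int = 0) -> str:
    """Prefer an NVIDIA GPU, then Apple's MPS backend, then the CPU."""
    if torch.cuda.is_available():
        return "cuda:%d" % cuda_index
    if torch.backends.mps.is_available():
        return "mps"
    return "cpu"


device = pick_device()  # "cuda:0", "mps", or "cpu"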