This commit is contained in:
KakaruHayate 2024-03-21 11:18:31 +08:00
commit 386f3a5b7e
89 changed files with 258571 additions and 114816 deletions

8
.dockerignore Normal file
View File

@ -0,0 +1,8 @@
docs
logs
output
reference
SoVITS_weights
GPT_weights
TEMP
.git

8
.gitignore vendored
View File

@ -4,3 +4,11 @@ __pycache__
env
runtime
.idea
output
logs
reference
GPT_weights
SoVITS_weights
TEMP

5
Docker/download.py Normal file
View File

@ -0,0 +1,5 @@
# Download the ModelScope (damo) ASR-related models
from modelscope import snapshot_download
model_dir = snapshot_download('damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',revision="v2.0.4")
model_dir = snapshot_download('damo/speech_fsmn_vad_zh-cn-16k-common-pytorch',revision="v2.0.4")
model_dir = snapshot_download('damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',revision="v2.0.4")
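For reference, the script overwrites model_dir three times. A minimal sketch (not part of this commit) that keeps the three cache paths separate and verifies the downloads, assuming the same modelscope.snapshot_download API; the dictionary keys are hypothetical:
import os
from modelscope import snapshot_download

models = {
    "asr": "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
    "vad": "damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
    "punc": "damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
}
local_dirs = {}
for name, model_id in models.items():
    # snapshot_download returns the local cache directory of the downloaded model
    local_dirs[name] = snapshot_download(model_id, revision="v2.0.4")
    assert os.path.isdir(local_dirs[name]), f"{name} model was not downloaded"
print(local_dirs)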

View File

@ -2,7 +2,7 @@
FROM cnstark/pytorch:2.0.1-py3.9.17-cuda11.8.0-ubuntu20.04
LABEL maintainer="breakstring@hotmail.com"
LABEL version="dev-20240123.03"
LABEL version="dev-20240209"
LABEL description="Docker image for GPT-SoVITS"
@ -11,43 +11,35 @@ ENV DEBIAN_FRONTEND=noninteractive
ENV TZ=Etc/UTC
RUN apt-get update && \
apt-get install -y --no-install-recommends tzdata ffmpeg libsox-dev parallel aria2 git git-lfs && \
rm -rf /var/lib/apt/lists/* && \
git lfs install
git lfs install && \
rm -rf /var/lib/apt/lists/*
# Copy application
# Copy only requirements.txt initially to leverage Docker cache
WORKDIR /workspace
COPY requirements.txt /workspace/
RUN pip install --no-cache-dir -r requirements.txt
# Define a build-time argument for image type
ARG IMAGE_TYPE=full
# Conditional logic based on the IMAGE_TYPE argument
# Always copy the Docker directory, but only use it if IMAGE_TYPE is not "elite"
COPY ./Docker /workspace/Docker
# The "elite" image type does not bundle the extra models
RUN if [ "$IMAGE_TYPE" != "elite" ]; then \
chmod +x /workspace/Docker/download.sh && \
/workspace/Docker/download.sh && \
python /workspace/Docker/download.py && \
python -m nltk.downloader averaged_perceptron_tagger cmudict; \
fi
# Copy the rest of the application
COPY . /workspace
# Download models
RUN chmod +x /workspace/Docker/download.sh && /workspace/Docker/download.sh
# Copy the rest of the application
COPY . /workspace
# These packages should be installed from requirements.txt, but due to funasr/modelscope issues they are installed manually here for now
RUN pip install --no-cache-dir torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers jieba psutil PyYAML
# Pin the modelscope and funasr versions here; the damo_asr models are downloaded by them later
RUN pip install --no-cache-dir modelscope~=1.10.0 torchaudio sentencepiece funasr~=0.8.7
# Disabled for now; let the container download these itself
# Clone damo_asr
#WORKDIR /workspace/tools/damo_asr/models
#RUN git clone --depth 1 https://www.modelscope.cn/iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch && \
# (cd speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch && git lfs pull)
#RUN git clone --depth 1 https://www.modelscope.cn/iic/speech_fsmn_vad_zh-cn-16k-common-pytorch.git speech_fsmn_vad_zh-cn-16k-common-pytorch && \
# (cd speech_fsmn_vad_zh-cn-16k-common-pytorch && git lfs pull)
#RUN git clone --depth 1 https://www.modelscope.cn/iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git punc_ct-transformer_zh-cn-common-vocab272727-pytorch && \
# (cd punc_ct-transformer_zh-cn-common-vocab272727-pytorch && git lfs pull)
#RUN parallel --will-cite -a /workspace/Docker/damo.sha256 "echo -n {} | sha256sum -c"
#WORKDIR /workspace
EXPOSE 9870
EXPOSE 9871
EXPOSE 9872
EXPOSE 9873
EXPOSE 9874
VOLUME /workspace/output
VOLUME /workspace/logs
VOLUME /workspace/SoVITS_weights
EXPOSE 9871 9872 9873 9874 9880
CMD ["python", "webui.py"]

View File

@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/bucketsampler.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/bucket_sampler.py
# reference: https://github.com/lifeiteng/vall-e
import itertools
import math
import random

View File

@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/data_module.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/data_module.py
# reference: https://github.com/lifeiteng/vall-e
from pytorch_lightning import LightningDataModule
from AR.data.bucket_sampler import DistributedBucketSampler
from AR.data.dataset import Text2SemanticDataset
@ -41,7 +42,8 @@ class Text2SemanticDataModule(LightningDataModule):
# pad_val=self.config['data']['pad_val'])
def train_dataloader(self):
batch_size = self.config["train"]["batch_size"]
batch_size = self.config["train"]["batch_size"] // 2 if self.config["train"].get("if_dpo", False) == True else self.config["train"]["batch_size"]
batch_size = max(min(batch_size, len(self._train_dataset) // 4), 1)  # keep at least one batch on tiny datasets so checkpoints still get saved
sampler = DistributedBucketSampler(self._train_dataset, batch_size=batch_size)
return DataLoader(
self._train_dataset,

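For context: when if_dpo is enabled, every sample is paired with a constructed rejected sequence, so the batch size is halved to keep memory roughly constant, and the final clamp keeps at least one batch even on tiny datasets. A toy sketch of the same arithmetic, with hypothetical config values (not part of this commit):
# Hypothetical values, purely to illustrate the batch-size logic above.
config = {"train": {"batch_size": 8, "if_dpo": True}}
train_dataset_len = 12  # e.g. a very small fine-tuning set

batch_size = (
    config["train"]["batch_size"] // 2
    if config["train"].get("if_dpo", False)
    else config["train"]["batch_size"]
)
# clamp so the sampler still yields at least one (and usually a few) batches
batch_size = max(min(batch_size, train_dataset_len // 4), 1)
print(batch_size)  # -> 3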
View File

@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/t2s_dataset.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/dataset.py
# reference: https://github.com/lifeiteng/vall-e
import pdb
import sys

View File

@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/t2s_lightning_module.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py
# reference: https://github.com/lifeiteng/vall-e
import os, sys
now_dir = os.getcwd()
@ -11,7 +12,6 @@ from AR.models.t2s_model import Text2SemanticDecoder
from AR.modules.lr_schedulers import WarmupCosineLRSchedule
from AR.modules.optim import ScaledAdam
class Text2SemanticLightningModule(LightningModule):
def __init__(self, config, output_dir, is_train=True):
super().__init__()
@ -35,7 +35,8 @@ class Text2SemanticLightningModule(LightningModule):
def training_step(self, batch: Dict, batch_idx: int):
opt = self.optimizers()
scheduler = self.lr_schedulers()
loss, acc = self.model.forward(
forward=self.model.forward if self.config["train"].get("if_dpo",False)==True else self.model.forward_old
loss, acc = forward(
batch["phoneme_ids"],
batch["phoneme_ids_len"],
batch["semantic_ids"],

View File

@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/t2s_lightning_module.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py
# reference: https://github.com/lifeiteng/vall-e
import os, sys
now_dir = os.getcwd()

View File

@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/t2s_model.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
# reference: https://github.com/lifeiteng/vall-e
import torch
from tqdm import tqdm
@ -8,6 +9,9 @@ from AR.models.utils import (
sample,
logits_to_probs,
multinomial_sample_one_no_sync,
dpo_loss,
make_reject_y,
get_batch_logps
)
from AR.modules.embedding import SinePositionalEmbedding
from AR.modules.embedding import TokenEmbedding
@ -85,11 +89,104 @@ class Text2SemanticDecoder(nn.Module):
ignore_index=self.EOS,
)
def make_input_data(self, x, x_lens, y, y_lens, bert_feature):
x = self.ar_text_embedding(x)
x = x + self.bert_proj(bert_feature.transpose(1, 2))
x = self.ar_text_position(x)
x_mask = make_pad_mask(x_lens)
y_mask = make_pad_mask(y_lens)
y_mask_int = y_mask.type(torch.int64)
codes = y.type(torch.int64) * (1 - y_mask_int)
# Training
# AR Decoder
y, targets = self.pad_y_eos(codes, y_mask_int, eos_id=self.EOS)
x_len = x_lens.max()
y_len = y_lens.max()
y_emb = self.ar_audio_embedding(y)
y_pos = self.ar_audio_position(y_emb)
xy_padding_mask = torch.concat([x_mask, y_mask], dim=1)
ar_xy_padding_mask = xy_padding_mask
x_attn_mask = F.pad(
torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),
(0, y_len),
value=True,
)
y_attn_mask = F.pad(
torch.triu(
torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),
diagonal=1,
),
(x_len, 0),
value=False,
)
xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)
bsz, src_len = x.shape[0], x_len + y_len
_xy_padding_mask = (
ar_xy_padding_mask.view(bsz, 1, 1, src_len)
.expand(-1, self.num_head, -1, -1)
.reshape(bsz * self.num_head, 1, src_len)
)
xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)
new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)
new_attn_mask.masked_fill_(xy_attn_mask, float("-inf"))
xy_attn_mask = new_attn_mask
# feed x and the full y into the model in a single pass
xy_pos = torch.concat([x, y_pos], dim=1)
return xy_pos, xy_attn_mask, targets
def forward(self, x, x_lens, y, y_lens, bert_feature):
"""
x: phoneme_ids
y: semantic_ids
"""
reject_y, reject_y_lens = make_reject_y(y, y_lens)
xy_pos, xy_attn_mask, targets = self.make_input_data(x, x_lens, y, y_lens, bert_feature)
xy_dec, _ = self.h(
(xy_pos, None),
mask=xy_attn_mask,
)
x_len = x_lens.max()
logits = self.ar_predict_layer(xy_dec[:, x_len:])
###### DPO #############
reject_xy_pos, reject_xy_attn_mask, reject_targets = self.make_input_data(x, x_lens, reject_y, reject_y_lens, bert_feature)
reject_xy_dec, _ = self.h(
(reject_xy_pos, None),
mask=reject_xy_attn_mask,
)
x_len = x_lens.max()
reject_logits = self.ar_predict_layer(reject_xy_dec[:, x_len:])
# loss
# from feiteng: the longer the duration, the larger the gradient update should be, hence reduction="sum"
loss_1 = F.cross_entropy(logits.permute(0, 2, 1), targets, reduction="sum")
acc = self.ar_accuracy_metric(logits.permute(0, 2, 1).detach(), targets).item()
A_logits, R_logits = get_batch_logps(logits, reject_logits, targets, reject_targets)
loss_2, _, _ = dpo_loss(A_logits, R_logits, 0, 0, 0.2, reference_free=True)
loss = loss_1 + loss_2
return loss, acc
def forward_old(self, x, x_lens, y, y_lens, bert_feature):
"""
x: phoneme_ids
y: semantic_ids
"""
x = self.ar_text_embedding(x)
x = x + self.bert_proj(bert_feature.transpose(1, 2))
x = self.ar_text_position(x)
@ -231,6 +328,7 @@ class Text2SemanticDecoder(nn.Module):
prompts,  #### reference-audio tokens
bert_feature,
top_k: int = -100,
top_p: int = 100,
early_stop_num: int = -1,
temperature: float = 1.0,
):
@ -240,7 +338,7 @@ class Text2SemanticDecoder(nn.Module):
# AR Decoder
y = prompts
prefix_len = y.shape[1]
x_len = x.shape[1]
x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
stop = False
@ -256,23 +354,24 @@ class Text2SemanticDecoder(nn.Module):
"first_infer": 1,
"stage": 0,
}
for idx in tqdm(range(1500)):
if cache["first_infer"] == 1:
################### first step ##########################
if y is not None:
y_emb = self.ar_audio_embedding(y)
else:
y_emb = torch.cat(
[cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1
)
cache["y_emb"] = y_emb
y_len = y_emb.shape[1]
prefix_len = y.shape[1]
y_pos = self.ar_audio_position(y_emb)
# feed x together with the gradually growing y into the model
if cache["first_infer"] == 1:
xy_pos = torch.concat([x, y_pos], dim=1)
cache["y_emb"] = y_emb
ref_free = False
else:
xy_pos = y_pos[:, -1:]
y_len = y_pos.shape[1]
### the following three are not cached
if cache["first_infer"] == 1:
y_emb = None
y_len = 0
prefix_len = 0
y_pos = None
xy_pos = x
y = torch.zeros(x.shape[0], 0, dtype=torch.int, device=x.device)
ref_free = True
x_attn_mask_pad = F.pad(
x_attn_mask,
(0, y_len),  ### pad the all-zero x-x block with an all-True x-y block, giving shape (x, x+y)
@ -284,19 +383,12 @@ class Text2SemanticDecoder(nn.Module):
value=False,
)
xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0).to(
y.device
x.device
)
else:
### rightmost column (this is wrong)
# xy_attn_mask=torch.ones((1, x_len+y_len), dtype=torch.bool,device=xy_pos.device)
# xy_attn_mask[:,-1]=False
### bottom row (this is correct)
xy_attn_mask = torch.zeros(
(1, x_len + y_len), dtype=torch.bool, device=xy_pos.device
)
# pdb.set_trace()
### the key part of the caching scheme
# print(1111,xy_pos.shape,xy_attn_mask.shape,x_len,y_len)
for idx in tqdm(range(1500)):
xy_dec, _ = self.h((xy_pos, None), mask=xy_attn_mask, cache=cache)
logits = self.ar_predict_layer(
xy_dec[:, -1]
@ -305,8 +397,12 @@ class Text2SemanticDecoder(nn.Module):
if(idx==0):  ### the first step must not emit EOS, otherwise nothing gets generated
logits = logits[:, :-1]  ### drop the probability of the 1024 stop token
samples = sample(
logits[0], y, top_k=top_k, top_p=1.0, repetition_penalty=1.35
logits[0], y, top_k=top_k, top_p=top_p, repetition_penalty=1.35, temperature=temperature
)[0].unsqueeze(0)
# the newly generated semantic_ids are concatenated onto the previous y
# print(samples.shape)  # [1, 1]; the first 1 is the batch size
y = torch.concat([y, samples], dim=1)
if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
print("use early stop num:", early_stop_num)
stop = True
@ -315,13 +411,38 @@ class Text2SemanticDecoder(nn.Module):
# print(torch.argmax(logits, dim=-1)[0] == self.EOS, samples[0, 0] == self.EOS)
stop = True
if stop:
if prompts.shape[1] == y.shape[1]:
# if prompts.shape[1] == y.shape[1]:
# y = torch.concat([y, torch.zeros_like(samples)], dim=1)
# print("bad zero prediction")
if y.shape[1]==0:
y = torch.concat([y, torch.zeros_like(samples)], dim=1)
print("bad zero prediction")
print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
break
# the newly generated semantic_ids are concatenated onto the previous y
# print(samples.shape)  # [1, 1]; the first 1 is the batch size
y = torch.concat([y, samples], dim=1)
####################### update next step ###################################
cache["first_infer"] = 0
return y, idx
if cache["y_emb"] is not None:
y_emb = torch.cat(
[cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], dim = 1
)
cache["y_emb"] = y_emb
y_pos = self.ar_audio_position(y_emb)
xy_pos = y_pos[:, -1:]
else:
y_emb = self.ar_audio_embedding(y[:, -1:])
cache["y_emb"] = y_emb
y_pos = self.ar_audio_position(y_emb)
xy_pos = y_pos
y_len = y_pos.shape[1]
### rightmost column (this is wrong)
# xy_attn_mask=torch.ones((1, x_len+y_len), dtype=torch.bool,device=xy_pos.device)
# xy_attn_mask[:,-1]=False
### bottom row (this is correct)
xy_attn_mask = torch.zeros(
(1, x_len + y_len), dtype=torch.bool, device=xy_pos.device
)
if ref_free:
return y[:, :-1], 0
return y[:, :-1], idx-1
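For reference, the call above now forwards top_p and temperature to sample() alongside top_k. The following is a generic, self-contained sketch of that style of filtered sampling on toy logits; it is an illustration only, not the project's sample() implementation:
import torch
import torch.nn.functional as F

def sample_next(logits, top_k=20, top_p=0.9, temperature=1.0):
    logits = logits / max(temperature, 1e-5)            # temperature scaling
    v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
    logits[logits < v[..., -1, None]] = -float("inf")   # top-k filtering
    sorted_logits, sorted_idx = torch.sort(logits, descending=True)
    cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
    remove = cum_probs > top_p                           # nucleus (top-p) filtering
    remove[..., 1:] = remove[..., :-1].clone()           # always keep the top token
    remove[..., 0] = False
    logits[sorted_idx[remove]] = -float("inf")
    return torch.multinomial(F.softmax(logits, dim=-1), num_samples=1)

next_token = sample_next(torch.randn(1024))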

View File

@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/t2s_model.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
# reference: https://github.com/lifeiteng/vall-e
import torch
from tqdm import tqdm
@ -57,7 +58,7 @@ def logits_to_probs(
logits = logits / max(temperature, 1e-5)
if top_k is not None:
v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
v, _ = torch.topk(logits, top_k)
pivot = v.select(-1, -1).unsqueeze(-1)
logits = torch.where(logits < pivot, inf_tensor_value, logits)
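For reference, the ONNX-oriented variant drops the min(top_k, logits.size(-1)) clamp, presumably so the exported graph sees a constant k; the pivot/where pattern is unchanged. A tiny standalone illustration of that pattern (not part of this commit):
import torch

logits = torch.tensor([2.0, 0.5, 1.0, -1.0, 3.0])
top_k = 3
v, _ = torch.topk(logits, top_k)            # the k largest logits
pivot = v.select(-1, -1).unsqueeze(-1)      # smallest surviving logit
filtered = torch.where(logits < pivot, torch.full_like(logits, float("-inf")), logits)
print(filtered)  # tensor([2., -inf, 1., -inf, 3.])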

View File

@ -1,7 +1,8 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/utils.py\
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/utils.py
# reference: https://github.com/lifeiteng/vall-e
import torch
import torch.nn.functional as F
from typing import Tuple
def sequence_mask(length, max_length=None):
if max_length is None:
@ -114,6 +115,7 @@ def logits_to_probs(
top_p: Optional[int] = None,
repetition_penalty: float = 1.0,
):
if previous_tokens is not None:
previous_tokens = previous_tokens.squeeze()
# print(logits.shape,previous_tokens.shape)
# pdb.set_trace()
@ -158,3 +160,70 @@ def sample(
)
idx_next = multinomial_sample_one_no_sync(probs)
return idx_next, probs
def dpo_loss(policy_chosen_logps: torch.FloatTensor,
policy_rejected_logps: torch.FloatTensor,
reference_chosen_logps: torch.FloatTensor,
reference_rejected_logps: torch.FloatTensor,
beta: float,
reference_free: bool = False) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
pi_logratios = policy_chosen_logps - policy_rejected_logps
ref_logratios = reference_chosen_logps - reference_rejected_logps
if reference_free:
ref_logratios = 0
logits = pi_logratios - ref_logratios
losses = -F.logsigmoid(beta * logits)
chosen_rewards = beta * (policy_chosen_logps - reference_chosen_logps).detach()
rejected_rewards = beta * (policy_rejected_logps - reference_rejected_logps).detach()
return losses.mean(), chosen_rewards, rejected_rewards
def get_batch_logps(logits_target: torch.FloatTensor, logits_reject: torch.FloatTensor, labels_target: torch.LongTensor, labels_reject: torch.LongTensor, average_log_prob: bool = False) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
# dummy token; we'll ignore the losses on these tokens later
per_token_logps_target = torch.gather(logits_target.log_softmax(-1), dim=2, index=labels_target.unsqueeze(2)).squeeze(2)
per_token_logps_reject = torch.gather(logits_reject.log_softmax(-1), dim=2, index=labels_reject.unsqueeze(2)).squeeze(2)
return per_token_logps_target.sum(-1), per_token_logps_reject.sum(-1)
def make_reject_y(y_o, y_lens):
def repeat_P(y):
range_idx, _ = torch.randint(0, len(y), size=(2,)).sort()
pre = y[:range_idx[0]]
shf = y[range_idx[1]:]
range_text = y[range_idx[0]:range_idx[1]]
new_y = torch.cat([pre, range_text, range_text, shf])
return new_y
def lost_P(y):
range_idx, _ = torch.randint(0, len(y), size=(2,)).sort()
pre = y[:range_idx[0]]
shf = y[range_idx[1]:]
range_text = y[range_idx[0]:range_idx[1]]
new_y = torch.cat([pre, shf])
return new_y
bs = len(y_lens)
reject_y = []
reject_y_lens = []
for b in range(bs):
process_item_idx = torch.randint(0, 1, size=(1, ))[0]
if process_item_idx == 0:
new_y = repeat_P(y_o[b])
reject_y.append(new_y)
reject_y_lens.append(len(new_y))
elif process_item_idx==1:
new_y = lost_P(y_o[b])
reject_y.append(new_y)
reject_y_lens.append(len(new_y))
max_length = max(reject_y_lens)
for b in range(bs):
pad_length = max_length - reject_y_lens[b]
reject_y[b] = torch.cat([reject_y[b], torch.zeros(pad_length, dtype=y_o.dtype, device=y_o.device)], dim=0)
reject_y = torch.stack(reject_y, dim = 0)
reject_y_lens = torch.tensor(reject_y_lens, device=y_lens.device)
return reject_y, reject_y_lens
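For reference, a toy, shape-only sketch (not part of this commit) of how the helpers above fit together on the DPO path, assuming they are importable from AR.models.utils; all values are random and purely illustrative:
import torch
from AR.models.utils import dpo_loss, get_batch_logps

B, T, V = 2, 5, 10                          # batch, sequence length, vocab size
logits_target = torch.randn(B, T, V)
logits_reject = torch.randn(B, T, V)
labels_target = torch.randint(0, V, (B, T))
labels_reject = torch.randint(0, V, (B, T))

# per-sequence log-probabilities of the chosen / rejected continuations
chosen_logps, rejected_logps = get_batch_logps(
    logits_target, logits_reject, labels_target, labels_reject
)
# reference_free=True treats the reference log-ratio as 0, so only the
# policy's own preference margin between chosen and rejected is penalized
loss, chosen_rewards, rejected_rewards = dpo_loss(
    chosen_logps, rejected_logps, 0, 0, beta=0.2, reference_free=True
)
print(loss.shape, chosen_logps.shape)       # torch.Size([]) torch.Size([2])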

View File

@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/lr_schedulers.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/modules/lr_schedulers.py
# reference: https://github.com/lifeiteng/vall-e
import math
import torch

View File

@ -5,8 +5,8 @@ from torch.nn.functional import (
_none_or_dtype,
_in_projection_packed,
)
# import torch
from torch.nn import functional as F
import torch
# Tensor = torch.Tensor
# from typing import Callable, List, Optional, Tuple, Union
@ -448,9 +448,11 @@ def multi_head_attention_forward_patched(
k = k.view(bsz, num_heads, src_len, head_dim)
v = v.view(bsz, num_heads, src_len, head_dim)
# with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True):
attn_output = scaled_dot_product_attention(
q, k, v, attn_mask, dropout_p, is_causal
)
attn_output = (
attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
)
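For reference, the patch now calls PyTorch's fused scaled_dot_product_attention directly, without the commented-out sdp_kernel context. A minimal standalone call with toy shapes, followed by the same reshape used above (illustration only):
import torch
import torch.nn.functional as F

bsz, num_heads, tgt_len, src_len, head_dim = 2, 4, 7, 7, 16
embed_dim = num_heads * head_dim
q = torch.randn(bsz, num_heads, tgt_len, head_dim)
k = torch.randn(bsz, num_heads, src_len, head_dim)
v = torch.randn(bsz, num_heads, src_len, head_dim)

# attn_mask=None and is_causal=False: full bidirectional attention
attn_output = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
attn_output = attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
print(attn_output.shape)  # torch.Size([14, 64])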

View File

@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/text_processing/phonemizer.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/phonemizer.py
# reference: https://github.com/lifeiteng/vall-e
import itertools
import re
from typing import Dict

View File

@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/text_processing/symbols.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/symbols.py
# reference: https://github.com/lifeiteng/vall-e
PAD = "_"
PUNCTUATION = ';:,.!?¡¿—…"«»“” '
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

340
GPT_SoVITS/inference_gui.py Normal file
View File

@ -0,0 +1,340 @@
import os
import sys
from PyQt5.QtCore import QEvent
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QLineEdit, QPushButton, QTextEdit
from PyQt5.QtWidgets import QGridLayout, QVBoxLayout, QWidget, QFileDialog, QStatusBar, QComboBox
import soundfile as sf
from tools.i18n.i18n import I18nAuto
i18n = I18nAuto()
from GPT_SoVITS.inference_webui import change_gpt_weights, change_sovits_weights, get_tts_wav
class GPTSoVITSGUI(QMainWindow):
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
self.setWindowTitle('GPT-SoVITS GUI')
self.setGeometry(800, 450, 950, 850)
self.setStyleSheet("""
QWidget {
background-color: #a3d3b1;
}
QTabWidget::pane {
background-color: #a3d3b1;
}
QTabWidget::tab-bar {
alignment: left;
}
QTabBar::tab {
background: #8da4bf;
color: #ffffff;
padding: 8px;
}
QTabBar::tab:selected {
background: #2a3f54;
}
QLabel {
color: #000000;
}
QPushButton {
background-color: #4CAF50;
color: white;
padding: 8px;
border: 1px solid #4CAF50;
border-radius: 4px;
}
QPushButton:hover {
background-color: #45a049;
border: 1px solid #45a049;
box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.1);
}
""")
license_text = (
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. "
"如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
license_label = QLabel(license_text)
license_label.setWordWrap(True)
self.GPT_model_label = QLabel("选择GPT模型:")
self.GPT_model_input = QLineEdit()
self.GPT_model_input.setPlaceholderText("拖拽或选择文件")
self.GPT_model_input.setReadOnly(True)
self.GPT_model_button = QPushButton("选择GPT模型文件")
self.GPT_model_button.clicked.connect(self.select_GPT_model)
self.SoVITS_model_label = QLabel("选择SoVITS模型:")
self.SoVITS_model_input = QLineEdit()
self.SoVITS_model_input.setPlaceholderText("拖拽或选择文件")
self.SoVITS_model_input.setReadOnly(True)
self.SoVITS_model_button = QPushButton("选择SoVITS模型文件")
self.SoVITS_model_button.clicked.connect(self.select_SoVITS_model)
self.ref_audio_label = QLabel("上传参考音频:")
self.ref_audio_input = QLineEdit()
self.ref_audio_input.setPlaceholderText("拖拽或选择文件")
self.ref_audio_input.setReadOnly(True)
self.ref_audio_button = QPushButton("选择音频文件")
self.ref_audio_button.clicked.connect(self.select_ref_audio)
self.ref_text_label = QLabel("参考音频文本:")
self.ref_text_input = QLineEdit()
self.ref_text_input.setPlaceholderText("拖拽或选择文件")
self.ref_text_input.setReadOnly(True)
self.ref_text_button = QPushButton("上传文本")
self.ref_text_button.clicked.connect(self.upload_ref_text)
self.language_label = QLabel("参考音频语言:")
self.language_combobox = QComboBox()
self.language_combobox.addItems(["中文", "英文", "日文"])
self.target_text_label = QLabel("合成目标文本:")
self.target_text_input = QLineEdit()
self.target_text_input.setPlaceholderText("拖拽或选择文件")
self.target_text_input.setReadOnly(True)
self.target_text_button = QPushButton("上传文本")
self.target_text_button.clicked.connect(self.upload_target_text)
self.language_label_02 = QLabel("合成音频语言:")
self.language_combobox_02 = QComboBox()
self.language_combobox_02.addItems(["中文", "英文", "日文"])
self.output_label = QLabel("输出音频路径:")
self.output_input = QLineEdit()
self.output_input.setPlaceholderText("拖拽或选择文件")
self.output_input.setReadOnly(True)
self.output_button = QPushButton("选择文件夹")
self.output_button.clicked.connect(self.select_output_path)
self.output_text = QTextEdit()
self.output_text.setReadOnly(True)
self.add_drag_drop_events([
self.GPT_model_input,
self.SoVITS_model_input,
self.ref_audio_input,
self.ref_text_input,
self.target_text_input,
self.output_input,
])
self.synthesize_button = QPushButton("合成")
self.synthesize_button.clicked.connect(self.synthesize)
self.clear_output_button = QPushButton("清空输出")
self.clear_output_button.clicked.connect(self.clear_output)
self.status_bar = QStatusBar()
main_layout = QVBoxLayout()
input_layout = QGridLayout()
input_layout.setSpacing(10)
self.setLayout(input_layout)
input_layout.addWidget(license_label, 0, 0, 1, 3)
input_layout.addWidget(self.GPT_model_label, 1, 0)
input_layout.addWidget(self.GPT_model_input, 2, 0, 1, 2)
input_layout.addWidget(self.GPT_model_button, 2, 2)
input_layout.addWidget(self.SoVITS_model_label, 3, 0)
input_layout.addWidget(self.SoVITS_model_input, 4, 0, 1, 2)
input_layout.addWidget(self.SoVITS_model_button, 4, 2)
input_layout.addWidget(self.ref_audio_label, 5, 0)
input_layout.addWidget(self.ref_audio_input, 6, 0, 1, 2)
input_layout.addWidget(self.ref_audio_button, 6, 2)
input_layout.addWidget(self.language_label, 7, 0)
input_layout.addWidget(self.language_combobox, 8, 0, 1, 1)
input_layout.addWidget(self.ref_text_label, 9, 0)
input_layout.addWidget(self.ref_text_input, 10, 0, 1, 2)
input_layout.addWidget(self.ref_text_button, 10, 2)
input_layout.addWidget(self.language_label_02, 11, 0)
input_layout.addWidget(self.language_combobox_02, 12, 0, 1, 1)
input_layout.addWidget(self.target_text_label, 13, 0)
input_layout.addWidget(self.target_text_input, 14, 0, 1, 2)
input_layout.addWidget(self.target_text_button, 14, 2)
input_layout.addWidget(self.output_label, 15, 0)
input_layout.addWidget(self.output_input, 16, 0, 1, 2)
input_layout.addWidget(self.output_button, 16, 2)
main_layout.addLayout(input_layout)
output_layout = QVBoxLayout()
output_layout.addWidget(self.output_text)
main_layout.addLayout(output_layout)
main_layout.addWidget(self.synthesize_button)
main_layout.addWidget(self.clear_output_button)
main_layout.addWidget(self.status_bar)
self.central_widget = QWidget()
self.central_widget.setLayout(main_layout)
self.setCentralWidget(self.central_widget)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.acceptProposedAction()
def dropEvent(self, event):
if event.mimeData().hasUrls():
file_paths = [url.toLocalFile() for url in event.mimeData().urls()]
if len(file_paths) == 1:
self.update_ref_audio(file_paths[0])
self.update_input_paths(self.ref_audio_input, file_paths[0])
else:
self.update_ref_audio(", ".join(file_paths))
def add_drag_drop_events(self, widgets):
for widget in widgets:
widget.setAcceptDrops(True)
widget.installEventFilter(self)
def eventFilter(self, obj, event):
if event.type() == QEvent.DragEnter:
mime_data = event.mimeData()
if mime_data.hasUrls():
event.acceptProposedAction()
elif event.type() == QEvent.Drop:
mime_data = event.mimeData()
if mime_data.hasUrls():
file_paths = [url.toLocalFile() for url in mime_data.urls()]
if len(file_paths) == 1:
self.update_input_paths(obj, file_paths[0])
else:
self.update_input_paths(obj, ", ".join(file_paths))
event.acceptProposedAction()
return super().eventFilter(obj, event)
def select_GPT_model(self):
file_path, _ = QFileDialog.getOpenFileName(self, "选择GPT模型文件", "", "GPT Files (*.ckpt)")
if file_path:
self.GPT_model_input.setText(file_path)
def select_SoVITS_model(self):
file_path, _ = QFileDialog.getOpenFileName(self, "选择SoVITS模型文件", "", "SoVITS Files (*.pth)")
if file_path:
self.SoVITS_model_input.setText(file_path)
def select_ref_audio(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
options |= QFileDialog.ShowDirsOnly
file_dialog = QFileDialog()
file_dialog.setOptions(options)
file_dialog.setFileMode(QFileDialog.AnyFile)
file_dialog.setNameFilter("Audio Files (*.wav *.mp3)")
if file_dialog.exec_():
file_paths = file_dialog.selectedFiles()
if len(file_paths) == 1:
self.update_ref_audio(file_paths[0])
self.update_input_paths(self.ref_audio_input, file_paths[0])
else:
self.update_ref_audio(", ".join(file_paths))
def upload_ref_text(self):
file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)")
if file_path:
with open(file_path, 'r', encoding='utf-8') as file:
content = file.read()
self.ref_text_input.setText(content)
self.update_input_paths(self.ref_text_input, file_path)
def upload_target_text(self):
file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)")
if file_path:
with open(file_path, 'r', encoding='utf-8') as file:
content = file.read()
self.target_text_input.setText(content)
self.update_input_paths(self.target_text_input, file_path)
def select_output_path(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
options |= QFileDialog.ShowDirsOnly
folder_dialog = QFileDialog()
folder_dialog.setOptions(options)
folder_dialog.setFileMode(QFileDialog.Directory)
if folder_dialog.exec_():
folder_path = folder_dialog.selectedFiles()[0]
self.output_input.setText(folder_path)
def update_ref_audio(self, file_path):
self.ref_audio_input.setText(file_path)
def update_input_paths(self, input_box, file_path):
input_box.setText(file_path)
def clear_output(self):
self.output_text.clear()
def synthesize(self):
GPT_model_path = self.GPT_model_input.text()
SoVITS_model_path = self.SoVITS_model_input.text()
ref_audio_path = self.ref_audio_input.text()
language_combobox = self.language_combobox.currentText()
language_combobox = i18n(language_combobox)
ref_text = self.ref_text_input.text()
language_combobox_02 = self.language_combobox_02.currentText()
language_combobox_02 = i18n(language_combobox_02)
target_text = self.target_text_input.text()
output_path = self.output_input.text()
change_gpt_weights(gpt_path=GPT_model_path)
change_sovits_weights(sovits_path=SoVITS_model_path)
synthesis_result = get_tts_wav(ref_wav_path=ref_audio_path,
prompt_text=ref_text,
prompt_language=language_combobox,
text=target_text,
text_language=language_combobox_02)
result_list = list(synthesis_result)
if result_list:
last_sampling_rate, last_audio_data = result_list[-1]
output_wav_path = os.path.join(output_path, "output.wav")
sf.write(output_wav_path, last_audio_data, last_sampling_rate)
result = "Audio saved to " + output_wav_path
self.status_bar.showMessage("合成完成!输出路径:" + output_wav_path, 5000)
self.output_text.append("处理结果:\n" + result)
def main():
app = QApplication(sys.argv)
mainWin = GPTSoVITSGUI()
mainWin.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()

View File

@ -188,38 +188,27 @@ class MultiHeadAttention(nn.Module):
query = query.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3)
key = key.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3)
scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
if self.window_size is not None:
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(
query / math.sqrt(self.k_channels), key_relative_embeddings
)
rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
scores_local = self._relative_position_to_absolute_position(rel_logits)
scores = scores + scores_local
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e4)
if self.block_length is not None:
block_mask = (
torch.ones_like(scores)
.triu(-self.block_length)
.tril(self.block_length)
)
scores = scores.masked_fill(block_mask == 0, -1e4)
p_attn = F.softmax(scores, dim=-1)
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(p_attn)
value_relative_embeddings = self._get_relative_embeddings(
self.emb_rel_v, t_s
)
output = output + self._matmul_with_relative_values(
relative_weights, value_relative_embeddings
)
output = (
output.transpose(2, 3).contiguous().view(b, d, -1)
)
value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
output = (output.transpose(2, 3).contiguous().view(b, d, -1))
return output, p_attn
def _matmul_with_relative_values(self, x, y):
@ -243,16 +232,16 @@ class MultiHeadAttention(nn.Module):
def _get_relative_embeddings(self, relative_embeddings, length):
max_relative_position = 2 * self.window_size + 1
# Pad first before slice to avoid using cond ops.
pad_length = max(length - (self.window_size + 1), 0)
slice_start_position = max((self.window_size + 1) - length, 0)
pad_l = torch.zeros((1), dtype = torch.int64) + length - (self.window_size + 1)
pad_s = torch.zeros((1), dtype = torch.int64) + (self.window_size + 1) - length
pad_length = torch.max(pad_l, other=torch.zeros((1), dtype = torch.int64))
slice_start_position = torch.max(pad_s, other=torch.zeros((1), dtype = torch.int64))
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
padded_relative_embeddings = F.pad(
relative_embeddings,
commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
)
else:
padded_relative_embeddings = relative_embeddings
used_relative_embeddings = padded_relative_embeddings[
:, slice_start_position:slice_end_position
]

View File

@ -228,6 +228,7 @@ class TextEncoder(nn.Module):
)
y = self.ssl_proj(y * y_mask) * y_mask
y = self.encoder_ssl(y * y_mask, y_mask)
text_mask = torch.unsqueeze(
@ -892,6 +893,7 @@ class SynthesizerTrn(nn.Module):
if freeze_quantizer:
self.ssl_proj.requires_grad_(False)
self.quantizer.requires_grad_(False)
#self.quantizer.eval()
# self.enc_p.text_embedding.requires_grad_(False)
# self.enc_p.encoder_text.requires_grad_(False)
# self.enc_p.mrte.requires_grad_(False)
@ -958,6 +960,8 @@ class SynthesizerTrn(nn.Module):
@torch.no_grad()
def decode(self, codes, text, refer, noise_scale=0.5):
ge = None
if refer is not None:
refer_lengths = torch.LongTensor([refer.size(2)]).to(refer.device)
refer_mask = torch.unsqueeze(
commons.sequence_mask(refer_lengths, refer.size(2)), 1

View File

@ -896,9 +896,6 @@ class SynthesizerTrn(nn.Module):
refer_mask = torch.ones_like(refer[:1,:1,:])
ge = self.ref_enc(refer * refer_mask, refer_mask)
y_lengths = torch.LongTensor([codes.size(2) * 2]).to(codes.device)
text_lengths = torch.LongTensor([text.size(-1)]).to(text.device)
quantized = self.quantizer.decode(codes)
if self.semantic_frame_rate == "25hz":
dquantized = torch.cat([quantized, quantized]).permute(1, 2, 0)
@ -907,6 +904,7 @@ class SynthesizerTrn(nn.Module):
x, m_p, logs_p, y_mask = self.enc_p(
quantized, text, ge
)
z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p)
z = self.flow(z_p, y_mask, g=ge, reverse=True)

View File

@ -140,6 +140,7 @@ class T2SModel(nn.Module):
)
onnx_encoder_export_output.save(f"onnx/{project_name}/{project_name}_t2s_encoder.onnx")
return
torch.onnx.export(
self.onnx_encoder,
(ref_seq, text_seq, ref_bert, text_bert, ssl_content),
@ -147,16 +148,16 @@ class T2SModel(nn.Module):
input_names=["ref_seq", "text_seq", "ref_bert", "text_bert", "ssl_content"],
output_names=["x", "prompts"],
dynamic_axes={
"ref_seq": [1],
"text_seq": [1],
"ref_bert": [0],
"text_bert": [0],
"ssl_content": [2],
"ref_seq": {1 : "ref_length"},
"text_seq": {1 : "text_length"},
"ref_bert": {0 : "ref_length"},
"text_bert": {0 : "text_length"},
"ssl_content": {2 : "ssl_length"},
},
opset_version=16
)
x, prompts = self.onnx_encoder(ref_seq, text_seq, ref_bert, text_bert, ssl_content)
torch.exp
torch.onnx.export(
self.first_stage_decoder,
(x, prompts),
@ -164,10 +165,10 @@ class T2SModel(nn.Module):
input_names=["x", "prompts"],
output_names=["y", "k", "v", "y_emb", "x_example"],
dynamic_axes={
"x": [1],
"prompts": [1],
"x": {1 : "x_length"},
"prompts": {1 : "prompts_length"},
},
verbose=True,
verbose=False,
opset_version=16
)
y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts)
@ -179,13 +180,13 @@ class T2SModel(nn.Module):
input_names=["iy", "ik", "iv", "iy_emb", "ix_example"],
output_names=["y", "k", "v", "y_emb", "logits", "samples"],
dynamic_axes={
"iy": [1],
"ik": [1],
"iv": [1],
"iy_emb": [1],
"ix_example": [1],
"iy": {1 : "iy_length"},
"ik": {1 : "ik_length"},
"iv": {1 : "iv_length"},
"iy_emb": {1 : "iy_emb_length"},
"ix_example": {1 : "ix_example_length"},
},
verbose=True,
verbose=False,
opset_version=16
)
@ -224,9 +225,19 @@ class GptSoVits(nn.Module):
self.vits = vits
self.t2s = t2s
def forward(self, ref_seq, text_seq, ref_bert, text_bert, ref_audio, ssl_content):
def forward(self, ref_seq, text_seq, ref_bert, text_bert, ref_audio, ssl_content, debug=False):
pred_semantic = self.t2s(ref_seq, text_seq, ref_bert, text_bert, ssl_content)
return self.vits(text_seq, pred_semantic, ref_audio)
audio = self.vits(text_seq, pred_semantic, ref_audio)
if debug:
import onnxruntime
sess = onnxruntime.InferenceSession("onnx/koharu/koharu_vits.onnx", providers=["CPU"])
audio1 = sess.run(None, {
"text_seq" : text_seq.detach().cpu().numpy(),
"pred_semantic" : pred_semantic.detach().cpu().numpy(),
"ref_audio" : ref_audio.detach().cpu().numpy()
})
return audio, audio1
return audio
def export(self, ref_seq, text_seq, ref_bert, text_bert, ref_audio, ssl_content, project_name):
self.t2s.export(ref_seq, text_seq, ref_bert, text_bert, ssl_content, project_name)
@ -238,11 +249,12 @@ class GptSoVits(nn.Module):
input_names=["text_seq", "pred_semantic", "ref_audio"],
output_names=["audio"],
dynamic_axes={
"text_seq": [1],
"pred_semantic": [2],
"ref_audio": [1],
"text_seq": {1 : "text_length"},
"pred_semantic": {2 : "pred_length"},
"ref_audio": {1 : "audio_length"},
},
opset_version=17
opset_version=17,
verbose=False
)
@ -261,7 +273,7 @@ def export(vits_path, gpt_path, project_name):
gpt_sovits = GptSoVits(vits, gpt)
ssl = SSLModel()
ref_seq = torch.LongTensor([cleaned_text_to_sequence(["n", "i2", "h", "ao3", ",", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4"])])
text_seq = torch.LongTensor([cleaned_text_to_sequence(["w", "o3", "sh", "i4", "b", "ai2", "y", "e4"])])
text_seq = torch.LongTensor([cleaned_text_to_sequence(["w", "o3", "sh", "i4", "b", "ai2", "y", "e4", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4"])])
ref_bert = torch.randn((ref_seq.shape[1], 1024)).float()
text_bert = torch.randn((text_seq.shape[1], 1024)).float()
ref_audio = torch.randn((1, 48000 * 5)).float()
@ -276,9 +288,17 @@ def export(vits_path, gpt_path, project_name):
ssl_content = ssl(ref_audio_16k).float()
debug = False
if debug:
a, b = gpt_sovits(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content, debug=debug)
soundfile.write("out1.wav", a.cpu().detach().numpy(), vits.hps.data.sampling_rate)
soundfile.write("out2.wav", b[0], vits.hps.data.sampling_rate)
return
a = gpt_sovits(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content).detach().cpu().numpy()
# soundfile.write("out.wav", a, vits.hps.data.sampling_rate)
soundfile.write("out.wav", a, vits.hps.data.sampling_rate)
gpt_sovits.export(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content, project_name)
@ -306,9 +326,9 @@ if __name__ == "__main__":
except:
pass
gpt_path = "pt_model/koharu-e20.ckpt"
vits_path = "pt_model/koharu_e20_s4960.pth"
exp_path = "koharu"
gpt_path = "GPT_weights/nahida-e25.ckpt"
vits_path = "SoVITS_weights/nahida_e30_s3930.pth"
exp_path = "nahida"
export(vits_path, gpt_path, exp_path)
# soundfile.write("out.wav", a, vits.hps.data.sampling_rate)
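For reference, the export code now uses named dynamic_axes instead of positional lists, so the dynamic dimensions of the ONNX graph are self-describing. A minimal sketch of the same idiom on a toy module (module, file name, and axis names are illustrative, not part of this commit):
import torch

class Toy(torch.nn.Module):
    def forward(self, x):
        return x * 2

x = torch.randn(1, 8, 32)
torch.onnx.export(
    Toy(),
    (x,),
    "toy.onnx",
    input_names=["x"],
    output_names=["y"],
    # dict form: axis index -> human-readable name of the dynamic dimension
    dynamic_axes={"x": {1: "seq_len"}, "y": {1: "seq_len"}},
    opset_version=16,
)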

View File

@ -33,13 +33,13 @@ from time import time as ttime
import shutil
def my_save(fea, path): #####fix issue: torch.save doesn't support chinese path
dir = os.path.dirname(path)
name = os.path.basename(path)
tmp_path = "%s/%s%s.pth" % (dir, ttime(), i_part)
torch.save(fea, tmp_path)
shutil.move(tmp_path, "%s/%s" % (dir, name))
def my_save(fea,path):#####fix issue: torch.save doesn't support chinese path
dir=os.path.dirname(path)
name=os.path.basename(path)
# tmp_path="%s/%s%s.pth"%(dir,ttime(),i_part)
tmp_path="%s%s.pth"%(ttime(),i_part)
torch.save(fea,tmp_path)
shutil.move(tmp_path,"%s/%s"%(dir,name))
txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part)
@ -49,8 +49,8 @@ if os.path.exists(txt_path) == False:
os.makedirs(bert_dir, exist_ok=True)
if torch.cuda.is_available():
device = "cuda:0"
elif torch.backends.mps.is_available():
device = "mps"
# elif torch.backends.mps.is_available():
# device = "mps"
else:
device = "cpu"
tokenizer = AutoTokenizer.from_pretrained(bert_pretrained_dir)

View File

@ -35,7 +35,8 @@ import shutil
def my_save(fea,path):#####fix issue: torch.save doesn't support chinese path
dir=os.path.dirname(path)
name=os.path.basename(path)
tmp_path="%s/%s%s.pth"%(dir,ttime(),i_part)
# tmp_path="%s/%s%s.pth"%(dir,ttime(),i_part)
tmp_path="%s%s.pth"%(ttime(),i_part)
torch.save(fea,tmp_path)
shutil.move(tmp_path,"%s/%s"%(dir,name))
@ -49,8 +50,8 @@ maxx=0.95
alpha=0.5
if torch.cuda.is_available():
device = "cuda:0"
elif torch.backends.mps.is_available():
device = "mps"
# elif torch.backends.mps.is_available():
# device = "mps"
else:
device = "cpu"
model=cnhubert.get_model()
@ -61,14 +62,13 @@ else:
model = model.to(device)
nan_fails=[]
def name2go(wav_name):
def name2go(wav_name,wav_path):
hubert_path="%s/%s.pt"%(hubert_dir,wav_name)
if(os.path.exists(hubert_path)):return
wav_path="%s/%s"%(inp_wav_dir,wav_name)
tmp_audio = load_audio(wav_path, 32000)
tmp_max = np.abs(tmp_audio).max()
if tmp_max > 2.2:
print("%s-filtered" % (wav_name, tmp_max))
print("%s-filtered,%s" % (wav_name, tmp_max))
return
tmp_audio32 = (tmp_audio / tmp_max * (maxx * alpha*32768)) + ((1 - alpha)*32768) * tmp_audio
tmp_audio32b = (tmp_audio / tmp_max * (maxx * alpha*1145.14)) + ((1 - alpha)*1145.14) * tmp_audio
@ -99,8 +99,14 @@ for line in lines[int(i_part)::int(all_parts)]:
try:
# wav_name,text=line.split("\t")
wav_name, spk_name, language, text = line.split("|")
wav_name=os.path.basename(wav_name)
name2go(wav_name)
if (inp_wav_dir != "" and inp_wav_dir != None):
wav_name = os.path.basename(wav_name)
wav_path = "%s/%s"%(inp_wav_dir, wav_name)
else:
wav_path=wav_name
wav_name = os.path.basename(wav_name)
name2go(wav_name,wav_path)
except:
print(line,traceback.format_exc())

View File

@ -40,8 +40,8 @@ if os.path.exists(semantic_path) == False:
if torch.cuda.is_available():
device = "cuda"
elif torch.backends.mps.is_available():
device = "mps"
# elif torch.backends.mps.is_available():
# device = "mps"
else:
device = "cpu"
hps = utils.get_hparams_from_file(s2config_path)

View File

@ -1,11 +1,18 @@
import traceback
from collections import OrderedDict
from time import time as ttime
import shutil,os
import torch
from tools.i18n.i18n import I18nAuto
i18n = I18nAuto()
def my_save(fea,path):#####fix issue: torch.save doesn't support chinese path
dir=os.path.dirname(path)
name=os.path.basename(path)
tmp_path="%s.pth"%(ttime())
torch.save(fea,tmp_path)
shutil.move(tmp_path,"%s/%s"%(dir,name))
def savee(ckpt, name, epoch, steps, hps):
try:
@ -17,7 +24,8 @@ def savee(ckpt, name, epoch, steps, hps):
opt["weight"][key] = ckpt[key].half()
opt["config"] = hps
opt["info"] = "%sepoch_%siteration" % (epoch, steps)
torch.save(opt, "%s/%s.pth" % (hps.save_weight_dir, name))
# torch.save(opt, "%s/%s.pth" % (hps.save_weight_dir, name))
my_save(opt, "%s/%s.pth" % (hps.save_weight_dir, name))
return "Success."
except:
return traceback.format_exc()

View File

@ -24,6 +24,14 @@ torch.set_float32_matmul_precision("high")
from AR.utils import get_newest_ckpt
from collections import OrderedDict
from time import time as ttime
import shutil
def my_save(fea,path):#####fix issue: torch.save doesn't support chinese path
dir=os.path.dirname(path)
name=os.path.basename(path)
tmp_path="%s.pth"%(ttime())
torch.save(fea,tmp_path)
shutil.move(tmp_path,"%s/%s"%(dir,name))
class my_model_ckpt(ModelCheckpoint):
@ -44,9 +52,8 @@ class my_model_ckpt(ModelCheckpoint):
self.config = config
def on_train_epoch_end(self, trainer, pl_module):
if not self._should_skip_saving_checkpoint(
trainer
) and self._should_save_on_train_epoch_end(trainer):
# if not self._should_skip_saving_checkpoint(trainer) and self._should_save_on_train_epoch_end(trainer):
if self._should_save_on_train_epoch_end(trainer):
monitor_candidates = self._monitor_candidates(trainer)
if (
self._every_n_epochs >= 1
@ -71,7 +78,8 @@ class my_model_ckpt(ModelCheckpoint):
to_save_od["weight"][key] = dictt[key].half()
to_save_od["config"] = self.config
to_save_od["info"] = "GPT-e%s" % (trainer.current_epoch + 1)
torch.save(
# torch.save(
my_save(
to_save_od,
"%s/%s-e%s.ckpt"
% (
@ -107,18 +115,19 @@ def main(args):
dirpath=ckpt_dir,
)
logger = TensorBoardLogger(name=output_dir.stem, save_dir=output_dir)
os.environ["MASTER_ADDR"]="localhost"
trainer: Trainer = Trainer(
max_epochs=config["train"]["epochs"],
accelerator="gpu",
accelerator="gpu" if torch.cuda.is_available() else "cpu",
# val_check_interval=9999999999999999999999,  ### do not run validation
# check_val_every_n_epoch=None,
limit_val_batches=0,
devices=-1,
devices=-1 if torch.cuda.is_available() else 1,
benchmark=False,
fast_dev_run=False,
strategy = "auto" if torch.backends.mps.is_available() else DDPStrategy(
strategy = DDPStrategy(
process_group_backend="nccl" if platform.system() != "Windows" else "gloo"
), # mps does not support multi-node training
) if torch.cuda.is_available() else "auto",
precision=config["train"]["precision"],
logger=logger,
num_sanity_val_steps=0,

View File

@ -41,15 +41,15 @@ torch.set_float32_matmul_precision("medium") # 最低精度但最快(也就
# from config import pretrained_s2G,pretrained_s2D
global_step = 0
device = "cpu" # cuda以外的设备等mps优化后加入
def main():
"""Assume Single Node Multi GPUs Training Only"""
assert torch.cuda.is_available() or torch.backends.mps.is_available(), "Only GPU training is allowed."
if torch.backends.mps.is_available():
n_gpus = 1
else:
if torch.cuda.is_available():
n_gpus = torch.cuda.device_count()
else:
n_gpus = 1
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(randint(20000, 55555))
@ -73,7 +73,7 @@ def run(rank, n_gpus, hps):
writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval"))
dist.init_process_group(
backend = "gloo" if os.name == "nt" or torch.backends.mps.is_available() else "nccl",
backend = "gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl",
init_method="env://",
world_size=n_gpus,
rank=rank,
@ -137,9 +137,9 @@ def run(rank, n_gpus, hps):
hps.train.segment_size // hps.data.hop_length,
n_speakers=hps.data.n_speakers,
**hps.model,
).to("mps")
).to(device)
net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) if torch.cuda.is_available() else MultiPeriodDiscriminator(hps.model.use_spectral_norm).to("mps")
net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) if torch.cuda.is_available() else MultiPeriodDiscriminator(hps.model.use_spectral_norm).to(device)
for name, param in net_g.named_parameters():
if not param.requires_grad:
print(name, "not requires_grad")
@ -187,8 +187,8 @@ def run(rank, n_gpus, hps):
net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
else:
net_g = net_g.to("mps")
net_d = net_d.to("mps")
net_g = net_g.to(device)
net_d = net_d.to(device)
try:  # auto-resume if a checkpoint can be loaded
_, _, _, epoch_str = utils.load_checkpoint(
@ -320,12 +320,12 @@ def train_and_evaluate(
rank, non_blocking=True
)
else:
spec, spec_lengths = spec.to("mps"), spec_lengths.to("mps")
y, y_lengths = y.to("mps"), y_lengths.to("mps")
ssl = ssl.to("mps")
spec, spec_lengths = spec.to(device), spec_lengths.to(device)
y, y_lengths = y.to(device), y_lengths.to(device)
ssl = ssl.to(device)
ssl.requires_grad = False
# ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True)
text, text_lengths = text.to("mps"), text_lengths.to("mps")
text, text_lengths = text.to(device), text_lengths.to(device)
with autocast(enabled=hps.train.fp16_run):
(
@ -532,10 +532,10 @@ def evaluate(hps, generator, eval_loader, writer_eval):
ssl = ssl.cuda()
text, text_lengths = text.cuda(), text_lengths.cuda()
else:
spec, spec_lengths = spec.to("mps"), spec_lengths.to("mps")
y, y_lengths = y.to("mps"), y_lengths.to("mps")
ssl = ssl.to("mps")
text, text_lengths = text.to("mps"), text_lengths.to("mps")
spec, spec_lengths = spec.to(device), spec_lengths.to(device)
y, y_lengths = y.to(device), y_lengths.to(device)
ssl = ssl.to(device)
text, text_lengths = text.to(device), text_lengths.to(device)
for test in [0, 1]:
y_hat, mask, *_ = generator.module.infer(
ssl, spec, spec_lengths, text, text_lengths, test=test

View File

@ -5,12 +5,11 @@ import re
import cn2an
from pypinyin import lazy_pinyin, Style
import sys
sys.path.append("/data/docker/liujing04/gpt-vits/gpt-vits-master")
from text.symbols import punctuation
from text.tone_sandhi import ToneSandhi
from text.zh_normalization.text_normlization import TextNormalizer
normalizer = lambda x: cn2an.transform(x, "an2cn")
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
@ -35,6 +34,8 @@ rep_map = {
"$": ".",
"/": ",",
"": "-",
"~": "",
"":"",
}
tone_modifier = ToneSandhi()
@ -151,12 +152,13 @@ def _g2p(segments):
def text_normalize(text):
numbers = re.findall(r"\d+(?:\.?\d+)?", text)
for number in numbers:
text = text.replace(number, cn2an.an2cn(number), 1)
text = replace_punctuation(text)
return text
# https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/zh_normalization
tx = TextNormalizer()
sentences = tx.normalize(text)
dest_text = ""
for sentence in sentences:
dest_text += replace_punctuation(sentence)
return dest_text
if __name__ == "__main__":

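For reference, text_normalize now delegates number expansion and other non-standard-word handling to PaddleSpeech's TextNormalizer rather than a single cn2an pass. A small usage sketch (not part of this commit), assuming the TextNormalizer.normalize API imported above, which returns a list of normalized sentences; the input string is only an example:
from text.zh_normalization.text_normlization import TextNormalizer

tx = TextNormalizer()
# normalize() splits the text into sentences and expands numbers, units, dates, etc.
for sentence in tx.normalize("GPT-SoVITS第2版发布于2024年"):
    print(sentence)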
View File

@ -2,7 +2,7 @@ from text import chinese, japanese, cleaned_text_to_sequence, symbols, english
language_module_map = {"zh": chinese, "ja": japanese, "en": english}
special = [
("%", "zh", "SP"),
# ("%", "zh", "SP"),
("", "zh", "SP2"),
("^", "zh", "SP3"),
# ('@', 'zh', "SP4")#不搞鬼畜了,和第二版保持一致吧
@ -10,6 +10,9 @@ special = [
def clean_text(text, language):
if(language not in language_module_map):
language="en"
text=" "
for special_s, special_l, target_symbol in special:
if special_s in text and language == special_l:
return clean_special(text, language, special_s, target_symbol)
@ -37,13 +40,13 @@ def clean_special(text, language, special_s, target_symbol):
norm_text = language_module.text_normalize(text)
phones = language_module.g2p(norm_text)
new_ph = []
for ph in phones:
for ph in phones[0]:
assert ph in symbols
if ph == ",":
new_ph.append(target_symbol)
else:
new_ph.append(ph)
return new_ph
return new_ph, phones[1], norm_text
def text_to_sequence(text, language):

File diff suppressed because it is too large.

File diff suppressed because it is too large.

Binary file not shown.

View File

@ -0,0 +1 @@
CHATGPT CH AE1 T JH IY1 P IY1 T IY1

Binary file not shown.

View File

@ -1,16 +1,25 @@
import pickle
import os
import re
import wordsegment
from g2p_en import G2p
from string import punctuation
from text import symbols
import unicodedata
from builtins import str as unicode
from g2p_en.expand import normalize_numbers
from nltk.tokenize import TweetTokenizer
word_tokenize = TweetTokenizer().tokenize
from nltk import pos_tag
current_file_path = os.path.dirname(__file__)
CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep")
CACHE_PATH = os.path.join(current_file_path, "cmudict_cache.pickle")
_g2p = G2p()
CMU_DICT_FAST_PATH = os.path.join(current_file_path, "cmudict-fast.rep")
CMU_DICT_HOT_PATH = os.path.join(current_file_path, "engdict-hot.rep")
CACHE_PATH = os.path.join(current_file_path, "engdict_cache.pickle")
arpa = {
"AH0",
@ -88,7 +97,7 @@ arpa = {
def replace_phs(phs):
rep_map = {";": ",", ":": ",", "'": "-", '"': "-"}
rep_map = {"'": "-"}
phs_new = []
for ph in phs:
if ph in symbols:
@ -110,7 +119,7 @@ def read_dict():
if line_index >= start_line:
line = line.strip()
word_split = line.split(" ")
word = word_split[0]
word = word_split[0].lower()
syllable_split = word_split[1].split(" - ")
g2p_dict[word] = []
@ -124,6 +133,52 @@ def read_dict():
return g2p_dict
def read_dict_new():
g2p_dict = {}
with open(CMU_DICT_PATH) as f:
line = f.readline()
line_index = 1
while line:
if line_index >= 57:
line = line.strip()
word_split = line.split(" ")
word = word_split[0].lower()
g2p_dict[word] = [word_split[1].split(" ")]
line_index = line_index + 1
line = f.readline()
with open(CMU_DICT_FAST_PATH) as f:
line = f.readline()
line_index = 1
while line:
if line_index >= 0:
line = line.strip()
word_split = line.split(" ")
word = word_split[0].lower()
if word not in g2p_dict:
g2p_dict[word] = [word_split[1:]]
line_index = line_index + 1
line = f.readline()
with open(CMU_DICT_HOT_PATH) as f:
line = f.readline()
line_index = 1
while line:
if line_index >= 0:
line = line.strip()
word_split = line.split(" ")
word = word_split[0].lower()
# custom pronunciation entries directly override the dictionary
g2p_dict[word] = [word_split[1:]]
line_index = line_index + 1
line = f.readline()
return g2p_dict
def cache_dict(g2p_dict, file_path):
with open(file_path, "wb") as pickle_file:
pickle.dump(g2p_dict, pickle_file)
@ -134,46 +189,144 @@ def get_dict():
with open(CACHE_PATH, "rb") as pickle_file:
g2p_dict = pickle.load(pickle_file)
else:
g2p_dict = read_dict()
g2p_dict = read_dict_new()
cache_dict(g2p_dict, CACHE_PATH)
return g2p_dict
eng_dict = get_dict()
def text_normalize(text):
# todo: eng text normalize
return text.replace(";", ",")
# adapt Chinese punctuation and g2p_en punctuation
rep_map = {
"[;:]": ",",
'["]': "'",
"": ".",
"": "!",
"": "?",
}
for p, r in rep_map.items():
text = re.sub(p, r, text)
# text normalization taken from g2p_en
# with added handling for uppercase input
text = unicode(text)
text = normalize_numbers(text)
text = ''.join(char for char in unicodedata.normalize('NFD', text)
if unicodedata.category(char) != 'Mn') # Strip accents
text = re.sub("[^ A-Za-z'.,?!\-]", "", text)
text = re.sub(r"(?i)i\.e\.", "that is", text)
text = re.sub(r"(?i)e\.g\.", "for example", text)
return text
class en_G2p(G2p):
def __init__(self):
super().__init__()
# initialize the word segmenter
wordsegment.load()
# extend the outdated dictionary
self.cmu = get_dict()
# remove a few abbreviations whose pronunciations are wrong
for word in ["AE", "AI", "AR", "IOS", "HUD", "OS"]:
del self.cmu[word.lower()]
def __call__(self, text):
# tokenization
words = word_tokenize(text)
tokens = pos_tag(words) # tuples of (word, tag)
# steps
prons = []
for o_word, pos in tokens:
# mirror g2p_en's lower-casing logic
word = o_word.lower()
if re.search("[a-z]", word) is None:
pron = [word]
# handle single letters first
elif len(word) == 1:
# a standalone A needs a pronunciation fix; the original o_word is used to check capitalization
if o_word == "A":
pron = ['EY1']
else:
pron = self.cmu[word][0]
# g2p_en's original homograph handling
elif word in self.homograph2features: # Check homograph
pron1, pron2, pos1 = self.homograph2features[word]
if pos.startswith(pos1):
pron = pron1
else:
pron = pron2
else:
# recursive lookup / prediction
pron = self.qryword(word)
prons.extend(pron)
prons.extend([" "])
return prons[:-1]
def qryword(self, word):
# dictionary lookup, except for single letters
if len(word) > 1 and word in self.cmu: # lookup CMU dict
return self.cmu[word][0]
# OOV words of length <= 3 are spelled out letter by letter
if (len(word) <= 3):
phones = []
for w in word:
# fix the pronunciation of a standalone 'a'; uppercase cannot occur here
if w == "a":
phones.extend(['EY1'])
else:
phones.extend(self.cmu[w][0])
return phones
# try to split off a possessive 's
if re.match(r"^([a-z]+)('s)$", word):
phones = self.qryword(word[:-2])
# after the voiceless consonants P T K F TH HH, 's is pronounced ['S']
if phones[-1] in ['P', 'T', 'K', 'F', 'TH', 'HH']:
phones.extend(['S'])
# after the sibilants S Z SH ZH CH JH, 's is pronounced ['IH1', 'Z'] or ['AH0', 'Z']
elif phones[-1] in ['S', 'Z', 'SH', 'ZH', 'CH', 'JH']:
phones.extend(['AH0', 'Z'])
# after the voiced consonants B D G DH V M N NG L R W Y, 's is pronounced ['Z']
# AH0 AH1 AH2 EY0 EY1 EY2 AE0 AE1 AE2 EH0 EH1 EH2 OW0 OW1 OW2 UH0 UH1 UH2 IY0 IY1 IY2 AA0 AA1 AA2 AO0 AO1 AO2
# ER ER0 ER1 ER2 UW0 UW1 UW2 AY0 AY1 AY2 AW0 AW1 AW2 OY0 OY1 OY2 IH IH0 IH1 IH2: after these vowels, 's is also pronounced ['Z']
else:
phones.extend(['Z'])
return phones
# try word segmentation to handle compound words
comps = wordsegment.segment(word.lower())
# if it cannot be segmented, fall back to the model prediction
if len(comps)==1:
return self.predict(word)
# otherwise process each component recursively
return [phone for comp in comps for phone in self.qryword(comp)]
_g2p = en_G2p()
def g2p(text):
phones = []
words = re.split(r"([,;.\-\?\!\s+])", text)
for w in words:
if w.upper() in eng_dict:
phns = eng_dict[w.upper()]
for ph in phns:
phones += ph
else:
phone_list = list(filter(lambda p: p != " ", _g2p(w)))
for ph in phone_list:
if ph in arpa:
phones.append(ph)
else:
phones.append(ph)
# run g2p_en over the whole text and drop symbols that are not valid ARPA
phone_list = _g2p(text)
phones = [ph if ph != "<unk>" else "UNK" for ph in phone_list if ph not in [" ", "<pad>", "UW", "</s>", "<s>"]]
return replace_phs(phones)
if __name__ == "__main__":
# print(get_dict())
print(g2p("hello"))
print(g2p("In this; paper, we propose 1 DSPGAN, a GAN-based universal vocoder."))
# all_phones = set()
# for k, syllables in eng_dict.items():
# for group in syllables:
# for ph in group:
# all_phones.add(ph)
# print(all_phones)
print(g2p(text_normalize("e.g. I used openai's AI tool to draw a picture.")))
print(g2p(text_normalize("In this; paper, we propose 1 DSPGAN, a GAN-based universal vocoder.")))

View File

@ -4,8 +4,8 @@ import sys
import pyopenjtalk
from text import symbols
from text import symbols
# Regular expression matching Japanese without punctuation marks:
_japanese_characters = re.compile(
r"[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]"
@ -71,7 +71,7 @@ def symbols_to_japanese(text):
return text
def preprocess_jap(text):
def preprocess_jap(text, with_prosody=False):
"""Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html"""
text = symbols_to_japanese(text)
sentences = re.split(_japanese_marks, text)
@ -79,10 +79,15 @@ def preprocess_jap(text):
text = []
for i, sentence in enumerate(sentences):
if re.match(_japanese_characters, sentence):
if with_prosody:
text += pyopenjtalk_g2p_prosody(sentence)[1:-1]
else:
p = pyopenjtalk.g2p(sentence)
text += p.split(" ")
if i < len(marks):
if marks[i] == " ":# 防止意外的UNK
continue
text += [marks[i].replace(" ", "")]
return text
@ -91,16 +96,96 @@ def text_normalize(text):
# todo: jap text normalize
return text
# Copied from espnet https://github.com/espnet/espnet/blob/master/espnet2/text/phoneme_tokenizer.py
def pyopenjtalk_g2p_prosody(text, drop_unvoiced_vowels=True):
"""Extract phoneme + prosoody symbol sequence from input full-context labels.
def g2p(norm_text):
phones = preprocess_jap(norm_text)
The algorithm is based on `Prosodic features control by symbols as input of
sequence-to-sequence acoustic modeling for neural TTS`_ with some r9y9's tweaks.
Args:
text (str): Input text.
drop_unvoiced_vowels (bool): whether to drop unvoiced vowels.
Returns:
List[str]: List of phoneme + prosody symbols.
Examples:
>>> from espnet2.text.phoneme_tokenizer import pyopenjtalk_g2p_prosody
>>> pyopenjtalk_g2p_prosody("こんにちは。")
['^', 'k', 'o', '[', 'N', 'n', 'i', 'ch', 'i', 'w', 'a', '$']
.. _`Prosodic features control by symbols as input of sequence-to-sequence acoustic
modeling for neural TTS`: https://doi.org/10.1587/transinf.2020EDP7104
"""
labels = pyopenjtalk.make_label(pyopenjtalk.run_frontend(text))
N = len(labels)
phones = []
for n in range(N):
lab_curr = labels[n]
# current phoneme
p3 = re.search(r"\-(.*?)\+", lab_curr).group(1)
# deal unvoiced vowels as normal vowels
if drop_unvoiced_vowels and p3 in "AEIOU":
p3 = p3.lower()
# deal with sil at the beginning and the end of text
if p3 == "sil":
assert n == 0 or n == N - 1
if n == 0:
phones.append("^")
elif n == N - 1:
# check question form or not
e3 = _numeric_feature_by_regex(r"!(\d+)_", lab_curr)
if e3 == 0:
phones.append("$")
elif e3 == 1:
phones.append("?")
continue
elif p3 == "pau":
phones.append("_")
continue
else:
phones.append(p3)
# accent type and position info (forward or backward)
a1 = _numeric_feature_by_regex(r"/A:([0-9\-]+)\+", lab_curr)
a2 = _numeric_feature_by_regex(r"\+(\d+)\+", lab_curr)
a3 = _numeric_feature_by_regex(r"\+(\d+)/", lab_curr)
# number of mora in accent phrase
f1 = _numeric_feature_by_regex(r"/F:(\d+)_", lab_curr)
a2_next = _numeric_feature_by_regex(r"\+(\d+)\+", labels[n + 1])
# accent phrase border
if a3 == 1 and a2_next == 1 and p3 in "aeiouAEIOUNcl":
phones.append("#")
# pitch falling
elif a1 == 0 and a2_next == a2 + 1 and a2 != f1:
phones.append("]")
# pitch rising
elif a2 == 1 and a2_next == 2:
phones.append("[")
return phones
# Copied from espnet https://github.com/espnet/espnet/blob/master/espnet2/text/phoneme_tokenizer.py
def _numeric_feature_by_regex(regex, s):
match = re.search(regex, s)
if match is None:
return -50
return int(match.group(1))
def g2p(norm_text, with_prosody=False):
phones = preprocess_jap(norm_text, with_prosody)
phones = [post_replace_ph(i) for i in phones]
# todo: implement tones and word2ph
return phones
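# Rough usage sketch (assumed behaviour, for illustration only):
#   g2p(text_normalize("こんにちは"))                     -> plain pyopenjtalk phoneme list
#   g2p(text_normalize("こんにちは"), with_prosody=True)  -> phonemes interleaved with prosody
#       marks such as "[", "]", "#", "_" (the outer "^" / "$" markers are trimmed in preprocess_jap)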
if __name__ == "__main__":
for line in open("../../../Downloads/transcript_utf8.txt").readlines():
text = line.split(":")[1]
phones = g2p(text)
phones = g2p("こんにちは, hello, AKITOです,よろしくお願いしますね!")
print(phones)

View File

@ -315,6 +315,10 @@ ja_symbols = [
"w",
"y",
"z",
# "[", #上升调型
# "]", #下降调型
# "$", #结束符
# "^", #开始符
]
arpa = {

View File

@ -455,6 +455,35 @@ class ToneSandhi:
"电子",
"人人",
"虎虎",
"幺幺",
"干嘛",
"学子",
"哈哈",
"数数",
"袅袅",
"局地",
"以下",
"娃哈哈",
"花花草草",
"留得",
"耕地",
"想想",
"熙熙",
"攘攘",
"卵子",
"死死",
"冉冉",
"恳恳",
"佼佼",
"吵吵",
"打打",
"考考",
"整整",
"莘莘",
"落地",
"算子",
"家家户户",
"青青",
}
self.punc = ":,;。?!“”‘’':,;.?!"
@ -643,6 +672,7 @@ class ToneSandhi:
and i + 1 < len(seg)
and seg[i - 1][0] == seg[i + 1][0]
and seg[i - 1][1] == "v"
and seg[i + 1][1] == "v"
):
new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0]
else:

View File

@ -0,0 +1,16 @@
## Supported NSW (Non-Standard-Word) Normalization
|NSW type|raw|normalized|
|:--|:-|:-|
|serial number|电影中梁朝伟扮演的陈永仁的编号27149|电影中梁朝伟扮演的陈永仁的编号二七一四九|
|cardinal|这块黄金重达324.75克<br>我们班的最高总分为583分|这块黄金重达三百二十四点七五克<br>我们班的最高总分为五百八十三分|
|numeric range |12\~23<br>-1.5\~2|十二到二十三<br>负一点五到二|
|date|她出生于86年8月18日,她弟弟出生于1995年3月1日|她出生于八六年八月十八日, 她弟弟出生于一九九五年三月一日|
|time|等会请在12:05请通知我|等会请在十二点零五分请通知我|
|temperature|今天的最低气温达到-10°C|今天的最低气温达到零下十度|
|fraction|现场有7/12的观众投出了赞成票|现场有十二分之七的观众投出了赞成票|
|percentage|明天有62%的概率降雨|明天有百分之六十二的概率降雨|
|money|随便来几个价格:12块5,34.5元,20.1万|随便来几个价格:十二块五,三十四点五元,二十点一万|
|telephone|这是固话0421-33441122<br>这是手机+86 18544139121|这是固话零四二一三三四四一一二二<br>这是手机八六一八五四四一三九一二一|
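The normalizer behind this table can be driven directly from Python. A minimal sketch, assuming the `text/zh_normalization` package layout shown in this diff (module name spelled `text_normlization`, as in the source):

```python
from text.zh_normalization.text_normlization import TextNormalizer

tx = TextNormalizer()
print(tx.normalize("这块黄金重达324.75克"))   # ['这块黄金重达三百二十四点七五克']
print(tx.normalize("等会请在12:05请通知我"))  # ['等会请在十二点零五分请通知我']
```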
## References
[Pull requests #658 of DeepSpeech](https://github.com/PaddlePaddle/DeepSpeech/pull/658/files)

View File

@ -0,0 +1,14 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from text.zh_normalization.text_normlization import *

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,134 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .num import DIGITS
from .num import num2str
from .num import verbalize_cardinal
from .num import verbalize_digit
def _time_num2str(num_string: str) -> str:
"""A special case for verbalizing number in time."""
result = num2str(num_string.lstrip('0'))
if num_string.startswith('0'):
result = DIGITS['0'] + result
return result
# 时刻表达式
RE_TIME = re.compile(r'([0-1]?[0-9]|2[0-3])'
r':([0-5][0-9])'
r'(:([0-5][0-9]))?')
# 时间范围如8:30-12:30
RE_TIME_RANGE = re.compile(r'([0-1]?[0-9]|2[0-3])'
r':([0-5][0-9])'
r'(:([0-5][0-9]))?'
r'(~|-)'
r'([0-1]?[0-9]|2[0-3])'
r':([0-5][0-9])'
r'(:([0-5][0-9]))?')
def replace_time(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
is_range = len(match.groups()) > 5
hour = match.group(1)
minute = match.group(2)
second = match.group(4)
if is_range:
hour_2 = match.group(6)
minute_2 = match.group(7)
second_2 = match.group(9)
result = f"{num2str(hour)}"
if minute.lstrip('0'):
if int(minute) == 30:
result += ""
else:
result += f"{_time_num2str(minute)}"
if second and second.lstrip('0'):
result += f"{_time_num2str(second)}"
if is_range:
result += ""
result += f"{num2str(hour_2)}"
if minute_2.lstrip('0'):
if int(minute) == 30:
result += ""
else:
result += f"{_time_num2str(minute_2)}"
if second_2 and second_2.lstrip('0'):
result += f"{_time_num2str(second_2)}"
return result
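# Illustrative example (output matches the NSW table in this commit):
#   RE_TIME.sub(replace_time, "12:05") -> "十二点零五分"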
RE_DATE = re.compile(r'(\d{4}|\d{2})年'
r'((0?[1-9]|1[0-2])月)?'
r'(((0?[1-9])|((1|2)[0-9])|30|31)([日号]))?')
def replace_date(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
year = match.group(1)
month = match.group(3)
day = match.group(5)
result = ""
if year:
result += f"{verbalize_digit(year)}年"
if month:
result += f"{verbalize_cardinal(month)}月"
if day:
result += f"{verbalize_cardinal(day)}{match.group(9)}"
return result
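# Illustrative example (output matches the NSW table in this commit):
#   RE_DATE.sub(replace_date, "1995年3月1日") -> "一九九五年三月一日"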
# 用 / 或者 - 分隔的 YY/MM/DD 或者 YY-MM-DD 日期
RE_DATE2 = re.compile(
r'(\d{4})([- /.])(0[1-9]|1[012])\2(0[1-9]|[12][0-9]|3[01])')
def replace_date2(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
year = match.group(1)
month = match.group(3)
day = match.group(4)
result = ""
if year:
result += f"{verbalize_digit(year)}年"
if month:
result += f"{verbalize_cardinal(month)}月"
if day:
result += f"{verbalize_cardinal(day)}日"
return result

View File

@ -0,0 +1,62 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import string
from pypinyin.constants import SUPPORT_UCS4
# 全角半角转换
# 英文字符全角 -> 半角映射表 (num: 52)
F2H_ASCII_LETTERS = {
ord(char) + 65248: ord(char)
for char in string.ascii_letters
}
# 英文字符半角 -> 全角映射表
H2F_ASCII_LETTERS = {value: key for key, value in F2H_ASCII_LETTERS.items()}
# 数字字符全角 -> 半角映射表 (num: 10)
F2H_DIGITS = {ord(char) + 65248: ord(char) for char in string.digits}
# 数字字符半角 -> 全角映射表
H2F_DIGITS = {value: key for key, value in F2H_DIGITS.items()}
# 标点符号全角 -> 半角映射表 (num: 32)
F2H_PUNCTUATIONS = {ord(char) + 65248: ord(char) for char in string.punctuation}
# 标点符号半角 -> 全角映射表
H2F_PUNCTUATIONS = {value: key for key, value in F2H_PUNCTUATIONS.items()}
# 空格 (num: 1)
F2H_SPACE = {'\u3000': ' '}
H2F_SPACE = {' ': '\u3000'}
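# Illustrative use of the maps above with str.translate (assumed input, for illustration only):
#   "Ａ１２".translate(F2H_ASCII_LETTERS).translate(F2H_DIGITS) -> "A12"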
# 非"有拼音的汉字"的字符串可用于NSW提取
if SUPPORT_UCS4:
RE_NSW = re.compile(r'(?:[^'
r'\u3007' #
r'\u3400-\u4dbf' # CJK扩展A:[3400-4DBF]
r'\u4e00-\u9fff' # CJK基本:[4E00-9FFF]
r'\uf900-\ufaff' # CJK兼容:[F900-FAFF]
r'\U00020000-\U0002A6DF' # CJK扩展B:[20000-2A6DF]
r'\U0002A703-\U0002B73F' # CJK扩展C:[2A700-2B73F]
r'\U0002B740-\U0002B81D' # CJK扩展D:[2B740-2B81D]
r'\U0002F80A-\U0002FA1F' # CJK兼容扩展:[2F800-2FA1F]
r'])+')
else:
RE_NSW = re.compile( # pragma: no cover
r'(?:[^'
r'\u3007' #
r'\u3400-\u4dbf' # CJK扩展A:[3400-4DBF]
r'\u4e00-\u9fff' # CJK基本:[4E00-9FFF]
r'\uf900-\ufaff' # CJK兼容:[F900-FAFF]
r'])+')

View File

@ -0,0 +1,253 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Rules to verbalize numbers into Chinese characters.
https://zh.wikipedia.org/wiki/中文数字#現代中文
"""
import re
from collections import OrderedDict
from typing import List
DIGITS = {str(i): tran for i, tran in enumerate('零一二三四五六七八九')}
UNITS = OrderedDict({
1: '十',
2: '百',
3: '千',
4: '万',
8: '亿',
})
COM_QUANTIFIERS = '(封|艘|把|目|套|段|人|所|朵|匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|毫|厘|(公)分|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|小时|旬|纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块|元|(亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|美|)元|(亿|千万|百万|万|千|百|十|)吨|(亿|千万|百万|万|千|百|)块|角|毛|分)'
# 分数表达式
RE_FRAC = re.compile(r'(-?)(\d+)/(\d+)')
def replace_frac(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
sign = match.group(1)
nominator = match.group(2)
denominator = match.group(3)
sign: str = "" if sign else ""
nominator: str = num2str(nominator)
denominator: str = num2str(denominator)
result = f"{sign}{denominator}分之{nominator}"
return result
# 百分数表达式
RE_PERCENTAGE = re.compile(r'(-?)(\d+(\.\d+)?)%')
def replace_percentage(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
sign = match.group(1)
percent = match.group(2)
sign: str = "" if sign else ""
percent: str = num2str(percent)
result = f"{sign}百分之{percent}"
return result
# 整数表达式
# 带负号的整数 -10
RE_INTEGER = re.compile(r'(-)' r'(\d+)')
def replace_negative_num(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
sign = match.group(1)
number = match.group(2)
sign: str = "" if sign else ""
number: str = num2str(number)
result = f"{sign}{number}"
return result
# 编号-无符号整形
# 00078
RE_DEFAULT_NUM = re.compile(r'\d{3}\d*')
def replace_default_num(match):
"""
Args:
match (re.Match)
Returns:
str
"""
number = match.group(0)
return verbalize_digit(number, alt_one=True)
# 数字表达式
# 纯小数
RE_DECIMAL_NUM = re.compile(r'(-?)((\d+)(\.\d+))' r'|(\.(\d+))')
# 正整数 + 量词
RE_POSITIVE_QUANTIFIERS = re.compile(r"(\d+)([多余几\+])?" + COM_QUANTIFIERS)
RE_NUMBER = re.compile(r'(-?)((\d+)(\.\d+)?)' r'|(\.(\d+))')
def replace_positive_quantifier(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
number = match.group(1)
match_2 = match.group(2)
if match_2 == "+":
match_2 = ""
match_2: str = match_2 if match_2 else ""
quantifiers: str = match.group(3)
number: str = num2str(number)
result = f"{number}{match_2}{quantifiers}"
return result
def replace_number(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
sign = match.group(1)
number = match.group(2)
pure_decimal = match.group(5)
if pure_decimal:
result = num2str(pure_decimal)
else:
sign: str = "" if sign else ""
number: str = num2str(number)
result = f"{sign}{number}"
return result
# 范围表达式
# match.group(1) and match.group(8) are copy from RE_NUMBER
RE_RANGE = re.compile(
r'((-?)((\d+)(\.\d+)?)|(\.(\d+)))[-~]((-?)((\d+)(\.\d+)?)|(\.(\d+)))')
def replace_range(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
first, second = match.group(1), match.group(8)
first = RE_NUMBER.sub(replace_number, first)
second = RE_NUMBER.sub(replace_number, second)
result = f"{first}{second}"
return result
# ~至表达式
RE_TO_RANGE = re.compile(
r'((-?)((\d+)(\.\d+)?)|(\.(\d+)))(%|°C|℃|度|摄氏度|cm2|cm²|cm3|cm³|cm|db|ds|kg|km|m2|m²|m³|m3|ml|m|mm|s)[~]((-?)((\d+)(\.\d+)?)|(\.(\d+)))(%|°C|℃|度|摄氏度|cm2|cm²|cm3|cm³|cm|db|ds|kg|km|m2|m²|m³|m3|ml|m|mm|s)')
def replace_to_range(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
result = match.group(0).replace('~', '至')
return result
def _get_value(value_string: str, use_zero: bool=True) -> List[str]:
stripped = value_string.lstrip('0')
if len(stripped) == 0:
return []
elif len(stripped) == 1:
if use_zero and len(stripped) < len(value_string):
return [DIGITS['0'], DIGITS[stripped]]
else:
return [DIGITS[stripped]]
else:
largest_unit = next(
power for power in reversed(UNITS.keys()) if power < len(stripped))
first_part = value_string[:-largest_unit]
second_part = value_string[-largest_unit:]
return _get_value(first_part) + [UNITS[largest_unit]] + _get_value(
second_part)
def verbalize_cardinal(value_string: str) -> str:
if not value_string:
return ''
# 000 -> '零' , 0 -> '零'
value_string = value_string.lstrip('0')
if len(value_string) == 0:
return DIGITS['0']
result_symbols = _get_value(value_string)
# verbalized number starting with '一十*' is abbreviated as `十*`
if len(result_symbols) >= 2 and result_symbols[0] == DIGITS[
'1'] and result_symbols[1] == UNITS[1]:
result_symbols = result_symbols[1:]
return ''.join(result_symbols)
def verbalize_digit(value_string: str, alt_one=False) -> str:
result_symbols = [DIGITS[digit] for digit in value_string]
result = ''.join(result_symbols)
if alt_one:
result = result.replace("", "")
return result
def num2str(value_string: str) -> str:
integer_decimal = value_string.split('.')
if len(integer_decimal) == 1:
integer = integer_decimal[0]
decimal = ''
elif len(integer_decimal) == 2:
integer, decimal = integer_decimal
else:
raise ValueError(
f"The value string: '${value_string}' has more than one point in it."
)
result = verbalize_cardinal(integer)
decimal = decimal.rstrip('0')
if decimal:
# '.22' is verbalized as '零点二二'
# '3.20' is verbalized as '三点二'
result = result if result else "零"
result += '点' + verbalize_digit(decimal)
return result
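# Illustrative examples (values follow from the rules above):
#   num2str("324.75")          -> "三百二十四点七五"
#   verbalize_cardinal("583")  -> "五百八十三"
#   verbalize_digit("1995")    -> "一九九五"  (alt_one=True additionally reads "一" as "幺")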

View File

@ -0,0 +1,63 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .num import verbalize_digit
# 规范化固话/手机号码
# 手机
# http://www.jihaoba.com/news/show/13680
# 移动139、138、137、136、135、134、159、158、157、150、151、152、188、187、182、183、184、178、198
# 联通130、131、132、156、155、186、185、176
# 电信133、153、189、180、181、177
RE_MOBILE_PHONE = re.compile(
r"(?<!\d)((\+?86 ?)?1([38]\d|5[0-35-9]|7[678]|9[89])\d{8})(?!\d)")
RE_TELEPHONE = re.compile(
r"(?<!\d)((0(10|2[1-3]|[3-9]\d{2})-?)?[1-9]\d{6,7})(?!\d)")
# 全国统一的号码400开头
RE_NATIONAL_UNIFORM_NUMBER = re.compile(r"(400)(-)?\d{3}(-)?\d{4}")
def phone2str(phone_string: str, mobile=True) -> str:
if mobile:
sp_parts = phone_string.strip('+').split()
result = ''.join(
[verbalize_digit(part, alt_one=True) for part in sp_parts])
return result
else:
sil_parts = phone_string.split('-')
result = ''.join(
[verbalize_digit(part, alt_one=True) for part in sil_parts])
return result
def replace_phone(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
return phone2str(match.group(0), mobile=False)
def replace_mobile(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
return phone2str(match.group(0))
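# Illustrative example (note that alt_one=True reads "一" as "幺"):
#   phone2str("0421-33441122", mobile=False) -> "零四二幺三三四四幺幺二二"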

View File

@ -0,0 +1,63 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .num import num2str
# 温度表达式,温度会影响负号的读法
# -3°C 零下三度
RE_TEMPERATURE = re.compile(r'(-?)(\d+(\.\d+)?)(°C|℃|度|摄氏度)')
measure_dict = {
"cm2": "平方厘米",
"cm²": "平方厘米",
"cm3": "立方厘米",
"cm³": "立方厘米",
"cm": "厘米",
"db": "分贝",
"ds": "毫秒",
"kg": "千克",
"km": "千米",
"m2": "平方米",
"": "平方米",
"": "立方米",
"m3": "立方米",
"ml": "毫升",
"m": "",
"mm": "毫米",
"s": ""
}
def replace_temperature(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
sign = match.group(1)
temperature = match.group(2)
unit = match.group(3)
sign: str = "零下" if sign else ""
temperature: str = num2str(temperature)
unit: str = "摄氏度" if unit == "摄氏度" else ""
result = f"{sign}{temperature}{unit}"
return result
def replace_measure(sentence) -> str:
for q_notation in measure_dict:
if q_notation in sentence:
sentence = sentence.replace(q_notation, measure_dict[q_notation])
return sentence
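# Illustrative examples (the temperature output matches the NSW table in this commit):
#   RE_TEMPERATURE.sub(replace_temperature, "-10°C") -> "零下十度"
#   replace_measure("3ml")                            -> "3毫升"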

View File

@ -0,0 +1,158 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import List
from .char_convert import tranditional_to_simplified
from .chronology import RE_DATE
from .chronology import RE_DATE2
from .chronology import RE_TIME
from .chronology import RE_TIME_RANGE
from .chronology import replace_date
from .chronology import replace_date2
from .chronology import replace_time
from .constants import F2H_ASCII_LETTERS
from .constants import F2H_DIGITS
from .constants import F2H_SPACE
from .num import RE_DECIMAL_NUM
from .num import RE_DEFAULT_NUM
from .num import RE_FRAC
from .num import RE_INTEGER
from .num import RE_NUMBER
from .num import RE_PERCENTAGE
from .num import RE_POSITIVE_QUANTIFIERS
from .num import RE_RANGE
from .num import RE_TO_RANGE
from .num import replace_default_num
from .num import replace_frac
from .num import replace_negative_num
from .num import replace_number
from .num import replace_percentage
from .num import replace_positive_quantifier
from .num import replace_range
from .num import replace_to_range
from .phonecode import RE_MOBILE_PHONE
from .phonecode import RE_NATIONAL_UNIFORM_NUMBER
from .phonecode import RE_TELEPHONE
from .phonecode import replace_mobile
from .phonecode import replace_phone
from .quantifier import RE_TEMPERATURE
from .quantifier import replace_measure
from .quantifier import replace_temperature
class TextNormalizer():
def __init__(self):
self.SENTENCE_SPLITOR = re.compile(r'([:、,;。?!,;?!][”’]?)')
def _split(self, text: str, lang="zh") -> List[str]:
"""Split long text into sentences with sentence-splitting punctuations.
Args:
text (str): The input text.
Returns:
List[str]: Sentences.
"""
# Only for pure Chinese here
if lang == "zh":
text = text.replace(" ", "")
# 过滤掉特殊字符
text = re.sub(r'[——《》【】<=>{}()#&@“”^_|\\]', '', text)
text = self.SENTENCE_SPLITOR.sub(r'\1\n', text)
text = text.strip()
sentences = [sentence.strip() for sentence in re.split(r'\n+', text)]
return sentences
def _post_replace(self, sentence: str) -> str:
sentence = sentence.replace('/', '每')
# sentence = sentence.replace('~', '至')
# sentence = sentence.replace('~', '至')
sentence = sentence.replace('①', '一')
sentence = sentence.replace('②', '二')
sentence = sentence.replace('③', '三')
sentence = sentence.replace('④', '四')
sentence = sentence.replace('⑤', '五')
sentence = sentence.replace('⑥', '六')
sentence = sentence.replace('⑦', '七')
sentence = sentence.replace('⑧', '八')
sentence = sentence.replace('⑨', '九')
sentence = sentence.replace('⑩', '十')
sentence = sentence.replace('α', '阿尔法')
sentence = sentence.replace('β', '贝塔')
sentence = sentence.replace('γ', '伽玛').replace('Γ', '伽玛')
sentence = sentence.replace('δ', '德尔塔').replace('Δ', '德尔塔')
sentence = sentence.replace('ε', '艾普西龙')
sentence = sentence.replace('ζ', '捷塔')
sentence = sentence.replace('η', '依塔')
sentence = sentence.replace('θ', '西塔').replace('Θ', '西塔')
sentence = sentence.replace('ι', '艾欧塔')
sentence = sentence.replace('κ', '喀帕')
sentence = sentence.replace('λ', '拉姆达').replace('Λ', '拉姆达')
sentence = sentence.replace('μ', '缪')
sentence = sentence.replace('ν', '拗')
sentence = sentence.replace('ξ', '克西').replace('Ξ', '克西')
sentence = sentence.replace('ο', '欧米克伦')
sentence = sentence.replace('π', '派').replace('Π', '派')
sentence = sentence.replace('ρ', '肉')
sentence = sentence.replace('ς', '西格玛').replace('Σ', '西格玛').replace(
'σ', '西格玛')
sentence = sentence.replace('τ', '套')
sentence = sentence.replace('υ', '宇普西龙')
sentence = sentence.replace('φ', '服艾').replace('Φ', '服艾')
sentence = sentence.replace('χ', '器')
sentence = sentence.replace('ψ', '普赛').replace('Ψ', '普赛')
sentence = sentence.replace('ω', '欧米伽').replace('Ω', '欧米伽')
# re filter special characters, have one more character "-" than line 68
sentence = re.sub(r'[-——《》【】<=>{}()#&@“”^_|\\]', '', sentence)
return sentence
def normalize_sentence(self, sentence: str) -> str:
# basic character conversions
sentence = tranditional_to_simplified(sentence)
sentence = sentence.translate(F2H_ASCII_LETTERS).translate(
F2H_DIGITS).translate(F2H_SPACE)
# number related NSW verbalization
sentence = RE_DATE.sub(replace_date, sentence)
sentence = RE_DATE2.sub(replace_date2, sentence)
# range first
sentence = RE_TIME_RANGE.sub(replace_time, sentence)
sentence = RE_TIME.sub(replace_time, sentence)
# 处理~波浪号作为至的替换
sentence = RE_TO_RANGE.sub(replace_to_range, sentence)
sentence = RE_TEMPERATURE.sub(replace_temperature, sentence)
sentence = replace_measure(sentence)
sentence = RE_FRAC.sub(replace_frac, sentence)
sentence = RE_PERCENTAGE.sub(replace_percentage, sentence)
sentence = RE_MOBILE_PHONE.sub(replace_mobile, sentence)
sentence = RE_TELEPHONE.sub(replace_phone, sentence)
sentence = RE_NATIONAL_UNIFORM_NUMBER.sub(replace_phone, sentence)
sentence = RE_RANGE.sub(replace_range, sentence)
sentence = RE_INTEGER.sub(replace_negative_num, sentence)
sentence = RE_DECIMAL_NUM.sub(replace_number, sentence)
sentence = RE_POSITIVE_QUANTIFIERS.sub(replace_positive_quantifier,
sentence)
sentence = RE_DEFAULT_NUM.sub(replace_default_num, sentence)
sentence = RE_NUMBER.sub(replace_number, sentence)
sentence = self._post_replace(sentence)
return sentence
def normalize(self, text: str) -> List[str]:
sentences = self._split(text)
sentences = [self.normalize_sentence(sent) for sent in sentences]
return sentences
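# Illustrative end-to-end sketch (input taken from the NSW table in this commit; the exact
# sentence splitting shown here is an assumption for illustration):
#   TextNormalizer().normalize("她出生于86年8月18日,她弟弟出生于1995年3月1日")
#   -> ['她出生于八六年八月十八日,', '她弟弟出生于一九九五年三月一日']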

View File

@ -64,6 +64,14 @@ def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False
)
return model, optimizer, learning_rate, iteration
from time import time as ttime
import shutil
def my_save(fea,path):#####fix issue: torch.save doesn't support chinese path
dir=os.path.dirname(path)
name=os.path.basename(path)
tmp_path="%s.pth"%(ttime())
torch.save(fea,tmp_path)
shutil.move(tmp_path,"%s/%s"%(dir,name))
def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
logger.info(
@ -75,7 +83,8 @@ def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path)
state_dict = model.module.state_dict()
else:
state_dict = model.state_dict()
torch.save(
# torch.save(
my_save(
{
"model": state_dict,
"iteration": iteration,

152
GPT_SoVITS_Inference.ipynb Normal file
View File

@ -0,0 +1,152 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"source": [
"# Credits for bubarino giving me the huggingface import code (感谢 bubarino 给了我 huggingface 导入代码)"
],
"metadata": {
"id": "himHYZmra7ix"
}
},
{
"cell_type": "code",
"metadata": {
"id": "e9b7iFV3dm1f"
},
"source": [
"!git clone https://github.com/RVC-Boss/GPT-SoVITS.git\n",
"%cd GPT-SoVITS\n",
"!apt-get update && apt-get install -y --no-install-recommends tzdata ffmpeg libsox-dev parallel aria2 git git-lfs && git lfs install\n",
"!pip install -r requirements.txt"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title Download pretrained models 下载预训练模型\n",
"!mkdir -p /content/GPT-SoVITS/GPT_SoVITS/pretrained_models\n",
"!mkdir -p /content/GPT-SoVITS/tools/damo_asr/models\n",
"!mkdir -p /content/GPT-SoVITS/tools/uvr5\n",
"%cd /content/GPT-SoVITS/GPT_SoVITS/pretrained_models\n",
"!git clone https://huggingface.co/lj1995/GPT-SoVITS\n",
"%cd /content/GPT-SoVITS/tools/damo_asr/models\n",
"!git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git\n",
"!git clone https://www.modelscope.cn/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch.git\n",
"!git clone https://www.modelscope.cn/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git\n",
"# @title UVR5 pretrains 安装uvr5模型\n",
"%cd /content/GPT-SoVITS/tools/uvr5\n",
"!git clone https://huggingface.co/Delik/uvr5_weights\n",
"!git config core.sparseCheckout true\n",
"!mv /content/GPT-SoVITS/GPT_SoVITS/pretrained_models/GPT-SoVITS/* /content/GPT-SoVITS/GPT_SoVITS/pretrained_models/"
],
"metadata": {
"id": "0NgxXg5sjv7z",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"#@title Create folder models 创建文件夹模型\n",
"import os\n",
"base_directory = \"/content/GPT-SoVITS\"\n",
"folder_names = [\"SoVITS_weights\", \"GPT_weights\"]\n",
"\n",
"for folder_name in folder_names:\n",
" if os.path.exists(os.path.join(base_directory, folder_name)):\n",
" print(f\"The folder '{folder_name}' already exists. (文件夹'{folder_name}'已经存在。)\")\n",
" else:\n",
" os.makedirs(os.path.join(base_directory, folder_name))\n",
" print(f\"The folder '{folder_name}' was created successfully! (文件夹'{folder_name}'已成功创建!)\")\n",
"\n",
"print(\"All folders have been created. (所有文件夹均已创建。)\")"
],
"metadata": {
"cellView": "form",
"id": "cPDEH-9czOJF"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"import requests\n",
"import zipfile\n",
"import shutil\n",
"import os\n",
"\n",
"#@title Import model 导入模型 (HuggingFace)\n",
"hf_link = 'https://huggingface.co/modelloosrvcc/Nagisa_Shingetsu_GPT-SoVITS/resolve/main/Nagisa.zip' #@param {type: \"string\"}\n",
"\n",
"output_path = '/content/'\n",
"\n",
"response = requests.get(hf_link)\n",
"with open(output_path + 'file.zip', 'wb') as file:\n",
" file.write(response.content)\n",
"\n",
"with zipfile.ZipFile(output_path + 'file.zip', 'r') as zip_ref:\n",
" zip_ref.extractall(output_path)\n",
"\n",
"os.remove(output_path + \"file.zip\")\n",
"\n",
"source_directory = output_path\n",
"SoVITS_destination_directory = '/content/GPT-SoVITS/SoVITS_weights'\n",
"GPT_destination_directory = '/content/GPT-SoVITS/GPT_weights'\n",
"\n",
"for filename in os.listdir(source_directory):\n",
" if filename.endswith(\".pth\"):\n",
" source_path = os.path.join(source_directory, filename)\n",
" destination_path = os.path.join(SoVITS_destination_directory, filename)\n",
" shutil.move(source_path, destination_path)\n",
"\n",
"for filename in os.listdir(source_directory):\n",
" if filename.endswith(\".ckpt\"):\n",
" source_path = os.path.join(source_directory, filename)\n",
" destination_path = os.path.join(GPT_destination_directory, filename)\n",
" shutil.move(source_path, destination_path)\n",
"\n",
"print(f'Model downloaded. (模型已下载。)')"
],
"metadata": {
"cellView": "form",
"id": "vbZY-LnM0tzq"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title launch WebUI 启动WebUI\n",
"!/usr/local/bin/pip install ipykernel\n",
"!sed -i '10s/False/True/' /content/GPT-SoVITS/config.py\n",
"%cd /content/GPT-SoVITS/\n",
"!/usr/local/bin/python webui.py"
],
"metadata": {
"id": "4oRGUzkrk8C7",
"cellView": "form"
},
"execution_count": null,
"outputs": []
}
]
}

170
README.md
View File

@ -3,30 +3,22 @@
<h1>GPT-SoVITS-WebUI</h1>
A Powerful Few-shot Voice Conversion and Text-to-Speech WebUI.<br><br>
[![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange
)](https://github.com/RVC-Boss/GPT-SoVITS)
[![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange)](https://github.com/RVC-Boss/GPT-SoVITS)
<img src="https://counter.seku.su/cmoe?name=gptsovits&theme=r34" /><br>
[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Boss/GPT-SoVITS/blob/main/colab_webui.ipynb)
[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)
[![Huggingface](https://img.shields.io/badge/🤗%20-Models%20Repo-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)
[**English**](./README.md) | [**中文简体**](./docs/cn/README.md) | [**日本語**](./docs/ja/README.md)
[**English**](./README.md) | [**中文简体**](./docs/cn/README.md) | [**日本語**](./docs/ja/README.md) | [**한국어**](./docs/ko/README.md)
</div>
------
> Check out our [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw) here!
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
For users in China region, you can use AutoDL Cloud Docker to experience the full functionality online: https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official
---
## Features:
1. **Zero-shot TTS:** Input a 5-second vocal sample and experience instant text-to-speech conversion.
2. **Few-shot TTS:** Fine-tune the model with just 1 minute of training data for improved voice similarity and realism.
@ -35,51 +27,65 @@ For users in China region, you can use AutoDL Cloud Docker to experience the ful
4. **WebUI Tools:** Integrated tools include voice accompaniment separation, automatic training set segmentation, Chinese ASR, and text labeling, assisting beginners in creating training datasets and GPT/SoVITS models.
## Environment Preparation
**Check out our [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw) here!**
If you are a Windows user (tested with win>=10) you can install directly via the prezip. Just download the [prezip](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true), unzip it and double-click go-webui.bat to start GPT-SoVITS-WebUI.
Unseen speakers few-shot fine-tuning demo:
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
**User guide: [简体中文](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e) | [English](https://rentry.co/GPT-SoVITS-guide#/)**
## Installation
For users in China region, you can [click here](https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official) to use AutoDL Cloud Docker to experience the full functionality online.
### Tested Environments
- Python 3.9, PyTorch 2.0.1, CUDA 11
- Python 3.10.13, PyTorch 2.1.2, CUDA 12.3
- Python 3.9, PyTorch 2.3.0.dev20240122, macOS 14.3 (Apple Silicon, MPS)
- Python 3.9, PyTorch 2.3.0.dev20240122, macOS 14.3 (Apple silicon)
_Note: numba==0.56.4 require py<3.11_
_Note: numba==0.56.4 requires py<3.11_
### For Mac Users
If you are a Mac user, please install by using the following commands:
#### Create Environment
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
```
#### Install Requirements
```bash
pip install -r requirements.txt
pip uninstall torch torchaudio
pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu
```
_Note: For preprocessing with UVR5, it is recommended to [download the original project GUI](https://github.com/Anjok07/ultimatevocalremovergui) and select GPU for operation. Additionally, there may be memory leak issues when using Mac for inference, restarting the inference webUI can release the memory._
### Quick Install with Conda
### Windows
If you are a Windows user (tested with win>=10), you can directly download the [pre-packaged distribution](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true) and double-click on _go-webui.bat_ to start GPT-SoVITS-WebUI.
Users in China region can download the file by clicking [here](https://www.icloud.com.cn/iclouddrive/061bfkcVJcBfsMfLF5R2XKdTQ#GPT-SoVITS-beta0217) and then selecting "Download a copy."
### Linux
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
bash install.sh
```
### macOS
**Note: The models trained with GPUs on Macs result in significantly lower quality compared to those trained on other devices, so we are temporarily using CPUs instead.**
First make sure you have installed FFmpeg by running `brew install ffmpeg` or `conda install ffmpeg`, then install by using the following commands:
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
pip install -r requirements.txt
```
### Install Manually
#### Pip Packages
#### Install Dependences
```bash
pip install -r requirements.txt
```
#### FFmpeg
#### Install FFmpeg
##### Conda Users
```bash
conda install ffmpeg
```
@ -92,39 +98,25 @@ sudo apt install libsox-dev
conda install -c conda-forge 'ffmpeg<7'
```
##### MacOS Users
```bash
brew install ffmpeg
```
##### Windows Users
Download and place [ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe) and [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe) in the GPT-SoVITS root.
### Pretrained Models
Download pretrained models from [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) and place them in `GPT_SoVITS/pretrained_models`.
For Chinese ASR (additionally), download models from [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) and place them in `tools/damo_asr/models`.
For UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally), download models from [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) and place them in `tools/uvr5/uvr5_weights`.
### Using Docker
#### docker-compose.yaml configuration
0. Regarding image tags: Due to rapid updates in the codebase and the slow process of packaging and testing images, please check [Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) for the currently packaged latest images and select as per your situation, or alternatively, build locally using a Dockerfile according to your own needs.
1. Environment Variables
- is_half: Controls half-precision/double-precision. This is typically the cause if the content under the directories 4-cnhubert/5-wav32k is not generated correctly during the "SSL extracting" step. Adjust to True or False based on your actual situation.
- is_half: Controls half-precision/double-precision. This is typically the cause if the content under the directories 4-cnhubert/5-wav32k is not generated correctly during the "SSL extracting" step. Adjust to True or False based on your actual situation.
2. Volumes Configuration: The application's root directory inside the container is set to /workspace. The default docker-compose.yaml lists some practical examples for uploading/downloading content.
3. shm_size: The default available memory for Docker Desktop on Windows is too small, which can cause abnormal operations. Adjust according to your own situation.
4. Under the deploy section, GPU-related settings should be adjusted cautiously according to your system and actual circumstances.
#### Running with docker compose
```
docker compose -f "docker-compose.yaml" up -d
```
@ -132,10 +124,24 @@ docker compose -f "docker-compose.yaml" up -d
#### Running with docker command
As above, modify the corresponding parameters based on your actual situation, then run the following command:
```
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:dev-20240123.03
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9880:9880 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx
```
## Pretrained Models
Download pretrained models from [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) and place them in `GPT_SoVITS/pretrained_models`.
For UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally), download models from [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) and place them in `tools/uvr5/uvr5_weights`.
Users in China region can download these two models by entering the links below and clicking "Download a copy"
- [GPT-SoVITS Models](https://www.icloud.com.cn/iclouddrive/056y_Xog_HXpALuVUjscIwTtg#GPT-SoVITS_Models)
- [UVR5 Weights](https://www.icloud.com.cn/iclouddrive/0bekRKDiJXboFhbfm3lM2fVbA#UVR5_Weights)
For Chinese ASR (additionally), download models from [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) and place them in `tools/damo_asr/models`.
## Dataset Format
@ -156,12 +162,14 @@ Example:
```
D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
```
## Todo List
- [ ] **High Priority:**
- [ ] Localization in Japanese and English.
- [ ] User guide.
- [ ] Japanese and English dataset fine tune training.
- [x] Localization in Japanese and English.
- [x] User guide.
- [x] Japanese and English dataset fine tune training.
- [ ] **Features:**
- [ ] Zero-shot voice conversion (5s) / few-shot voice conversion (1min).
@ -170,31 +178,71 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
- [ ] Experiment with changing SoVITS token inputs to probability distribution of vocabs.
- [ ] Improve English and Japanese text frontend.
- [ ] Develop tiny and larger-sized TTS models.
- [ ] Colab scripts.
- [x] Colab scripts.
- [ ] Try expand training dataset (2k hours -> 10k hours).
- [ ] better sovits base model (enhanced audio quality)
- [ ] model mix
## (Optional) If you need it, the command line operation mode is also provided
Use the command line to open the WebUI for UVR5
```
python tools/uvr5/webui.py "<infer_device>" <is_half> <webui_port_uvr5>
```
If you can't open a browser, follow the format below for UVR processing. This uses mdxnet for audio processing.
```
python mdxnet.py --model --input_root --output_vocal --output_ins --agg_level --format --device --is_half_precision
```
This is how the audio segmentation of the dataset is done using the command line
```
python audio_slicer.py \
--input_path "<path_to_original_audio_file_or_directory>" \
--output_root "<directory_where_subdivided_audio_clips_will_be_saved>" \
--threshold <volume_threshold> \
--min_length <minimum_duration_of_each_subclip> \
--min_interval <shortest_time_gap_between_adjacent_subclips> \
--hop_size <step_size_for_computing_volume_curve>
```
This is how dataset ASR processing is done using the command line (Chinese only)
```
python tools/damo_asr/cmd-asr.py "<Path to the directory containing input audio files>"
```
ASR processing is performed through Faster_Whisper (ASR labeling for languages other than Chinese)
(No progress bars; GPU performance may cause delays)
```
python ./tools/damo_asr/WhisperASR.py -i <input> -o <output> -f <file_name.list> -l <language>
```
A custom list save path is supported
## Credits
Special thanks to the following projects and contributors:
### Theoretical
- [ar-vits](https://github.com/innnky/ar-vits)
- [SoundStorm](https://github.com/yangdongchao/SoundStorm/tree/master/soundstorm/s1/AR)
- [vits](https://github.com/jaywalnut310/vits)
- [TransferTTS](https://github.com/hcy71o/TransferTTS/blob/master/models.py#L556)
- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain)
- [contentvec](https://github.com/auspicious3000/contentvec/)
- [hifi-gan](https://github.com/jik876/hifi-gan)
- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large)
- [fish-speech](https://github.com/fishaudio/fish-speech/blob/main/tools/llama/generate.py#L41)
### Pretrained Models
- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain)
- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large)
### Text Frontend for Inference
- [paddlespeech zh_normalization](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/zh_normalization)
- [LangSegment](https://github.com/juntaosun/LangSegment)
### WebUI Tools
- [ultimatevocalremovergui](https://github.com/Anjok07/ultimatevocalremovergui)
- [audio-slicer](https://github.com/openvpi/audio-slicer)
- [SubFix](https://github.com/cronrpc/SubFix)
- [FFmpeg](https://github.com/FFmpeg/FFmpeg)
- [gradio](https://github.com/gradio-app/gradio)
- [faster-whisper](https://github.com/SYSTRAN/faster-whisper)
- [FunASR](https://github.com/alibaba-damo-academy/FunASR)
## Thanks to all contributors for their efforts
<a href="https://github.com/RVC-Boss/GPT-SoVITS/graphs/contributors" target="_blank">
<img src="https://contrib.rocks/image?repo=RVC-Boss/GPT-SoVITS" />
</a>

66
api.py
View File

@ -13,7 +13,7 @@
`-dt` - `默认参考音频文本`
`-dl` - `默认参考音频语种, "中文","英文","日文","zh","en","ja"`
`-d` - `推理设备, "cuda","cpu","mps"`
`-d` - `推理设备, "cuda","cpu"`
`-a` - `绑定地址, 默认"127.0.0.1"`
`-p` - `绑定端口, 默认9880, 可在 config.py 中指定`
`-fp` - `覆盖 config.py 使用全精度`
@ -104,8 +104,13 @@ RESP: 无
import argparse
import os
import signal
import sys
now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append("%s/GPT_SoVITS" % (now_dir))
import signal
from time import time as ttime
import torch
import librosa
@ -138,8 +143,8 @@ parser.add_argument("-dr", "--default_refer_path", type=str, default="", help="
parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="默认参考音频文本")
parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="默认参考音频语种")
parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu / mps")
parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 127.0.0.1")
parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu")
parser.add_argument("-a", "--bind_addr", type=str, default="0.0.0.0", help="default: 0.0.0.0")
parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880")
parser.add_argument("-fp", "--full_precision", action="store_true", default=False, help="覆盖config.is_half为False, 使用全精度")
parser.add_argument("-hp", "--half_precision", action="store_true", default=False, help="覆盖config.is_half为True, 使用半精度")
@ -222,6 +227,44 @@ def is_full(*items): # 任意一项为空返回False
return False
return True
def change_sovits_weights(sovits_path):
global vq_model, hps
dict_s2 = torch.load(sovits_path, map_location="cpu")
hps = dict_s2["config"]
hps = DictToAttrRecursive(hps)
hps.model.semantic_frame_rate = "25hz"
vq_model = SynthesizerTrn(
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
n_speakers=hps.data.n_speakers,
**hps.model
)
if ("pretrained" not in sovits_path):
del vq_model.enc_q
if is_half == True:
vq_model = vq_model.half().to(device)
else:
vq_model = vq_model.to(device)
vq_model.eval()
print(vq_model.load_state_dict(dict_s2["weight"], strict=False))
with open("./sweight.txt", "w", encoding="utf-8") as f:
f.write(sovits_path)
def change_gpt_weights(gpt_path):
global hz, max_sec, t2s_model, config
hz = 50
dict_s1 = torch.load(gpt_path, map_location="cpu")
config = dict_s1["config"]
max_sec = config["data"]["max_sec"]
t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
t2s_model.load_state_dict(dict_s1["weight"])
if is_half == True:
t2s_model = t2s_model.half()
t2s_model = t2s_model.to(device)
t2s_model.eval()
total = sum([param.nelement() for param in t2s_model.parameters()])
print("Number of parameter: %.2fM" % (total / 1e6))
with open("./gweight.txt", "w", encoding="utf-8") as f: f.write(gpt_path)
def get_bert_feature(text, word2ph):
with torch.no_grad():
@ -439,12 +482,25 @@ def handle(refer_wav_path, prompt_text, prompt_language, text, text_language):
wav.seek(0)
torch.cuda.empty_cache()
torch.mps.empty_cache()
return StreamingResponse(wav, media_type="audio/wav")
app = FastAPI()
#clark新增-----2024-02-21
#可在启动后动态修改模型以此满足同一个api不同的朗读者请求
@app.post("/set_model")
async def set_model(request: Request):
json_post_raw = await request.json()
global gpt_path
gpt_path=json_post_raw.get("gpt_model_path")
global sovits_path
sovits_path=json_post_raw.get("sovits_model_path")
print("gptpath"+gpt_path+";vitspath"+sovits_path)
change_sovits_weights(sovits_path)
change_gpt_weights(gpt_path)
return "ok"
# 新增-----end------
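# Minimal client-side sketch (assumed model paths, for illustration only):
#   import requests
#   requests.post("http://127.0.0.1:9880/set_model", json={
#       "gpt_model_path": "GPT_weights/example.ckpt",       # hypothetical file
#       "sovits_model_path": "SoVITS_weights/example.pth",  # hypothetical file
#   })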
@app.post("/control")
async def control(request: Request):

96
colab_webui.ipynb Normal file
View File

@ -0,0 +1,96 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/RVC-Boss/GPT-SoVITS/blob/main/colab_webui.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"source": [
"环境配置 environment"
],
"metadata": {
"id": "_o6a8GS2lWQM"
}
},
{
"cell_type": "code",
"metadata": {
"id": "e9b7iFV3dm1f"
},
"source": [
"!pip install -q condacolab\n",
"# Setting up condacolab and installing packages\n",
"import condacolab\n",
"condacolab.install_from_url(\"https://repo.anaconda.com/miniconda/Miniconda3-py39_23.11.0-2-Linux-x86_64.sh\")\n",
"%cd -q /content\n",
"!git clone https://github.com/RVC-Boss/GPT-SoVITS\n",
"!conda install -y -q -c pytorch -c nvidia cudatoolkit\n",
"%cd -q /content/GPT-SoVITS\n",
"!conda install -y -q -c conda-forge gcc gxx ffmpeg cmake -c pytorch -c nvidia\n",
"!/usr/local/bin/pip install -r requirements.txt"
],
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title Download pretrained models 下载预训练模型\n",
"!mkdir -p /content/GPT-SoVITS/GPT_SoVITS/pretrained_models\n",
"!mkdir -p /content/GPT-SoVITS/tools/damo_asr/models\n",
"!mkdir -p /content/GPT-SoVITS/tools/uvr5\n",
"%cd /content/GPT-SoVITS/GPT_SoVITS/pretrained_models\n",
"!git clone https://huggingface.co/lj1995/GPT-SoVITS\n",
"%cd /content/GPT-SoVITS/tools/damo_asr/models\n",
"!git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git\n",
"!git clone https://www.modelscope.cn/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch.git\n",
"!git clone https://www.modelscope.cn/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git\n",
"# @title UVR5 pretrains 安装uvr5模型\n",
"%cd /content/GPT-SoVITS/tools/uvr5\n",
"!git clone https://huggingface.co/Delik/uvr5_weights\n",
"!git config core.sparseCheckout true\n",
"!mv /content/GPT-SoVITS/GPT_SoVITS/pretrained_models/GPT-SoVITS/* /content/GPT-SoVITS/GPT_SoVITS/pretrained_models/"
],
"metadata": {
"id": "0NgxXg5sjv7z"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title launch WebUI 启动WebUI\n",
"!/usr/local/bin/pip install ipykernel\n",
"!sed -i '10s/False/True/' /content/GPT-SoVITS/config.py\n",
"%cd /content/GPT-SoVITS/\n",
"!/usr/local/bin/python webui.py"
],
"metadata": {
"id": "4oRGUzkrk8C7"
},
"execution_count": null,
"outputs": []
}
]
}

View File

@ -5,8 +5,10 @@ import torch
# 推理用的指定模型
sovits_path = ""
gpt_path = ""
is_half = eval(os.environ.get("is_half","True"))
is_share=False
is_half_str = os.environ.get("is_half", "True")
is_half = True if is_half_str.lower() == 'true' else False
is_share_str = os.environ.get("is_share","False")
is_share= True if is_share_str.lower() == 'true' else False
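# Illustrative override via environment variables (assumed invocation, for illustration only):
#   is_half=False is_share=True python webui.py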
cnhubert_path = "GPT_SoVITS/pretrained_models/chinese-hubert-base"
bert_path = "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"
@ -17,8 +19,6 @@ exp_root = "logs"
python_exec = sys.executable or "python"
if torch.cuda.is_available():
infer_device = "cuda"
elif torch.backends.mps.is_available():
infer_device = "mps"
else:
infer_device = "cpu"
@ -29,15 +29,16 @@ webui_port_subfix = 9871
api_port = 9880
gpu_name = torch.cuda.get_device_name(0)
if (
if infer_device == "cuda":
gpu_name = torch.cuda.get_device_name(0)
if (
("16" in gpu_name and "V100" not in gpu_name.upper())
or "P40" in gpu_name.upper()
or "P10" in gpu_name.upper()
or "1060" in gpu_name
or "1070" in gpu_name
or "1080" in gpu_name
):
):
is_half=False
if(infer_device=="cpu"):is_half=False

View File

@ -2,10 +2,11 @@ version: '3.8'
services:
gpt-sovits:
image: breakstring/gpt-sovits:dev-20240123.03
image: breakstring/gpt-sovits:latest # please change the image name and tag base your environment. If the tag contains the word 'elite', such as "latest-elite", it indicates that the image does not include the necessary models such as GPT-SoVITS, UVR5, Damo ASR, etc. You will need to download them yourself and map them into the container.
container_name: gpt-sovits-container
environment:
- is_half=False
- is_share=False
volumes:
- ./output:/workspace/output
- ./logs:/workspace/logs
@ -13,7 +14,7 @@ services:
- ./reference:/workspace/reference
working_dir: /workspace
ports:
- "9870:9870"
- "9880:9880"
- "9871:9871"
- "9872:9872"
- "9873:9873"

21
dockerbuild.sh Executable file
View File

@ -0,0 +1,21 @@
#!/bin/bash
# 获取当前日期,格式为 YYYYMMDD
DATE=$(date +%Y%m%d)
# 获取最新的 Git commit 哈希值的前 7 位
COMMIT_HASH=$(git rev-parse HEAD | cut -c 1-7)
# 构建 full 版本的镜像
docker build --build-arg IMAGE_TYPE=full -t breakstring/gpt-sovits:latest .
# 为同一个镜像添加带日期的标签
docker tag breakstring/gpt-sovits:latest breakstring/gpt-sovits:dev-$DATE
# 为同一个镜像添加带当前代码库Commit哈希值的标签
docker tag breakstring/gpt-sovits:latest breakstring/gpt-sovits:dev-$COMMIT_HASH
# 构建 elite 版本的镜像(无模型下载步骤,需手工将模型下载安装进容器)
docker build --build-arg IMAGE_TYPE=elite -t breakstring/gpt-sovits:latest-elite .
# 为同一个镜像添加带日期的标签
docker tag breakstring/gpt-sovits:latest-elite breakstring/gpt-sovits:dev-$DATE-elite
# 为同一个镜像添加带当前代码库Commit哈希值的标签
docker tag breakstring/gpt-sovits:latest-elite breakstring/gpt-sovits:dev-$COMMIT_HASH-elite

View File

@ -46,5 +46,111 @@
7-自动识别不支持半精度的卡强制单精度。cpu推理下强制单精度。
### 20240128更新
1-修复数字转汉字念法问题
2-修复句首少量字容易吞字的问题
3-通过限制排除不合理的参考音频长度
4-修复GPT训练不保存ckpt的问题
5-完善Dockerfile的下载模型流程
### 20240129更新
1-16系等半精度训练有问题的显卡把训练配置改为单精度训练
2-测试更新可用的colab版本
3-修复git clone modelscope funasr仓库+老版本funasr导致接口不对齐报错的问题
### 20240130更新
1-所有涉及路径的地方双引号自动去除,小白复制路径带双引号不会报错
2-修复中英文标点切割问题和句首句尾补标点的问题
3-增加按标点符号切分
### 20240201更新
1-修复uvr5读取格式错误导致分离失败的问题
2-支持中日英混合多种文本自动切分识别语种
### 20240202更新
1-修复asr路径尾缀带/保存文件名报错
2-引入paddlespeech的Normalizer https://github.com/RVC-Boss/GPT-SoVITS/pull/377 修复一些问题例如xx.xx%(带百分号类),元/吨 会读成 元吨 而不是元每吨,下划线不再会报错
### 20240207更新
1-修正语种传参混乱导致中文推理效果下降 https://github.com/RVC-Boss/GPT-SoVITS/issues/391
2-uvr5适配高版本librosa https://github.com/RVC-Boss/GPT-SoVITS/pull/403
3-修复uvr5 inf everywhere报错的问题(is_half传参未转换bool导致恒定半精度推理16系显卡会inf) https://github.com/RVC-Boss/GPT-SoVITS/commit/14a285109a521679f8846589c22da8f656a46ad8
4-优化英文文本前端
5-修复gradio依赖
6-支持三连根目录留空自动读取.list全路径
7-集成faster whisper ASR日文英文
### 20240208更新
1-GPT训练卡死win10 1909和https://github.com/RVC-Boss/GPT-SoVITS/issues/232 系统语言繁体GPT训练报错[尝试修复](https://github.com/RVC-Boss/GPT-SoVITS/commit/59f35adad85815df27e9c6b33d420f5ebfd8376b)。
### 20240212更新
1-faster whisper和funasr逻辑优化。faster whisper转镜像站下载规避huggingface连不上的问题。
2-DPO Loss实验性训练选项开启通过构造负样本训练缓解GPT重复漏字问题。推理界面公开几个推理参数。 https://github.com/RVC-Boss/GPT-SoVITS/pull/457
### 20240214更新
1-训练支持中文实验名(原来会报错)
2-DPO训练改为可勾选选项而非必须。如勾选batch size自动减半。修复推理界面新参数不传参的问题。
### 20240216更新
1-支持无参考文本输入
2-修复中文文本前端bug https://github.com/RVC-Boss/GPT-SoVITS/issues/475
### 20240221更新
1-数据处理添加语音降噪选项降噪为只剩16k采样率除非底噪很大先不急着用哦。
2-中文日文前端处理优化 https://github.com/RVC-Boss/GPT-SoVITS/pull/559 https://github.com/RVC-Boss/GPT-SoVITS/pull/556 https://github.com/RVC-Boss/GPT-SoVITS/pull/532 https://github.com/RVC-Boss/GPT-SoVITS/pull/507 https://github.com/RVC-Boss/GPT-SoVITS/pull/509
3-mac CPU推理更快因此把推理设备从mps改到CPU
4-colab修复不开启公网url
### 20240306更新
1-推理加速50%RTX3090+pytorch2.2.1+cu11.8+win10+py39 testedhttps://github.com/RVC-Boss/GPT-SoVITS/pull/672
2-如果用faster whisper非中文ASR不再需要先下中文funasr模型
3-修复uvr5去混响模型 是否混响 反的 https://github.com/RVC-Boss/GPT-SoVITS/pull/610
4-faster whisper如果无cuda可用自动cpu推理 https://github.com/RVC-Boss/GPT-SoVITS/pull/675
5-修改is_half的判断使在Mac上能正常CPU推理 https://github.com/RVC-Boss/GPT-SoVITS/pull/573
todolist
1-中文多音字推理优化(有没有人来测试的欢迎把测试结果写在pr评论区里) https://github.com/RVC-Boss/GPT-SoVITS/pull/488

View File

@ -3,81 +3,89 @@
<h1>GPT-SoVITS-WebUI</h1>
强大的少样本语音转换与语音合成Web用户界面。<br><br>
[![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange
)](https://github.com/RVC-Boss/GPT-SoVITS)
[![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange)](https://github.com/RVC-Boss/GPT-SoVITS)
<img src="https://counter.seku.su/cmoe?name=gptsovits&theme=r34" /><br>
[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Boss/GPT-SoVITS/blob/main/colab_webui.ipynb)
[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)
[![Huggingface](https://img.shields.io/badge/🤗%20-Models%20Repo-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)
[**English**](./README.md) | [**中文简体**](./README_ZH.md)
[**English**](../../README.md) | [**中文简体**](./README.md) | [**日本語**](../ja/README.md) | [**한국어**](../ko/README.md)
</div>
------
> 查看我们的介绍视频 [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw)
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
中国地区用户可使用AutoDL云端镜像进行体验https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official
---
## 功能:
1. **零样本文本到语音TTS** 输入5秒的声音样本即刻体验文本到语音转换。
2. **少样本TTS** 仅需1分钟的训练数据即可微调模型提升声音相似度和真实感。
1. **零样本文本到语音TTS** 输入 5 秒的声音样本,即刻体验文本到语音转换。
2. **少样本 TTS** 仅需 1 分钟的训练数据即可微调模型,提升声音相似度和真实感。
3. **跨语言支持:** 支持与训练数据集不同语言的推理,目前支持英语、日语和中文。
4. **WebUI工具:** 集成工具包括声音伴奏分离、自动训练集分割、中文自动语音识别(ASR)和文本标注协助初学者创建训练数据集和GPT/SoVITS模型。
4. **WebUI 工具:** 集成工具包括声音伴奏分离、自动训练集分割、中文自动语音识别(ASR)和文本标注,协助初学者创建训练数据集和 GPT/SoVITS 模型。
## 环境准备
**查看我们的介绍视频 [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw)**
如果你是Windows用户已在win>=10上测试可以直接通过预打包文件安装。只需下载[预打包文件](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true)解压后双击go-webui.bat即可启动GPT-SoVITS-WebUI。
未见过的说话者 few-shot 微调演示:
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
### 测试通过的Python和PyTorch版本
**用户手册: [简体中文](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e) | [English](https://rentry.co/GPT-SoVITS-guide#/)**
- Python 3.9、PyTorch 2.0.1和CUDA 11
- Python 3.10.13, PyTorch 2.1.2和CUDA 12.3
- Python 3.9、Pytorch 2.3.0.dev20240122和macOS 14.3Apple 芯片MPS
## 安装
中国地区用户可[点击此处](https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official)使用 AutoDL 云端镜像进行体验。
### 测试通过的环境
- Python 3.9、PyTorch 2.0.1 和 CUDA 11
- Python 3.10.13, PyTorch 2.1.2 和 CUDA 12.3
- Python 3.9、Pytorch 2.3.0.dev20240122 和 macOS 14.3 (Apple 芯片)
_注意: numba==0.56.4 需要 python<3.11_
### Mac 用户
如果你是Mac用户请使用以下命令安装
#### 创建环境
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
```
#### 安装依赖
```bash
pip install -r requirements.txt
pip uninstall torch torchaudio
pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu
```
_注意如需使用UVR5进行预处理建议[下载原项目GUI](https://github.com/Anjok07/ultimatevocalremovergui)勾选GPU运行。另外使用Mac推理时可能存在内存泄漏问题重启推理UI即可释放内存。_
### 使用Conda快速安装
### Windows
如果你是 Windows 用户(已在 win>=10 上测试),可以直接下载[预打包文件](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true),解压后双击 go-webui.bat 即可启动 GPT-SoVITS-WebUI。
中国地区用户可以进入[此处](https://www.icloud.com.cn/iclouddrive/061bfkcVJcBfsMfLF5R2XKdTQ#GPT-SoVITS-beta0217)并点击“下载副本”进行下载。
### Linux
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
bash install.sh
```
### 手动安装包
#### Pip包
### macOS
**注:在 Mac 上使用 GPU 训练的模型效果显著低于其他设备训练的模型所以我们暂时使用CPU进行训练。**
首先确保你已通过运行 `brew install ffmpeg``conda install ffmpeg` 安装 FFmpeg然后运行以下命令安装
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
pip install -r requirements.txt
```
### 手动安装
#### 安装依赖
```bash
pip install -r requirements.txt
```
#### FFmpeg
#### 安装 FFmpeg
##### Conda 使用者
```bash
conda install ffmpeg
```
@ -90,12 +98,6 @@ sudo apt install libsox-dev
conda install -c conda-forge 'ffmpeg<7'
```
##### MacOS 使用者
```bash
brew install ffmpeg
```
##### Windows 使用者
下载并将 [ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe) 和 [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe) 放置在 GPT-SoVITS 根目录下。
@ -104,16 +106,17 @@ brew install ffmpeg
#### docker-compose.yaml 设置
0. image 的标签:由于代码库更新很快,镜像的打包和测试又很慢,所以请自行在 [Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) 查看当前打包好的最新的镜像并根据自己的情况选用,或者在本地根据您自己的需求通过 Dockerfile 进行构建。
1. 环境变量:
- is_half: 半精度/双精度控制。在进行 "SSL extracting" 步骤时如果无法正确生成 4-cnhubert/5-wav32k 目录下的内容时一般都是它引起的可以根据实际情况来调整为True或者False。
2. Volume设置容器内的应用根目录设置为 /workspace。 默认的 docker-compose.yaml 中列出了一些实际的例子,便于上传/下载内容。
3. shm_sizeWindows下的Docker Desktop默认可用内存过小会导致运行异常根据自己情况酌情设置。
4. deploy小节下的gpu相关内容请根据您的系统和实际情况酌情设置。
- is_half: 半精度/双精度控制。在进行 "SSL extracting" 步骤时如果无法正确生成 4-cnhubert/5-wav32k 目录下的内容时,一般都是它引起的,可以根据实际情况来调整为 True 或者 False。
2. Volume 设置,容器内的应用根目录设置为 /workspace。 默认的 docker-compose.yaml 中列出了一些实际的例子,便于上传/下载内容。
3. shm_sizeWindows 下的 Docker Desktop 默认可用内存过小,会导致运行异常,根据自己情况酌情设置。
4. deploy 小节下的 gpu 相关内容,请根据您的系统和实际情况酌情设置。
#### 通过 docker compose 运行
#### 通过 docker compose运行
```
docker compose -f "docker-compose.yaml" up -d
```
@ -121,20 +124,24 @@ docker compose -f "docker-compose.yaml" up -d
#### 通过 docker 命令运行
同上,根据您自己的实际情况修改对应的参数,然后运行如下命令:
```
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:dev-20240123.03
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9880:9880 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx
```
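For reference, `--env=is_half=False` in the command above injects a string into the container. A minimal hypothetical sketch of converting it into a real boolean (relevant to the half-precision issue noted in the changelog) might look like this; it is not the repository's actual code:
```python
# Hypothetical helper, not part of the repository: parse the is_half
# environment variable into a boolean so that the string "False" does not
# accidentally enable half-precision inference.
import os

def env_is_half(default: bool = True) -> bool:
    raw = os.environ.get("is_half", str(default))
    return raw.strip().lower() in ("1", "true", "yes")

print("is_half =", env_is_half())
```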
### 预训练模型
## 预训练模型
从 [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) 下载预训练模型,并将它们放置在 `GPT_SoVITS\pretrained_models` 中。
对于中文自动语音识别(另外),从 [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), 和 [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) 下载模型,并将它们放置在 `tools/damo_asr/models` 中。
对于 UVR5人声/伴奏分离和混响移除,附加),从 [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) 下载模型,并将它们放置在 `tools/uvr5/uvr5_weights` 中。
对于UVR5人声/伴奏分离和混响移除,另外),从 [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) 下载模型,并将它们放置在 `tools/uvr5/uvr5_weights` 中。
中国地区用户可以进入以下链接并点击“下载副本”下载以上两个模型:
- [GPT-SoVITS Models](https://www.icloud.com.cn/iclouddrive/056y_Xog_HXpALuVUjscIwTtg#GPT-SoVITS_Models)
- [UVR5 Weights](https://www.icloud.com.cn/iclouddrive/0bekRKDiJXboFhbfm3lM2fVbA#UVR5_Weights)
对于中文自动语音识别(附加),从 [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), 和 [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) 下载模型,并将它们放置在 `tools/damo_asr/models` 中。
## 数据集格式
@ -155,25 +162,57 @@ vocal_path|speaker_name|language|text
```
D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
```
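As an illustration only (the helper below is hypothetical and not part of the repository), one line of this `.list` format can be split into its four fields like so:
```python
# Illustrative parser for a single .list annotation line.
def parse_list_line(line: str) -> dict:
    vocal_path, speaker_name, language, text = line.rstrip("\n").split("|", 3)
    return {"vocal_path": vocal_path, "speaker": speaker_name,
            "language": language, "text": text}

print(parse_list_line("D:\\GPT-SoVITS\\xxx/xxx.wav|xxx|en|I like playing Genshin."))
```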
## 待办事项清单
- [ ] **高优先级:**
- [ ] 日语和英语的本地化。
- [x] 日语和英语的本地化。
- [ ] 用户指南。
- [ ] 日语和英语数据集微调训练。
- [x] 日语和英语数据集微调训练。
- [ ] **Features:**
- [ ] 零样本声音转换5秒/ 少样本声音转换1分钟
- [ ] TTS语速控制。
- [ ] 增强的TTS情感控制。
- [ ] 尝试将SoVITS令牌输入更改为词汇的概率分布。
- [ ] 零样本声音转换 (5 秒)/ 少样本声音转换 (1 分钟)。
- [ ] TTS 语速控制。
- [ ] 增强的 TTS 情感控制。
- [ ] 尝试将 SoVITS 令牌输入更改为词汇的概率分布。
- [ ] 改进英语和日语文本前端。
- [ ] 开发体积小和更大的TTS模型。
- [ ] Colab脚本。
- [ ] 扩展训练数据集从2k小时到10k小时
- [ ] 更好的sovits基础模型增强的音频质量
- [ ] 开发体积小和更大的 TTS 模型。
- [x] Colab 脚本。
- [ ] 扩展训练数据集(从 2k 小时到 10k 小时)。
- [ ] 更好的 sovits 基础模型(增强的音频质量)。
- [ ] 模型混合。
## (可选)命令行的操作方式
使用命令行打开UVR5的WebUI
````
python tools/uvr5/webui.py "<infer_device>" <is_half> <webui_port_uvr5>
````
如果打不开浏览器请按照下面的格式进行UVR处理这是使用mdxnet进行音频处理的方式
````
python mdxnet.py --model --input_root --output_vocal --output_ins --agg_level --format --device --is_half_precision
````
这是使用命令行完成数据集的音频切分的方式
````
python audio_slicer.py \
--input_path "<path_to_original_audio_file_or_directory>" \
--output_root "<directory_where_subdivided_audio_clips_will_be_saved>" \
--threshold <volume_threshold> \
--min_length <minimum_duration_of_each_subclip> \
--min_interval <shortest_time_gap_between_adjacent_subclips>
--hop_size <step_size_for_computing_volume_curve>
````
这是使用命令行完成数据集ASR处理的方式仅限中文
````
python tools/damo_asr/cmd-asr.py "<Path to the directory containing input audio files>"
````
通过Faster_Whisper进行ASR处理除中文之外的ASR标记
没有进度条GPU性能可能会导致时间延迟
````
python ./tools/damo_asr/WhisperASR.py -i <input> -o <output> -f <file_name.list> -l <language>
````
启用自定义列表保存路径
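For orientation, the Faster-Whisper ASR step above corresponds roughly to the following sketch, assuming the `faster-whisper` package is installed; the model size, device, and audio path are placeholder values, not the project's defaults:
```python
# Rough sketch of a Faster-Whisper transcription call.
from faster_whisper import WhisperModel

model = WhisperModel("large-v3", device="cuda", compute_type="float16")
segments, _info = model.transcribe("sample.wav", beam_size=5, language="en")
for seg in segments:
    print(f"[{seg.start:.2f} -> {seg.end:.2f}] {seg.text}")
```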
## 致谢
特别感谢以下项目和贡献者:
@ -194,6 +233,7 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
- [gradio](https://github.com/gradio-app/gradio)
## 感谢所有贡献者的努力
<a href="https://github.com/RVC-Boss/GPT-SoVITS/graphs/contributors" target="_blank">
<img src="https://contrib.rocks/image?repo=RVC-Boss/GPT-SoVITS" />
</a>

85
docs/ja/Changelog_JA.md Normal file
View File

@ -0,0 +1,85 @@
### 20240121 更新
1. `config``is_share`を追加し、Colab などの環境でこれを`True`に設定すると、webui を公共ネットワークにマッピングできます。
2. WebUI に英語システムの英語翻訳を追加しました。
3. `cmd-asr`は damo モデルが既に含まれているかどうかを自動的に確認し、デフォルトのパスにない場合は modelscope から自動的にダウンロードします。
4. [SoVITS 训练报错 ZeroDivisionError](https://github.com/RVC-Boss/GPT-SoVITS/issues/79) 修復を試みます(長さ 0 のサンプルをフィルタリングなど)
5. TEMP ファイルフォルダからオーディオやその他のファイルをクリーンアップして最適化します。
6. 合成オーディオがリファレンスオーディオの終わりを含む問題を大幅に改善しました。
### 20240122 更新
1. 短すぎる出力ファイルが重複したリファレンスオーディオを返す問題を修正しました。
2. 英語-日本語学習がスムーズに進む QA を完了しました。(ただし、日本語学習はルートディレクトリに英語以外の文字が含まれていない必要があります)
3. オーディオパスをチェックします。間違ったパスを読み取ろうとすると、「パスが存在しません」というエラーメッセージが返されます。これは ffmpeg モジュールのエラーではありません。
### 20240123 更新
1. hubert から nan 抽出による SoVITS/GPT 学習中の ZeroDivisionError 関連エラーを修正しました。
2. 推論インターフェースでモデルを素早く切り替えることができるようにサポートしました。
3. モデルファイルのソートロジックを最適化しました。
4. 中国語の分析に`jieba_fast``jieba`に置き換えました。
### 20240126 更新
1. 中国語と英語、日本語と英語が混在した出力テキストをサポートします。
2. 出力で選択的な分割モードをサポートします。
3. uvr5 がディレクトリを読み取り、自動的に終了する問題を修正しました。
4. 複数の改行による推論エラーを修正しました。
5. 推論インターフェースから不要なログを削除しました。
6. MacOS での学習と推論をサポートします。
7. 半精度をサポートしていないカードを自動的に識別して単精度を強制し、CPU 推論では単精度を強制します。
### 20240128 更新
1. 数字を漢字で読む問題を修正しました。
2. 文章の先頭の一部の単語が欠落する問題を修正しました。
3. 不適切な長さのリファレンスオーディオを制限しました。
4. GPT 学習時の ckpt が保存されない問題を修正しました。
5. Dockerfile のモデルダウンロードプロセスを改善しました。
### 20240129 更新
1. 16 系などの半精度学習に問題があるカードは、学習構成を単精度学習に変更しました。
2. Colab でも使用可能なバージョンをテストして更新しました。
3. `git clone modelscope funasr`リポジトリと古いバージョンの funasr を使用してインターフェースが一致しないエラーを修正しました。
### 20240130 更新
1. パスと関連する文字列を解析して、二重引用符を自動的に削除します。また、パスをコピーする場合、二重引用符が含まれていてもエラーが発生しません。
2. 中国語と英語、日本語と英語の混合出力をサポートします。
3. 出力で選択的な分割モードをサポートします。
todolist
1. 同音異義語(中国語)の推論の最適化
2. 英語大文字認識と英語ハイフン [問題](https://github.com/RVC-Boss/GPT-SoVITS/issues/271)
3. テキストに%記号が含まれているとエラーが発生し、推論が不可能です。また、「元/吨」が「元吨」ではなく「元每吨」と読まれるなどの問題があります。このような問題を解決するには、どのライブラリを使用する必要があり、それに対する改善を検討しています。
4. 中-日-英、中-英、日-英を含む 5 つの言語をサポートすることを目標にしています。

View File

@ -3,78 +3,83 @@
<h1>GPT-SoVITS-WebUI</h1>
パワフルな数発音声変換・音声合成 WebUI。<br><br>
[![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange
)](https://github.com/RVC-Boss/GPT-SoVITS)
[![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange)](https://github.com/RVC-Boss/GPT-SoVITS)
<img src="https://counter.seku.su/cmoe?name=gptsovits&theme=r34" /><br>
[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Boss/GPT-SoVITS/blob/main/colab_webui.ipynb)
[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)
[![Huggingface](https://img.shields.io/badge/🤗%20-Models%20Repo-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)
[**English**](../../README.md) | [**中文简体**](../cn/README.md) | [**日本語**](./README.md)
[**English**](../../README.md) | [**中文简体**](../cn/README.md) | [**日本語**](./README.md) | [**한국어**](../ko/README.md)
</div>
------
> [デモ動画](https://www.bilibili.com/video/BV12g4y1m7Uw)をチェック!
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
---
## 機能:
1. **ゼロショット TTS:** 5秒間のボーカルサンプルを入力すると、即座にテキストから音声に変換されます。
2. **数ショット TTS:** わずか1分間のトレーニングデータでモデルを微調整し、音声の類似性とリアリズムを向上。
1. **ゼロショット TTS:** 5 秒間のボーカルサンプルを入力すると、即座にテキストから音声に変換されます。
2. **数ショット TTS:** わずか 1 分間のトレーニングデータでモデルを微調整し、音声の類似性とリアリズムを向上。
3. **多言語サポート:** 現在、英語、日本語、中国語をサポートしています。
4. **WebUI ツール:** 統合されたツールには、音声伴奏の分離、トレーニングセットの自動セグメンテーション、中国語 ASR、テキストラベリングが含まれ、初心者がトレーニングデータセットと GPT/SoVITS モデルを作成するのを支援します。
## 環境の準備
**[デモ動画](https://www.bilibili.com/video/BV12g4y1m7Uw)をチェック!**
Windows ユーザーであればwin>=10 にてテスト済み、prezip 経由で直接インストールできます。[prezip](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true) をダウンロードして解凍し、go-webui.bat をダブルクリックするだけで GPT-SoVITS-WebUI が起動します。
未見の話者数ショット微調整デモ:
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
## インストール
### テスト済みの環境
### Python と PyTorch のバージョン
- Python 3.9, PyTorch 2.0.1, CUDA 11
- Python 3.10.13, PyTorch 2.1.2, CUDA 12.3
- Python 3.9, PyTorch 2.3.0.dev20240122, macOS 14.3 (Apple Silicon, MPS)
- Python 3.9, PyTorch 2.3.0.dev20240122, macOS 14.3 (Apple silicon)
_注記: numba==0.56.4 は py<3.11 が必要です_
### Macユーザーへ
Macユーザーの方は、以下のコマンドを使用してインストールしてください。
#### 環境作成
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
```
#### Pip パッケージ
```bash
pip install -r requirements.txt
pip uninstall torch torchaudio
pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu
```
_注記: UVR5を使用した前処理には、[元のプロジェクトGUIをダウンロード](https://github.com/Anjok07/ultimatevocalremovergui)して、操作にGPUを選択することを推奨します。さらに、Macを使用して推論する際にメモリリークの問題が発生する可能性がありますが、推論のwebUIを再起動することでメモリを解放できます。_
### Conda によるクイックインストール
### Windows
Windows ユーザーの場合(win>=10 でテスト済み)、[事前にパッケージ化されたディストリビューション](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true)を直接ダウンロードし、_go-webui.bat_ をダブルクリックして GPT-SoVITS-WebUI を起動することができます。
### Linux
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
bash install.sh
```
### macOS
**注MacでGPUを使用して訓練されたモデルは、他のデバイスで訓練されたモデルと比較して著しく品質が低下するため、当面はCPUを使用して訓練します。**
まず、`brew install ffmpeg`または`conda install ffmpeg`を実行してFFmpegをインストールしたことを確認してください。次に、以下のコマンドを使用してインストールします
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
pip install -r requirements.txt
```
### 手動インストール
#### Pip パッケージ
#### 依存関係をインストールします
```bash
pip install -r requirements.txt
```
#### FFmpeg
#### FFmpegをインストールします。
##### Conda ユーザー
```bash
conda install ffmpeg
```
@ -87,42 +92,38 @@ sudo apt install libsox-dev
conda install -c conda-forge 'ffmpeg<7'
```
##### MacOS ユーザー
```bash
brew install ffmpeg
```
##### Windows ユーザー
[ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe) と [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe) をダウンロードし、GPT-SoVITS のルートディレクトリに置きます。
### Dockerの使用
### Docker の使用
#### docker-compose.yamlの設定
#### docker-compose.yaml の設定
0. イメージのタグについて:コードベースの更新が速く、イメージのパッケージングとテストが遅いため、[Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) で現在パッケージされている最新のイメージをご覧になり、ご自身の状況に応じて選択するか、またはご自身のニーズに応じて Dockerfile を使用してローカルで構築してください。
1. 環境変数:
- `is_half`:半精度/倍精度の制御。"SSL抽出"ステップ中に`4-cnhubert/5-wav32k`ディレクトリ内の内容が正しく生成されない場合、通常これが原因です。実際の状況に応じてTrueまたはFalseに調整してください。
- `is_half`:半精度/倍精度の制御。"SSL 抽出"ステップ中に`4-cnhubert/5-wav32k`ディレクトリ内の内容が正しく生成されない場合、通常これが原因です。実際の状況に応じて True または False に調整してください。
2. ボリューム設定:コンテナ内のアプリケーションのルートディレクトリは`/workspace`に設定されます。デフォルトの`docker-compose.yaml`には、アップロード/ダウンロードの内容の実例がいくつか記載されています。
3. `shm_size`WindowsのDocker Desktopのデフォルトの利用可能メモリが小さすぎるため、異常な動作を引き起こす可能性があります。状況に応じて適宜設定してください。
4. `deploy`セクションのGPUに関連する内容は、システムと実際の状況に応じて慎重に設定してください。
3. `shm_size`Windows の Docker Desktop のデフォルトの利用可能メモリが小さすぎるため、異常な動作を引き起こす可能性があります。状況に応じて適宜設定してください。
4. `deploy`セクションの GPU に関連する内容は、システムと実際の状況に応じて慎重に設定してください。
#### docker compose で実行する
#### docker composeで実行する
```markdown
docker compose -f "docker-compose.yaml" up -d
```
#### dockerコマンドで実行する
#### docker コマンドで実行する
上記と同様に、実際の状況に基づいて対応するパラメータを変更し、次のコマンドを実行します:
```markdown
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:dev-20240123.03
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9880:9880 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx
```
### 事前訓練済みモデル
## 事前訓練済みモデル
[GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) から事前訓練済みモデルをダウンロードし、`GPT_SoVITS\pretrained_models` に置きます。
@ -130,7 +131,6 @@ docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-Docker
UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally) の場合は、[UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) からモデルをダウンロードして `tools/uvr5/uvr5_weights` に置きます。
## データセット形式
TTS アノテーション .list ファイル形式:
@ -150,25 +150,57 @@ vocal_path|speaker_name|language|text
```
D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
```
## Todo リスト
- [ ] **優先度 高:**
- [ ] 日本語と英語でのローカライズ。
- [x] 日本語と英語でのローカライズ。
- [ ] ユーザーガイド。
- [ ] 日本語データセットと英語データセットのファインチューニングトレーニング。
- [x] 日本語データセットと英語データセットのファインチューニングトレーニング。
- [ ] **機能:**
- [ ] ゼロショット音声変換5秒数ショット音声変換1分
- [ ] ゼロショット音声変換 (5 秒)/ 数ショット音声変換 (1 分)。
- [ ] TTS スピーキングスピードコントロール。
- [ ] TTS の感情コントロールの強化。
- [ ] SoVITS トークン入力を語彙の確率分布に変更する実験。
- [ ] 英語と日本語のテキストフロントエンドを改善。
- [ ] 小型と大型の TTS モデルを開発する。
- [ ] Colab のスクリプト。
- [x] Colab のスクリプト。
- [ ] トレーニングデータセットを拡張する2k→10k
- [ ] より良い sovits ベースモデル(音質向上)
- [ ] モデルミックス
## (オプション) 必要に応じて、コマンドライン操作モードが提供されます。
コマンド ラインを使用して UVR5 の WebUI を開きます
```
python tools/uvr5/webui.py "<infer_device>" <is_half> <webui_port_uvr5>
```
ブラウザを開けない場合は、以下の形式に従って UVR 処理を行ってください。これはオーディオ処理に mdxnet を使用しています。
```
python mdxnet.py --model --input_root --output_vocal --output_ins --agg_level --format --device --is_half_precision
```
コマンド ラインを使用してデータセットのオーディオ セグメンテーションを行う方法は次のとおりです。
```
python audio_slicer.py \
--input_path "<path_to_original_audio_file_or_directory>" \
--output_root "<directory_where_subdivided_audio_clips_will_be_saved>" \
--threshold <volume_threshold> \
--min_length <minimum_duration_of_each_subclip> \
--min_interval <shortest_time_gap_between_adjacent_subclips>
--hop_size <step_size_for_computing_volume_curve>
```
コマンドラインを使用してデータセット ASR 処理を行う方法です (中国語のみ)
```
python tools/damo_asr/cmd-asr.py "<Path to the directory containing input audio files>"
```
ASR処理はFaster_Whisperを通じて実行されます(中国語を除くASRマーキング)
(進行状況バーは表示されません。GPU のパフォーマンスにより時間遅延が発生する可能性があります)
```
python ./tools/damo_asr/WhisperASR.py -i <input> -o <output> -f <file_name.list> -l <language>
```
カスタムリストの保存パスが有効になっています
## クレジット
以下のプロジェクトとコントリビューターに感謝します:
@ -189,6 +221,7 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
- [gradio](https://github.com/gradio-app/gradio)
## すべてのコントリビューターに感謝します
<a href="https://github.com/RVC-Boss/GPT-SoVITS/graphs/contributors" target="_blank">
<img src="https://contrib.rocks/image?repo=RVC-Boss/GPT-SoVITS" />
</a>

109
docs/ko/Changelog_KO.md Normal file
View File

@ -0,0 +1,109 @@
### 20240121 업데이트
1. `config``is_share` 추가, Colab 등의 환경에서 이를 `True`로 설정하여 webui를 공용 네트워크에 매핑되도록 할 수 있습니다.
2. WebUI에 영어 번역이 추가되었습니다.
3. `cmd-asr`은 damo 모델이 이미 포함되어 있는지 자동으로 확인하고, 기본 경로에 없는 경우 modelscope에서 자동 다운로드 되도록 수정하였습니다.
4. [SoVITS 학습 중 ZeroDivisionError가 발생](https://github.com/RVC-Boss/GPT-SoVITS/issues/79)하는 경우 복구를 시도합니다. (길이가 0인 샘플 필터링 등)
5. TEMP 파일 폴더에서 오디오 및 기타 파일을 정리하여 최적화합니다.
6. 합성 오디오가 레퍼런스 오디오의 끝부분을 포함하는 문제를 개선하였습니다.
### 20240122 업데이트
1. 너무 짧은 출력 파일이 중복된 레퍼런스 오디오를 반환하는 문제 수정하였습니다.
2. 영어-일본어 학습이 원활하게 진행되는 QA를 완료하였습니다. (다만, 일본어 학습은 루트 디렉토리에 영어 이외의 문자가 없어야 합니다)
3. 오디오 경로를 검사합니다. 잘못된 경로를 읽으려고 할 때 '경로가 존재하지 않습니다'라는 에러 메시지를 반환하도록 수정하였습니다. 이는 ffmpeg 모듈의 에러가 아닙니다.
### 20240123 업데이트
1. hubert에서 nan 추출로 인한 SoVITS/GPT 학습 중 ZeroDivisionError 관련 에러를 해결하였습니다.
2. 추론 인터페이스에서 모델을 빠르게 전환할 수 있도록 지원하도록 수정되었습니다.
3. 모델 파일 정렬 로직 최적화하였습니다.
4. 중문 분석에 `jieba_fast``jieba`로 대체하였습니다.
### 20240126 업데이트
1. 중국어와 영어, 일본어와 영어가 혼합된 출력 텍스트를 지원합니다.
2. 출력에서 선택적 분할 모드를 지원합니다.
3. uvr5가 디렉토리를 읽고 자동으로 종료되는 문제를 수정하였습니다.
4. 여러 줄바꿈으로 인한 추론 오류를 수정하였습니다.
5. 추론 인터페이스에서 불필요한 로그 제거하였습니다.
6. MacOS에서의 학습 및 추론을 지원합니다.
7. 반정밀을 지원하지 않는 카드를 자동으로 식별하여 단일 정밀도를 강제 적용하고, CPU 추론에서 단일 정밀도를 강제 적용합니다.
### 20240128 업데이트
1. 숫자를 한자로 읽는 문제를 수정했습니다.
2. 문장 시작 부분의 일부 단어가 누락되는 문제 수정하였습니다.
3. 부적절한 길이의 레퍼런스 오디오를 제한하였습니다.
4. GPT 학습 시 ckpt가 저장되지 않는 문제 수정하였습니다.
5. Dockerfile에서 모델 다운로드 프로세스 개선하였습니다.
### 20240129 업데이트
1. 반정밀도 훈련에 문제가 있는 16 시리즈 및 기타 그래픽 카드의 훈련 구성을 단정밀도 훈련으로 변경했습니다.
2. Colab에서도 사용이 가능한 버전을 테스트 및 업데이트 하였습니다.
3. `git clone modelscope funasr` 저장소와 오래된 버전의 funasr 사용으로 인해 인터페이스가 일치하지 않는 오류를 수정하였습니다.
### 20240130 업데이트
1. 경로와 관련된 문자열을 파싱하여 큰따옴표를 자동으로 제거합니다. 또한, 경로를 복사하는 경우 큰따옴표가 포함되어도 오류가 발생하지 않습니다.
2. 중국어 및 영어 문자열의 문장 부호가 잘리는 문제 및 문장의 시작과 끝에 문장 부호가 추가되는 문제를 수정했습니다.
3. 문장 부호의 수를 확장하였습니다.
### 20240201 업데이트
1. uvr5가 잘못된 형식으로 읽어들이는 문제를 수정하였습니다.
2. 중국어, 일본어, 영어가 혼합된 여러 텍스트를 자동으로 분리하여 언어를 인식합니다.
### 20240202 업데이트
1. asr 경로의 끝에 `/`가 포함되어 있는 경우 오류가 발생하는 문제를 수정하였습니다.
2. paddlespeech의 Normalizer를 도입하여 [문제를 해결](https://github.com/RVC-Boss/GPT-SoVITS/pull/377)하여, 예를 들어 xx.xx%(백분율), 元/吨이 元吨으로 읽히는 문제를 해결하였습니다. 또한, 밑줄이 더 이상 오류를 발생시키지 않습니다.
### 20240207 업데이트
1. 언어 전달 매개변수가 혼란스러워져 [중국어 추론 효과가 저하되는 문제](https://github.com/RVC-Boss/GPT-SoVITS/issues/391)를 수정하였습니다.
2. uvr5가 `inf everywhere` [오류를 반환하는 문제](https://github.com/RVC-Boss/GPT-SoVITS/pull/403)를 수정하였습니다.
3. uvr5의 `is_half` 매개변수가 bool로 변환되지 않아 항상 반정밀도 추론으로 설정되어 16 시리즈 그래픽 카드에서 `inf`가 반환되는 [문제](https://github.com/RVC-Boss/GPT-SoVITS/commit/14a285109a521679f8846589c22da8f656a46ad8)를 수정하였습니다.
4. 영어 텍스트 입력을 최적화하였습니다.
5. gradio 종속성을 지원합니다.
6. 루트 디렉토리가 비어 있으면 `.list` 전체 경로를 자동으로 읽습니다.
7. faster whisper ASR 일본어 및 영어를 지원합니다.
### 20240208 업데이트
1. GPT 학습이 카드에 따라 멈추는 문제와 [GPT 학습 중 ZeroDivisionError](https://github.com/RVC-Boss/GPT-SoVITS/commit/59f35adad85815df27e9c6b33d420f5ebfd8376b) 문제를 수정하였습니다.
### 20240212 업데이트
1. faster whisper 및 funasr 로직을 최적화하였습니다. faster whisper는 이미지 스토어에서 다운로드하여 huggingface에 연결하지 못하는 문제를 회피합니다.
2. DPO Loss 실험적 학습 옵션을 활성화하여 부정적 샘플을 생성하여 [GPT 반복 및 누락 문자 문제](https://github.com/RVC-Boss/GPT-SoVITS/pull/457)를 완화합니다. 추론 인터페이스에 몇 가지 추론 매개변수를 공개합니다.
### 20240214 업데이트
1. 학습에서 중국어 실험 이름을 지원합니다. (이전에 오류가 발생했습니다)
2. DPO 학습을 선택적으로 설정할 수 있도록 변경하였습니다. 배치 크기를 선택하면 자동으로 절반으로 줄어듭니다. 추론 인터페이스에서 새로운 매개변수를 전달하지 않는 문제를 수정하였습니다.
### 20240216 업데이트
1. 참조 텍스트 입력을 지원합니다.
2. 프론트엔드에 있던 중국어 텍스트 입력 버그를 수정하였습니다.
### 20240221 업데이트
1. 데이터 처리에 음성 노이즈 감소 옵션을 추가하였습니다. (노이즈 감소는 16k 샘플링률만 남기며, 노이즈가 크지 않다면 사용하지 마십시오.)
2. 중국어 및 일본어 프론트엔드 처리를 최적화하였습니다. https://github.com/RVC-Boss/GPT-SoVITS/pull/559 https://github.com/RVC-Boss/GPT-SoVITS/pull/556 https://github.com/RVC-Boss/GPT-SoVITS/pull/532 https://github.com/RVC-Boss/GPT-SoVITS/pull/507 https://github.com/RVC-Boss/GPT-SoVITS/pull/509
3. Mac에서 CPU 추론이 더 빨라졌으므로 추론 장치를 mps에서 CPU로 변경하였습니다.
4. colab에서 공용 URL을 열지 않는 문제를 수정하였습니다.
### 20240306 업데이트
1. 추론 속도를 50% 빠르게 하였습니다. (RTX3090+pytorch2.2.1+cu11.8+win10+py39 테스트 완료) https://github.com/RVC-Boss/GPT-SoVITS/pull/672
2. faster whisper를 사용할 때 중국어 ASR을 먼저 다운로드할 필요가 없습니다.
3. uvr5의 잔향 제거 모델이 잔향이 있는지 여부를 반대로 반환하는 문제를 수정하였습니다.
4. faster whisper가 CUDA를 사용할 수 없는 경우 자동으로 CPU 추론을 사용하도록 수정하였습니다.
5. is_half의 판단을 수정하여 Mac에서 CPU 추론이 정상적으로 작동하도록 수정하였습니다.
todolist
1. 중국어 다양한 발음 단어 추론 최적화(테스트 결과를 작성하시는 분은 pr 코멘트 영역에 작성해주시면 감사하겠습니다)

231
docs/ko/README.md Normal file
View File

@ -0,0 +1,231 @@
<div align="center">
<h1>GPT-SoVITS-WebUI</h1>
소량의 데이터로 음성 변환 및 음성 합성을 지원하는 강력한 WebUI.<br><br>
[![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange)](https://github.com/RVC-Boss/GPT-SoVITS)
<img src="https://counter.seku.su/cmoe?name=gptsovits&theme=r34" /><br>
[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Boss/GPT-SoVITS/blob/main/colab_webui.ipynb)
[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![Huggingface](https://img.shields.io/badge/🤗%20-Models%20Repo-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)
[**English**](../../README.md) | [**中文简体**](../cn/README.md) | [**日本語**](../ja/README.md) | [**한국어**](./README.md)
</div>
---
## 기능:
1. **제로샷 텍스트 음성 변환 (TTS):** 5초의 음성 샘플을 입력하면 즉시 텍스트를 음성으로 변환할 수 있습니다.
2. **소량의 데이터 TTS:** 1분의 훈련 데이터만으로 모델을 미세 조정하여 음성 유사도와 실제감을 향상시킬 수 있습니다.
3. **다국어 지원:** 훈련 데이터셋과 다른 언어의 추론을 지원하며, 현재 영어, 일본어, 중국어를 지원합니다.
4. **WebUI 도구:** 음성 반주 분리, 자동 훈련 데이터셋 분할, 중국어 자동 음성 인식(ASR) 및 텍스트 주석 등의 도구를 통합하여 초보자가 훈련 데이터셋과 GPT/SoVITS 모델을 생성하는 데 도움을 줍니다.
**데모 비디오를 확인하세요! [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw)**
보지 못한 발화자의 퓨샷(few-shot) 파인튜닝 데모:
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
## 설치
### 테스트 통과 환경
- Python 3.9, PyTorch 2.0.1 및 CUDA 11
- Python 3.10.13, PyTorch 2.1.2 및 CUDA 12.3
- Python 3.9, Pytorch 2.3.0.dev20240122 및 macOS 14.3 (Apple Silicon)
_참고: numba==0.56.4 는 python<3.11 필요로 합니다._
### Windows
Windows 사용자이며 (win>=10에서 테스트 완료) [미리 패키지된 배포판](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true)을 직접 다운로드하여 _go-webui.bat_을 더블클릭하면 GPT-SoVITS-WebUI를 시작할 수 있습니다.
### Linux
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
bash install.sh
```
### macOS
**주의: Mac에서 GPU로 훈련된 모델은 다른 OS에서 훈련된 모델에 비해 품질이 낮습니다. 해당 문제를 해결하기 전까지 MacOS에선 CPU를 사용하여 훈련을 진행합니다.**
먼저 `brew install ffmpeg` 또는 `conda install ffmpeg`를 실행하여 FFmpeg가 설치되었는지 확인한 다음, 다음 명령어를 사용하여 설치하세요:
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
pip install -r requirements.txt
```
### 수동 설치
#### 의존성 설치
```bash
pip install -r requirements.txt
```
#### FFmpeg 설치
##### Conda 사용자
```bash
conda install ffmpeg
```
##### Ubuntu/Debian 사용자
```bash
sudo apt install ffmpeg
sudo apt install libsox-dev
conda install -c conda-forge 'ffmpeg<7'
```
##### Windows 사용자
[ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe)와 [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe)를 GPT-SoVITS root 디렉토리에 넣습니다.
### Docker에서 사용
#### docker-compose.yaml 설정
0. 이미지 태그: 코드 저장소가 빠르게 업데이트되고 패키지가 느리게 빌드되고 테스트되므로, 현재 빌드된 최신 도커 이미지를 [Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits)에서 확인하고 필요에 따라 Dockerfile을 사용하여 로컬에서 빌드할 수 있습니다.
1. 환경 변수:
- is_half: 반정밀/배정밀 제어. "SSL 추출" 단계에서 4-cnhubert/5-wav32k 디렉토리의 내용을 올바르게 생성할 수 없는 경우, 일반적으로 이것 때문입니다. 실제 상황에 따라 True 또는 False로 조정할 수 있습니다.
2. 볼륨 설정, 컨테이너 내의 애플리케이션 루트 디렉토리를 /workspace로 설정합니다. 기본 docker-compose.yaml에는 실제 예제가 나열되어 있으므로 업로드/다운로드를 쉽게 할 수 있습니다.
3. shm_size: Windows의 Docker Desktop의 기본 사용 가능한 메모리가 너무 작아 오류가 발생할 수 있으므로 실제 상황에 따라 조정합니다.
4. deploy 섹션의 gpu 관련 내용은 시스템 및 실제 상황에 따라 조정합니다.
#### docker compose로 실행
```
docker compose -f "docker-compose.yaml" up -d
```
#### docker 명령으로 실행
위와 동일하게 실제 상황에 맞게 매개변수를 수정한 다음 다음 명령을 실행합니다:
```
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9880:9880 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx
```
## 사전 훈련된 모델
[GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS)에서 사전 훈련된 모델을 다운로드하고 `GPT_SoVITS\pretrained_models`에 넣습니다.
중국어 자동 음성 인식(ASR), 음성 반주 분리 및 음성 제거를 위해 [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files) 및 [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files)을 다운로드하고 `tools/damo_asr/models`에 넣습니다.
UVR5(음성/반주 분리 및 잔향 제거)를 위해 [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights)에서 모델을 다운로드하고 `tools/uvr5/uvr5_weights`에 넣습니다.
## 데이터셋 형식
텍스트 음성 합성(TTS) 주석 .list 파일 형식:
```
vocal_path|speaker_name|language|text
```
언어 사전:
- 'zh': 중국어
- 'ja': 일본어
- 'en': 영어
예시:
```
D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
```
## 할 일 목록
- [ ] **최우선순위:**
- [x] 일본어 및 영어 지역화.
- [ ] 사용자 가이드.
- [x] 일본어 및 영어 데이터셋 미세 조정 훈련.
- [ ] **기능:**
- [ ] 제로샷 음성 변환 (5초) / 소량의 음성 변환 (1분).
- [ ] TTS 속도 제어.
- [ ] 향상된 TTS 감정 제어.
- [ ] SoVITS 토큰 입력을 단어 확률 분포로 변경해 보세요.
- [ ] 영어 및 일본어 텍스트 프론트 엔드 개선.
- [ ] 작은 크기와 큰 크기의 TTS 모델 개발.
- [x] Colab 스크립트.
- [ ] 훈련 데이터셋 확장 (2k 시간에서 10k 시간).
- [ ] 더 나은 sovits 기본 모델 (향상된 오디오 품질).
- [ ] 모델 블렌딩.
## (선택 사항) 필요한 경우 여기에서 명령줄 작업 모드를 제공합니다.
명령줄을 사용하여 UVR5용 WebUI 열기
```
python tools/uvr5/webui.py "<infer_device>" <is_half> <webui_port_uvr5>
```
브라우저를 열 수 없는 경우 UVR 처리를 위해 아래 형식을 따르십시오. 이는 오디오 처리를 위해 mdxnet을 사용하는 것입니다.
```
python mdxnet.py --model --input_root --output_vocal --output_ins --agg_level --format --device --is_half_precision
```
명령줄을 사용하여 데이터세트의 오디오 분할을 수행하는 방법은 다음과 같습니다.
```
python audio_slicer.py \
--input_path "<path_to_original_audio_file_or_directory>" \
--output_root "<directory_where_subdivided_audio_clips_will_be_saved>" \
--threshold <volume_threshold> \
--min_length <minimum_duration_of_each_subclip> \
--min_interval <shortest_time_gap_between_adjacent_subclips>
--hop_size <step_size_for_computing_volume_curve>
```
명령줄을 사용하여 데이터 세트 ASR 처리를 수행하는 방법입니다(중국어만 해당).
```
python tools/damo_asr/cmd-asr.py "<Path to the directory containing input audio files>"
```
ASR 처리는 Faster_Whisper(중국어를 제외한 ASR 마킹)를 통해 수행됩니다.
(진행률 표시줄 없음, GPU 성능으로 인해 시간 지연이 발생할 수 있음)
```
python ./tools/damo_asr/WhisperASR.py -i <input> -o <output> -f <file_name.list> -l <language>
```
사용자 정의 목록 저장 경로가 활성화되었습니다.
## 감사의 말
특별히 다음 프로젝트와 기여자에게 감사드립니다:
- [ar-vits](https://github.com/innnky/ar-vits)
- [SoundStorm](https://github.com/yangdongchao/SoundStorm/tree/master/soundstorm/s1/AR)
- [vits](https://github.com/jaywalnut310/vits)
- [TransferTTS](https://github.com/hcy71o/TransferTTS/blob/master/models.py#L556)
- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain)
- [contentvec](https://github.com/auspicious3000/contentvec/)
- [hifi-gan](https://github.com/jik876/hifi-gan)
- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large)
- [fish-speech](https://github.com/fishaudio/fish-speech/blob/main/tools/llama/generate.py#L41)
- [ultimatevocalremovergui](https://github.com/Anjok07/ultimatevocalremovergui)
- [audio-slicer](https://github.com/openvpi/audio-slicer)
- [SubFix](https://github.com/cronrpc/SubFix)
- [FFmpeg](https://github.com/FFmpeg/FFmpeg)
- [gradio](https://github.com/gradio-app/gradio)
## 모든 기여자들에게 감사드립니다 ;)
<a href="https://github.com/RVC-Boss/GPT-SoVITS/graphs/contributors" target="_blank">
<img src="https://contrib.rocks/image?repo=RVC-Boss/GPT-SoVITS" />
</a>

View File

@ -1,4 +1,2 @@
@echo off
chcp 65001
"%~dp0\runtime\python.exe" "%~dp0\webui.py"
runtime\python.exe webui.py
pause

4
go-webui.ps1 Normal file
View File

@ -0,0 +1,4 @@
$ErrorActionPreference = "SilentlyContinue"
chcp 65001
& "$PSScriptRoot\runtime\python.exe" "$PSScriptRoot\webui.py"
pause

218
gpt-sovits_kaggle.ipynb Normal file
View File

@ -0,0 +1,218 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "45857cb2",
"metadata": {
"_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19",
"_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5",
"execution": {
"iopub.execute_input": "2024-02-18T14:43:46.735480Z",
"iopub.status.busy": "2024-02-18T14:43:46.735183Z",
"iopub.status.idle": "2024-02-18T14:48:10.724175Z",
"shell.execute_reply": "2024-02-18T14:48:10.723059Z"
},
"papermill": {
"duration": 263.994935,
"end_time": "2024-02-18T14:48:10.726613",
"exception": false,
"start_time": "2024-02-18T14:43:46.731678",
"status": "completed"
},
"tags": []
},
"outputs": [],
"source": [
"!git clone https://github.com/RVC-Boss/GPT-SoVITS.git\n",
"%cd GPT-SoVITS\n",
"!apt-get update && apt-get install -y --no-install-recommends tzdata ffmpeg libsox-dev parallel aria2 git git-lfs && git lfs install\n",
"!pip install -r requirements.txt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b9d346b4",
"metadata": {
"execution": {
"iopub.execute_input": "2024-02-18T14:48:10.815802Z",
"iopub.status.busy": "2024-02-18T14:48:10.814899Z",
"iopub.status.idle": "2024-02-18T14:50:31.253276Z",
"shell.execute_reply": "2024-02-18T14:50:31.252024Z"
},
"papermill": {
"duration": 140.484893,
"end_time": "2024-02-18T14:50:31.255720",
"exception": false,
"start_time": "2024-02-18T14:48:10.770827",
"status": "completed"
},
"tags": []
},
"outputs": [],
"source": [
"# @title Download pretrained models 下载预训练模型\n",
"!mkdir -p /kaggle/working/GPT-SoVITS/GPT_SoVITS/pretrained_models\n",
"!mkdir -p /kaggle/working/GPT-SoVITS/tools/damo_asr/models\n",
"!mkdir -p /kaggle/working/GPT-SoVITS/tools/uvr5\n",
"%cd /kaggle/working/GPT-SoVITS/GPT_SoVITS/pretrained_models\n",
"!git clone https://huggingface.co/lj1995/GPT-SoVITS\n",
"%cd /kaggle/working/GPT-SoVITS/tools/damo_asr/models\n",
"!git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git\n",
"!git clone https://www.modelscope.cn/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch.git\n",
"!git clone https://www.modelscope.cn/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git\n",
"# # @title UVR5 pretrains 安装uvr5模型\n",
"%cd /kaggle/working/GPT-SoVITS/tools/uvr5\n",
"!git clone https://huggingface.co/Delik/uvr5_weights\n",
"!git config core.sparseCheckout true\n",
"!mv /kaggle/working/GPT-SoVITS/GPT_SoVITS/pretrained_models/GPT-SoVITS/* /kaggle/working/GPT-SoVITS/GPT_SoVITS/pretrained_models/"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ea94d245",
"metadata": {
"execution": {
"iopub.execute_input": "2024-02-18T14:29:01.071549Z",
"iopub.status.busy": "2024-02-18T14:29:01.070592Z",
"iopub.status.idle": "2024-02-18T14:40:45.318368Z",
"shell.execute_reply": "2024-02-18T14:40:45.317130Z",
"shell.execute_reply.started": "2024-02-18T14:29:01.071512Z"
},
"papermill": {
"duration": null,
"end_time": null,
"exception": false,
"start_time": "2024-02-18T14:50:31.309013",
"status": "running"
},
"tags": []
},
"outputs": [],
"source": [
"# @title launch WebUI 启动WebUI\n",
"%cd /kaggle/working/GPT-SoVITS/\n",
"!npm install -g localtunnel\n",
"import subprocess\n",
"import threading\n",
"import time\n",
"import socket\n",
"import urllib.request\n",
"def iframe_thread(port):\n",
" while True:\n",
" time.sleep(0.5)\n",
" sock= socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
" result = sock.connect_ex(('127.0.0.1', port))\n",
" if result == 0:\n",
" break\n",
" sock.close()\n",
"\n",
" from colorama import Fore, Style\n",
" print (Fore.GREEN + \"\\nIP: \", Fore. RED, urllib.request.urlopen('https://ipv4.icanhazip.com').read().decode('utf8').strip(\"\\n\"), \"\\n\", Style. RESET_ALL)\n",
" p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n",
" for line in p.stdout:\n",
" print(line.decode(), end='')\n",
"threading.Thread (target=iframe_thread, daemon=True, args=(9874,)).start()\n",
"\n",
"!python webui.py"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dda88a6d",
"metadata": {
"execution": {
"iopub.execute_input": "2024-02-18T14:40:56.880608Z",
"iopub.status.busy": "2024-02-18T14:40:56.879879Z"
},
"papermill": {
"duration": null,
"end_time": null,
"exception": null,
"start_time": null,
"status": "pending"
},
"tags": []
},
"outputs": [],
"source": [
"# 开启推理页面\n",
"%cd /kaggle/working/GPT-SoVITS/\n",
"!npm install -g localtunnel\n",
"import subprocess\n",
"import threading\n",
"import time\n",
"import socket\n",
"import urllib.request\n",
"def iframe_thread(port):\n",
" while True:\n",
" time.sleep(0.5)\n",
" sock= socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
" result = sock.connect_ex(('127.0.0.1', port))\n",
" if result == 0:\n",
" break\n",
" sock.close()\n",
"\n",
" from colorama import Fore, Style\n",
" print (Fore.GREEN + \"\\nIP: \", Fore. RED, urllib.request.urlopen('https://ipv4.icanhazip.com').read().decode('utf8').strip(\"\\n\"), \"\\n\", Style. RESET_ALL)\n",
" p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n",
" for line in p.stdout:\n",
" print(line.decode(), end='')\n",
"threading.Thread (target=iframe_thread, daemon=True, args=(9872,)).start()\n",
"\n",
"!python ./GPT_SoVITS/inference_webui.py"
]
}
],
"metadata": {
"kaggle": {
"accelerator": "nvidiaTeslaT4",
"dataSources": [
{
"datasetId": 4459328,
"sourceId": 7649639,
"sourceType": "datasetVersion"
}
],
"dockerImageVersionId": 30646,
"isGpuEnabled": true,
"isInternetEnabled": true,
"language": "python",
"sourceType": "notebook"
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
},
"papermill": {
"default_parameters": {},
"duration": null,
"end_time": null,
"environment_variables": {},
"exception": null,
"input_path": "__notebook__.ipynb",
"output_path": "__notebook__.ipynb",
"parameters": {},
"start_time": "2024-02-18T14:43:44.011910",
"version": "2.5.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -2,14 +2,34 @@
"很遗憾您这没有能用的显卡来支持您训练": "Unfortunately, there is no compatible GPU available to support your training.",
"UVR5已开启": "UVR5 opened ",
"UVR5已关闭": "UVR5 closed",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "This software is open source under the MIT license. The author does not have any control over the software. Users who use the software and distribute the sounds exported by the software are solely responsible. <br>If you do not agree with this clause, you cannot use or reference any codes and files within the software package. See the root directory <b>Agreement-LICENSE.txt</b> for details.",
"输入文件夹路径": "Input folder path",
"输出文件夹路径": "Output folder path",
"ASR 模型": "ASR model",
"ASR 模型尺寸": "ASR model size",
"ASR 语言设置": "ASR language",
"模型切换": "Model switch",
"是否开启dpo训练选项(实验性)": "Enable DPO training (experimental feature)",
"开启无参考文本模式。不填参考文本亦相当于开启。": "Enable no reference mode. If you don't fill 'Text for reference audio', no reference mode will be enabled.",
"使用无参考文本模式时建议使用微调的GPT": "Please use your trained GPT model if you don't use reference audio.",
"后续将支持转音素、手工修改音素、语音合成分步执行。": " Step-to-step phoneme transformation and modification coming soon!",
"gpt采样参数(无参考文本时不要太低)": "GPT parameters:",
"按标点符号切": "Slice by every punct",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "This software is open source under the MIT license. The author does not have any control over the software. Users who use the software and distribute the sounds exported by the software are solely responsible. <br>If you do not agree with this clause, you cannot use or reference any codes and files within the software package. See the root directory <b>Agreement-LICENSE</b> for details.",
"0-前置数据集获取工具": "0-Fetch dataset",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 webui (for vocal separation, deecho, dereverb and denoise)",
"是否开启UVR5-WebUI": "Open UVR5-WebUI",
"UVR5进程输出信息": "UVR5 process output log",
"0b-语音切分工具": "0b-Audio slicer",
".list标注文件的路径": ".list annotation file path",
"GPT模型列表": "GPT weight list",
"SoVITS模型列表": "SoVITS weight list",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名不是全路径。": "Fill in the directory of segmented audio. The complete path of the read audio file is equal to the directory concatenated with the waveform's corresponding filename from the list file (not the full path).",
"音频自动切分输入路径,可文件可文件夹": "Audio slicer input (file or folder)",
"切分后的子音频的输出根目录": "Audio slicer output folder",
"怎么切": "How to slice the sentence",
"不切": "No slice",
"凑四句一切": "Slice once every 4 sentences",
"按英文句号.切": "Slice by English punct",
"threshold:音量小于这个值视作静音的备选切割点": "Noise gate threshold (loudness below this value will be treated as noise",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "Minimum length",
"min_interval:最短切割间隔": "Minumum interval for audio cutting",
@ -86,13 +106,15 @@
"TTS推理WebUI进程输出信息": "TTS inference webui output log",
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-Voice Changer",
"施工中,请静候佳音": "In construction, please wait",
"参考音频在3~10秒范围外请更换": "Reference audio is outside the 3-10 second range, please choose another one!",
"请上传3~10秒内参考音频超过会报错": "Please upload a reference audio within the 3-10 second range; if it exceeds this duration, it will raise errors.",
"TTS推理进程已开启": "TTS inference process is opened",
"TTS推理进程已关闭": "TTS inference process closed",
"打标工具WebUI已开启": "proofreading tool webui is opened",
"打标工具WebUI已关闭": "proofreading tool webui is closed",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "This software is under MIT licence. The author does not have any control for this software. Users are solely reponsible for all voices thats being converted and/or distributed. If you disagree with this Terms and Conditions, you cannot use or cite any files or code in this file. Please check LICENSE. for more info.",
"*请上传并填写参考信息": "*Please upload and fill reference information",
"*请填写需要合成的目标文本": "*Please fill the text that needs inference",
"*请填写需要合成的目标文本。中英混合选中文,日英混合选日文,中日混合暂不支持,非目标语言文本自动遗弃。": "*Please fill the text that needs inference. Select Chinese for mixed Chinese and English text, choose Japanese for mixed Japanese and English text. Mixed Chinese and Japanese is currently not supported; non-target language text will be automatically discarded.",
"ASR任务开启%s": "ASR training started: %s",
"GPT训练完成": "Finished GPT training",
"GPT训练开始%s": "GPT training started: %s",

View File

@ -1,135 +1,284 @@
{
">=3则使用对harvest音高识别的结果使用中值滤波数值为滤波半径使用可以削弱哑音": "Si es >=3, entonces use el resultado del reconocimiento de tono de 'harvest' con filtro de mediana, el valor es el radio del filtro, su uso puede debilitar el sonido sordo",
"A模型权重": "Un peso modelo para el modelo A.",
"A模型路径": "Modelo A ruta.",
"B模型路径": "Modelo B ruta.",
"很遗憾您这没有能用的显卡来支持您训练": "Lamentablemente, no tiene una tarjeta gráfica compatible para admitir su entrenamiento.",
"UVR5已开启": "UVR5 está habilitado",
"UVR5已关闭": "UVR5 está deshabilitado",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Este software es de código abierto bajo la licencia MIT. El autor no tiene control sobre el software. El usuario que lo utilice o distribuya, y el que genere sonidos a partir del software, asume toda la responsabilidad. <br>Si no acepta estos términos, no puede utilizar ni hacer referencia a ningún código o archivo dentro del paquete de software. Consulte el archivo <b>LICENSE</b> en el directorio raíz para obtener más detalles.",
"0-前置数据集获取工具": "0-Herramienta de obtención de conjunto de datos previo",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-Herramienta de separación de voz y acompañamiento UVR5 y eliminación de reverberación y retardo",
"是否开启UVR5-WebUI": "¿Habilitar UVR5-WebUI?",
"UVR5进程输出信息": "Información de salida del proceso UVR5",
"0b-语音切分工具": "0b-Herramienta de división de voz",
".list标注文件的路径": "Ruta del archivo de anotación .list",
"GPT模型列表": "Lista de modelos GPT",
"SoVITS模型列表": "Lista de modelos SoVITS",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名不是全路径。": "Directorio donde se guardan los archivos de audio después del corte! Ruta completa del archivo de audio a leer = este directorio - nombre de archivo correspondiente a la forma de onda en el archivo de lista (no la ruta completa).",
"音频自动切分输入路径,可文件可文件夹": "Ruta de entrada para la división automática de audio, puede ser un archivo o una carpeta",
"切分后的子音频的输出根目录": "Directorio raíz de salida de los sub-audios después de la división",
"怎么切": "Cómo cortar",
"不切": "No cortar",
"凑四句一切": "Completa cuatro oraciones para rellenar todo",
"按英文句号.切": "Cortar por puntos en inglés.",
"threshold:音量小于这个值视作静音的备选切割点": "umbral: puntos de corte alternativos considerados como silencio si el volumen es menor que este valor",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: duración mínima de cada segmento, si el primer segmento es demasiado corto, se conecta continuamente con los siguientes hasta que supera este valor",
"min_interval:最短切割间隔": "min_interval: intervalo mínimo de corte",
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: cómo calcular la curva de volumen, cuanto más pequeño, mayor precisión pero mayor carga computacional (mayor precisión no significa mejor rendimiento)",
"max_sil_kept:切完后静音最多留多长": "max_sil_kept: duración máxima del silencio después del corte",
"开启语音切割": "Habilitar la división de voz",
"终止语音切割": "Terminar la división de voz",
"max:归一化后最大值多少": "max: valor máximo después de la normalización",
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix: proporción de mezcla de audio normalizado que entra",
"切割使用的进程数": "Número de procesos utilizados para la división",
"语音切割进程输出信息": "Información de salida del proceso de división de voz",
"0c-中文批量离线ASR工具": "0c-Herramienta de ASR en lote fuera de línea en chino",
"开启离线批量ASR": "¿Habilitar ASR en lote fuera de línea?",
"终止ASR进程": "Terminar el proceso ASR",
"批量ASR(中文only)输入文件夹路径": "Ruta de la carpeta de entrada para ASR en lote (solo en chino)",
"ASR进程输出信息": "Información de salida del proceso ASR",
"0d-语音文本校对标注工具": "0d-Herramienta de corrección y etiquetado de texto de voz",
"是否开启打标WebUI": "¿Habilitar la interfaz web de etiquetado?",
"打标数据标注文件路径": "Ruta del archivo de etiquetado de datos",
"打标工具进程输出信息": "Información de salida del proceso de la herramienta de etiquetado",
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
"*实验/模型名": "*Nombre del experimento/modelo",
"显卡信息": "Información de la tarjeta gráfica",
"预训练的SoVITS-G模型路径": "Ruta del modelo SoVITS-G preentrenado",
"预训练的SoVITS-D模型路径": "Ruta del modelo SoVITS-D preentrenado",
"预训练的GPT模型路径": "Ruta del modelo GPT preentrenado",
"1A-训练集格式化工具": "1A-Herramienta de formateo del conjunto de datos de entrenamiento",
"输出logs/实验名目录下应有23456开头的文件和文件夹": "Debe haber archivos y carpetas que comiencen con 23456 en el directorio logs/nombre del experimento",
"*文本标注文件": "*Archivo de etiquetado de texto",
"*训练集音频文件目录": "*Directorio de archivos de audio de entrenamiento",
"训练集音频文件目录 拼接 list文件里波形对应的文件名。": "Directorio de archivos de audio de entrenamiento, concatenar con los nombres de archivo correspondientes en el archivo list.",
"1Aa-文本内容": "1Aa-Contenido del texto",
"GPU卡号以-分割,每个卡号一个进程": "Número de tarjeta GPU separado por '-', cada número de tarjeta es un proceso",
"预训练的中文BERT模型路径": "Ruta del modelo BERT en chino preentrenado",
"开启文本获取": "¿Habilitar la obtención de texto?",
"终止文本获取进程": "Terminar el proceso de obtención de texto",
"文本进程输出信息": "Información de salida del proceso de obtención de texto",
"1Ab-SSL自监督特征提取": "1Ab-Extracción de características auto-supervisada SSL",
"预训练的SSL模型路径": "Ruta del modelo SSL preentrenado",
"开启SSL提取": "¿Habilitar la extracción SSL?",
"终止SSL提取进程": "Terminar el proceso de extracción SSL",
"SSL进程输出信息": "Información de salida del proceso SSL",
"1Ac-语义token提取": "1Ac-Extracción de tokens semánticos",
"开启语义token提取": "¿Habilitar la extracción de tokens semánticos?",
"终止语义token提取进程": "Terminar el proceso de extracción de tokens semánticos",
"语义token提取进程输出信息": "Información de salida del proceso de extracción de tokens semánticos",
"1Aabc-训练集格式化一键三连": "1Aabc-Formateo del conjunto de datos de entrenamiento en un solo paso",
"开启一键三连": "¿Habilitar un solo paso de formateo?",
"终止一键三连": "Terminar el proceso de un solo paso de formateo",
"一键三连进程输出信息": "Información de salida del proceso de triple acción",
"1B-微调训练": "1B-Entrenamiento de ajuste fino",
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-Entrenamiento de SoVITS. Los archivos de modelo para compartir se encuentran en SoVITS_weights.",
"每张显卡的batch_size": "Tamaño de lote por tarjeta gráfica",
"总训练轮数total_epoch不建议太高": "Número total de épocas de entrenamiento, no se recomienda demasiado alto",
"文本模块学习率权重": "Peso de la tasa de aprendizaje del módulo de texto",
"保存频率save_every_epoch": "Frecuencia de guardado (cada epoch)",
"是否仅保存最新的ckpt文件以节省硬盘空间": "¿Guardar solo el último archivo ckpt para ahorrar espacio en disco?",
"是否在每次保存时间点将最终小模型保存至weights文件夹": "¿Guardar el modelo final pequeño en la carpeta de pesos en cada punto de guardado?",
"开启SoVITS训练": "Iniciar entrenamiento de SoVITS",
"终止SoVITS训练": "Detener entrenamiento de SoVITS",
"SoVITS训练进程输出信息": "Información de salida del proceso de entrenamiento de SoVITS",
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-Entrenamiento de GPT. Los archivos de modelo para compartir se encuentran en GPT_weights.",
"总训练轮数total_epoch": "Número total de épocas de entrenamiento",
"开启GPT训练": "Iniciar entrenamiento de GPT",
"终止GPT训练": "Detener entrenamiento de GPT",
"GPT训练进程输出信息": "Información de salida del proceso de entrenamiento de GPT",
"1C-推理": "1C-Inferencia",
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模体验5秒Zero Shot TTS用。": "Seleccione el modelo almacenado en SoVITS_weights y GPT_weights después del entrenamiento. Uno de ellos es el modelo base, útil para experimentar con TTS de 5 segundos sin entrenamiento.",
"*GPT模型列表": "*Lista de modelos GPT",
"*SoVITS模型列表": "*Lista de modelos SoVITS",
"GPU卡号,只能填1个整数": "Número de tarjeta GPU, solo se puede ingresar un número entero",
"刷新模型路径": "Actualizar la ruta del modelo",
"是否开启TTS推理WebUI": "¿Habilitar la interfaz web de inferencia TTS?",
"TTS推理WebUI进程输出信息": "Información de salida del proceso de interfaz web de inferencia TTS",
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-Cambio de voz",
"施工中,请静候佳音": "En construcción, por favor espere pacientemente",
"TTS推理进程已开启": "Proceso de inferencia TTS iniciado",
"TTS推理进程已关闭": "Proceso de inferencia TTS cerrado",
"打标工具WebUI已开启": "Interfaz web de la herramienta de etiquetado iniciada",
"打标工具WebUI已关闭": "Interfaz web de la herramienta de etiquetado cerrada",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Este software es de código abierto bajo la licencia MIT. El autor no tiene control sobre el software. El usuario que lo utilice o distribuya, y el que genere sonidos a partir del software, asume toda la responsabilidad. Si no acepta estos términos, no puede utilizar ni hacer referencia a ningún código o archivo dentro del paquete de software. Consulte el archivo LICENSE en el directorio raíz para obtener más detalles.",
"*请上传并填写参考信息": "*Por favor, suba y complete la información de referencia",
"*请填写需要合成的目标文本": "*Por favor, complete el texto objetivo que necesita ser sintetizado",
"ASR任务开启%s": "Tarea ASR iniciada: %s",
"GPT训练完成": "Entrenamiento de GPT completado",
"GPT训练开始%s": "Entrenamiento de GPT iniciado: %s",
"SSL提取进程执行中": "Proceso de extracción SSL en ejecución",
"SSL提取进程结束": "Proceso de extracción SSL finalizado",
"SoVITS训练完成": "Entrenamiento de SoVITS completado",
"SoVITS训练开始%s": "Entrenamiento de SoVITS iniciado: %s",
"一键三连中途报错": "Error intermedio en triple acción",
"一键三连进程结束": "Proceso de triple acción finalizado",
"中文": "Chino",
"凑50字一切": "Todo para alcanzar las 50 palabras",
"凑五句一切": "Todo para alcanzar las cinco frases",
"切分后文本": "Texto después de la división",
"切割执行中": "División en proceso",
"切割结束": "División finalizada",
"参考音频的文本": "Texto de referencia del audio",
"参考音频的语种": "Idioma del audio de referencia",
"合成语音": "Síntesis de voz",
"后续将支持混合语种编码文本输入。": "En el futuro, se admitirá la entrada de texto con codificación de idiomas mixtos.",
"已有正在进行的ASR任务需先终止才能开启下一次任务": "Ya hay una tarea ASR en curso, debe detenerla antes de comenzar la siguiente tarea",
"已有正在进行的GPT训练任务需先终止才能开启下一次任务": "Ya hay una tarea de entrenamiento de GPT en curso, debe detenerla antes de comenzar la siguiente tarea",
"已有正在进行的SSL提取任务需先终止才能开启下一次任务": "Ya hay una tarea de extracción SSL en curso, debe detenerla antes de comenzar la siguiente tarea",
"已有正在进行的SoVITS训练任务需先终止才能开启下一次任务": "Ya hay una tarea de entrenamiento de SoVITS en curso, debe detenerla antes de comenzar la siguiente tarea",
"已有正在进行的一键三连任务,需先终止才能开启下一次任务": "Ya hay una tarea de triple acción en curso, debe detenerla antes de comenzar la siguiente tarea",
"已有正在进行的切割任务,需先终止才能开启下一次任务": "Ya hay una tarea de división en curso, debe detenerla antes de comenzar la siguiente tarea",
"已有正在进行的文本任务,需先终止才能开启下一次任务": "Ya hay una tarea de texto en curso, debe detenerla antes de comenzar la siguiente tarea",
"已有正在进行的语义token提取任务需先终止才能开启下一次任务": "Ya hay una tarea de extracción de tokens semánticos en curso, debe detenerla antes de comenzar la siguiente tarea",
"已终止ASR进程": "Proceso ASR terminado",
"已终止GPT训练": "Entrenamiento de GPT terminado",
"已终止SoVITS训练": "Entrenamiento de SoVITS terminado",
"已终止所有1a进程": "Se han terminado todos los procesos 1a",
"已终止所有1b进程": "Se han terminado todos los procesos 1b",
"已终止所有一键三连进程": "Se han terminado todos los procesos de triple acción",
"已终止所有切割进程": "Proceso de corte terminado",
"已终止所有语义token进程": "Proceso de extracción de tokens semánticos terminado",
"按中文句号。切": "Cortar según puntos en chino",
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Herramienta de división de texto. El resultado de la síntesis puede no ser bueno para textos demasiado largos, por lo que se recomienda dividirlos primero. La síntesis se realiza separando el texto según los saltos de línea y luego uniendo los fragmentos.",
"文本进程执行中": "Proceso de texto en ejecución",
"文本进程结束": "Proceso de texto finalizado",
"日文": "Japonés",
"英文": "Inglés",
"语义token提取进程执行中": "Proceso de extracción de tokens semánticos en ejecución",
"语义token提取进程结束": "Proceso de extracción de tokens semánticos finalizado",
"请上传参考音频": "Por favor, suba el audio de referencia",
"输入路径不存在": "La ruta de entrada no existe",
"输入路径存在但既不是文件也不是文件夹": "La ruta de entrada existe pero no es ni un archivo ni una carpeta",
"输出的语音": "Audio de salida",
"进度1a-done": "Progreso: 1a-hecho",
"进度1a-done, 1b-ing": "Progreso: 1a-hecho, 1b-en proceso",
"进度1a-ing": "Progreso: 1a-en proceso",
"进度1a1b-done": "Progreso: 1a1b-hecho",
"进度1a1b-done, 1cing": "Progreso: 1a1b-hecho, 1c-en proceso",
"进度all-done": "Progreso: todo hecho",
"需要合成的切分前文本": "Texto a sintetizar antes de la división",
"需要合成的文本": "Texto a sintetizar",
"需要合成的语种": "Idioma para la síntesis",
">=3则使用对harvest音高识别的结果使用中值滤波数值为滤波半径使用可以削弱哑音": "Si es >=3, se utiliza la mediana para filtrar los resultados del reconocimiento de altura tonal de harvest, el valor es el radio del filtro. Su uso puede debilitar los sonidos sordos.",
"A模型权重": "Peso del modelo A",
"A模型路径": "Ruta del modelo A",
"B模型路径": "Ruta del modelo B",
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Archivo de curva F0, opcional, un tono por línea, en lugar de F0 predeterminado y cambio de tono",
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Archivo de curva F0, opcional, una línea por altura tonal, en lugar de F0 y cambio de tono predeterminados",
"Index Rate": "Tasa de índice",
"Onnx导出": "Exportar Onnx",
"Onnx输出路径": "Ruta de salida Onnx",
"Onnx导出": "Exportar a Onnx",
"Onnx输出路径": "Ruta de salida de Onnx",
"RVC模型路径": "Ruta del modelo RVC",
"ckpt处理": "Procesamiento de recibos",
"harvest进程数": "Número de procesos",
"index文件路径不可包含中文": "La ruta del archivo .index no debe contener caracteres chinos.",
"pth文件路径不可包含中文": "La ruta del archivo .pth no debe contener caracteres chinos.",
"rmvpe卡号配置以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Separe los números de identificación de la GPU con '-' al ingresarlos. Por ejemplo, '0-1-2' significa usar GPU 0, GPU 1 y GPU 2.",
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Paso 1: Complete la configuración del experimento. Los datos del experimento se almacenan en el directorio 'logs', con cada experimento en una carpeta separada. La ruta del nombre del experimento debe ingresarse manualmente y debe contener la configuración del experimento, los registros y los archivos del modelo entrenado.",
"ckpt处理": "Procesamiento de ckpt",
"harvest进程数": "Número de procesos de harvest",
"index文件路径不可包含中文": "La ruta del archivo de índice no puede contener caracteres chinos",
"pth文件路径不可包含中文": "La ruta del archivo pth no puede contener caracteres chinos",
"rmvpe卡号配置以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Configuración de números de tarjeta rmvpe: usando - para separar los números de tarjeta de diferentes procesos de entrada, por ejemplo, 0-0-1 para ejecutar 2 procesos en la tarjeta 0 y 1 proceso en la tarjeta 1",
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Paso 1: Completa la configuración del experimento. Los datos del experimento se encuentran en logs, cada experimento en una carpeta, debe ingresar manualmente la ruta del nombre del experimento, que incluye la configuración del experimento, el registro y los archivos del modelo entrenado.",
"step1:正在处理数据": "Paso 1: Procesando datos",
"step2:正在提取音高&正在提取特征": "Paso 2: Extracción del tono y extracción de características",
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Paso 2a: Recorra automáticamente la carpeta de capacitación y corte y normalice todos los archivos de audio que se pueden decodificar en audio. Se generarán dos carpetas 'wav' en el directorio del experimento. Actualmente, solo se admite la capacitación de una sola persona.",
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Paso 2b: Use la CPU para extraer el tono (si el modelo tiene guía de tono) y la GPU para extraer características (seleccione el número de tarjeta).",
"step3: 填写训练设置, 开始训练模型和索引": "Paso 3: Complete la configuración de entrenamiento y comience a entrenar el modelo y el índice.",
"step2:正在提取音高&正在提取特征": "Paso 2: Extrayendo tono y características",
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Paso 2a: Recorre automáticamente todos los archivos en la carpeta de entrenamiento que se pueden decodificar en archivos de audio y realiza la normalización de segmentos. Genera 2 carpetas de audio en el directorio del experimento; por ahora, solo es compatible con el entrenamiento de una sola persona.",
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Paso 2b: Extraer tono con CPU (si el modelo incluye tono) y extraer características con GPU (seleccionar número de tarjeta)",
"step3: 填写训练设置, 开始训练模型和索引": "Paso 3: Completa la configuración de entrenamiento y comienza a entrenar el modelo e indexar",
"step3a:正在训练模型": "Paso 3a: Entrenando el modelo",
"一键训练": "Entrenamiento con un clic",
"也可批量输入音频文件, 二选一, 优先读文件夹": "También se pueden importar varios archivos de audio. Si existe una ruta de carpeta, esta entrada se ignora.",
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声不带和声的音频选这个对主人声保留比HP5更好。内置HP2和HP3两个模型HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点 <br>2、仅保留主人声带和声的音频选这个对主人声可能有削弱。内置HP5一个模型 <br> 3、去混响、去延迟模型by FoxJoy<br>(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br>&emsp;(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底DeReverb额外去除混响可去除单声道混响但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍<br>2、MDX-Net-Dereverb模型挺慢的<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Procesamiento por lotes para la separación de acompañamiento vocal utilizando el modelo UVR5.<br>Ejemplo de formato de ruta de carpeta válido: D:\\ruta\\a\\la\\carpeta\\de\\entrada (copiar desde la barra de direcciones del administrador de archivos).<br>El modelo se divide en tres categorías:<br>1. Preservar voces: Elija esta opción para audio sin armonías. Preserva las voces mejor que HP5. Incluye dos modelos incorporados: HP2 y HP3. HP3 puede filtrar ligeramente el acompañamiento pero conserva las voces un poco mejor que HP2.<br>2. Preservar solo voces principales: Elija esta opción para audio con armonías. Puede debilitar las voces principales. Incluye un modelo incorporado: HP5.<br>3. Modelos de des-reverberación y des-retardo (por FoxJoy):<br>(1) MDX-Net: La mejor opción para la eliminación de reverberación estéreo pero no puede eliminar la reverberación mono;<br>&emsp;(234) DeEcho: Elimina efectos de retardo. El modo Agresivo elimina más a fondo que el modo Normal. DeReverb adicionalmente elimina la reverberación y puede eliminar la reverberación mono, pero no muy efectivamente para contenido de alta frecuencia fuertemente reverberado.<br>Notas de des-reverberación/des-retardo:<br>1. El tiempo de procesamiento para el modelo DeEcho-DeReverb es aproximadamente el doble que los otros dos modelos DeEcho.<br>2. El modelo MDX-Net-Dereverb es bastante lento.<br>3. La configuración más limpia recomendada es aplicar primero MDX-Net y luego DeEcho-Agresivo.",
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Separe los números de identificación de la GPU con '-' al ingresarlos. Por ejemplo, '0-1-2' significa usar GPU 0, GPU 1 y GPU 2.",
"伴奏人声分离&去混响&去回声": "Separación de voz acompañante & eliminación de reverberación & eco",
"使用模型采样率": "使用模型采样率",
"使用设备采样率": "使用设备采样率",
"保存名": "Guardar nombre",
"保存的文件名, 默认空为和源文件同名": "Nombre del archivo que se guardará, el valor predeterminado es el mismo que el nombre del archivo de origen",
"保存的模型名不带后缀": "Nombre del modelo guardado sin extensión.",
"保存频率save_every_epoch": "Frecuencia de guardado (save_every_epoch)",
"保护清辅音和呼吸声防止电音撕裂等artifact拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Proteger las consonantes claras y la respiración, prevenir artefactos como la distorsión de sonido electrónico, 0.5 no está activado, reducir aumentará la protección pero puede reducir el efecto del índice",
"也可批量输入音频文件, 二选一, 优先读文件夹": "También se pueden ingresar archivos de audio por lotes, seleccionar uno, prioridad para leer carpetas",
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Usar - para separar los números de tarjeta utilizados como entrada, por ejemplo, 0-1-2 para usar las tarjetas 0, 1 y 2",
"伴奏人声分离&去混响&去回声": "Separación de acompañamiento y voz principal y eliminación de reverberación y eco",
"使用模型采样率": "Usar tasa de muestreo del modelo",
"使用设备采样率": "Usar tasa de muestreo del dispositivo",
"保存名": "Nombre de guardado",
"保存的文件名, 默认空为和源文件同名": "Nombre de archivo guardado, vacío por defecto para tener el mismo nombre que el archivo fuente",
"保存的模型名不带后缀": "Nombre del modelo guardado sin extensión",
"保护清辅音和呼吸声防止电音撕裂等artifact拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Proteger las consonantes claras y los sonidos de respiración, evitando artefactos como el desgarro eléctrico. No activar al tirar hasta 0.5, reducir para aumentar la protección, pero puede disminuir la efectividad del índice",
"修改": "Modificar",
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modificar la información del modelo (solo admite archivos de modelos pequeños extraídos en la carpeta weights)",
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modificar información del modelo (solo compatible con archivos de modelo pequeños extraídos en la carpeta weights)",
"停止音频转换": "Detener la conversión de audio",
"全流程结束!": "¡Todo el proceso ha terminado!",
"刷新音色列表和索引路径": "Actualizar la lista de modelos e índice de rutas",
"全流程结束!": Proceso completo!",
"刷新音色列表和索引路径": "Actualizar lista de tonos e índice de ruta",
"加载模型": "Cargar modelo",
"加载预训练底模D路径": "Cargue la ruta del modelo D base pre-entrenada.",
"加载预训练底模G路径": "Cargue la ruta del modelo G base pre-entrenada.",
"单次推理": "单次推理",
"卸载音色省显存": "Descargue la voz para ahorrar memoria GPU",
"变调(整数, 半音数量, 升八度12降八度-12)": "Cambio de tono (entero, número de semitonos, subir una octava +12 o bajar una octava -12)",
"后处理重采样至最终采样率0为不进行重采样": "Remuestreo posterior al proceso a la tasa de muestreo final, 0 significa no remuestrear",
"加载预训练底模D路径": "Cargar ruta del modelo D preentrenado",
"加载预训练底模G路径": "Cargar ruta del modelo G preentrenado",
"单次推理": "Inferencia única",
"卸载音色省显存": "Descargar tono para ahorrar memoria de video",
"变调(整数, 半音数量, 升八度12降八度-12)": "Cambiar tono (número entero, cantidad de semitonos, subir octava 12 bajar octava -12)",
"后处理重采样至最终采样率0为不进行重采样": "Reprocesar y remuestrear a la tasa de muestreo final, 0 para no remuestrear",
"否": "No",
"启用相位声码器": "启用相位声码器",
"启用相位声码器": "Activar codificador de fase",
"响应阈值": "Umbral de respuesta",
"响度因子": "factor de sonoridad",
"响度因子": "Factor de sonoridad",
"处理数据": "Procesar datos",
"导出Onnx模型": "Exportar modelo Onnx",
"导出文件格式": "Formato de archivo de exportación",
"常见问题解答": "Preguntas frecuentes",
"常规设置": "Configuración general",
"开始音频转换": "Iniciar conversión de audio",
"很遗憾您这没有能用的显卡来支持您训练": "Lamentablemente, no tiene una tarjeta gráfica adecuada para soportar su entrenamiento",
"性能设置": "Configuración de rendimiento",
"总训练轮数total_epoch": "Total de épocas de entrenamiento (total_epoch)",
"批量推理": "批量推理",
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversión por lotes, ingrese la carpeta que contiene los archivos de audio para convertir o cargue varios archivos de audio. El audio convertido se emitirá en la carpeta especificada (opción predeterminada).",
"指定输出主人声文件夹": "Especifique la carpeta de salida para la voz principal",
"批量推理": "Inferencia por lotes",
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversión por lotes, ingrese la carpeta de audio a convertir o cargue varios archivos de audio, la salida se realiza en la carpeta especificada (opt por defecto). ",
"指定输出主人声文件夹": "Especificar carpeta de salida de voz principal",
"指定输出文件夹": "Especificar carpeta de salida",
"指定输出非主人声文件夹": "Especifique la carpeta de salida para las voces no principales",
"推理时间(ms):": "Inferir tiempo (ms):",
"推理音色": "inferencia de voz",
"指定输出非主人声文件夹": "Especificar carpeta de salida de no voz principal",
"推理时间(ms):": "Tiempo de inferencia (ms):",
"推理音色": "Tono de inferencia",
"提取": "Extraer",
"提取音高和处理数据使用的CPU进程数": "Número de procesos de CPU utilizados para extraer el tono y procesar los datos",
"提取音高和处理数据使用的CPU进程数": "Número de procesadores de CPU utilizados para extraer tono y procesar datos",
"是": "Sí",
"是否仅保存最新的ckpt文件以节省硬盘空间": "Guardar solo el archivo ckpt más reciente para ahorrar espacio en disco",
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Guardar pequeño modelo final en la carpeta 'weights' en cada punto de guardado",
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Si almacenar en caché todos los conjuntos de entrenamiento en la memoria de la GPU. Los conjuntos de datos pequeños (menos de 10 minutos) se pueden almacenar en caché para acelerar el entrenamiento, pero el almacenamiento en caché de conjuntos de datos grandes puede causar errores de memoria en la GPU y no aumenta la velocidad de manera significativa.",
"显卡信息": "información de la GPU",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Este software es de código abierto bajo la licencia MIT, el autor no tiene ningún control sobre el software, y aquellos que usan el software y difunden los sonidos exportados por el software son los únicos responsables.<br>Si no está de acuerdo con esta cláusula , no puede utilizar ni citar ningún código ni archivo del paquete de software Consulte el directorio raíz <b>Agreement-LICENSE.txt</b> para obtener más información.",
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Almacenar en caché todos los conjuntos de entrenamiento en la memoria de video. Pequeños conjuntos de datos menores a 10 minutos pueden almacenarse en caché para acelerar el entrenamiento; almacenar en caché grandes conjuntos de datos puede saturar la memoria de video y no acelerará mucho.",
"查看": "Ver",
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Ver información del modelo (solo aplicable a archivos de modelos pequeños extraídos de la carpeta 'pesos')",
"检索特征占比": "Proporción de función de búsqueda",
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Ver información del modelo (solo compatible con archivos pequeños extraídos en la carpeta weights)",
"检索特征占比": "Proporción de características de búsqueda",
"模型": "Modelo",
"模型推理": "inferencia del modelo",
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extracción de modelo (ingrese la ruta de un archivo de modelo grande en la carpeta 'logs'), aplicable cuando desea extraer un archivo de modelo pequeño después de entrenar a mitad de camino y no se guardó automáticamente, o cuando desea probar un modelo intermedio",
"模型是否带音高指导": "Si el modelo tiene guía de tono.",
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "Si el modelo tiene guía de tono (necesaria para cantar, pero no para hablar)",
"模型是否带音高指导,1是0否": "Si el modelo tiene guía de tono, 1 para sí, 0 para no",
"模型推理": "Inferencia de modelo",
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extracción de modelo (ingresar la ruta del modelo grande en la carpeta logs), útil cuando se quiere dejar de entrenar a la mitad y el modelo no ha extraído automáticamente un modelo pequeño guardado, o para probar la situación del modelo intermedio",
"模型是否带音高指导": "¿El modelo incluye guía de altura tonal?",
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "¿El modelo incluye guía de altura tonal? (Necesario para cantar, opcional para voz)",
"模型是否带音高指导,1是0否": "¿El modelo incluye guía de altura tonal? 1 para sí, 0 para no",
"模型版本型号": "Versión y modelo del modelo",
"模型融合, 可用于测试音色融合": "Fusión de modelos, se puede utilizar para fusionar diferentes voces",
"模型融合, 可用于测试音色融合": "Fusión de modelos, útil para probar la mezcla de tonos",
"模型路径": "Ruta del modelo",
"每张显卡的batch_size": "Tamaño del lote (batch_size) por tarjeta gráfica",
"淡入淡出长度": "Duración del fundido de entrada/salida",
"淡入淡出长度": "Longitud de desvanecimiento",
"版本": "Versión",
"特征提取": "Extracción de características",
"特征检索库文件路径,为空则使用下拉的选择结果": "Ruta del archivo de la biblioteca de características, si está vacío, se utilizará el resultado de la selección desplegable",
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Tecla +12 recomendada para conversión de voz de hombre a mujer, tecla -12 para conversión de voz de mujer a hombre. Si el rango de tono es demasiado amplio y causa distorsión, ajústelo usted mismo a un rango adecuado.",
"特征检索库文件路径,为空则使用下拉的选择结果": "Ruta del archivo de la biblioteca de búsqueda de características, si está vacío, se utiliza el resultado seleccionado en el menú desplegable",
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Recomendación para cambiar de hombre a mujer +12 teclas, cambiar de mujer a hombre -12 teclas. Si la amplitud del rango tonal causa distorsión del tono, también puede ajustarse manualmente al rango tonal adecuado. ",
"目标采样率": "Tasa de muestreo objetivo",
"算法延迟(ms):": "算法延迟(ms):",
"自动检测index路径,下拉式选择(dropdown)": "Detección automática de la ruta del índice, selección desplegable (dropdown)",
"算法延迟(ms):": "Retardo del algoritmo (ms):",
"自动检测index路径,下拉式选择(dropdown)": "Detectar automáticamente la ruta del índice, seleccionar en menú desplegable",
"融合": "Fusión",
"要改的模型信息": "Información del modelo a modificar",
"要置入的模型信息": "Información del modelo a colocar.",
"训练": "Entrenamiento",
"训练模型": "Entrenar Modelo",
"训练特征索引": "Índice de características",
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entrenamiento finalizado, puede ver el registro de entrenamiento en la consola o en el archivo train.log en la carpeta del experimento",
"请指定说话人id": "ID del modelo",
"请选择index文件": "Seleccione el archivo .index",
"请选择pth文件": "Seleccione el archivo .pth",
"请选择说话人id": "Seleccione una identificación de altavoz",
"转换": "Conversión",
"输入实验名": "Ingrese el nombre del modelo",
"输入待处理音频文件夹路径": "Ingrese la ruta a la carpeta de audio que se procesará",
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Ingrese la ruta a la carpeta de audio que se procesará (simplemente cópiela desde la barra de direcciones del administrador de archivos)",
"输入待处理音频文件路径(默认是正确格式示例)": "Ingrese la ruta del archivo del audio que se procesará (el formato predeterminado es el ejemplo correcto)",
"输入源音量包络替换输出音量包络融合比例越靠近1越使用输出包络": "Proporción de fusión para reemplazar el sobre de volumen de entrada con el sobre de volumen de salida, cuanto más cerca de 1, más se utiliza el sobre de salida",
"输入监听": "输入监听",
"输入训练文件夹路径": "Introduzca la ruta de la carpeta de entrenamiento",
"要改的模型信息": "Información del modelo a cambiar",
"要置入的模型信息": "Información del modelo a insertar",
"训练": "Entrenar",
"训练模型": "Entrenar modelo",
"训练特征索引": "Entrenar índice de características",
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entrenamiento terminado, puede ver registros de entrenamiento en la consola o en el archivo train.log en la carpeta del experimento",
"请指定说话人id": "Por favor, especifique el ID del hablante",
"请选择index文件": "Seleccione el archivo index, por favor",
"请选择pth文件": "Seleccione el archivo pth, por favor",
"请选择说话人id": "Seleccione el ID del hablante, por favor",
"转换": "Convertir",
"输入实验名": "Ingrese el nombre del experimento",
"输入待处理音频文件夹路径": "Ingrese la ruta de la carpeta de audio a procesar",
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Ingrese la ruta de la carpeta de audio a procesar (puede copiarla desde la barra de direcciones del administrador de archivos)",
"输入待处理音频文件路径(默认是正确格式示例)": "Ingrese la ruta del archivo de audio a procesar (el formato predeterminado es un ejemplo correcto)",
"输入源音量包络替换输出音量包络融合比例越靠近1越使用输出包络": "Ingrese la proporción de fusión para reemplazar el sobre de volumen de origen con el sobre de volumen de salida; cuanto más cercano a 1, más se utiliza el sobre de salida",
"输入监听": "Entrada de monitoreo",
"输入训练文件夹路径": "Ingrese la ruta de la carpeta de entrenamiento",
"输入设备": "Dispositivo de entrada",
"输入降噪": "Reducción de ruido de entrada",
"输入降噪": "Entrada de reducción de ruido",
"输出信息": "Información de salida",
"输出变声": "输出变声",
"输出变声": "Salida de cambio de voz",
"输出设备": "Dispositivo de salida",
"输出降噪": "Reducción de ruido de salida",
"输出音频(右下角三个点,点了可以下载)": "Salida de audio (haga clic en los tres puntos en la esquina inferior derecha para descargar)",
"选择.index文件": "Seleccione el archivo .index",
"选择.pth文件": "Seleccione el archivo .pth",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Seleccione el algoritmo de extracción de tono, las voces de entrada se pueden acelerar con pm, harvest tiene buenos graves pero es muy lento, crepe es bueno pero se come las GPUs",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Seleccione el algoritmo de extracción de tono, use 'pm' para acelerar la entrada de canto, 'harvest' es bueno para los graves pero extremadamente lento, 'crepe' tiene buenos resultados pero consume GPU",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Seleccione el algoritmo de extracción de tono: la canción de entrada se puede acelerar con pm, la voz de alta calidad pero CPU pobre se puede acelerar con dio, harvest es mejor pero más lento, rmvpe es el mejor y se come ligeramente la CPU/GPU",
"采样率:": "采样率:",
"输出降噪": "Salida de reducción de ruido",
"输出音频(右下角三个点,点了可以下载)": "Salida de audio (los tres puntos en la esquina inferior derecha, haga clic para descargar)",
"选择.index文件": "Seleccione el archivo .index, por favor",
"选择.pth文件": "Seleccione el archivo .pth, por favor",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Seleccione el algoritmo de extracción de tono; para voz, pm acelera, harvest es lento pero tiene buenos bajos, crepe tiene buen efecto pero consume GPU",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Seleccione el algoritmo de extracción de tono; para voz, pm acelera, harvest es lento pero tiene buenos bajos, crepe tiene buen efecto pero consume GPU, rmvpe tiene el mejor efecto y consume poco GPU",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Seleccione el algoritmo de extracción de tono: para voz, pm acelera con buena calidad de audio pero CPU deficiente, dio acelera pero harvest tiene mejor calidad aunque es más lento, rmvpe tiene el mejor efecto y consume poco CPU/GPU",
"采样率:": "Tasa de muestreo:",
"采样长度": "Longitud de muestreo",
"重载设备列表": "Actualizar lista de dispositivos",
"音调设置": "Ajuste de tono",
"音频设备(请使用同种类驱动)": "Dispositivo de audio (utilice el mismo tipo de controlador)",
"重载设备列表": "Recargar lista de dispositivos",
"音调设置": "Configuración de tono",
"音频设备(请使用同种类驱动)": "Dispositivo de audio (utilice controladores del mismo tipo)",
"音高算法": "Algoritmo de tono",
"额外推理时长": "Tiempo de inferencia adicional"
"额外推理时长": "Tiempo adicional de inferencia"
}

View File

@ -8,8 +8,16 @@
"是否开启UVR5-WebUI": "Activer UVR5-WebUI",
"UVR5进程输出信息": "Informations de processus UVR5",
"0b-语音切分工具": "0b-Outil de découpage vocal",
".list标注文件的路径": "Chemin du fichier d'annotation .list",
"GPT模型列表": "Liste des modèles GPT",
"SoVITS模型列表": "Liste des modèles SoVITS",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名不是全路径。": "Répertoire où sont enregistrés les fichiers audio après la découpe ! Chemin complet du fichier audio à lire = ce répertoire - nom du fichier correspondant à la forme d'onde dans le fichier liste (pas le chemin complet).",
"音频自动切分输入路径,可文件可文件夹": "Chemin d'entrée automatique de découpage audio, peut être un fichier ou un dossier",
"切分后的子音频的输出根目录": "Répertoire racine de sortie des sous-audios après découpage",
"怎么切": "Comment découper",
"不切": "Pas de découpe",
"凑四句一切": "Composez quatre phrases pour tout remplir",
"按英文句号.切": "Découpez par des points en anglais",
"threshold:音量小于这个值视作静音的备选切割点": "seuil: le volume inférieur à cette valeur est considéré comme un point de coupe silencieux alternatif",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: longueur minimale de chaque segment, si le premier segment est trop court, il est continué avec le segment suivant jusqu'à dépasser cette valeur",
"min_interval:最短切割间隔": "min_interval: intervalle de coupe minimum",

View File

@ -1,135 +1,276 @@
{
">=3则使用对harvest音高识别的结果使用中值滤波数值为滤波半径使用可以削弱哑音": "Se >=3: applica il filtro mediano ai risultati del pitch raccolto. ",
"A模型权重": "Peso (w) per il modello A:",
"A模型路径": "Percorso per il modello A:",
"B模型路径": "Percorso per il modello B:",
"很遗憾您这没有能用的显卡来支持您训练": "Purtroppo non hai una scheda grafica utilizzabile per supportare il tuo addestramento",
"UVR5已开启": "UVR5 è attivato",
"UVR5已关闭": "UVR5 è disattivato",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Questo software è open source con licenza MIT. L'autore non ha alcun controllo sul software. L'utente che utilizza il software o diffonde i suoni derivati dal software ne è responsabile. <br>Se non accetti questi termini, non puoi utilizzare o citare alcun codice o file all'interno del pacchetto software. Vedi la cartella principale<b>LICENSE</b> per i dettagli.",
"0-前置数据集获取工具": "0-Strumento di acquisizione del dataset preliminare",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-Strumento di separazione voce e accompagnamento UVR5 & Rimozione riverbero e ritardo",
"是否开启UVR5-WebUI": "Attivare UVR5-WebUI",
"UVR5进程输出信息": "Informazioni sull'output del processo UVR5",
"0b-语音切分工具": "0b-Strumento di segmentazione vocale",
"音频自动切分输入路径,可文件可文件夹": "Percorso di input per la segmentazione automatica dell'audio, può essere un file o una cartella",
"切分后的子音频的输出根目录": "Directory radice di output per gli audio segmentati",
"threshold:音量小于这个值视作静音的备选切割点": "threshold: Punto di taglio alternativo considerato silenzioso se il volume è inferiore a questo valore",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: Lunghezza minima di ogni segmento. Se il primo segmento è troppo corto, verrà unito agli segmenti successivi fino a superare questo valore",
"min_interval:最短切割间隔": "min_interval: Intervallo minimo di taglio",
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: Come calcolare la curva del volume. Più piccolo è, maggiore è la precisione ma aumenta la complessità computazionale (non significa che una maggiore precisione dà risultati migliori)",
"max_sil_kept:切完后静音最多留多长": "max_sil_kept: Massima durata del silenzio dopo il taglio",
"开启语音切割": "Attivare la segmentazione vocale",
"终止语音切割": "Terminare la segmentazione vocale",
"max:归一化后最大值多少": "max: Massimo valore dopo la normalizzazione",
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix: Quanta proporzione dell'audio normalizzato deve essere miscelata",
"切割使用的进程数": "Numero di processi utilizzati per il taglio",
"语音切割进程输出信息": "Informazioni sull'output del processo di segmentazione vocale",
"0c-中文批量离线ASR工具": "0c-Strumento di ASR offline batch in cinese",
"开启离线批量ASR": "Attivare ASR offline batch",
"终止ASR进程": "Terminare il processo ASR",
"批量ASR(中文only)输入文件夹路径": "Percorso della cartella di input per ASR offline batch (solo cinese)",
"ASR进程输出信息": "Informazioni sull'output del processo ASR",
"0d-语音文本校对标注工具": "0d-Strumento di correzione e annotazione testo vocale",
"是否开启打标WebUI": "Attivare l'interfaccia utente Web di annotazione",
"打标数据标注文件路径": "Percorso del file di annotazione dei dati contrassegnati",
"打标工具进程输出信息": "Informazioni sull'output del processo di annotazione",
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
"*实验/模型名": "*Nome dell'esperimento/modello",
"显卡信息": "Informazioni sulla scheda grafica",
"预训练的SoVITS-G模型路径": "Percorso del modello preaddestrato SoVITS-G",
"预训练的SoVITS-D模型路径": "Percorso del modello preaddestrato SoVITS-D",
"预训练的GPT模型路径": "Percorso del modello preaddestrato GPT",
"1A-训练集格式化工具": "1A-Strumento di formattazione del set di addestramento",
"输出logs/实验名目录下应有23456开头的文件和文件夹": "Nella cartella logs/nome dell'esperimento dovrebbero esserci file e cartelle che iniziano con 23456",
"*文本标注文件": "*File di annotazione del testo",
"*训练集音频文件目录": "*Directory dei file audio del set di addestramento",
"训练集音频文件目录 拼接 list文件里波形对应的文件名。": "Directory dei file audio del set di addestramento, concatenare il nome del file corrispondente nella lista",
"1Aa-文本内容": "1Aa-Contenuto del testo",
"GPU卡号以-分割,每个卡号一个进程": "Numero di GPU separati da '-'; ogni numero corrisponde a un processo",
"预训练的中文BERT模型路径": "Percorso del modello BERT cinese preaddestrato",
"开启文本获取": "Attivare l'estrazione del testo",
"终止文本获取进程": "Terminare il processo di estrazione del testo",
"文本进程输出信息": "Informazioni sull'output del processo di estrazione del testo",
"1Ab-SSL自监督特征提取": "1Ab-Estrazione di caratteristiche auto-supervisionata SSL",
"预训练的SSL模型路径": "Percorso del modello SSL preaddestrato",
"开启SSL提取": "Attivare l'estrazione SSL",
"终止SSL提取进程": "Terminare il processo di estrazione SSL",
"SSL进程输出信息": "Informazioni sull'output del processo SSL",
"1Ac-语义token提取": "1Ac-Estrazione del token semantico",
"开启语义token提取": "Attivare l'estrazione del token semantico",
"终止语义token提取进程": "Terminare il processo di estrazione del token semantico",
"语义token提取进程输出信息": "Informazioni sull'output del processo di estrazione del token semantico",
"1Aabc-训练集格式化一键三连": "1Aabc-Strumento di formattazione del set di addestramento con tre passaggi",
"开启一键三连": "Attivare la formattazione con tre passaggi",
"终止一键三连": "Terminare la formattazione con tre passaggi",
"一键三连进程输出信息": "Informazioni sull'output del processo di 'One Click Three Connect'",
"1B-微调训练": "1B-Allenamento di affinamento",
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-Allenamento di SoVITS. I file del modello destinati alla condivisione sono salvati in SoVITS_weights.",
"每张显卡的batch_size": "Batch size per ogni scheda grafica",
"总训练轮数total_epoch不建议太高": "Numero totale di epoche di addestramento, non raccomandato troppo alto",
"文本模块学习率权重": "Peso del tasso di apprendimento del modulo di testo",
"保存频率save_every_epoch": "Frequenza di salvataggio ogni epoca",
"是否仅保存最新的ckpt文件以节省硬盘空间": "Salvare solo il file ckpt più recente per risparmiare spazio su disco",
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Salvare il modello finale più piccolo nella cartella weights ad ogni punto di salvataggio",
"开启SoVITS训练": "Attivare l'allenamento di SoVITS",
"终止SoVITS训练": "Terminare l'allenamento di SoVITS",
"SoVITS训练进程输出信息": "Informazioni sull'output del processo di allenamento di SoVITS",
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-Allenamento di GPT. I file del modello destinati alla condivisione sono salvati in GPT_weights.",
"总训练轮数total_epoch": "Numero totale di epoche di addestramento",
"开启GPT训练": "Attivare l'allenamento di GPT",
"终止GPT训练": "Terminare l'allenamento di GPT",
"GPT训练进程输出信息": "Informazioni sull'output del processo di allenamento di GPT",
"1C-推理": "1C-Inferenza",
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模体验5秒Zero Shot TTS用。": "Scegli il modello salvato in SoVITS_weights e GPT_weights dopo l'addestramento. Uno di default è il modello di base, utilizzato per l'esperienza di Zero Shot TTS in 5 secondi.",
"*GPT模型列表": "*Lista dei modelli GPT",
"*SoVITS模型列表": "*Lista dei modelli SoVITS",
"GPU卡号,只能填1个整数": "Numero della scheda grafica, può essere inserito solo un numero intero",
"刷新模型路径": "Aggiorna il percorso del modello",
"是否开启TTS推理WebUI": "Attivare l'interfaccia utente Web per l'inferenza TTS",
"TTS推理WebUI进程输出信息": "Informazioni sull'output del processo dell'interfaccia utente Web per l'inferenza TTS",
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-Voce modificata",
"施工中,请静候佳音": "In costruzione, attendi pazientemente le buone notizie",
"TTS推理进程已开启": "Il processo di inferenza TTS è stato avviato",
"TTS推理进程已关闭": "Il processo di inferenza TTS è stato chiuso",
"打标工具WebUI已开启": "L'interfaccia utente Web dello strumento di annotazione è stata avviata",
"打标工具WebUI已关闭": "L'interfaccia utente Web dello strumento di annotazione è stata chiusa",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Questo software è open source con licenza MIT. L'autore non ha alcun controllo sul software. L'utente che utilizza il software o diffonde i suoni derivati dal software ne è responsabile. Se non accetti questi termini, non puoi utilizzare o citare alcun codice o file all'interno del pacchetto software. Vedi la cartella principale LICENSE per i dettagli.",
"*请上传并填写参考信息": "*Carica e compila le informazioni di riferimento",
"*请填写需要合成的目标文本": "*Compila il testo di destinazione da sintetizzare",
"ASR任务开启%s": "Attività ASR avviata: %s",
"GPT训练完成": "Allenamento di GPT completato",
"GPT训练开始%s": "Inizio dell'allenamento di GPT: %s",
"SSL提取进程执行中": "Processo di estrazione SSL in corso",
"SSL提取进程结束": "Processo di estrazione SSL completato",
"SoVITS训练完成": "Allenamento di SoVITS completato",
"SoVITS训练开始%s": "Inizio dell'allenamento di SoVITS: %s",
"一键三连中途报错": "Errore durante 'One Click Three Connect'",
"一键三连进程结束": "Processo di 'One Click Three Connect' completato",
"中文": "Cinese",
"凑50字一切": "Riempire con 50 caratteri per tutto",
"凑五句一切": "Riempire con cinque frasi per tutto",
"切分后文本": "Testo dopo il taglio",
"切割执行中": "Taglio in corso",
"切割结束": "Taglio completato",
"参考音频的文本": "Testo dell'audio di riferimento",
"参考音频的语种": "Lingua dell'audio di riferimento",
"合成语音": "Sintesi vocale",
"后续将支持混合语种编码文本输入。": "In futuro sarà supportata l'input di testi con codifica mista di lingue.",
"已有正在进行的ASR任务需先终止才能开启下一次任务": "È già in corso un'attività ASR. Devi interromperla prima di avviare una nuova attività.",
"已有正在进行的GPT训练任务需先终止才能开启下一次任务": "È già in corso un'attività di allenamento di GPT. Devi interromperla prima di avviare una nuova attività.",
"已有正在进行的SSL提取任务需先终止才能开启下一次任务": "È già in corso un'attività di estrazione SSL. Devi interromperla prima di avviare una nuova attività.",
"已有正在进行的SoVITS训练任务需先终止才能开启下一次任务": "È già in corso un'attività di allenamento di SoVITS. Devi interromperla prima di avviare una nuova attività.",
"已有正在进行的一键三连任务,需先终止才能开启下一次任务": "È già in corso un'attività di 'One Click Three Connect'. Devi interromperla prima di avviare una nuova attività.",
"已有正在进行的切割任务,需先终止才能开启下一次任务": "È già in corso un'attività di taglio. Devi interromperla prima di avviare una nuova attività.",
"已有正在进行的文本任务,需先终止才能开启下一次任务": "È già in corso un'attività di testo. Devi interromperla prima di avviare una nuova attività.",
"已有正在进行的语义token提取任务需先终止才能开启下一次任务": "È già in corso un'attività di estrazione di token semantici. Devi interromperla prima di avviare una nuova attività.",
"已终止ASR进程": "Il processo ASR è stato terminato",
"已终止GPT训练": "L'allenamento di GPT è stato terminato",
"已终止SoVITS训练": "Allenamento SoVITS terminato",
"已终止所有1a进程": "Processi 1a terminati",
"已终止所有1b进程": "Processi 1b terminati",
"已终止所有一键三连进程": "Processi One Click Three Connect terminati",
"已终止所有切割进程": "Processi di taglio terminati",
"已终止所有语义token进程": "Processi di estrazione token semantici terminati",
"按中文句号。切": "Taglia secondo il punto cinese.",
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Strumento di divisione del testo. I testi troppo lunghi potrebbero non avere un buon effetto di sintesi, quindi è consigliabile dividerli prima della sintesi. La sintesi verrà separata in base ai ritorni a capo nel testo e successivamente ricomposta.",
"文本进程执行中": "Processo di testo in esecuzione",
"文本进程结束": "Processo di testo terminato",
"日文": "Giapponese",
"英文": "Inglese",
"语义token提取进程执行中": "Processo di estrazione token semantici in esecuzione",
"语义token提取进程结束": "Processo di estrazione token semantici terminato",
"请上传参考音频": "Carica l'audio di riferimento",
"输入路径不存在": "Il percorso di input non esiste",
"输入路径存在但既不是文件也不是文件夹": "Il percorso di input esiste ma non è né un file né una cartella",
"输出的语音": "Audio di output",
"进度1a-done": "Progresso: 1a-done",
"进度1a-done, 1b-ing": "Progresso: 1a-done, 1b-ing",
"进度1a-ing": "Progresso: 1a-ing",
"进度1a1b-done": "Progresso: 1a1b-done",
"进度1a1b-done, 1cing": "Progresso: 1a1b-done, 1cing",
"进度all-done": "Progresso: all-done",
"需要合成的切分前文本": "Testo da sintetizzare prima del taglio",
"需要合成的文本": "Testo da sintetizzare",
"需要合成的语种": "Lingua da sintetizzare",
">=3则使用对harvest音高识别的结果使用中值滤波数值为滤波半径使用可以削弱哑音": "Se >=3, usa il filtraggio mediano sui risultati del riconoscimento dell'altezza di harvest, il valore è il raggio del filtro. L'uso di questo valore può attenuare i suoni muti.",
"A模型权重": "Peso del modello A",
"A模型路径": "Percorso del modello A",
"B模型路径": "Percorso del modello B",
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "File curva F0 (opzionale). ",
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "File della curva F0, opzionale, una riga per un'altezza, sostituisce il F0 predefinito e le variazioni di tono",
"Index Rate": "Tasso di indice",
"Onnx导出": "Esporta Onnx",
"Onnx输出路径": "Percorso di esportazione Onnx:",
"RVC模型路径": "Percorso modello RVC:",
"ckpt处理": "Elaborazione ckpt",
"harvest进程数": "harvest进程数",
"index文件路径不可包含中文": "index文件路径不可包含中文",
"pth文件路径不可包含中文": "pth è un'app per il futuro",
"rmvpe卡号配置以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe卡号配置以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程",
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Passaggio 1: compilare la configurazione sperimentale. ",
"step1:正在处理数据": "Passaggio 1: elaborazione dei dati",
"step2:正在提取音高&正在提取特征": "step2:正在提取音高&正在提取特征",
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Passaggio 2a: attraversa automaticamente tutti i file nella cartella di addestramento che possono essere decodificati in audio ed esegui la normalizzazione delle sezioni. ",
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Passaggio 2b: utilizzare la CPU per estrarre il tono (se il modello ha il tono), utilizzare la GPU per estrarre le caratteristiche (selezionare l'indice GPU):",
"step3: 填写训练设置, 开始训练模型和索引": "Passaggio 3: compilare le impostazioni di addestramento e avviare l'addestramento del modello e dell'indice",
"step3a:正在训练模型": "Passaggio 3a: è iniziato l'addestramento del modello",
"一键训练": "Addestramento con un clic",
"也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量输入音频文件, 二选一, 优先读文件夹",
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声不带和声的音频选这个对主人声保留比HP5更好。内置HP2和HP3两个模型HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点 <br>2、仅保留主人声带和声的音频选这个对主人声可能有削弱。内置HP5一个模型 <br> 3、去混响、去延迟模型by FoxJoy<br>(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br>&emsp;(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底DeReverb额外去除混响可去除单声道混响但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍<br>2、MDX-Net-Dereverb模型挺慢的<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Elaborazione batch per la separazione dell'accompagnamento vocale utilizzando il modello UVR5.<br>Esempio di un formato di percorso di cartella valido: D:\\path\\to\\input\\folder (copialo dalla barra degli indirizzi del file manager).<br>Il modello è suddiviso in tre categorie:<br>1. Conserva la voce: scegli questa opzione per l'audio senza armonie. <br>2. Mantieni solo la voce principale: scegli questa opzione per l'audio con armonie. <br>3. Modelli di de-riverbero e de-delay (di FoxJoy):<br>(1) MDX-Net: la scelta migliore per la rimozione del riverbero stereo ma non può rimuovere il riverbero mono;<br><br>Note di de-riverbero/de-delay:<br>1. Il tempo di elaborazione per il modello DeEcho-DeReverb è circa il doppio rispetto agli altri due modelli DeEcho.<br>2. Il modello MDX-Net-Dereverb è piuttosto lento.<br>3. La configurazione più pulita consigliata consiste nell'applicare prima MDX-Net e poi DeEcho-Aggressive.",
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Inserisci gli indici GPU separati da '-', ad esempio 0-1-2 per utilizzare GPU 0, 1 e 2:",
"伴奏人声分离&去混响&去回声": "Separazione voce/accompagnamento",
"使用模型采样率": "使用模型采样率",
"使用设备采样率": "使用设备采样率",
"保存名": "Salva nome:",
"保存的文件名, 默认空为和源文件同名": "Salva il nome del file (predefinito: uguale al file di origine):",
"保存的模型名不带后缀": "Nome del modello salvato (senza estensione):",
"保存频率save_every_epoch": "Frequenza di salvataggio (save_every_epoch):",
"保护清辅音和呼吸声防止电音撕裂等artifact拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Proteggi le consonanti senza voce e i suoni del respiro per evitare artefatti come il tearing nella musica elettronica. ",
"修改": "Modificare",
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modifica le informazioni sul modello (supportato solo per i file di modello di piccole dimensioni estratti dalla cartella 'weights')",
"停止音频转换": "Arresta la conversione audio",
"全流程结束!": "Tutti i processi sono stati completati!",
"刷新音色列表和索引路径": "Aggiorna l'elenco delle voci e il percorso dell'indice",
"加载模型": "Carica modello",
"加载预训练底模D路径": "Carica il percorso D del modello base pre-addestrato:",
"加载预训练底模G路径": "Carica il percorso G del modello base pre-addestrato:",
"单次推理": "单次推理",
"卸载音色省显存": "Scarica la voce per risparmiare memoria della GPU:",
"变调(整数, 半音数量, 升八度12降八度-12)": "Trasposizione (numero intero, numero di semitoni, alza di un'ottava: 12, abbassa di un'ottava: -12):",
"后处理重采样至最终采样率0为不进行重采样": "Ricampiona l'audio di output in post-elaborazione alla frequenza di campionamento finale. ",
"否": "NO",
"启用相位声码器": "启用相位声码器",
"Onnx导出": "Esporta in Onnx",
"Onnx输出路径": "Percorso di output Onnx",
"RVC模型路径": "Percorso del modello RVC",
"ckpt处理": "Elaborazione del ckpt",
"harvest进程数": "Numero di processi harvest",
"index文件路径不可包含中文": "Il percorso del file di indice non può contenere caratteri cinesi",
"pth文件路径不可包含中文": "Il percorso del file pth non può contenere caratteri cinesi",
"rmvpe卡号配置以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Configurazione dei numeri delle schede rmvpe: separa con - i numeri delle schede dei diversi processi utilizzati in input. Ad esempio, 0-0-1 utilizza 2 processi sulla scheda 0 e 1 processo sulla scheda 1",
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Passo 1: Compila la configurazione sperimentale. I dati sperimentali sono salvati in logs, ogni esperimento in una cartella. È necessario inserire manualmente il percorso del nome dell'esperimento, contenente configurazione sperimentale, log e file di modello addestrato.",
"step1:正在处理数据": "Passo 1: Elaborazione dei dati in corso",
"step2:正在提取音高&正在提取特征": "Passo 2: Estrazione dell'altezza e delle caratteristiche in corso",
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Passo 2a: Attraversa automaticamente tutti i file nella cartella di addestramento che possono essere decodificati in audio e li normalizza a fette. Nella cartella sperimentale vengono generate due cartelle wav; Al momento supporta solo l'addestramento singolo.",
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Passo 2b: Usa la CPU per estrarre l'altezza (se il modello la include) e la GPU per estrarre le caratteristiche (scegliendo il numero della scheda)",
"step3: 填写训练设置, 开始训练模型和索引": "Passo 3: Compila le impostazioni di addestramento, inizia ad addestrare il modello e l'indice",
"step3a:正在训练模型": "Passo 3a: Addestramento del modello in corso",
"一键训练": "Allenamento One-Click",
"也可批量输入音频文件, 二选一, 优先读文件夹": "È possibile anche inserire file audio in batch, una delle due opzioni, con priorità alla lettura della cartella",
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Numeri delle schede separati da - utilizzati in input, ad esempio 0-1-2, utilizzando le schede 0, 1 e 2",
"伴奏人声分离&去混响&去回声": "Separazione tra accompagnamento e voce & Rimozione dell'eco & Rimozione dell'eco",
"使用模型采样率": "Frequenza di campionamento del modello",
"使用设备采样率": "Frequenza di campionamento del dispositivo",
"保存名": "Nome del salvataggio",
"保存的文件名, 默认空为和源文件同名": "Nome del file salvato, vuoto di default è lo stesso del file sorgente",
"保存的模型名不带后缀": "Nome del modello salvato senza estensione",
"保护清辅音和呼吸声防止电音撕裂等artifact拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protegge le consonanti chiare e i suoni di respirazione, evita artifact come la rottura del suono elettronico, tirare a 0.5 per disattivare, abbassare per aumentare la protezione ma potrebbe ridurre l'effetto di indicizzazione",
"修改": "Modifica",
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modifica le informazioni del modello (supporta solo i piccoli file di modello estratti dalla cartella weights)",
"停止音频转换": "Interrompi la conversione audio",
"全流程结束!": "Processo completo!",
"刷新音色列表和索引路径": "Aggiorna la lista dei toni e il percorso dell'indice",
"加载模型": "Carica il modello",
"加载预训练底模D路径": "Carica il percorso del modello di fondo preaddestrato D",
"加载预训练底模G路径": "Carica il percorso del modello di fondo preaddestrato G",
"单次推理": "Inferenza singola",
"卸载音色省显存": "Scarica il tono per risparmiare memoria video",
"变调(整数, 半音数量, 升八度12降八度-12)": "Modifica del tono (numero intero, quantità di semitoni, 12 per un'ottava in su, -12 per un'ottava in giù)",
"后处理重采样至最终采样率0为不进行重采样": "Ricampiona in modo post-elaborazione alla frequenza di campionamento finale, 0 per non eseguire il ricampionamento",
"否": "No",
"启用相位声码器": "Abilita il codificatore di fase",
"响应阈值": "Soglia di risposta",
"响度因子": "fattore di sonorità",
"处理数据": "Processa dati",
"导出Onnx模型": "Esporta modello Onnx",
"导出文件格式": "Formato file di esportazione",
"常见问题解答": "FAQ (Domande frequenti)",
"响度因子": "Fattore di risposta",
"处理数据": "Elaborazione dati",
"导出Onnx模型": "Esporta il modello Onnx",
"导出文件格式": "Formato di esportazione del file",
"常见问题解答": "Domande frequenti",
"常规设置": "Impostazioni generali",
"开始音频转换": "Avvia la conversione audio",
"很遗憾您这没有能用的显卡来支持您训练": "Sfortunatamente, non è disponibile alcuna GPU compatibile per supportare l'addestramento.",
"性能设置": "Impostazioni delle prestazioni",
"总训练轮数total_epoch": "Epoch totali di addestramento (total_epoch):",
"批量推理": "批量推理",
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversione massiva. Inserisci il percorso della cartella che contiene i file da convertire o carica più file audio. I file convertiti finiranno nella cartella specificata. (default: opt) ",
"指定输出主人声文件夹": "Specifica la cartella di output per le voci:",
"指定输出文件夹": "Specifica la cartella di output:",
"指定输出非主人声文件夹": "Specificare la cartella di output per l'accompagnamento:",
"开始音频转换": "Inizia la conversione audio",
"性能设置": "Impostazioni di performance",
"批量推理": "Inferenza batch",
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversione in batch, inserisci la cartella con i file audio da convertire o carica più file audio, i file convertiti verranno salvati nella cartella specificata (per impostazione predefinita opt).",
"指定输出主人声文件夹": "Specifica la cartella di output per la voce principale",
"指定输出文件夹": "Specifica la cartella di output",
"指定输出非主人声文件夹": "Specifica la cartella di output per la non voce principale",
"推理时间(ms):": "Tempo di inferenza (ms):",
"推理音色": "Voce di inferenza:",
"推理音色": "Tono di inferenza",
"提取": "Estrai",
"提取音高和处理数据使用的CPU进程数": "Numero di processi CPU utilizzati per l'estrazione del tono e l'elaborazione dei dati:",
"是": "SÌ",
"是否仅保存最新的ckpt文件以节省硬盘空间": "Salva solo l'ultimo file '.ckpt' per risparmiare spazio su disco:",
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Salva un piccolo modello finale nella cartella \"weights\" in ogni punto di salvataggio:",
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Memorizza nella cache tutti i set di addestramento nella memoria della GPU. ",
"显卡信息": "Informazioni GPU",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "Questo software è open source con licenza MIT. <br>Se non si accetta questa clausola, non è possibile utilizzare o fare riferimento a codici e file all'interno del pacchetto software. <b>Contratto-LICENZA.txt</b> per dettagli.",
"查看": "Visualizzazione",
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Visualizza le informazioni sul modello (supportato solo per file di modello piccoli estratti dalla cartella 'weights')",
"检索特征占比": "Rapporto funzionalità di ricerca (controlla la forza dell'accento, troppo alto ha artefatti):",
"提取音高和处理数据使用的CPU进程数": "Numero di processi CPU utilizzati per l'estrazione dell'altezza del suono e l'elaborazione dei dati",
"是": "Sì",
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Se memorizzare nella cache tutto l'insieme di addestramento nella memoria video. Piccoli set di dati inferiori a 10 minuti possono essere memorizzati nella cache per accelerare l'addestramento, la memorizzazione nella cache di grandi set di dati può esaurire la memoria video e non accelerare di molto",
"查看": "Visualizza",
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Visualizza le informazioni del modello (supporta solo i piccoli file di modello estratti dalla cartella weights)",
"检索特征占比": "Percentuale di caratteristiche di ricerca",
"模型": "Modello",
"模型推理": "Inferenza del modello",
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Estrazione del modello (inserire il percorso del modello di file di grandi dimensioni nella cartella \"logs\"). ",
"模型是否带音高指导": "Se il modello ha una guida del tono:",
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "Se il modello ha una guida del tono (necessario per il canto, facoltativo per il parlato):",
"模型是否带音高指导,1是0否": "Se il modello ha una guida del tono (1: sì, 0: no):",
"模型版本型号": "Versione dell'architettura del modello:",
"模型融合, 可用于测试音色融合": "Model fusion, può essere utilizzato per testare la fusione timbrica",
"模型路径": "Percorso al modello:",
"每张显卡的batch_size": "Dimensione batch per GPU:",
"淡入淡出长度": "Lunghezza dissolvenza",
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Estrazione del modello (inserisci il percorso del modello di grandi dimensioni nella cartella logs), adatto per i modelli a metà addestramento che non si desidera continuare ad addestrare, i modelli non estratti automaticamente vengono salvati come modelli di piccole dimensioni o per testare la situazione del modello intermedio",
"模型是否带音高指导": "Il modello include o meno la guida all'altezza del suono",
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "Il modello include o meno la guida all'altezza del suono (necessario per il canto, opzionale per la voce)",
"模型是否带音高指导,1是0否": "Il modello include o meno la guida all'altezza del suono, 1 sì, 0 no",
"模型版本型号": "Versione e modello del modello",
"模型融合, 可用于测试音色融合": "Fusione dei modelli, utile per testare la fusione dei toni",
"模型路径": "Percorso del modello",
"淡入淡出长度": "Lunghezza del fading in/fading out",
"版本": "Versione",
"特征提取": "Estrazione delle caratteristiche",
"特征检索库文件路径,为空则使用下拉的选择结果": "Percorso del file di indice delle caratteristiche. ",
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Tonalità +12 consigliata per la conversione da maschio a femmina e tonalità -12 per la conversione da femmina a maschio. ",
"目标采样率": "Frequenza di campionamento target:",
"算法延迟(ms):": "算法延迟(ms):",
"自动检测index路径,下拉式选择(dropdown)": "Rileva automaticamente il percorso dell'indice e seleziona dal menu a tendina:",
"特征检索库文件路径,为空则使用下拉的选择结果": "Percorso del file della libreria di ricerca delle caratteristiche, se vuoto usa la selezione a discesa",
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Consigliato +12 toni per la trasformazione da uomo a donna, -12 toni per la trasformazione da donna a uomo. Se l'intervallo tonale esplode causando distorsioni nel timbro, è possibile regolarlo manualmente nell'intervallo adatto.",
"目标采样率": "Frequenza di campionamento obiettivo",
"算法延迟(ms):": "Ritardo dell'algoritmo (ms):",
"自动检测index路径,下拉式选择(dropdown)": "Rilevamento automatico del percorso dell'indice, selezione a discesa (dropdown)",
"融合": "Fusione",
"要改的模型信息": "Informazioni sul modello da modificare:",
"要置入的模型信息": "Informazioni sul modello da posizionare:",
"要改的模型信息": "Informazioni del modello da modificare",
"要置入的模型信息": "Informazioni del modello da inserire",
"训练": "Addestramento",
"训练模型": "Addestra modello",
"训练特征索引": "Addestra indice delle caratteristiche",
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Addestramento completato. ",
"请指定说话人id": "Si prega di specificare l'ID del locutore/cantante:",
"请选择index文件": "请选择index文件",
"请选择pth文件": "请选择pth 文件",
"请选择说话人id": "Seleziona ID locutore/cantante:",
"转换": "Convertire",
"输入实验名": "Inserisci il nome dell'esperimento:",
"输入待处理音频文件夹路径": "Immettere il percorso della cartella audio da elaborare:",
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Immettere il percorso della cartella audio da elaborare (copiarlo dalla barra degli indirizzi del file manager):",
"输入待处理音频文件路径(默认是正确格式示例)": "Immettere il percorso del file audio da elaborare (l'impostazione predefinita è l'esempio di formato corretto):",
"输入源音量包络替换输出音量包络融合比例越靠近1越使用输出包络": "Regola il ridimensionamento dell'inviluppo del volume. ",
"输入监听": "输入监听",
"输入训练文件夹路径": "Inserisci il percorso della cartella di addestramento:",
"训练模型": "Addestra il modello",
"训练特征索引": "Addestramento dell'indice delle caratteristiche",
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Fine dell'addestramento, puoi visualizzare il registro di addestramento sulla console o il file train.log nella cartella dell'esperimento",
"请指定说话人id": "Si prega di specificare l'ID del parlante",
"请选择index文件": "Seleziona il file di indice",
"请选择pth文件": "Seleziona il file pth",
"请选择说话人id": "Seleziona l'ID del parlante",
"转换": "Converti",
"输入实验名": "Inserisci il nome dell'esperimento",
"输入待处理音频文件夹路径": "Inserisci il percorso della cartella dei file audio da elaborare",
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Inserisci il percorso della cartella dei file audio da elaborare (copialo dalla barra degli indirizzi del gestore dei file)",
"输入待处理音频文件路径(默认是正确格式示例)": "Inserisci il percorso del file audio da elaborare (esempio di formato corretto predefinito)",
"输入源音量包络替换输出音量包络融合比例越靠近1越使用输出包络": "Inserisci la proporzione di fusione della sostituzione dell'involucro del volume di ingresso con l'involucro del volume di uscita, più vicino a 1 più utilizza l'involucro di uscita",
"输入监听": "Inserisci l'ascolto",
"输入训练文件夹路径": "Inserisci il percorso della cartella di addestramento",
"输入设备": "Dispositivo di input",
"输入降噪": "Riduzione del rumore in ingresso",
"输出信息": "Informazioni sull'uscita",
"输出变声": "输出变声",
"输出设备": "Dispositivo di uscita",
"输出降噪": "Riduzione del rumore in uscita",
"输出音频(右下角三个点,点了可以下载)": "Esporta audio (clicca sui tre puntini in basso a destra per scaricarlo)",
"输入降噪": "Inserisci la riduzione del rumore",
"输出信息": "Informazioni di output",
"输出变声": "Variazione della voce in output",
"输出设备": "Dispositivo di output",
"输出降噪": "Riduzione del rumore in output",
"输出音频(右下角三个点,点了可以下载)": "Audio in output (tre punti nell'angolo in basso a destra, fare clic per scaricare)",
"选择.index文件": "Seleziona il file .index",
"选择.pth文件": "Seleziona il file .pth",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Seleziona l'algoritmo di estrazione del tono (\"pm\": estrazione più veloce ma risultato di qualità inferiore; \"harvest\": bassi migliori ma estremamente lenti; \"crepe\": qualità migliore ma utilizzo intensivo della GPU):",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU",
"采样率:": "采样率:",
"采样长度": "Lunghezza del campione",
"重载设备列表": "Ricaricare l'elenco dei dispositivi",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Seleziona l'algoritmo di estrazione dell'altezza del suono, l'input vocale può utilizzare pm per velocizzare, harvest ha bassi migliori ma è incredibilmente lento, crepe ha un buon effetto ma consuma molte risorse della GPU",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Seleziona l'algoritmo di estrazione dell'altezza del suono, l'input vocale può utilizzare pm per velocizzare, harvest ha bassi migliori ma è incredibilmente lento, crepe ha un buon effetto ma consuma molte risorse della GPU, rmvpe ha il miglior effetto ed è leggermente esigente sulla GPU",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Seleziona l'algoritmo di estrazione dell'altezza del suono: l'input vocale può utilizzare pm per velocizzare, la qualità del suono è elevata ma richiede molte risorse della CPU; l'input vocale può utilizzare dio per velocizzare, harvest ha una qualità del suono migliore ma è lento, rmvpe ha il miglior effetto ed è leggermente esigente sulla CPU/GPU",
"采样率:": "Frequenza di campionamento:",
"采样长度": "Lunghezza del campionamento",
"重载设备列表": "Ricarica la lista dei dispositivi",
"音调设置": "Impostazioni del tono",
"音频设备(请使用同种类驱动)": "Dispositivo audio (utilizzare lo stesso tipo di driver)",
"音高算法": "音高算法",
"音频设备(请使用同种类驱动)": "Dispositivo audio (usa driver della stessa categoria)",
"音高算法": "Algoritmo dell'altezza del suono",
"额外推理时长": "Tempo di inferenza extra"
}
}

View File

@ -8,8 +8,16 @@
"是否开启UVR5-WebUI": "UVR5-WebUIをオンにしますか",
"UVR5进程输出信息": "UVR5プロセスの出力情報",
"0b-语音切分工具": "0b-音声分割ツール",
".list标注文件的路径": ".listアテーションファイルのパス",
"GPT模型列表": "GPTモデルリスト",
"SoVITS模型列表": "SoVITSモデルリスト",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名不是全路径。": "音声を切り取った後の音声が保存されているディレクトリ!読み取られる音声ファイルの完全なパス=このディレクトリ-連結-リストファイル内の波形に対応するファイル名(フルパスではない)。",
"音频自动切分输入路径,可文件可文件夹": "オーディオの自動分割入力パス、ファイルまたはフォルダを指定できます",
"切分后的子音频的输出根目录": "分割後のサブオーディオの出力ルートディレクトリ",
"怎么切": "どうやって切るか",
"不切": "切らない",
"凑四句一切": "4つの文で埋める",
"按英文句号.切": "英文のピリオドで切ってください",
"threshold:音量小于这个值视作静音的备选切割点": "閾値:この値未満の音量は静音と見なされ、代替のカットポイントとして扱われます",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length各セグメントの最小長さ。最初のセグメントが短すぎる場合、連続して後続のセグメントに接続され、この値を超えるまで続きます。",
"min_interval:最短切割间隔": "min_interval最短カット間隔",

View File

@ -1,15 +1,23 @@
{
"很遗憾您这没有能用的显卡来支持您训练": "아쉽게도 훈련을 지원할 수 있는 사용 가능한 그래픽 카드가 없습니다",
"很遗憾您这没有能用的显卡来支持您训练": "죄송합니다. 훈련을 지원할 수 있는 그래픽 카드가 없습니다.",
"UVR5已开启": "UVR5가 활성화되었습니다",
"UVR5已关闭": "UVR5가 비활성화되었습니다",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "본 소프트웨어는 MIT 라이선스로 오픈 소스로 제공되며, 제작자는 소프트웨어에 대해 어떠한 제어력도 가지지 않습니다. 소프트웨어 사용자 및 소프트웨어에서 내보낸 소리를 전파하는 자는 전적으로 책임져야 합니다. <br>이 조항을 인정하지 않으면 소프트웨어의 코드 및 파일을 사용하거나 인용할 수 없습니다. 루트 디렉터리의 <b>LICENSE</b>를 참조하십시오.",
"0-前置数据集获取工具": "0-전방 데이터 세트 수집 도구",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 보컬 및 반주 분리 및 에코 및 지연 제거 도구",
"是否开启UVR5-WebUI": "UVR5-WebUI를 열까요?",
"是否开启UVR5-WebUI": "UVR5-WebUI를 여시겠습니까?",
"UVR5进程输出信息": "UVR5 프로세스 출력 정보",
"0b-语音切分工具": "0b-음성 분리 도구",
".list标注文件的路径": ".list 주석 파일 경로",
"GPT模型路径": "GPT 모델 경로",
"SoVITS模型列表": "SoVITS 모델 목록",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名不是全路径。": "분리된 오디오가 있는 디렉터리를 입력하십시오! 읽은 오디오 파일의 전체 경로 = 해당 디렉터리-연결-목록 파일에 해당하는 원본 이름 (전체 경로가 아님).",
"音频自动切分输入路径,可文件可文件夹": "오디오 자동 분리 입력 경로, 파일 또는 폴더 가능",
"切分后的子音频的输出根目录": "분리된 하위 오디오의 출력 기본 디렉터리",
"怎么切": "자르기 옵션",
"不切": "자르지 않음",
"凑四句一切": "네 문장의 세트를 완성하세요.",
"按英文句号.切": "영어 문장으로 분리하기",
"threshold:音量小于这个值视作静音的备选切割点": "임계 값: 이 값보다 작은 볼륨은 대체 분리 지점으로 간주됩니다.",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "최소 길이: 각 세그먼트의 최소 길이. 첫 번째 세그먼트가 너무 짧으면 계속해서 뒷부분과 연결하여 이 값 이상이 될 때까지",
"min_interval:最短切割间隔": "최소 분리 간격",
@ -86,11 +94,13 @@
"TTS推理WebUI进程输出信息": "TTS 추론 WebUI 프로세스 출력 정보",
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-음성 변환",
"施工中,请静候佳音": "공사 중입니다. 기다려주십시오.",
"参考音频在3~10秒范围外请更换": "참고 오디오가 3~10초 범위를 벗어났습니다. 다른 것으로 바꾸십시오!",
"请上传3~10秒内参考音频超过会报错": "3~10초 이내의 참고 오디오를 업로드하십시오. 초과하면 오류가 발생합니다!",
"TTS推理进程已开启": "TTS 추론 프로세스가 열렸습니다",
"TTS推理进程已关闭": "TTS 추론 프로세스가 닫혔습니다",
"打标工具WebUI已开启": "주석 도구 WebUI가 열렸습니다",
"打标工具WebUI已关闭": "주석 도구 WebUI가 닫혔습니다",
"*请上传并填写参考信息": "*참고 정보를 업로드하고 입력하십시오",
"*请填写需要合成的目标文本。中英混合选中文,日英混合选日文,中日混合暂不支持,非目标语言文本自动遗弃。": "*합성할 대상 텍스트를 입력하십시오. 중국어와 영어를 혼합하면 중국어를 선택하고 일본어와 영어를 혼합하면 일본어를 선택하십시오. 중국어와 일본어를 혼합하는 것은 아직 지원되지 않으며 대상 언어가 아닌 텍스트는 자동으로 버려집니다.",
"*请填写需要合成的目标文本": "*합성할 대상 텍스트를 입력하십시오",
"ASR任务开启%s": "ASR 작업 시작: %s",
"GPT训练完成": "GPT 훈련 완료",
@ -272,4 +282,4 @@
"音频设备(请使用同种类驱动)": "오디오 장치(동일한 유형의 드라이버 사용 권장)",
"音高算法": "음높이 알고리즘",
"额外推理时长": "추가 추론 시간"
}
}

View File

@ -8,8 +8,16 @@
"是否开启UVR5-WebUI": "是否开启UVR5-WebUI",
"UVR5进程输出信息": "UVR5进程输出信息",
"0b-语音切分工具": "0b-语音切分工具",
".list标注文件的路径": ".list标注文件的路径",
"GPT模型列表": "GPT模型列表",
"SoVITS模型列表": "SoVITS模型列表",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名不是全路径。": "填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名不是全路径。",
"音频自动切分输入路径,可文件可文件夹": "音频自动切分输入路径,可文件可文件夹",
"切分后的子音频的输出根目录": "切分后的子音频的输出根目录",
"怎么切": "怎么切",
"不切": "不切",
"凑四句一切": "凑四句一切",
"按英文句号.切": "按英文句号.切",
"threshold:音量小于这个值视作静音的备选切割点": "threshold:音量小于这个值视作静音的备选切割点",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值",
"min_interval:最短切割间隔": "min_interval:最短切割间隔",
@ -86,13 +94,15 @@
"TTS推理WebUI进程输出信息": "TTS推理WebUI进程输出信息",
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-变声",
"施工中,请静候佳音": "施工中,请静候佳音",
"参考音频在3~10秒范围外请更换": "参考音频在3~10秒范围外请更换",
"请上传3~10秒内参考音频超过会报错": "请上传3~10秒内参考音频超过会报错",
"TTS推理进程已开启": "TTS推理进程已开启",
"TTS推理进程已关闭": "TTS推理进程已关闭",
"打标工具WebUI已开启": "打标工具WebUI已开启",
"打标工具WebUI已关闭": "打标工具WebUI已关闭",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.",
"*请上传并填写参考信息": "*请上传并填写参考信息",
"*请填写需要合成的目标文本": "*请填写需要合成的目标文本",
"*请填写需要合成的目标文本。中英混合选中文,日英混合选日文,中日混合暂不支持,非目标语言文本自动遗弃。": "*请填写需要合成的目标文本。中英混合选中文,日英混合选日文,中日混合暂不支持,非目标语言文本自动遗弃。",
"ASR任务开启%s": "ASR任务开启%s",
"GPT训练完成": "GPT训练完成",
"GPT训练开始%s": "GPT训练开始%s",

View File

@ -5,10 +5,11 @@ librosa==0.9.2
numba==0.56.4
pytorch-lightning
gradio==3.38.0
gradio_client==0.8.1
ffmpeg-python
onnxruntime
tqdm
funasr==0.8.7
funasr==1.0.0
cn2an
pypinyin
pyopenjtalk
@ -22,3 +23,6 @@ PyYAML
psutil
jieba_fast
jieba
LangSegment>=0.2.0
Faster_Whisper
wordsegment

0
tools/__init__.py Normal file
View File

31
tools/asr/config.py Normal file
View File

@ -0,0 +1,31 @@
import os
def check_fw_local_models():
'''
Check at startup whether Faster Whisper models are available locally.
'''
model_size_list = [
"tiny", "tiny.en",
"base", "base.en",
"small", "small.en",
"medium", "medium.en",
"large", "large-v1",
"large-v2", "large-v3"]
for i, size in enumerate(model_size_list):
if os.path.exists(f'tools/asr/models/faster-whisper-{size}'):
model_size_list[i] = size + '-local'
return model_size_list
asr_dict = {
"达摩 ASR (中文)": {
'lang': ['zh'],
'size': ['large'],
'path': 'funasr_asr.py',
},
"Faster Whisper (多语种)": {
'lang': ['auto', 'zh', 'en', 'ja'],
'size': check_fw_local_models(),
'path': 'fasterwhisper_asr.py'
}
}
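Note (not part of this commit): asr_dict is consumed by webui.py further down in this diff, which turns the selected entry into a command line. A minimal sketch of that lookup, assuming the repository root is the working directory; build_asr_cmd and the example values are illustrative only.

from tools.asr.config import asr_dict

def build_asr_cmd(python_exec, model_key, inp_dir, opt_dir, size, lang, precision="float16"):
    entry = asr_dict[model_key]
    # guard against dropdown combinations the selected backend does not support
    assert size in entry["size"] and lang in entry["lang"]
    return (f'"{python_exec}" tools/asr/{entry["path"]}'
            f' -i "{inp_dir}" -o "{opt_dir}" -s {size} -l {lang} -p {precision}')

print(build_asr_cmd("python", "Faster Whisper (多语种)", "raw/xxx", "output/asr_opt", "large-v3", "ja"))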

View File

@ -0,0 +1,110 @@
import argparse
import os
os.environ["HF_ENDPOINT"]="https://hf-mirror.com"
import traceback
import requests
from glob import glob
import torch
from faster_whisper import WhisperModel
from tqdm import tqdm
from tools.asr.config import check_fw_local_models
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
language_code_list = [
"af", "am", "ar", "as", "az",
"ba", "be", "bg", "bn", "bo",
"br", "bs", "ca", "cs", "cy",
"da", "de", "el", "en", "es",
"et", "eu", "fa", "fi", "fo",
"fr", "gl", "gu", "ha", "haw",
"he", "hi", "hr", "ht", "hu",
"hy", "id", "is", "it", "ja",
"jw", "ka", "kk", "km", "kn",
"ko", "la", "lb", "ln", "lo",
"lt", "lv", "mg", "mi", "mk",
"ml", "mn", "mr", "ms", "mt",
"my", "ne", "nl", "nn", "no",
"oc", "pa", "pl", "ps", "pt",
"ro", "ru", "sa", "sd", "si",
"sk", "sl", "sn", "so", "sq",
"sr", "su", "sv", "sw", "ta",
"te", "tg", "th", "tk", "tl",
"tr", "tt", "uk", "ur", "uz",
"vi", "yi", "yo", "zh", "yue",
"auto"]
def execute_asr(input_folder, output_folder, model_size, language,precision):
if '-local' in model_size:
model_size = model_size[:-6]
model_path = f'tools/asr/models/faster-whisper-{model_size}'
else:
model_path = model_size
if language == 'auto':
language = None # leave the language unset; the model will output the most probable one
print("loading faster whisper model:",model_size,model_path)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
try:
model = WhisperModel(model_path, device=device, compute_type=precision)
except:
return print(traceback.format_exc())
output = []
output_file_name = os.path.basename(input_folder)
output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for file in tqdm(glob(os.path.join(input_folder, '**/*.wav'), recursive=True)):
try:
segments, info = model.transcribe(
audio = file,
beam_size = 5,
vad_filter = True,
vad_parameters = dict(min_silence_duration_ms=700),
language = language)
text = ''
if info.language == "zh":
print("检测为中文文本,转funasr处理")
if("only_asr"not in globals()):
from tools.asr.funasr_asr import only_asr # English-only runs never reach this, so the funasr models are not imported or downloaded
text = only_asr(file)
if text == '':
for segment in segments:
text += segment.text
output.append(f"{file}|{output_file_name}|{info.language.upper()}|{text}")
except:
return print(traceback.format_exc())
with open(output_file_path, "w", encoding="utf-8") as f:
f.write("\n".join(output))
print(f"ASR 任务完成->标注文件路径: {output_file_path}\n")
return output_file_path
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_folder", type=str, required=True,
help="Path to the folder containing WAV files.")
parser.add_argument("-o", "--output_folder", type=str, required=True,
help="Output folder to store transcriptions.")
parser.add_argument("-s", "--model_size", type=str, default='large-v3',
choices=check_fw_local_models(),
help="Model Size of Faster Whisper")
parser.add_argument("-l", "--language", type=str, default='ja',
choices=language_code_list,
help="Language of the audio files.")
parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16','float32'],
help="fp16 or fp32")
cmd = parser.parse_args()
output_file_path = execute_asr(
input_folder = cmd.input_folder,
output_folder = cmd.output_folder,
model_size = cmd.model_size,
language = cmd.language,
precision = cmd.precision,
)
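Note (not part of this commit): the script assumes it is launched from the repository root so the relative tools/asr/models path resolves. A usage sketch with placeholder folders; execute_asr returns the path of the .list annotation file it writes.

# CLI form (mirrors the argparse defaults above):
#   python tools/asr/fasterwhisper_asr.py -i output/slicer_opt -o output/asr_opt -s large-v3 -l ja -p float16
from tools.asr.fasterwhisper_asr import execute_asr

list_path = execute_asr(
    input_folder="output/slicer_opt",  # sliced .wav files (placeholder path)
    output_folder="output/asr_opt",
    model_size="large-v3",
    language="auto",                   # mapped to None above so the model auto-detects the language
    precision="float16",
)
print(list_path)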

76
tools/asr/funasr_asr.py Normal file
View File

@ -0,0 +1,76 @@
# -*- coding:utf-8 -*-
import argparse
import os
import traceback
from tqdm import tqdm
from funasr import AutoModel
path_asr = 'tools/asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'
path_vad = 'tools/asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch'
path_punc = 'tools/asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch'
path_asr = path_asr if os.path.exists(path_asr) else "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
path_vad = path_vad if os.path.exists(path_vad) else "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
path_punc = path_punc if os.path.exists(path_punc) else "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
model = AutoModel(
model = path_asr,
model_revision = "v2.0.4",
vad_model = path_vad,
vad_model_revision = "v2.0.4",
punc_model = path_punc,
punc_model_revision = "v2.0.4",
)
def only_asr(input_file):
try:
text = model.generate(input=input_file)[0]["text"]
except:
text = ''
print(traceback.format_exc())
return text
def execute_asr(input_folder, output_folder, model_size, language):
input_file_names = os.listdir(input_folder)
input_file_names.sort()
output = []
output_file_name = os.path.basename(input_folder)
for name in tqdm(input_file_names):
try:
text = model.generate(input="%s/%s"%(input_folder, name))[0]["text"]
output.append(f"{input_folder}/{name}|{output_file_name}|{language.upper()}|{text}")
except:
print(traceback.format_exc())
output_folder = output_folder or "output/asr_opt"
os.makedirs(output_folder, exist_ok=True)
output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')
with open(output_file_path, "w", encoding="utf-8") as f:
f.write("\n".join(output))
print(f"ASR 任务完成->标注文件路径: {output_file_path}\n")
return output_file_path
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_folder", type=str, required=True,
help="Path to the folder containing WAV files.")
parser.add_argument("-o", "--output_folder", type=str, required=True,
help="Output folder to store transcriptions.")
parser.add_argument("-s", "--model_size", type=str, default='large',
help="Model Size of FunASR is Large")
parser.add_argument("-l", "--language", type=str, default='zh', choices=['zh'],
help="Language of the audio files.")
parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16','float32'],
help="fp16 or fp32")#还没接入
cmd = parser.parse_args()
execute_asr(
input_folder = cmd.input_folder,
output_folder = cmd.output_folder,
model_size = cmd.model_size,
language = cmd.language,
)
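Note (not part of this commit): this backend is Chinese-only, so -l is fixed to zh and the unused -p flag is accepted only for interface parity with the Faster Whisper script. A usage sketch with placeholder paths; each output line has the form "<wav path>|<folder name>|ZH|<transcript>".

# CLI form:
#   python tools/asr/funasr_asr.py -i output/slicer_opt -o output/asr_opt
from tools.asr.funasr_asr import execute_asr, only_asr

print(only_asr("output/slicer_opt/example.wav"))  # transcribe a single file (placeholder name)
execute_asr(input_folder="output/slicer_opt", output_folder="output/asr_opt",
            model_size="large", language="zh")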

29
tools/cmd-denoise.py Normal file
View File

@ -0,0 +1,29 @@
import os,argparse
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from tqdm import tqdm
path_denoise = 'tools/denoise-model/speech_frcrn_ans_cirm_16k'
path_denoise = path_denoise if os.path.exists(path_denoise) else "damo/speech_frcrn_ans_cirm_16k"
ans = pipeline(Tasks.acoustic_noise_suppression,model=path_denoise)
def execute_denoise(input_folder,output_folder):
os.makedirs(output_folder,exist_ok=True)
# print(input_folder)
# print(list(os.listdir(input_folder).sort()))
for name in tqdm(os.listdir(input_folder)):
ans("%s/%s"%(input_folder,name),output_path='%s/%s'%(output_folder,name))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_folder", type=str, required=True,
help="Path to the folder containing WAV files.")
parser.add_argument("-o", "--output_folder", type=str, required=True,
help="Output folder to store transcriptions.")
parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16','float32'],
help="fp16 or fp32")#还没接入
cmd = parser.parse_args()
execute_denoise(
input_folder = cmd.input_folder,
output_folder = cmd.output_folder,
)
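Note (not part of this commit): the dash in the file name means the tool is meant to be run as a script rather than imported; webui.py's new open_denoise() below shells out to it the same way. A sketch with placeholder folders; the -p flag is parsed but, as the inline comment says, not wired up yet.

# CLI form:
#   python tools/cmd-denoise.py -i output/slicer_opt -o output/denoise_opt
import subprocess, sys

subprocess.run([sys.executable, "tools/cmd-denoise.py",
                "-i", "output/slicer_opt", "-o", "output/denoise_opt"], check=True)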

View File

@ -1,34 +0,0 @@
# -*- coding:utf-8 -*-
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
import sys,os,traceback
dir=sys.argv[1]
# opt_name=dir.split("\\")[-1].split("/")[-1]
opt_name=os.path.basename(dir)
path_asr='tools/damo_asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'
path_vad='tools/damo_asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch'
path_punc='tools/damo_asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch'
path_asr=path_asr if os.path.exists(path_asr)else "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
path_vad=path_vad if os.path.exists(path_vad)else "damo/speech_fsmn_vad_zh-cn-16k-common-pytorch"
path_punc=path_punc if os.path.exists(path_punc)else "damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
inference_pipeline = pipeline(
task=Tasks.auto_speech_recognition,
model=path_asr,
vad_model=path_vad,
punc_model=path_punc,
)
opt=[]
for name in os.listdir(dir):
try:
text = inference_pipeline(audio_in="%s/%s"%(dir,name))["text"]
opt.append("%s/%s|%s|ZH|%s"%(dir,name,opt_name,text))
except:
print(traceback.format_exc())
opt_dir="output/asr_opt"
os.makedirs(opt_dir,exist_ok=True)
with open("%s/%s.list"%(opt_dir,opt_name),"w",encoding="utf-8")as f:f.write("\n".join(opt))

2
tools/denoise-model/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
*
!.gitignore

View File

@ -0,0 +1,285 @@
{
"很遗憾您这没有能用的显卡来支持您训练": "죄송합니다. 훈련을 지원할 수 있는 그래픽 카드가 없습니다.",
"UVR5已开启": "UVR5가 활성화되었습니다",
"UVR5已关闭": "UVR5가 비활성화되었습니다",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "본 소프트웨어는 MIT 라이선스로 오픈 소스로 제공되며, 제작자는 소프트웨어에 대해 어떠한 제어력도 가지지 않습니다. 소프트웨어 사용자 및 소프트웨어에서 내보낸 소리를 전파하는 자는 전적으로 책임져야 합니다. <br>이 조항을 인정하지 않으면 소프트웨어의 코드 및 파일을 사용하거나 인용할 수 없습니다. 루트 디렉터리의 <b>LICENSE</b>를 참조하십시오.",
"0-前置数据集获取工具": "0-전방 데이터 세트 수집 도구",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 보컬 및 반주 분리 및 에코 및 지연 제거 도구",
"是否开启UVR5-WebUI": "UVR5-WebUI를 여시겠습니까?",
"UVR5进程输出信息": "UVR5 프로세스 출력 정보",
"0b-语音切分工具": "0b-음성 분리 도구",
".list标注文件的路径": ".list 주석 파일 경로",
"GPT模型路径": "GPT 모델 경로",
"SoVITS模型列表": "SoVITS 모델 목록",
"填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名不是全路径。": "분리된 오디오가 있는 디렉터리를 입력하십시오! 읽은 오디오 파일의 전체 경로 = 해당 디렉터리-연결-목록 파일에 해당하는 원본 이름 (전체 경로가 아님).",
"音频自动切分输入路径,可文件可文件夹": "오디오 자동 분리 입력 경로, 파일 또는 폴더 가능",
"切分后的子音频的输出根目录": "분리된 하위 오디오의 출력 기본 디렉터리",
"怎么切": "자르기 옵션",
"不切": "자르지 않음",
"凑四句一切": "네 문장의 세트를 완성하세요.",
"按英文句号.切": "영어 문장으로 분리하기",
"threshold:音量小于这个值视作静音的备选切割点": "임계 값: 이 값보다 작은 볼륨은 대체 분리 지점으로 간주됩니다.",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "최소 길이: 각 세그먼트의 최소 길이. 첫 번째 세그먼트가 너무 짧으면 계속해서 뒷부분과 연결하여 이 값 이상이 될 때까지",
"min_interval:最短切割间隔": "최소 분리 간격",
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop 크기: 볼륨 곡선을 계산하는 방법. 작을수록 정확도가 높아지지만 계산량이 높아집니다 (정확도가 높다고 효과가 좋아지지 않음)",
"max_sil_kept:切完后静音最多留多长": "최대 유지되는 정적 길이 (분리 후)",
"开启语音切割": "음성 분리 활성화",
"终止语音切割": "음성 분리 종료",
"max:归一化后最大值多少": "최대 값 (정규화 후)",
"alpha_mix:混多少比例归一化后音频进来": "알파 믹스: 정규화된 오디오가 들어오는 비율",
"切割使用的进程数": "사용되는 프로세스 수로 자르기",
"语音切割进程输出信息": "음성 분리 프로세스 출력 정보",
"0c-中文批量离线ASR工具": "0c-중국어 대량 오프라인 ASR 도구",
"开启离线批量ASR": "오프라인 대량 ASR 활성화",
"终止ASR进程": "ASR 프로세스 종료",
"批量ASR(中文only)输入文件夹路径": "대량 ASR (중국어 전용) 입력 폴더 경로",
"ASR进程输出信息": "ASR 프로세스 출력 정보",
"0d-语音文本校对标注工具": "0d-음성 텍스트 교정 주석 도구",
"是否开启打标WebUI": "웹 기반 주석 활성화 여부",
"打标数据标注文件路径": "주석 데이터 주석 파일 경로",
"打标工具进程输出信息": "주석 도구 프로세스 출력 정보",
"1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
"*实验/模型名": "*실험/모델 이름",
"显卡信息": "그래픽 카드 정보",
"预训练的SoVITS-G模型路径": "사전 훈련된 SoVITS-G 모델 경로",
"预训练的SoVITS-D模型路径": "사전 훈련된 SoVITS-D 모델 경로",
"预训练的GPT模型路径": "사전 훈련된 GPT 모델 경로",
"1A-训练集格式化工具": "1A-훈련 세트 형식 지정 도구",
"输出logs/实验名目录下应有23456开头的文件和文件夹": "logs/실험 이름 디렉터리에는 23456으로 시작하는 파일과 폴더가 있어야 함",
"*文本标注文件": "*텍스트 주석 파일",
"*训练集音频文件目录": "*훈련 세트 오디오 파일 디렉터리",
"训练集音频文件目录 拼接 list文件里波形对应的文件名。": "훈련 세트 오디오 파일 디렉터리 - 목록 파일에 해당하는 원형 이름 연결",
"1Aa-文本内容": "1Aa-텍스트 내용",
"GPU卡号以-分割,每个卡号一个进程": "GPU 카드 번호는 -로 구분되며 각 카드 번호에 하나의 프로세스가 있어야 함",
"预训练的中文BERT模型路径": "사전 훈련된 중국어 BERT 모델 경로",
"开启文本获取": "텍스트 추출 활성화",
"终止文本获取进程": "텍스트 추출 프로세스 종료",
"文本进程输出信息": "텍스트 프로세스 출력 정보",
"1Ab-SSL自监督特征提取": "1Ab-SSL 자기 지도 특징 추출",
"预训练的SSL模型路径": "사전 훈련된 SSL 모델 경로",
"开启SSL提取": "SSL 추출 활성화",
"终止SSL提取进程": "SSL 추출 프로세스 종료",
"SSL进程输出信息": "SSL 프로세스 출력 정보",
"1Ac-语义token提取": "1Ac-의미 토큰 추출",
"开启语义token提取": "의미 토큰 추출 활성화",
"终止语义token提取进程": "의미 토큰 추출 프로세스 종료",
"语义token提取进程输出信息": "의미 토큰 추출 프로세스 출력 정보",
"1Aabc-训练集格式化一键三连": "1Aabc-훈련 세트 형식 지정 일괄 처리",
"开启一键三连": "일괄 처리 활성화",
"终止一键三连": "일괄 처리 종료",
"一键三连进程输出信息": "일괄 처리 프로세스 출력 정보",
"1B-微调训练": "1B-미세 조정 훈련",
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS 훈련. 공유 용 모델 파일은 SoVITS_weights 하위에 출력됩니다.",
"每张显卡的batch_size": "각 그래픽 카드의 배치 크기",
"总训练轮数total_epoch不建议太高": "총 훈련 라운드 수 (total_epoch), 너무 높지 않게 권장됨",
"文本模块学习率权重": "텍스트 모듈 학습률 가중치",
"保存频率save_every_epoch": "저장 빈도 (각 라운드마다)",
"是否仅保存最新的ckpt文件以节省硬盘空间": "디스크 공간을 절약하기 위해 최신 ckpt 파일만 저장할지 여부",
"是否在每次保存时间点将最终小模型保存至weights文件夹": "각 저장 시간에 최종 작은 모델을 weights 폴더에 저장할지 여부",
"开启SoVITS训练": "SoVITS 훈련 활성화",
"终止SoVITS训练": "SoVITS 훈련 종료",
"SoVITS训练进程输出信息": "SoVITS 훈련 프로세스 출력 정보",
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT 훈련. 공유 용 모델 파일은 GPT_weights 하위에 출력됩니다.",
"总训练轮数total_epoch": "총 훈련 라운드 수 (total_epoch)",
"开启GPT训练": "GPT 훈련 활성화",
"终止GPT训练": "GPT 훈련 종료",
"GPT训练进程输出信息": "GPT 훈련 프로세스 출력 정보",
"1C-推理": "1C-추론",
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模体验5秒Zero Shot TTS用。": "SoVITS_weights 및 GPT_weights에 저장된 훈련 완료된 모델 중 선택. 기본적으로 하나는 기본 모델이며 5초 Zero Shot TTS를 체험할 수 있습니다.",
"*GPT模型列表": "*GPT 모델 목록",
"*SoVITS模型列表": "*SoVITS 모델 목록",
"GPU卡号,只能填1个整数": "GPU 카드 번호, 1개의 정수만 입력 가능",
"刷新模型路径": "모델 경로 새로 고침",
"是否开启TTS推理WebUI": "TTS 추론 WebUI 활성화 여부",
"TTS推理WebUI进程输出信息": "TTS 추론 WebUI 프로세스 출력 정보",
"2-GPT-SoVITS-变声": "2-GPT-SoVITS-음성 변환",
"施工中,请静候佳音": "공사 중입니다. 기다려주십시오.",
"参考音频在3~10秒范围外请更换": "참고 오디오가 3~10초 범위를 벗어났습니다. 다른 것으로 바꾸십시오!",
"请上传3~10秒内参考音频超过会报错": "3~10초 이내의 참고 오디오를 업로드하십시오. 초과하면 오류가 발생합니다!",
"TTS推理进程已开启": "TTS 추론 프로세스가 열렸습니다",
"TTS推理进程已关闭": "TTS 추론 프로세스가 닫혔습니다",
"打标工具WebUI已开启": "주석 도구 WebUI가 열렸습니다",
"打标工具WebUI已关闭": "주석 도구 WebUI가 닫혔습니다",
"*请填写需要合成的目标文本。中英混合选中文,日英混合选日文,中日混合暂不支持,非目标语言文本自动遗弃。": "*합성할 대상 텍스트를 입력하십시오. 중국어와 영어를 혼합하면 중국어를 선택하고 일본어와 영어를 혼합하면 일본어를 선택하십시오. 중국어와 일본어를 혼합하는 것은 아직 지원되지 않으며 대상 언어가 아닌 텍스트는 자동으로 버려집니다.",
"*请填写需要合成的目标文本": "*합성할 대상 텍스트를 입력하십시오",
"ASR任务开启%s": "ASR 작업 시작: %s",
"GPT训练完成": "GPT 훈련 완료",
"GPT训练开始%s": "GPT 훈련 시작: %s",
"SSL提取进程执行中": "SSL 추출 프로세스 실행 중",
"SSL提取进程结束": "SSL 추출 프로세스 종료",
"SoVITS训练完成": "SoVITS 훈련 완료",
"SoVITS训练开始%s": "SoVITS 훈련 시작: %s",
"一键三连中途报错": "일괄 처리 중 오류 발생",
"一键三连进程结束": "일괄 처리 프로세스 종료",
"中文": "중국어",
"凑50字一切": "50자를 채우십시오",
"凑五句一切": "다섯 문장을 채우십시오",
"切分后文本": "분리된 텍스트",
"切割执行中": "분리 진행 중",
"切割结束": "분리 종료",
"参考音频的文本": "참고 오디오의 텍스트",
"参考音频的语种": "참고 오디오의 언어",
"合成语音": "합성 음성",
"后续将支持混合语种编码文本输入。": "향후 혼합 언어 코딩 텍스트 입력을 지원할 예정입니다.",
"已有正在进行的ASR任务需先终止才能开启下一次任务": "이미 진행 중인 ASR 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
"已有正在进行的GPT训练任务需先终止才能开启下一次任务": "이미 진행 중인 GPT 훈련 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
"已有正在进行的SSL提取任务需先终止才能开启下一次任务": "이미 진행 중인 SSL 추출 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
"已有正在进行的SoVITS训练任务需先终止才能开启下一次任务": "이미 진행 중인 SoVITS 훈련 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
"已有正在进行的一键三连任务,需先终止才能开启下一次任务": "이미 진행 중인 일괄 처리 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
"已有正在进行的切割任务,需先终止才能开启下一次任务": "이미 진행 중인 분리 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
"已有正在进行的文本任务,需先终止才能开启下一次任务": "이미 진행 중인 텍스트 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
"已有正在进行的语义token提取任务需先终止才能开启下一次任务": "이미 진행 중인 의미 토큰 추출 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.",
"已终止ASR进程": "ASR 프로세스 종료됨",
"已终止GPT训练": "GPT 훈련 종료됨",
"已终止SoVITS训练": "SoVITS 훈련 종료됨",
"已终止所有1a进程": "모든 1a 프로세스 종료됨",
"已终止所有1b进程": "모든 1b 프로세스 종료됨",
"已终止所有一键三连进程": "모든 일괄 처리 프로세스 종료됨",
"已终止所有切割进程": "모든 분리 프로세스 종료됨",
"已终止所有语义token进程": "모든 의미 토큰 프로세스 종료됨",
"按中文句号。切": "중국어 문장으로 분리하십시오.",
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "텍스트 분리 도구. 너무 긴 텍스트는 합성 결과가 항상 좋지 않을 수 있으므로 너무 길면 먼저 분리하는 것이 좋습니다. 합성은 텍스트 줄 바꿈을 기준으로 분리되어 다시 조합됩니다.",
"文本进程执行中": "텍스트 프로세스 실행 중",
"文本进程结束": "텍스트 프로세스 종료",
"日文": "일본어",
"英文": "영어",
"语义token提取进程执行中": "의미 토큰 추출 프로세스 실행 중",
"语义token提取进程结束": "의미 토큰 추출 프로세스 종료",
"请上传参考音频": "참고 오디오를 업로드하십시오",
"输入路径不存在": "입력 경로가 존재하지 않습니다",
"输入路径存在但既不是文件也不是文件夹": "입력 경로가 파일이나 폴더가 아닙니다",
"输出的语音": "출력 음성",
"进度1a-done": "진행: 1a-done",
"进度1a-done, 1b-ing": "진행: 1a-done, 1b-ing",
"进度1a-ing": "진행: 1a-ing",
"进度1a1b-done": "진행: 1a1b-done",
"进度1a1b-done, 1cing": "진행: 1a1b-done, 1cing",
"进度all-done": "진행: all-done",
"需要合成的切分前文本": "합성해야 할 분할 전 텍스트",
"需要合成的文本": "합성해야 할 텍스트",
"需要合成的语种": "합성해야 할 언어",
">=3则使用对harvest音高识别的结果使用中值滤波数值为滤波半径使用可以削弱哑音": ">=3이면 harvest 음고 인식 결과에 중앙값 필터를 사용하며, 값은 필터 반경이며 사용하면 소리를 약하게 할 수 있습니다",
"A模型权重": "A 모델 가중치",
"A模型路径": "A 모델 경로",
"B模型路径": "B 모델 경로",
"E:\\语音音频+标注\\米津玄师\\src": "E:\\음성 오디오 + 주석\\Miyuki Kenshi\\src",
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 곡선 파일, 선택 사항, 한 줄에 하나의 음고, 기본 F0 및 음조 대신 사용",
"Index Rate": "인덱스 비율",
"Onnx导出": "Onnx 내보내기",
"Onnx输出路径": "Onnx 출력 경로",
"RVC模型路径": "RVC 모델 경로",
"ckpt处理": "ckpt 처리",
"harvest进程数": "harvest 프로세스 수",
"index文件路径不可包含中文": "인덱스 파일 경로에는 중국어를 포함할 수 없습니다",
"pth文件路径不可包含中文": "pth 파일 경로에는 중국어를 포함할 수 없습니다",
"rmvpe卡号配置以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe 카드 번호 구성: 각 입력에 사용되는 다른 프로세스 카드를 -로 구분하여 입력하십시오. 예: 0-0-1은 카드 0에서 2개의 프로세스를 실행하고 카드 1에서 1개의 프로세스를 실행합니다",
"step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1: 실험 구성 입력. 실험 데이터는 logs 하위에 있으며 각 실험에 대한 폴더가 있어야합니다. 실험 이름 경로를 수동으로 입력해야하며 실험 구성, 로그, 훈련된 모델 파일이 포함되어 있습니다.",
"step1:正在处理数据": "step1: 데이터 처리 중",
"step2:正在提取音高&正在提取特征": "step2: 음고 추출 및 특징 추출 중",
"step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a: 자동으로 훈련 폴더에서 오디오로 디코딩할 수 있는 모든 파일을 반복하고 슬라이스 정규화를 수행하여 실험 디렉토리에 2 개의 wav 폴더를 생성합니다. 현재 단일 훈련만 지원됩니다.",
"step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "step2b: CPU로 음고 추출(모델이 음고를 지원하는 경우), GPU로 특징 추출(카드 번호 선택)",
"step3: 填写训练设置, 开始训练模型和索引": "step3: 훈련 설정 입력, 모델 및 인덱스 훈련 시작",
"step3a:正在训练模型": "step3a: 모델 훈련 중",
"一键训练": "일괄 훈련",
"也可批量输入音频文件, 二选一, 优先读文件夹": "오디오 파일을 일괄로 입력할 수도 있습니다. 둘 중 하나를 선택하고 폴더를 읽기를 우선합니다.",
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "-로 구분하여 입력에 사용되는 카드 번호를 지정하십시오. 예 : 0-1-2는 카드 0, 1 및 2를 사용합니다",
"伴奏人声分离&去混响&去回声": "반주 및 보컬 분리 & 리버브 제거 & 에코 제거",
"使用模型采样率": "모델 샘플링 속도 사용",
"使用设备采样率": "기기 샘플링 속도 사용",
"保存名": "저장 이름",
"保存的文件名, 默认空为和源文件同名": "저장할 파일 이름, 기본적으로 공백은 원본 파일과 동일한 이름입니다",
"保存的模型名不带后缀": "저장할 모델 이름에는 확장자가 없습니다",
"保护清辅音和呼吸声防止电音撕裂等artifact拉满0.5不开启,调低加大保护力度但可能降低索引效果": "클리어 자음 및 숨소를 보호하여 전자 음향 찢김과 같은 아티팩트를 방지하려면 0.5로 설정하되, 보호 강도를 높이려면 0.5로 당기지 않고 낮추면 인덱스 효과가 감소할 수 있습니다",
"修改": "수정",
"修改模型信息(仅支持weights文件夹下提取的小模型文件)": "모델 정보 수정 (weights 폴더에서 추출된 작은 모델 파일만 지원됨)",
"停止音频转换": "오디오 변환 중지",
"全流程结束!": "전체 프로세스 완료!",
"刷新音色列表和索引路径": "음색 목록 및 인덱스 경로 새로 고침",
"加载模型": "모델 로드",
"加载预训练底模D路径": "사전 훈련된 기본 모델 D 경로 로드",
"加载预训练底模G路径": "사전 훈련된 기본 모델 G 경로 로드",
"单次推理": "단일 추론",
"卸载音色省显存": "음색 언로드 및 GPU 메모리 절약",
"变调(整数, 半音数量, 升八度12降八度-12)": "음높이 변경(정수, 반음 수, 올림 높이 12 내림 높이 -12)",
"后处理重采样至最终采样率0为不进行重采样": "후 처리를 통한 최종 샘플링률 재샘플링, 0은 재샘플링 미실행",
"否": "아니오",
"启用相位声码器": "페이즈 보코더 사용",
"响应阈值": "응답 임계값",
"响度因子": "음량 요소",
"处理数据": "데이터 처리",
"导出Onnx模型": "Onnx 모델 내보내기",
"导出文件格式": "내보내기 파일 형식",
"常见问题解答": "자주 묻는 질문 해결",
"常规设置": "일반 설정",
"开始音频转换": "오디오 변환 시작",
"性能设置": "성능 설정",
"批量推理": "일괄 추론",
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "일괄 변환, 변환 대기 중인 오디오 폴더를 입력하거나 여러 오디오 파일을 업로드하고 지정된 폴더(opt 기본값)에 변환된 오디오를 출력합니다.",
"指定输出主人声文件夹": "지정된 주인 목소리 출력 폴더",
"指定输出文件夹": "지정된 출력 폴더",
"指定输出非主人声文件夹": "지정된 비주인 목소리 출력 폴더",
"推理时间(ms):": "추론 시간(ms):",
"推理音色": "추론 음색",
"提取": "추출",
"提取音高和处理数据使用的CPU进程数": "음높이 추출 및 데이터 처리에 사용되는 CPU 프로세스 수 추출",
"是": "예",
"是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "모든 훈련 세트를 GPU 메모리에 캐시할지 여부. 10분 미만의 소량 데이터는 훈련 속도를 높이기 위해 캐시할 수 있지만, 대량 데이터를 캐시하면 메모리가 터지고 속도가 크게 향상되지 않을 수 있습니다.",
"查看": "보기",
"查看模型信息(仅支持weights文件夹下提取的小模型文件)": "모델 정보보기(작은 모델 파일로 추출된 weights 폴더에서만 지원)",
"检索特征占比": "특징 비율 검색",
"模型": "모델",
"模型推理": "모델 추론",
"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "모델 추출(로그 폴더에 대형 파일 모델 경로 입력), 반 훈련하고 싶지 않거나 모델이 자동으로 작은 파일 모델로 추출되지 않았거나 중간 모델을 테스트하려는 경우에 사용",
"模型是否带音高指导": "모델에 음높이 안내가 있는지 여부",
"模型是否带音高指导(唱歌一定要, 语音可以不要)": "모델에 음높이 안내가 있는지 여부(노래에는 필수, 음성은 선택 사항)",
"模型是否带音高指导,1是0否": "모델에 음높이 안내가 있는지 여부, 1이면 있음 0이면 없음",
"模型版本型号": "모델 버전 및 모델 번호",
"模型融合, 可用于测试音色融合": "모델 통합, 음색 통합 테스트에 사용 가능",
"模型路径": "모델 경로",
"淡入淡出长度": "페이드 인/아웃 길이",
"版本": "버전",
"特征提取": "특성 추출",
"特征检索库文件路径,为空则使用下拉的选择结果": "특성 검색 라이브러리 파일 경로, 비어 있으면 드롭다운 선택 결과 사용",
"男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "남성을 여성으로 추천 +12키, 여성을 남성으로 추천 -12키, 음역 폭발로 음색이 왜곡되면 적절한 음역으로 직접 조절 가능",
"目标采样率": "목표 샘플링률",
"算法延迟(ms):": "알고리즘 지연 시간(ms):",
"自动检测index路径,下拉式选择(dropdown)": "자동으로 index 경로 감지, 드롭다운 선택",
"融合": "융합",
"要改的模型信息": "수정할 모델 정보",
"要置入的模型信息": "삽입할 모델 정보",
"训练": "훈련",
"训练模型": "모델 훈련",
"训练特征索引": "특성 인덱스 훈련",
"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "훈련 종료, 콘솔 훈련 로그 또는 실험 폴더의 train.log를 확인할 수 있습니다",
"请指定说话人id": "화자 ID 지정",
"请选择index文件": "index 파일 선택",
"请选择pth文件": "pth 파일 선택",
"请选择说话人id": "화자 ID 선택",
"转换": "변환",
"输入实验名": "실험명 입력",
"输入待处理音频文件夹路径": "처리 대기 중인 오디오 폴더 경로 입력",
"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "처리 대기 중인 오디오 폴더 경로 입력(파일 관리자 주소 표시 줄에서 복사하면 됨)",
"输入待处理音频文件路径(默认是正确格式示例)": "처리 대기 중인 오디오 파일 경로 입력(기본적으로 올바른 형식의 예제)",
"输入源音量包络替换输出音量包络融合比例越靠近1越使用输出包络": "소스 음량 에너벌롭을 입력하여 출력 음량 에너벌롭 합성 비율을 대체하면 1에 가까울수록 출력 에너벌롭 사용",
"输入监听": "입력 모니터링",
"输入训练文件夹路径": "훈련 폴더 경로 입력",
"输入设备": "입력 장치",
"输入降噪": "노이즈 감소 입력",
"输出信息": "출력 정보",
"输出变声": "음성 출력",
"输出设备": "출력 장치",
"输出降噪": "노이즈 감소 출력",
"输出音频(右下角三个点,点了可以下载)": "출력 오디오(우하단 세 점, 클릭하면 다운로드 가능)",
"选择.index文件": "index 파일 선택",
"选择.pth文件": "pth 파일 선택",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "음높이 추출 알고리즘 선택, 노래 입력에 pm 사용 가능, harvest는 저음이 좋지만 매우 느림, crepe 효과는 좋지만 GPU 사용",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "음높이 추출 알고리즘 선택, 노래 입력에 pm 사용 가능, harvest는 저음이 좋지만 매우 느림, crepe 효과는 좋지만 GPU 사용, rmvpe 효과가 가장 좋으며 약간의 GPU 사용",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "음높이 추출 알고리즘 선택: 노래 입력에 pm 사용 가능, 고품질 음성이지만 CPU가 낮음, dio 사용 가능, harvest 품질이 더 좋지만 느림, rmvpe 효과가 최고이며 CPU/GPU 약간 사용",
"采样率:": "샘플링률:",
"采样长度": "샘플링 길이",
"重载设备列表": "장치 목록 다시로드",
"音调设置": "음조 설정",
"音频设备(请使用同种类驱动)": "오디오 장치(동일한 유형의 드라이버 사용 권장)",
"音高算法": "음높이 알고리즘",
"额外推理时长": "추가 추론 시간"
}

View File

@ -1,4 +1,4 @@
import platform,os
import platform,os,traceback
import ffmpeg
import numpy as np
@ -9,12 +9,17 @@ def load_audio(file, sr):
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
file = clean_path(file) # guard against pasted paths carrying stray leading/trailing spaces, quotes or newlines
if os.path.exists(file) == False:
raise RuntimeError(
"You input a wrong audio path that does not exists, please fix it!"
)
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr)
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except Exception as e:
traceback.print_exc()
raise RuntimeError(f"Failed to load audio: {e}")
return np.frombuffer(out, np.float32).flatten()
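Note (not part of this commit): the hunk only shows the changed lines, so the enclosing try block is easy to misread. A sketch of how the patched load_audio reads as a whole; the clean_path stand-in below is a simplified assumption, the real helper lives in the same module.

import os
import traceback
import ffmpeg
import numpy as np

def clean_path(path_str):
    # simplified stand-in for tools.my_utils.clean_path
    return path_str.strip(" ").strip('"').strip("\n").strip('"').strip(" ")

def load_audio(file, sr):
    try:
        file = clean_path(file)  # strip stray spaces/quotes/newlines pasted with the path
        if not os.path.exists(file):
            raise RuntimeError("You input a wrong audio path that does not exists, please fix it!")
        # decode with the ffmpeg CLI, down-mixing to mono and resampling to sr
        out, _ = (
            ffmpeg.input(file, threads=0)
            .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr)
            .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
        )
    except Exception as e:
        traceback.print_exc()
        raise RuntimeError(f"Failed to load audio: {e}")
    return np.frombuffer(out, np.float32).flatten()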

View File

@ -35,7 +35,7 @@ def slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_
if(tmp_max>1):chunk/=tmp_max
chunk = (chunk / tmp_max * (_max * alpha)) + (1 - alpha) * chunk
wavfile.write(
"%s/%s_%s_%s.wav" % (opt_root, name, start, end),
"%s/%s_%010d_%010d.wav" % (opt_root, name, start, end),
32000,
# chunk.astype(np.float32),
(chunk * 32767).astype(np.int16),
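Note (not part of this commit): the only change here is the file-name template, from "%s_%s_%s.wav" to "%s_%010d_%010d.wav". Zero-padding the start/end sample offsets keeps plain lexicographic sorting in chronological order, which matters when the slices are later listed with os.listdir(). A minimal illustration:

# unpadded offsets sort lexicographically, not numerically
old = ["vocal_9_1000.wav", "vocal_10_2000.wav"]
new = ["vocal_%010d_%010d.wav" % (9, 1000), "vocal_%010d_%010d.wav" % (10, 2000)]
print(sorted(old))  # ['vocal_10_2000.wav', 'vocal_9_1000.wav'] -> out of order
print(sorted(new))  # zero-padded names sort by true start offset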

View File

@ -43,8 +43,8 @@ def wave_to_spectrogram(
wave_left = np.asfortranarray(wave[0])
wave_right = np.asfortranarray(wave[1])
spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length)
spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
spec_left = librosa.stft(wave_left, n_fft=n_fft, hop_length=hop_length)
spec_right = librosa.stft(wave_right, n_fft=n_fft, hop_length=hop_length)
spec = np.asfortranarray([spec_left, spec_right])
@ -78,7 +78,7 @@ def wave_to_spectrogram_mt(
kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length},
)
thread.start()
spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
spec_right = librosa.stft(wave_right, n_fft=n_fft, hop_length=hop_length)
thread.join()
spec = np.asfortranarray([spec_left, spec_right])
@ -230,27 +230,31 @@ def cache_or_load(mix_path, inst_path, mp):
if d == len(mp.param["band"]): # high-end band
X_wave[d], _ = librosa.load(
mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"]
mix_path,
sr = bp["sr"],
mono = False,
dtype = np.float32,
res_type = bp["res_type"]
)
y_wave[d], _ = librosa.load(
inst_path,
bp["sr"],
False,
dtype=np.float32,
res_type=bp["res_type"],
sr = bp["sr"],
mono = False,
dtype = np.float32,
res_type = bp["res_type"],
)
else: # lower bands
X_wave[d] = librosa.resample(
X_wave[d + 1],
mp.param["band"][d + 1]["sr"],
bp["sr"],
res_type=bp["res_type"],
orig_sr = mp.param["band"][d + 1]["sr"],
target_sr = bp["sr"],
res_type = bp["res_type"],
)
y_wave[d] = librosa.resample(
y_wave[d + 1],
mp.param["band"][d + 1]["sr"],
bp["sr"],
res_type=bp["res_type"],
orig_sr = mp.param["band"][d + 1]["sr"],
target_sr = bp["sr"],
res_type = bp["res_type"],
)
X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d])
@ -401,9 +405,9 @@ def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None):
mp.param["mid_side_b2"],
mp.param["reverse"],
),
bp["sr"],
sr,
res_type="sinc_fastest",
orig_sr = bp["sr"],
target_sr = sr,
res_type = "sinc_fastest",
)
else: # mid
spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
@ -418,8 +422,8 @@ def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None):
mp.param["reverse"],
),
)
# wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest")
wave = librosa.core.resample(wave2, bp["sr"], sr, res_type="scipy")
# wave = librosa.core.resample(wave2, orig_sr=bp['sr'], target_sr=sr, res_type="sinc_fastest")
wave = librosa.core.resample(wave2, orig_sr=bp["sr"], target_sr=sr, res_type="scipy")
return wave.T
@ -506,8 +510,8 @@ def ensembling(a, specs):
def stft(wave, nfft, hl):
wave_left = np.asfortranarray(wave[0])
wave_right = np.asfortranarray(wave[1])
spec_left = librosa.stft(wave_left, nfft, hop_length=hl)
spec_right = librosa.stft(wave_right, nfft, hop_length=hl)
spec_left = librosa.stft(wave_left, n_fft=nfft, hop_length=hl)
spec_right = librosa.stft(wave_right, n_fft=nfft, hop_length=hl)
spec = np.asfortranarray([spec_left, spec_right])
return spec
@ -569,10 +573,10 @@ if __name__ == "__main__":
if d == len(mp.param["band"]): # high-end band
wave[d], _ = librosa.load(
args.input[i],
bp["sr"],
False,
dtype=np.float32,
res_type=bp["res_type"],
sr = bp["sr"],
mono = False,
dtype = np.float32,
res_type = bp["res_type"],
)
if len(wave[d].shape) == 1: # mono to stereo
@ -580,9 +584,9 @@ if __name__ == "__main__":
else: # lower bands
wave[d] = librosa.resample(
wave[d + 1],
mp.param["band"][d + 1]["sr"],
bp["sr"],
res_type=bp["res_type"],
orig_sr = mp.param["band"][d + 1]["sr"],
target_sr = bp["sr"],
res_type = bp["res_type"],
)
spec[d] = wave_to_spectrogram(
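Note (not part of this commit): every hunk in this file is the same migration. librosa 0.9 deprecated the old positional signatures and 0.10 removed them, so sr/mono (librosa.load), n_fft (librosa.stft) and orig_sr/target_sr (librosa.resample) are now passed by keyword. A minimal before/after sketch on a synthetic signal:

import numpy as np
import librosa

y = np.random.randn(44100).astype(np.float32)  # one second of synthetic audio at 44.1 kHz
# old positional style, removed in librosa >= 0.10:
#   librosa.resample(y, 44100, 32000); librosa.stft(y, 2048, hop_length=512)
y_32k = librosa.resample(y, orig_sr=44100, target_sr=32000)
spec = librosa.stft(y, n_fft=2048, hop_length=512)
print(y_32k.shape, spec.shape)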

View File

@ -239,7 +239,7 @@ class Predictor:
class MDXNetDereverb:
def __init__(self, chunks, device):
def __init__(self, chunks):
self.onnx = "%s/uvr5_weights/onnx_dereverb_By_FoxJoy"%os.path.dirname(os.path.abspath(__file__))
self.shifts = 10 # 'Predict with randomised equivariant stabilisation'
self.mixing = "min_mag" # ['default','min_mag','max_mag']
@ -250,7 +250,7 @@ class MDXNetDereverb:
self.n_fft = 6144
self.denoise = True
self.pred = Predictor(self)
self.device = device
self.device = cpu
def _path_audio_(self, input, vocal_root, others_root, format, is_hp3=False):
def _path_audio_(self, input, others_root, vocal_root, format, is_hp3=False):
self.pred.prediction(input, vocal_root, others_root, format)
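Note (not part of this commit): the constructor loses its device argument (self.device is now pinned to what is presumably a module-level CPU handle) and _path_audio_ swaps the others_root/vocal_root parameter order, matching the updated call in tools/uvr5/webui.py below. A call-site sketch with placeholder paths; the bare import assumes the tools/uvr5 directory is on sys.path, as webui.py arranges.

from mdxnet import MDXNetDereverb

dereverb = MDXNetDereverb(15)                    # chunks only; no device argument anymore
dereverb._path_audio_("input/song.wav",          # source file
                      "output/uvr5_opt/others",  # others_root comes first in the new order
                      "output/uvr5_opt/vocal",   # vocal_root
                      "wav")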

2
tools/uvr5/uvr5_weights/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
*
!.gitignore

View File

@ -61,19 +61,19 @@ class AudioPre:
_,
) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug应该上ffmpeg读取但是太麻烦了弃坑
music_file,
bp["sr"],
False,
dtype=np.float32,
res_type=bp["res_type"],
sr = bp["sr"],
mono = False,
dtype = np.float32,
res_type = bp["res_type"],
)
if X_wave[d].ndim == 1:
X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
else: # lower bands
X_wave[d] = librosa.core.resample(
X_wave[d + 1],
self.mp.param["band"][d + 1]["sr"],
bp["sr"],
res_type=bp["res_type"],
orig_sr = self.mp.param["band"][d + 1]["sr"],
target_sr = bp["sr"],
res_type = bp["res_type"],
)
# Stft of wave source
X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
@ -110,6 +110,9 @@ class AudioPre:
y_spec_m = pred * X_phase
v_spec_m = X_spec_m - y_spec_m
if is_hp3 == True:
ins_root,vocal_root = vocal_root,ins_root
if ins_root is not None:
if self.data["high_end_process"].startswith("mirroring"):
input_high_end_ = spec_utils.mirroring(
@ -242,19 +245,19 @@ class AudioPreDeEcho:
_,
) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug应该上ffmpeg读取但是太麻烦了弃坑
music_file,
bp["sr"],
False,
dtype=np.float32,
res_type=bp["res_type"],
sr = bp["sr"],
mono = False,
dtype = np.float32,
res_type = bp["res_type"],
)
if X_wave[d].ndim == 1:
X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
else: # lower bands
X_wave[d] = librosa.core.resample(
X_wave[d + 1],
self.mp.param["band"][d + 1]["sr"],
bp["sr"],
res_type=bp["res_type"],
orig_sr = self.mp.param["band"][d + 1]["sr"],
target_sr = bp["sr"],
res_type = bp["res_type"],
)
# Stft of wave source
X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(

View File

@ -5,7 +5,7 @@ from tools.i18n.i18n import I18nAuto
i18n = I18nAuto()
logger = logging.getLogger(__name__)
import librosa
import librosa,ffmpeg
import soundfile as sf
import torch
import sys
@ -19,7 +19,7 @@ for name in os.listdir(weight_uvr5_root):
uvr5_names.append(name.replace(".pth", ""))
device=sys.argv[1]
is_half=sys.argv[2]
is_half=eval(sys.argv[2])
webui_port_uvr5=int(sys.argv[3])
is_share=eval(sys.argv[4])
@ -33,19 +33,17 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format
save_root_ins = (
save_root_ins.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
)
is_hp3 = "HP3" in model_name
if model_name == "onnx_dereverb_By_FoxJoy":
pre_fun = MDXNetDereverb(15, device)
pre_fun = MDXNetDereverb(15)
else:
func = AudioPre if "DeEcho" not in model_name else AudioPreDeEcho
pre_fun = func(
agg=int(agg),
model_path=os.path.join(
weight_uvr5_root, model_name + ".pth"
),
model_path=os.path.join(weight_uvr5_root, model_name + ".pth"),
device=device,
is_half=is_half,
)
is_hp3 = "HP3" in model_name
if inp_root != "":
paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)]
else:
@ -53,20 +51,19 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format
for path in paths:
inp_path = os.path.join(inp_root, path)
if(os.path.isfile(inp_path)==False):continue
try:
need_reformat = 1
done = 0
try:
y, sr = librosa.load(inp_path, sr=None)
info = sf.info(inp_path)
channels = info.channels
if channels == 2 and sr == 44100:
info = ffmpeg.probe(inp_path, cmd="ffprobe")
if (
info["streams"][0]["channels"] == 2
and info["streams"][0]["sample_rate"] == "44100"
):
need_reformat = 0
pre_fun._path_audio_(
inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3
inp_path, save_root_ins, save_root_vocal, format0,is_hp3
)
done = 1
else:
need_reformat = 1
except:
need_reformat = 1
traceback.print_exc()
@ -75,21 +72,15 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format
os.path.join(os.environ["TEMP"]),
os.path.basename(inp_path),
)
y_resampled = librosa.resample(y, sr, 44100)
sf.write(tmp_path, y_resampled, 44100, "PCM_16")
os.system(
"ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y"
% (inp_path, tmp_path)
)
inp_path = tmp_path
try:
if done == 0:
pre_fun._path_audio_(
inp_path, save_root_ins, save_root_vocal, format0
)
infos.append("%s->Success" % (os.path.basename(inp_path)))
yield "\n".join(infos)
except:
try:
if done == 0:
pre_fun._path_audio_(
inp_path, save_root_ins, save_root_vocal, format0
inp_path, save_root_ins, save_root_vocal, format0,is_hp3
)
infos.append("%s->Success" % (os.path.basename(inp_path)))
yield "\n".join(infos)
@ -98,9 +89,6 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format
"%s->%s" % (os.path.basename(inp_path), traceback.format_exc())
)
yield "\n".join(infos)
except:
infos.append("Oh my god. %s->%s"%(os.path.basename(inp_path), traceback.format_exc()))
yield "\n".join(infos)
except:
infos.append(traceback.format_exc())
yield "\n".join(infos)
@ -114,13 +102,12 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format
del pre_fun
except:
traceback.print_exc()
print("clean_empty_cache")
if torch.cuda.is_available():
torch.cuda.empty_cache()
logger.info("Executed torch.cuda.empty_cache()")
yield "\n".join(infos)
with gr.Blocks(title="RVC WebUI") as app:
with gr.Blocks(title="UVR5 WebUI") as app:
gr.Markdown(
value=
i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.")

158
webui.py
View File

@ -1,6 +1,6 @@
import os,shutil,sys,pdb,re
now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.insert(0, now_dir)
import json,yaml,warnings,torch
import platform
import psutil
@ -16,7 +16,11 @@ if(os.path.exists(tmp)):
if(name=="jieba.cache"):continue
path="%s/%s"%(tmp,name)
delete=os.remove if os.path.isfile(path) else shutil.rmtree
try:
delete(path)
except Exception as e:
print(str(e))
pass
import site
site_packages_roots = []
for path in site.getsitepackages():
@ -25,13 +29,18 @@ for path in site.getsitepackages():
if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages" % now_dir]
#os.environ["OPENBLAS_NUM_THREADS"] = "4"
os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
os.environ["all_proxy"] = ""
for site_packages_root in site_packages_roots:
if os.path.exists(site_packages_root):
try:
with open("%s/users.pth" % (site_packages_root), "w") as f:
f.write(
"%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5"
% (now_dir, now_dir, now_dir, now_dir, now_dir)
)
break
except PermissionError:
pass
from tools import my_utils
import traceback
import shutil
@ -46,7 +55,7 @@ from scipy.io import wavfile
from tools.my_utils import load_audio
from multiprocessing import cpu_count
os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 当遇到mps不支持的步骤时使用cpu
# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 当遇到mps不支持的步骤时使用cpu
n_cpu=cpu_count()
@ -59,23 +68,24 @@ if_gpu_ok = False
if torch.cuda.is_available() or ngpu != 0:
for i in range(ngpu):
gpu_name = torch.cuda.get_device_name(i)
if any(value in gpu_name.upper()for value in ["10","16","20","30","40","A2","A3","A4","P4","A50","500","A60","70","80","90","M4","T4","TITAN","L","4060"]):
if any(value in gpu_name.upper()for value in ["10","16","20","30","40","A2","A3","A4","P4","A50","500","A60","70","80","90","M4","T4","TITAN","L4","4060"]):
# A10#A100#V100#A40#P40#M40#K80#A4500
if_gpu_ok = True # at least one usable NVIDIA GPU
gpu_infos.append("%s\t%s" % (i, gpu_name))
mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4))
# 判断是否支持mps加速
if torch.backends.mps.is_available():
if_gpu_ok = True
gpu_infos.append("%s\t%s" % ("0", "Apple GPU"))
mem.append(psutil.virtual_memory().total/ 1024 / 1024 / 1024) # 实测使用系统内存作为显存不会爆显存
# # 判断是否支持mps加速
# if torch.backends.mps.is_available():
# if_gpu_ok = True
# gpu_infos.append("%s\t%s" % ("0", "Apple GPU"))
# mem.append(psutil.virtual_memory().total/ 1024 / 1024 / 1024) # 实测使用系统内存作为显存不会爆显存
if if_gpu_ok and len(gpu_infos) > 0:
gpu_info = "\n".join(gpu_infos)
default_batch_size = min(mem) // 2
else:
gpu_info = i18n("很遗憾您这没有能用的显卡来支持您训练")
default_batch_size = 1
gpu_info = ("%s\t%s" % ("0", "CPU"))
gpu_infos.append("%s\t%s" % ("0", "CPU"))
default_batch_size = psutil.virtual_memory().total/ 1024 / 1024 / 1024 / 2
gpus = "-".join([i[0] for i in gpu_infos])
pretrained_sovits_name="GPT_SoVITS/pretrained_models/s2G488k.pth"
@ -108,6 +118,7 @@ def change_choices():
p_label=None
p_uvr5=None
p_asr=None
p_denoise=None
p_tts_inference=None
def kill_proc_tree(pid, including_parent=True):
@ -141,6 +152,7 @@ def kill_process(pid):
def change_label(if_label,path_list):
global p_label
if(if_label==True and p_label==None):
path_list=my_utils.clean_path(path_list)
cmd = '"%s" tools/subfix_webui.py --load_list "%s" --webui_port %s --is_share %s'%(python_exec,path_list,webui_port_subfix,is_share)
yield i18n("打标工具WebUI已开启")
print(cmd)
@ -182,19 +194,27 @@ def change_tts_inference(if_tts,bert_path,cnhubert_base_path,gpu_number,gpt_path
p_tts_inference=None
yield i18n("TTS推理进程已关闭")
def open_asr(asr_inp_dir):
from tools.asr.config import asr_dict
def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang):
global p_asr
if(p_asr==None):
cmd = '"%s" tools/damo_asr/cmd-asr.py "%s"'%(python_exec,asr_inp_dir)
asr_inp_dir=my_utils.clean_path(asr_inp_dir)
cmd = f'"{python_exec}" tools/asr/{asr_dict[asr_model]["path"]}'
cmd += f' -i "{asr_inp_dir}"'
cmd += f' -o "{asr_opt_dir}"'
cmd += f' -s {asr_model_size}'
cmd += f' -l {asr_lang}'
cmd += " -p %s"%("float16"if is_half==True else "float32")
yield "ASR任务开启%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True}
print(cmd)
p_asr = Popen(cmd, shell=True)
p_asr.wait()
p_asr=None
yield "ASR任务完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
yield f"ASR任务完成, 查看终端进行下一步",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
else:
yield "已有正在进行的ASR任务需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True}
# return None
def close_asr():
global p_asr
@ -202,6 +222,29 @@ def close_asr():
kill_process(p_asr.pid)
p_asr=None
return "已终止ASR进程",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
def open_denoise(denoise_inp_dir, denoise_opt_dir):
global p_denoise
if(p_denoise==None):
denoise_inp_dir=my_utils.clean_path(denoise_inp_dir)
denoise_opt_dir=my_utils.clean_path(denoise_opt_dir)
cmd = '"%s" tools/cmd-denoise.py -i "%s" -o "%s" -p %s'%(python_exec,denoise_inp_dir,denoise_opt_dir,"float16"if is_half==True else "float32")
yield "语音降噪任务开启:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True}
print(cmd)
p_denoise = Popen(cmd, shell=True)
p_denoise.wait()
p_denoise=None
yield f"语音降噪任务完成, 查看终端进行下一步",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
else:
yield "已有正在进行的语音降噪任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True}
# return None
def close_denoise():
global p_denoise
if(p_denoise!=None):
kill_process(p_denoise.pid)
p_denoise=None
return "已终止语音降噪进程",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
p_train_SoVITS=None
def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D):
@ -212,6 +255,9 @@ def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_s
data=json.loads(data)
s2_dir="%s/%s"%(exp_root,exp_name)
os.makedirs("%s/logs_s2"%(s2_dir),exist_ok=True)
if(is_half==False):
data["train"]["fp16_run"]=False
batch_size=max(1,batch_size//2)
data["train"]["batch_size"]=batch_size
data["train"]["epochs"]=total_epoch
data["train"]["text_low_lr_rate"]=text_low_lr_rate
@ -224,7 +270,7 @@ def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_s
data["data"]["exp_dir"]=data["s2_ckpt_dir"]=s2_dir
data["save_weight_dir"]=SoVITS_weight_root
data["name"]=exp_name
tmp_config_path="TEMP/tmp_s2.json"
tmp_config_path="%s/tmp_s2.json"%tmp
with open(tmp_config_path,"w")as f:f.write(json.dumps(data))
cmd = '"%s" GPT_SoVITS/s2_train.py --config "%s"'%(python_exec,tmp_config_path)
@ -245,7 +291,7 @@ def close1Ba():
return "已终止SoVITS训练",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
p_train_GPT=None
def open1Bb(batch_size,total_epoch,exp_name,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers,pretrained_s1):
def open1Bb(batch_size,total_epoch,exp_name,if_dpo,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers,pretrained_s1):
global p_train_GPT
if(p_train_GPT==None):
with open("GPT_SoVITS/configs/s1longer.yaml")as f:
@ -253,12 +299,16 @@ def open1Bb(batch_size,total_epoch,exp_name,if_save_latest,if_save_every_weights
data=yaml.load(data, Loader=yaml.FullLoader)
s1_dir="%s/%s"%(exp_root,exp_name)
os.makedirs("%s/logs_s1"%(s1_dir),exist_ok=True)
if(is_half==False):
data["train"]["precision"]="32"
batch_size = max(1, batch_size // 2)
data["train"]["batch_size"]=batch_size
data["train"]["epochs"]=total_epoch
data["pretrained_s1"]=pretrained_s1
data["train"]["save_every_n_epoch"]=save_every_epoch
data["train"]["if_save_every_weights"]=if_save_every_weights
data["train"]["if_save_latest"]=if_save_latest
data["train"]["if_dpo"]=if_dpo
data["train"]["half_weights_save_dir"]=GPT_weight_root
data["train"]["exp_name"]=exp_name
data["train_semantic_path"]="%s/6-name2semantic.tsv"%s1_dir
@ -267,7 +317,7 @@ def open1Bb(batch_size,total_epoch,exp_name,if_save_latest,if_save_every_weights
os.environ["_CUDA_VISIBLE_DEVICES"]=gpu_numbers.replace("-",",")
os.environ["hz"]="25hz"
tmp_config_path="TEMP/tmp_s1.yaml"
tmp_config_path="%s/tmp_s1.yaml"%tmp
with open(tmp_config_path, "w") as f:f.write(yaml.dump(data, default_flow_style=False))
# cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" --train_semantic_path "%s/6-name2semantic.tsv" --train_phoneme_path "%s/2-name2text.txt" --output_dir "%s/logs_s1"'%(python_exec,tmp_config_path,s1_dir,s1_dir,s1_dir)
cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" '%(python_exec,tmp_config_path)
@ -328,6 +378,8 @@ def close_slice():
ps1a=[]
def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir):
global ps1a
inp_text = my_utils.clean_path(inp_text)
inp_wav_dir = my_utils.clean_path(inp_wav_dir)
if (ps1a == []):
opt_dir="%s/%s"%(exp_root,exp_name)
config={
@ -348,7 +400,7 @@ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir):
"is_half": str(is_half)
}
)
os.environ.update(config)#
os.environ.update(config)
cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py'%python_exec
print(cmd)
p = Popen(cmd, shell=True)
@ -384,6 +436,8 @@ def close1a():
ps1b=[]
def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir):
global ps1b
inp_text = my_utils.clean_path(inp_text)
inp_wav_dir = my_utils.clean_path(inp_wav_dir)
if (ps1b == []):
config={
"inp_text":inp_text,
@ -430,6 +484,7 @@ def close1b():
ps1c=[]
def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path):
global ps1c
inp_text = my_utils.clean_path(inp_text)
if (ps1c == []):
opt_dir="%s/%s"%(exp_root,exp_name)
config={
@ -486,6 +541,8 @@ def close1c():
ps1abc=[]
def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,ssl_pretrained_dir,pretrained_s2G_path):
global ps1abc
inp_text = my_utils.clean_path(inp_text)
inp_wav_dir = my_utils.clean_path(inp_wav_dir)
if (ps1abc == []):
opt_dir="%s/%s"%(exp_root,exp_name)
try:
@ -618,6 +675,11 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
value=
i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.")
)
gr.Markdown(
value=
i18n("中文教程文档https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e")
)
with gr.Tabs():
with gr.TabItem(i18n("0-前置数据集获取工具")):#提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标
gr.Markdown(value=i18n("0a-UVR5人声伴奏分离&去混响去延迟工具"))
@ -641,31 +703,78 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
alpha=gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("alpha_mix:混多少比例归一化后音频进来"),value=0.25,interactive=True)
n_process=gr.Slider(minimum=1,maximum=n_cpu,step=1,label=i18n("切割使用的进程数"),value=4,interactive=True)
slicer_info = gr.Textbox(label=i18n("语音切割进程输出信息"))
gr.Markdown(value=i18n("0bb-语音降噪工具"))
with gr.Row():
open_denoise_button = gr.Button(i18n("开启语音降噪"), variant="primary",visible=True)
close_denoise_button = gr.Button(i18n("终止语音降噪进程"), variant="primary",visible=False)
denoise_input_dir=gr.Textbox(label=i18n("降噪音频文件输入文件夹"),value="")
denoise_output_dir=gr.Textbox(label=i18n("降噪结果输出文件夹"),value="output/denoise_opt")
denoise_info = gr.Textbox(label=i18n("语音降噪进程输出信息"))
gr.Markdown(value=i18n("0c-中文批量离线ASR工具"))
with gr.Row():
open_asr_button = gr.Button(i18n("开启离线批量ASR"), variant="primary",visible=True)
close_asr_button = gr.Button(i18n("终止ASR进程"), variant="primary",visible=False)
with gr.Column():
with gr.Row():
asr_inp_dir = gr.Textbox(
label=i18n("批量ASR(中文only)输入文件夹路径"),
value="D:\\RVC1006\\GPT-SoVITS\\raw\\xxx",
label=i18n("输入文件夹路径"),
value="D:\\GPT-SoVITS\\raw\\xxx",
interactive=True,
)
asr_opt_dir = gr.Textbox(
label = i18n("输出文件夹路径"),
value = "output/asr_opt",
interactive = True,
)
with gr.Row():
asr_model = gr.Dropdown(
label = i18n("ASR 模型"),
choices = list(asr_dict.keys()),
interactive = True,
value="达摩 ASR (中文)"
)
asr_size = gr.Dropdown(
label = i18n("ASR 模型尺寸"),
choices = ["large"],
interactive = True,
value="large"
)
asr_lang = gr.Dropdown(
label = i18n("ASR 语言设置"),
choices = ["zh"],
interactive = True,
value="zh"
)
with gr.Row():
asr_info = gr.Textbox(label=i18n("ASR进程输出信息"))
def change_lang_choices(key): # narrow the language choices to what the selected model supports
# return gr.Dropdown(choices=asr_dict[key]['lang'])
return {"__type__": "update", "choices": asr_dict[key]['lang'],"value":asr_dict[key]['lang'][0]}
def change_size_choices(key): # narrow the model-size choices to what the selected model supports
# return gr.Dropdown(choices=asr_dict[key]['size'])
return {"__type__": "update", "choices": asr_dict[key]['size']}
asr_model.change(change_lang_choices, [asr_model], [asr_lang])
asr_model.change(change_size_choices, [asr_model], [asr_size])
gr.Markdown(value=i18n("0d-语音文本校对标注工具"))
with gr.Row():
if_label = gr.Checkbox(label=i18n("是否开启打标WebUI"),show_label=True)
path_list = gr.Textbox(
label=i18n("打标数据标注文件路径"),
label=i18n(".list标注文件的路径"),
value="D:\\RVC1006\\GPT-SoVITS\\raw\\xxx.list",
interactive=True,
)
label_info = gr.Textbox(label=i18n("打标工具进程输出信息"))
if_label.change(change_label, [if_label,path_list], [label_info])
if_uvr5.change(change_uvr5, [if_uvr5], [uvr5_info])
open_asr_button.click(open_asr, [asr_inp_dir], [asr_info,open_asr_button,close_asr_button])
open_asr_button.click(open_asr, [asr_inp_dir, asr_opt_dir, asr_model, asr_size, asr_lang], [asr_info,open_asr_button,close_asr_button])
close_asr_button.click(close_asr, [], [asr_info,open_asr_button,close_asr_button])
open_slicer_button.click(open_slice, [slice_inp_path,slice_opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_process], [slicer_info,open_slicer_button,close_slicer_button])
close_slicer_button.click(close_slice, [], [slicer_info,open_slicer_button,close_slicer_button])
open_denoise_button.click(open_denoise, [denoise_input_dir,denoise_output_dir], [denoise_info,open_denoise_button,close_denoise_button])
close_denoise_button.click(close_denoise, [], [denoise_info,open_denoise_button,close_denoise_button])
with gr.TabItem(i18n("1-GPT-SoVITS-TTS")):
with gr.Row():
exp_name = gr.Textbox(label=i18n("*实验/模型名"), value="xxx", interactive=True)
@ -681,7 +790,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
label=i18n("*训练集音频文件目录"),
# value=r"D:\RVC1006\GPT-SoVITS\raw\xxx",
interactive=True,
placeholder=i18n("训练集音频文件目录-拼接-list文件里波形对应的文件名不是全路径")
placeholder=i18n("填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名不是全路径。如果留空则使用.list文件里的绝对全路径")
)
gr.Markdown(value=i18n("1Aa-文本内容"))
with gr.Row():
@ -734,6 +843,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
with gr.Row():
batch_size1Bb = gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True)
total_epoch1Bb = gr.Slider(minimum=2,maximum=50,step=1,label=i18n("总训练轮数total_epoch"),value=15,interactive=True)
if_dpo = gr.Checkbox(label=i18n("是否开启dpo训练选项(实验性)"), value=False, interactive=True, show_label=True)
if_save_latest1Bb = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True)
if_save_every_weights1Bb = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True)
save_every_epoch1Bb = gr.Slider(minimum=1,maximum=50,step=1,label=i18n("保存频率save_every_epoch"),value=5,interactive=True)
@ -744,7 +854,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
info1Bb=gr.Textbox(label=i18n("GPT训练进程输出信息"))
button1Ba_open.click(open1Ba, [batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D], [info1Ba,button1Ba_open,button1Ba_close])
button1Ba_close.click(close1Ba, [], [info1Ba,button1Ba_open,button1Ba_close])
button1Bb_open.click(open1Bb, [batch_size1Bb,total_epoch1Bb,exp_name,if_save_latest1Bb,if_save_every_weights1Bb,save_every_epoch1Bb,gpu_numbers1Bb,pretrained_s1], [info1Bb,button1Bb_open,button1Bb_close])
button1Bb_open.click(open1Bb, [batch_size1Bb,total_epoch1Bb,exp_name,if_dpo,if_save_latest1Bb,if_save_every_weights1Bb,save_every_epoch1Bb,gpu_numbers1Bb,pretrained_s1], [info1Bb,button1Bb_open,button1Bb_close])
button1Bb_close.click(close1Bb, [], [info1Bb,button1Bb_open,button1Bb_close])
with gr.TabItem(i18n("1C-推理")):
gr.Markdown(value=i18n("选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模体验5秒Zero Shot TTS用。"))