Mirror of https://github.com/RVC-Boss/GPT-SoVITS.git, synced 2026-01-11 04:46:57 +08:00

Commit bafd1bf632: Merge branch 'RVC-Boss:main' into main
@@ -854,6 +854,7 @@ class Text2SemanticDecoder(nn.Module):
             if idx == 0:
                 xy_attn_mask = None
+            if(idx<11):### require at least 10 predicted tokens before allowing a stop (0.4s)
                 logits = logits[:, :-1]
             samples = sample(
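A minimal sketch of what the added guard does, assuming (as the hunk implies) that the EOS token occupies the last logit column: slicing that column off for the first ten steps makes it impossible for sampling to select a stop, so at least ~0.4 s of semantic tokens exist first. Vocabulary size and step count below are illustrative, not the repo's values.

    import torch

    vocab_with_eos = 1025                          # illustrative: semantic tokens + EOS in the last slot
    for idx in range(20):
        logits = torch.randn(1, vocab_with_eos)
        if idx < 11:
            logits = logits[:, :-1]                # EOS column dropped: a stop cannot be sampled yet
        token = int(torch.argmax(logits, dim=-1))  # stand-in for the repo's sample()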
@@ -213,6 +213,10 @@ class TTS_Config:
             "cnhuhbert_base_path": self.cnhuhbert_base_path,
         }
         return self.config

+    def update_version(self, version:str)->None:
+        self.version = version
+        self.languages = self.v2_languages if self.version=="v2" else self.v1_languages
+
     def __str__(self):
         self.configs = self.update_configs()
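A hedged usage sketch of the new helper: unlike a bare assignment to .version, update_version() also refreshes the language whitelist that the API validation further below checks against. The import path follows api_v2.py's usage; the config path is a placeholder.

    from TTS_infer_pack.TTS import TTS_Config       # import path as used by api_v2.py

    cfg = TTS_Config("GPT_SoVITS/configs/tts_infer.yaml")  # placeholder config path
    cfg.update_version("v1")
    assert cfg.languages == cfg.v1_languages
    cfg.update_version("v2")
    assert cfg.languages == cfg.v2_languages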
@@ -300,13 +304,14 @@ class TTS:
     def init_vits_weights(self, weights_path: str):
         print(f"Loading VITS weights from {weights_path}")
         self.configs.vits_weights_path = weights_path
-        self.configs.save_configs()
         dict_s2 = torch.load(weights_path, map_location=self.configs.device)
         hps = dict_s2["config"]
         if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
-            self.configs.version = "v1"
+            self.configs.update_version("v1")
         else:
-            self.configs.version = "v2"
+            self.configs.update_version("v2")
+        self.configs.save_configs()

         hps["model"]["version"] = self.configs.version
         self.configs.filter_length = hps["data"]["filter_length"]
         self.configs.segment_size = hps["train"]["segment_size"]
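The heuristic above keys off the text-embedding table: the v1 symbol set has 322 entries, so the row count of enc_p.text_embedding.weight is enough to tell a v1 checkpoint from a v2 one. A standalone sketch (the function name is ours, not the repo's):

    import torch

    def detect_sovits_version(weights_path: str) -> str:
        dict_s2 = torch.load(weights_path, map_location="cpu")
        rows = dict_s2["weight"]["enc_p.text_embedding.weight"].shape[0]
        return "v1" if rows == 322 else "v2"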
@@ -13,7 +13,9 @@ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
 from module.commons import init_weights, get_padding
 from module.mrte_model import MRTE
 from module.quantize import ResidualVectorQuantizer
-from text import symbols
+# from text import symbols
+from text import symbols as symbols_v1
+from text import symbols2 as symbols_v2
 from torch.cuda.amp import autocast

@@ -182,6 +184,7 @@ class TextEncoder(nn.Module):
         kernel_size,
         p_dropout,
         latent_channels=192,
+        version="v2",
     ):
         super().__init__()
         self.out_channels = out_channels
@@ -192,6 +195,7 @@ class TextEncoder(nn.Module):
         self.kernel_size = kernel_size
         self.p_dropout = p_dropout
         self.latent_channels = latent_channels
+        self.version = version

         self.ssl_proj = nn.Conv1d(768, hidden_channels, 1)

@@ -207,6 +211,11 @@ class TextEncoder(nn.Module):
         self.encoder_text = attentions.Encoder(
             hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
         )

+        if self.version == "v1":
+            symbols = symbols_v1.symbols
+        else:
+            symbols = symbols_v2.symbols
         self.text_embedding = nn.Embedding(len(symbols), hidden_channels)

         self.mrte = MRTE()
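A sketch of why the selection matters: the embedding table is sized from whichever symbol list the version picks, so a v1 checkpoint (322 symbols) and a v2 checkpoint each load against matching row counts. The helper name and hidden size are illustrative only.

    from torch import nn
    from text import symbols as symbols_v1      # module imports, matching the hunk above
    from text import symbols2 as symbols_v2

    def build_text_embedding(version: str, hidden_channels: int = 192) -> nn.Embedding:
        table = symbols_v1.symbols if version == "v1" else symbols_v2.symbols
        return nn.Embedding(len(table), hidden_channels)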
@@ -817,6 +826,7 @@ class SynthesizerTrn(nn.Module):
         use_sdp=True,
         semantic_frame_rate=None,
         freeze_quantizer=None,
+        version="v2",
         **kwargs
     ):
         super().__init__()
@@ -837,6 +847,7 @@ class SynthesizerTrn(nn.Module):
         self.segment_size = segment_size
         self.n_speakers = n_speakers
         self.gin_channels = gin_channels
+        self.version = version

         self.use_sdp = use_sdp
         self.enc_p = TextEncoder(
@@ -847,6 +858,7 @@ class SynthesizerTrn(nn.Module):
             n_layers,
             kernel_size,
             p_dropout,
+            version=version,
         )
         self.dec = Generator(
             inter_channels,
@@ -871,9 +883,11 @@ class SynthesizerTrn(nn.Module):
             inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels
         )

-        self.ref_enc = modules.MelStyleEncoder(
-            spec_channels, style_vector_dim=gin_channels
-        )
+        # self.version=os.environ.get("version","v1")
+        if self.version == "v1":
+            self.ref_enc = modules.MelStyleEncoder(spec_channels, style_vector_dim=gin_channels)
+        else:
+            self.ref_enc = modules.MelStyleEncoder(704, style_vector_dim=gin_channels)

         ssl_dim = 768
         self.ssl_dim = ssl_dim
@@ -894,7 +908,10 @@ class SynthesizerTrn(nn.Module):

     def forward(self, codes, text, refer):
         refer_mask = torch.ones_like(refer[:1,:1,:])
-        ge = self.ref_enc(refer * refer_mask, refer_mask)
+        if (self.version == "v1"):
+            ge = self.ref_enc(refer * refer_mask, refer_mask)
+        else:
+            ge = self.ref_enc(refer[:, :704] * refer_mask, refer_mask)

         quantized = self.quantizer.decode(codes)
         if self.semantic_frame_rate == "25hz":
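Taken together with the constructor change above: v1 styles the full reference spectrogram, while v2 feeds a fixed 704-bin slice to MelStyleEncoder, so the style path no longer depends on spec_channels. A shape-only sketch with placeholder dimensions:

    import torch

    refer = torch.randn(1, 1025, 200)              # placeholder (batch, spec bins, frames)
    refer_mask = torch.ones_like(refer[:1, :1, :])
    for version in ("v1", "v2"):
        x = refer if version == "v1" else refer[:, :704]
        print(version, (x * refer_mask).shape)     # v2 -> torch.Size([1, 704, 200])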
@@ -1,11 +1,12 @@
-from module.models_onnx import SynthesizerTrn, symbols
+from module.models_onnx import SynthesizerTrn, symbols_v1, symbols_v2
 from AR.models.t2s_lightning_module_onnx import Text2SemanticLightningModule
 import torch
 import torchaudio
 from torch import nn
 from feature_extractor import cnhubert

-cnhubert_base_path = "pretrained_models/chinese-hubert-base"
-cnhubert.cnhubert_base_path=cnhubert_base_path
+cnhubert_base_path = "GPT_SoVITS/pretrained_models/chinese-hubert-base"
+cnhubert.cnhubert_base_path = cnhubert_base_path
 ssl_model = cnhubert.get_model()
 from text import cleaned_text_to_sequence
 import soundfile
@@ -196,6 +197,11 @@ class VitsModel(nn.Module):
         super().__init__()
         dict_s2 = torch.load(vits_path,map_location="cpu")
         self.hps = dict_s2["config"]
+        if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
+            self.hps["model"]["version"] = "v1"
+        else:
+            self.hps["model"]["version"] = "v2"
+
         self.hps = DictToAttrRecursive(self.hps)
         self.hps.model.semantic_frame_rate = "25hz"
         self.vq_model = SynthesizerTrn(
@@ -267,13 +273,13 @@ class SSLModel(nn.Module):
         return self.ssl.model(ref_audio_16k)["last_hidden_state"].transpose(1, 2)


-def export(vits_path, gpt_path, project_name):
+def export(vits_path, gpt_path, project_name, vits_model="v2"):
     vits = VitsModel(vits_path)
     gpt = T2SModel(gpt_path, vits)
     gpt_sovits = GptSoVits(vits, gpt)
     ssl = SSLModel()
-    ref_seq = torch.LongTensor([cleaned_text_to_sequence(["n", "i2", "h", "ao3", ",", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4"])])
-    text_seq = torch.LongTensor([cleaned_text_to_sequence(["w", "o3", "sh", "i4", "b", "ai2", "y", "e4", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4"])])
+    ref_seq = torch.LongTensor([cleaned_text_to_sequence(["n", "i2", "h", "ao3", ",", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4"],version=vits_model)])
+    text_seq = torch.LongTensor([cleaned_text_to_sequence(["w", "o3", "sh", "i4", "b", "ai2", "y", "e4", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4"],version=vits_model)])
     ref_bert = torch.randn((ref_seq.shape[1], 1024)).float()
     text_bert = torch.randn((text_seq.shape[1], 1024)).float()
     ref_audio = torch.randn((1, 48000 * 5)).float()
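A hedged call sketch for the widened signature; the checkpoint paths and project name are placeholders. Passing vits_model makes cleaned_text_to_sequence() use the symbol set that matches the SoVITS checkpoint being exported.

    export(
        vits_path="SoVITS_weights/my_voice.pth",   # placeholder checkpoint path
        gpt_path="GPT_weights/my_voice.ckpt",      # placeholder checkpoint path
        project_name="my_voice",
        vits_model="v1",                           # defaults to "v2"
    )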
@@ -287,34 +293,38 @@ def export(vits_path, gpt_path, project_name):
         pass

     ssl_content = ssl(ref_audio_16k).float()

-    debug = False
+    # debug = False
+    debug = True
+
+    # gpt_sovits.export(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content, project_name)
+
     if debug:
         a, b = gpt_sovits(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content, debug=debug)
         soundfile.write("out1.wav", a.cpu().detach().numpy(), vits.hps.data.sampling_rate)
         soundfile.write("out2.wav", b[0], vits.hps.data.sampling_rate)
-        return
-
-    a = gpt_sovits(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content).detach().cpu().numpy()
-
-    soundfile.write("out.wav", a, vits.hps.data.sampling_rate)
-
-    gpt_sovits.export(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content, project_name)
+    else:
+        a = gpt_sovits(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content).detach().cpu().numpy()
+        soundfile.write("out.wav", a, vits.hps.data.sampling_rate)
+
+    if vits_model == "v1":
+        symbols = symbols_v1
+    else:
+        symbols = symbols_v2

     MoeVSConf = {
-        "Folder" : f"{project_name}",
-        "Name" : f"{project_name}",
-        "Type" : "GPT-SoVits",
-        "Rate" : vits.hps.data.sampling_rate,
+        "Folder": f"{project_name}",
+        "Name": f"{project_name}",
+        "Type": "GPT-SoVits",
+        "Rate": vits.hps.data.sampling_rate,
         "NumLayers": gpt.t2s_model.num_layers,
         "EmbeddingDim": gpt.t2s_model.embedding_dim,
         "Dict": "BasicDict",
         "BertPath": "chinese-roberta-wwm-ext-large",
-        "Symbol": symbols,
-        "AddBlank": False
+        # "Symbol": symbols,
+        "AddBlank": False,
     }

     MoeVSConfJson = json.dumps(MoeVSConf)
     with open(f"onnx/{project_name}.json", 'w') as MoeVsConfFile:
         json.dump(MoeVSConf, MoeVsConfFile, indent = 4)
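For reference, with project_name="my_voice" the block above writes onnx/my_voice.json roughly as sketched below; Rate, NumLayers, and EmbeddingDim come from the loaded checkpoints, so the numbers here are placeholders only.

    # placeholder illustration of the resulting onnx/my_voice.json contents
    example_moevs_conf = {
        "Folder": "my_voice",
        "Name": "my_voice",
        "Type": "GPT-SoVits",
        "Rate": 32000,           # placeholder sampling rate
        "NumLayers": 24,         # placeholder
        "EmbeddingDim": 512,     # placeholder
        "Dict": "BasicDict",
        "BertPath": "chinese-roberta-wwm-ext-large",
        "AddBlank": False,
    }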
@@ -45021,4 +45021,6 @@
 黄冠野服: ['huang2', 'guan4', 'ye3', 'fu2']
 黄发台背: ['huang2', 'fa1', 'tai2', 'bei4']
 鼎铛玉石: ['ding3', 'cheng1', 'yu4', 'shi2']
 齿豁头童: ['chi3', 'huo1', 'tou2', 'tong2']
+牦牛: ['mao2', 'niu2']
+牦: ['mao2']
Binary file not shown.
@@ -184,8 +184,8 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.

 #### Integrated Package Users

-Double-click `go-webui.bat`or use `go-webui.ps`
-if you want to switch to V1,then double-click`go-webui-v1.bat` or use `go-webui-v1.ps`
+Double-click `go-webui.bat` or use `go-webui.ps1`
+If you want to switch to V1, double-click `go-webui-v1.bat` or use `go-webui-v1.ps1`

 #### Others
@@ -220,7 +220,7 @@ Or manually switch version in WebUI

 #### Integrated Package Users

-Double-click `go-webui-v2.bat` or use `go-webui-v2.ps`, then open the inference webui at `1-GPT-SoVITS-TTS/1C-inference`
+Double-click `go-webui-v2.bat` or use `go-webui-v2.ps1`, then open the inference webui at `1-GPT-SoVITS-TTS/1C-inference`

 #### Others
@@ -253,13 +253,13 @@ def check_params(req:dict):
     if (text_lang in [None, ""]) :
         return JSONResponse(status_code=400, content={"message": "text_lang is required"})
     elif text_lang.lower() not in tts_config.languages:
-        return JSONResponse(status_code=400, content={"message": "text_lang is not supported"})
+        return JSONResponse(status_code=400, content={"message": f"text_lang: {text_lang} is not supported in version {tts_config.version}"})
     if (prompt_lang in [None, ""]) :
         return JSONResponse(status_code=400, content={"message": "prompt_lang is required"})
     elif prompt_lang.lower() not in tts_config.languages:
-        return JSONResponse(status_code=400, content={"message": "prompt_lang is not supported"})
+        return JSONResponse(status_code=400, content={"message": f"prompt_lang: {prompt_lang} is not supported in version {tts_config.version}"})
     if media_type not in ["wav", "raw", "ogg", "aac"]:
-        return JSONResponse(status_code=400, content={"message": "media_type is not supported"})
+        return JSONResponse(status_code=400, content={"message": f"media_type: {media_type} is not supported"})
     elif media_type == "ogg" and not streaming_mode:
         return JSONResponse(status_code=400, content={"message": "ogg format is not supported in non-streaming mode"})
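A tiny sketch of the improved 400 responses: the body now names the offending value and the active config version. The version string below is illustrative.

    text_lang = "xx"                 # unsupported value, for illustration
    version = "v2"                   # stand-in for tts_config.version
    msg = f"text_lang: {text_lang} is not supported in version {version}"
    # -> "text_lang: xx is not supported in version v2"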
@@ -181,8 +181,8 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|zh|我爱玩原神。

 #### Integrated Package Users

-Double-click `go-webui.bat` or use `go-webui.ps`
-To use V1, double-click `go-webui-v1.bat` or use `go-webui-v1.ps`
+Double-click `go-webui.bat` or use `go-webui.ps1`
+To use V1, double-click `go-webui-v1.bat` or use `go-webui-v1.ps1`

 #### Others

@@ -217,7 +217,7 @@ python webui.py v1 <language(optional)>

 #### Integrated Package Users

-Double-click `go-webui.bat` or use `go-webui.ps`, then open the inference webUI at `1-GPT-SoVITS-TTS/1C-推理`
+Double-click `go-webui.bat` or use `go-webui.ps1`, then open the inference webUI at `1-GPT-SoVITS-TTS/1C-推理`

 #### Others
@@ -171,8 +171,8 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.

 #### Integrated Package Users

-Double-click `go-webui.bat` or use `go-webui.ps`.
-To switch to V1, double-click `go-webui-v1.bat` or use `go-webui-v1.ps`.
+Double-click `go-webui.bat` or use `go-webui.ps1`.
+To switch to V1, double-click `go-webui-v1.bat` or use `go-webui-v1.ps1`.

 #### Others

@@ -207,7 +207,7 @@ python webui.py v1 <language (optional)>

 #### Integrated Package Users

-Double-click `go-webui-v2.bat` or use `go-webui-v2.ps`, then open the inference webui at `1-GPT-SoVITS-TTS/1C-inference`.
+Double-click `go-webui-v2.bat` or use `go-webui-v2.ps1`, then open the inference webui at `1-GPT-SoVITS-TTS/1C-inference`.

 #### Others
@@ -175,8 +175,8 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.

 #### Integrated Package Users

-Double-click `go-webui.bat` or use `go-webui.ps`.
-To switch to V1, double-click `go-webui-v1.bat` or use `go-webui-v1.ps`.
+Double-click `go-webui.bat` or use `go-webui.ps1`.
+To switch to V1, double-click `go-webui-v1.bat` or use `go-webui-v1.ps1`.

 #### Others

@@ -211,7 +211,7 @@ python webui.py v1 <language (optional)>

 #### Integrated Package Users

-Double-click `go-webui-v2.bat` or use `go-webui-v2.ps`, then open the inference webui at `1-GPT-SoVITS-TTS/1C-inference`.
+Double-click `go-webui-v2.bat` or use `go-webui-v2.ps1`, then open the inference webui at `1-GPT-SoVITS-TTS/1C-inference`.

 #### Others
@@ -172,8 +172,8 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.

 #### Integrated Package Users

-Double-click `go-webui.bat` or use `go-webui.ps`.
-If you want to switch to V1, double-click `go-webui-v1.bat` or use `go-webui-v1.ps`.
+Double-click `go-webui.bat` or use `go-webui.ps1`.
+If you want to switch to V1, double-click `go-webui-v1.bat` or use `go-webui-v1.ps1`.

 #### Others

@@ -208,7 +208,7 @@ Or manually switch the version in the WebUI.

 #### Integrated Package Users

-Double-click `go-webui-v2.bat` or use `go-webui-v2.ps`, then open the inference webui at `1-GPT-SoVITS-TTS/1C-inference`.
+Double-click `go-webui-v2.bat` or use `go-webui-v2.ps1`, then open the inference webui at `1-GPT-SoVITS-TTS/1C-inference`.

 #### Others

@@ -330,4 +330,4 @@ python ./tools/asr/fasterwhisper_asr.py -i <input> -o <output> -l <language>

 <a href="https://github.com/RVC-Boss/GPT-SoVITS/graphs/contributors" target="_blank">
   <img src="https://contrib.rocks/image?repo=RVC-Boss/GPT-SoVITS" />
 </a>
@@ -33,3 +33,4 @@ ko_pron
 opencc; sys_platform != 'linux'
 opencc==1.1.1; sys_platform == 'linux'
 python_mecab_ko; sys_platform != 'win32'
+fastapi<0.112.2