Compare commits

13 Commits

Author SHA1 Message Date
Jacky He
4ad7a0d1c5
Merge 485cb85a552c5bd5b6387569da9b1727332e940e into c767f0b83b998e996a4d230d86da575a03f54a3f 2026-01-08 00:49:31 +01:00
ChasonJiang
c767f0b83b
Fix bug (#2704)
* Fix bug

* fallback and bug fix
2025-12-30 16:00:21 +08:00
ChasonJiang
9080a967d5
Fix sampling error (#2703) 2025-12-30 15:21:03 +08:00
sushistack
51df9f7384
Fix model file name in README instructions (#2700) 2025-12-25 16:44:21 +08:00
ChasonJiang
bfca0f6b2d
Align decoding strategy with naive_infer to prevent dropped sentences (#2697) 2025-12-19 17:37:19 +08:00
ChasonJiang
abe984395c
Align default gpt topk sampling parameter (#2696) 2025-12-19 16:05:36 +08:00
RVC-Boss
cc89c3660e
Update requirements.txt 2025-12-19 15:54:54 +08:00
Jacky He
485cb85a55 fix: determine whether the filename was input or only path is input 2025-08-12 14:03:15 +08:00
Jacky He
baf61b61d8 fix: wrong args name 2025-08-12 12:06:39 +08:00
Jacky He
7263f02706 feat: add optional params 2025-08-11 11:34:08 +08:00
Jacky He
013ed62a78 chore: add new supported language 2025-08-11 10:51:43 +08:00
Jacky He
035964ae78 refactor: better param input
change text_path to text direct input and add model smart selector
2025-08-11 10:33:17 +08:00
Jacky He
5285786b57 fix: when text and word2ph is empty, get_bert_feature will not crash but return an empty tensor 2025-08-09 18:26:25 +08:00
8 changed files with 150 additions and 62 deletions

View File

@ -707,10 +707,12 @@ class Text2SemanticDecoder(nn.Module):
            if idx == 0:
                attn_mask = F.pad(attn_mask[:, :, -1].unsqueeze(-2), (0, 1), value=False)
                logits = logits[:, :-1]
            else:
                attn_mask = F.pad(attn_mask, (0, 1), value=False)
            if idx < 11:  ### require at least 10 predicted tokens (~0.4s) before allowing the stop token
                logits = logits[:, :-1]
            samples = sample(
                logits, y, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, temperature=temperature
            )[0]
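
Note: the `idx < 11` guard keeps the sampler from emitting the stop token during the first ten decoding steps (roughly 0.4 s of audio). A minimal sketch of the same idea, assuming, as in the hunk above, that the stop token occupies the last column of `logits` (the vocabulary size here is hypothetical):

```python
import torch

def mask_early_stop(logits: torch.Tensor, idx: int, min_tokens: int = 10) -> torch.Tensor:
    """Drop the stop-token column until at least `min_tokens` tokens are decoded."""
    if idx < min_tokens + 1:
        return logits[:, :-1]  # stop token can no longer be sampled this step
    return logits

# Toy check: at idx=0 the stop column is removed, later it is kept.
logits = torch.randn(1, 1025)  # hypothetical vocab of 1024 tokens + stop
assert mask_early_stop(logits, idx=0).shape[-1] == 1024
assert mask_early_stop(logits, idx=20).shape[-1] == 1025
```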

View File

@ -1008,7 +1008,7 @@ class TTS:
"aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion
"prompt_text": "", # str.(optional) prompt text for the reference audio
"prompt_lang": "", # str.(required) language of the prompt text for the reference audio
"top_k": 5, # int. top k sampling
"top_k": 15, # int. top k sampling
"top_p": 1, # float. top p sampling
"temperature": 1, # float. temperature for sampling
"text_split_method": "cut1", # str. text split method, see text_segmentation_method.py for details.
@ -1039,7 +1039,7 @@ class TTS:
        aux_ref_audio_paths: list = inputs.get("aux_ref_audio_paths", [])
        prompt_text: str = inputs.get("prompt_text", "")
        prompt_lang: str = inputs.get("prompt_lang", "")
        top_k: int = inputs.get("top_k", 5)
        top_k: int = inputs.get("top_k", 15)
        top_p: float = inputs.get("top_p", 1)
        temperature: float = inputs.get("temperature", 1)
        text_split_method: str = inputs.get("text_split_method", "cut1")
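
Both hunks raise the default `top_k` from 5 to 15, matching the WebUI and API defaults changed later in this diff. For readers unfamiliar with the knob, a minimal top-k filter sketch (not the repo's `sample` implementation) showing that a larger k keeps more candidate tokens before sampling:

```python
import torch

def top_k_filter(logits: torch.Tensor, k: int) -> torch.Tensor:
    """Keep the k most likely tokens; mask the rest to -inf before softmax."""
    kth_best = torch.topk(logits, k, dim=-1).values[..., -1, None]
    return logits.masked_fill(logits < kth_best, float("-inf"))

logits = torch.randn(1, 100)
kept_5 = torch.isfinite(top_k_filter(logits, 5)).sum().item()
kept_15 = torch.isfinite(top_k_filter(logits, 15)).sum().item()
print(kept_5, kept_15)  # 5 candidates vs. 15 -- higher k samples more freely
```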

View File

@ -7,79 +7,163 @@ from GPT_SoVITS.inference_webui import change_gpt_weights, change_sovits_weights
i18n = I18nAuto()
LANGUAGE_CHOICES = ["中文", "英文", "日文", "韩文", "粤语"]
MIXED_LANGUAGE_CHOICES = ["中英混合", "日英混合", "粤英混合", "韩英混合", "多语种混合"]
def synthesize(
    GPT_model_path,
    SoVITS_model_path,
    ref_audio_path,
    ref_text_path,
    ref_language,
    target_text_path,
    target_language,
    output_path,
):
    # Read reference text
    with open(ref_text_path, "r", encoding="utf-8") as file:
        ref_text = file.read()
SLICE_METHOD_CHOICES = ["凑四句一切", "凑50字一切", "按标点符号切", "按中文句号。切", "按英文句号.切"]
    # Read target text
    with open(target_text_path, "r", encoding="utf-8") as file:
        target_text = file.read()
def synthesize(args: argparse.Namespace):
    # Change model weights
    change_gpt_weights(gpt_path=GPT_model_path)
    change_sovits_weights(sovits_path=SoVITS_model_path)
    change_gpt_weights(gpt_path=args.gpt_path)
    change_sovits_weights(sovits_path=args.sovits_path)
    params = {
        "ref_wav_path": args.ref_audio,
        "prompt_text": args.ref_text,
        "prompt_language": i18n(args.ref_language),
        "text": args.target_text,
        "text_language": i18n(args.target_language),
    }
    # region - optional params
    if args.slicer: params["how_to_cut"] = i18n(args.slicer)
    if args.top_k: params["top_k"] = args.top_k
    if args.top_p: params["top_p"] = args.top_p
    if args.temperature: params["temperature"] = args.temperature
    if args.ref_free: params["ref_free"] = args.ref_free
    if args.speed: params["speed"] = args.speed
    if args.if_freeze: params["if_freeze"] = args.if_freeze
    if args.inp_refs: params["inp_refs"] = args.inp_refs
    if args.sample_steps: params["sample_steps"] = args.sample_steps
    if args.if_sr: params["if_sr"] = args.if_sr
    if args.pause_second: params["pause_second"] = args.pause_second
    # endregion - optional params
    # Synthesize audio
    synthesis_result = get_tts_wav(
        ref_wav_path=ref_audio_path,
        prompt_text=ref_text,
        prompt_language=i18n(ref_language),
        text=target_text,
        text_language=i18n(target_language),
        top_p=1,
        temperature=1,
    )
    synthesis_result = get_tts_wav(**params)
    result_list = list(synthesis_result)
    if result_list:
        os.makedirs(args.output_path, exist_ok=True)  # Create output directory if it doesn't exist
        if args.output_path.endswith(".wav"):
            output_wav_path = args.output_path
        else:
            output_wav_path = os.path.join(args.output_path, "output.wav")
        last_sampling_rate, last_audio_data = result_list[-1]
        output_wav_path = os.path.join(output_path, "output.wav")
        sf.write(output_wav_path, last_audio_data, last_sampling_rate)
        print(f"Audio saved to {output_wav_path}")
def main():
def build_parser():
    parser = argparse.ArgumentParser(description="GPT-SoVITS Command Line Tool")
    parser.add_argument("--gpt_model", required=True, help="Path to the GPT model file")
    parser.add_argument("--sovits_model", required=True, help="Path to the SoVITS model file")
    # reference settings
    parser.add_argument("--ref_audio", required=True, help="Path to the reference audio file")
    parser.add_argument("--ref_text", required=True, help="Path to the reference text file")
    parser.add_argument(
        "--ref_language", required=True, choices=["中文", "英文", "日文"], help="Language of the reference audio"
    )
    parser.add_argument("--target_text", required=True, help="Path to the target text file")
    parser.add_argument(
        "--target_language",
        required=True,
        choices=["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"],
        help="Language of the target text",
    )
    parser.add_argument("--ref_text", required=True, help="Transcript of the reference audio")
    parser.add_argument("--ref_language", required=True,
                        choices=LANGUAGE_CHOICES, help="Language of the reference audio")
    # output settings
    parser.add_argument("--target_text", required=True, help="Text to be synthesized")
    parser.add_argument("--target_language", required=True,
                        choices=LANGUAGE_CHOICES + MIXED_LANGUAGE_CHOICES,
                        help="Language of the target text")
    parser.add_argument("--slicer", required=False,
                        choices=SLICE_METHOD_CHOICES, help="Slicer method")
    parser.add_argument("--output_path", required=True, help="Path to the output directory")
    # region - inference settings
    parser.add_argument("--top_k", required=False, type=int, help="Top-k value")
    parser.add_argument("--top_p", required=False, type=float, help="Top-p value")
    parser.add_argument("--temperature", required=False, type=float, help="Temperature value")
    parser.add_argument("--ref_free", required=False, type=bool, help="Reference free value")
    parser.add_argument("--speed", required=False, type=float, help="Speed value")
    parser.add_argument("--if_freeze", required=False, type=bool, help="If freeze value")
    parser.add_argument("--inp_refs", required=False, type=str, help="Input references")
    parser.add_argument("--sample_steps", required=False, type=int, help="Sample steps value")
    parser.add_argument("--if_sr", required=False, type=bool, help="If super resolution value")
    parser.add_argument("--pause_second", required=False, type=float, help="Pause second value")
    # endregion - inference settings
    # region - model selection
    sub = parser.add_subparsers(dest="mode", required=True)
    # Mode 1: provide model paths directly
    p_paths = sub.add_parser("paths", help="Use explicit model file paths")
    p_paths.add_argument("--gpt_path", required=True, help="Path to the GPT model file")
    p_paths.add_argument("--sovits_path", required=True, help="Path to the SoVITS model file")
    # Mode 2: select by experiment/version
    p_sel = sub.add_parser("select", help="Select models by experiment/version")
    p_sel.add_argument("--exp_name", required=True, help="Experiment name")
    available_gpt_versions = ["v1", "v2", "v2Pro", "v2ProPlus", "v3", "v4"]
    p_sel.add_argument("--gpt_version", required=True, choices=available_gpt_versions, help="Version of the GPT model")
    available_sovits_versions = ["v1", "v2", "v2Pro", "v2ProPlus", "v3", "v4"]
    p_sel.add_argument("--sovits_version", required=True, choices=available_sovits_versions, help="Version of the SoVITS model")
    p_sel.add_argument("--gpt_epoch", type=int, help="Epoch of the GPT model")
    p_sel.add_argument("--sovits_epoch", type=int, help="Epoch of the SoVITS model")
    # endregion - model selection
    return parser
def get_model_path(args) -> argparse.Namespace:
    """
    Get the model paths from exp_name, version and epoch.
    Args:
        args: argparse.Namespace
    Returns:
        args: argparse.Namespace
    """
    exist_gpt_path = []
    exist_sovits_path = []
    def _get_model_dir(model_type, version):
        if version == "v1":
            return f"{model_type}_weights"
        else:
            return f"{model_type}_weights_{version}"
    # collect all model paths that share the same exp_name
    for files in os.listdir(_get_model_dir("GPT", args.gpt_version)):
        if args.exp_name in files and files.endswith(".ckpt"):
            exist_gpt_path.append(os.path.join(_get_model_dir("GPT", args.gpt_version), files))
    for files in os.listdir(_get_model_dir("SoVITS", args.sovits_version)):
        if args.exp_name in files and files.endswith(".pth"):
            exist_sovits_path.append(os.path.join(_get_model_dir("SoVITS", args.sovits_version), files))
    # pick the matching epoch, or the largest epoch if none was specified
    if args.gpt_epoch:
        args.gpt_path = next((i for i in exist_gpt_path if f"e{args.gpt_epoch}" in i), None)
    else:
        args.gpt_path = sorted(exist_gpt_path)[-1] if exist_gpt_path else None
    if args.sovits_epoch:
        args.sovits_path = next((i for i in exist_sovits_path if f"e{args.sovits_epoch}" in i), None)
    else:
        args.sovits_path = sorted(exist_sovits_path)[-1] if exist_sovits_path else None
    if not args.gpt_path or not args.sovits_path:
        raise ValueError("No model found")
    return args
def main():
    parser = build_parser()
    args = parser.parse_args()
    synthesize(
        args.gpt_model,
        args.sovits_model,
        args.ref_audio,
        args.ref_text,
        args.ref_language,
        args.target_text,
        args.target_language,
        args.output_path,
    )
    print(args)
    if args.mode == "select":
        args = get_model_path(args)
    args.target_text = args.target_text.replace("'", "").replace('"', "")
    synthesize(args)
if __name__ == "__main__":
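
One caveat with the selector above: `sorted(exist_gpt_path)[-1]` orders checkpoint names lexicographically, so `e9` would sort after `e10`. A hedged sketch of a numeric fallback, assuming (as the selector does) that checkpoint names embed the epoch as `e<number>`, e.g. the hypothetical `exp-e10.ckpt`:

```python
import re

def latest_by_epoch(paths: list[str]) -> str | None:
    """Pick the checkpoint with the numerically largest e<number> tag."""
    def epoch_of(path: str) -> int:
        m = re.search(r"e(\d+)", path)
        return int(m.group(1)) if m else -1
    return max(paths, key=epoch_of, default=None)

print(latest_by_epoch(["exp-e9.ckpt", "exp-e10.ckpt"]))  # exp-e10.ckpt, not exp-e9.ckpt
```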

View File

@ -180,6 +180,8 @@ def get_bert_feature(text, word2ph):
    for i in range(len(word2ph)):
        repeat_feature = res[i].repeat(word2ph[i], 1)
        phone_level_feature.append(repeat_feature)
    if len(phone_level_feature) == 0:
        return torch.empty((res.shape[1], 0), dtype=res.dtype, device=res.device)
    phone_level_feature = torch.cat(phone_level_feature, dim=0)
    return phone_level_feature.T
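
This guard matters because `torch.cat` on an empty list raises a `RuntimeError`; returning a correctly shaped empty tensor keeps callers working when `text` and `word2ph` are empty, per the commit message. A minimal reproduction of the failure mode, with a hypothetical hidden dimension:

```python
import torch

res = torch.randn(0, 1024)  # hypothetical BERT features for empty text
phone_level_feature = []    # nothing to repeat when word2ph is empty

try:
    torch.cat(phone_level_feature, dim=0)
except RuntimeError as e:
    print("without the guard:", e)

# With the guard: an empty (hidden_dim, 0) tensor, matching phone_level_feature.T
empty = torch.empty((res.shape[1], 0), dtype=res.dtype, device=res.device)
print(empty.shape)  # torch.Size([1024, 0])
```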

View File

@ -385,7 +385,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css
                    minimum=0.6, maximum=1.65, step=0.05, label="语速", value=1.0, interactive=True
                )
                with gr.Row():
                    top_k = gr.Slider(minimum=1, maximum=100, step=1, label=i18n("top_k"), value=5, interactive=True)
                    top_k = gr.Slider(minimum=1, maximum=100, step=1, label=i18n("top_k"), value=15, interactive=True)
                    top_p = gr.Slider(minimum=0, maximum=1, step=0.05, label=i18n("top_p"), value=1, interactive=True)
                with gr.Row():
                    temperature = gr.Slider(

View File

@ -347,7 +347,7 @@ Use v4 from v1/v2/v3 environment:
2. Clone the latest code from GitHub.
3. Download v4 pretrained models (gsv-v4-pretrained/s2v4.ckpt, and gsv-v4-pretrained/vocoder.pth) from [huggingface](https://huggingface.co/lj1995/GPT-SoVITS/tree/main) and put them into `GPT_SoVITS/pretrained_models`.
3. Download v4 pretrained models (gsv-v4-pretrained/s2v4.pth, and gsv-v4-pretrained/vocoder.pth) from [huggingface](https://huggingface.co/lj1995/GPT-SoVITS/tree/main) and put them into `GPT_SoVITS/pretrained_models`.
## V2Pro Release Notes

View File

@ -27,7 +27,7 @@ POST:
"aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion
"prompt_text": "", # str.(optional) prompt text for the reference audio
"prompt_lang": "", # str.(required) language of the prompt text for the reference audio
"top_k": 5, # int. top k sampling
"top_k": 15, # int. top k sampling
"top_p": 1, # float. top p sampling
"temperature": 1, # float. temperature for sampling
"text_split_method": "cut5", # str. text split method, see text_segmentation_method.py for details.
@ -158,7 +158,7 @@ class TTS_Request(BaseModel):
    aux_ref_audio_paths: list = None
    prompt_lang: str = None
    prompt_text: str = ""
    top_k: int = 5
    top_k: int = 15
    top_p: float = 1
    temperature: float = 1
    text_split_method: str = "cut5"
@ -355,7 +355,7 @@ async def tts_handle(req: dict):
"aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion
"prompt_text": "", # str.(optional) prompt text for the reference audio
"prompt_lang": "", # str.(required) language of the prompt text for the reference audio
"top_k": 5, # int. top k sampling
"top_k": 15, # int. top k sampling
"top_p": 1, # float. top p sampling
"temperature": 1, # float. temperature for sampling
"text_split_method": "cut5", # str. text split method, see text_segmentation_method.py for details.
@ -460,7 +460,7 @@ async def tts_get_endpoint(
    aux_ref_audio_paths: list = None,
    prompt_lang: str = None,
    prompt_text: str = "",
    top_k: int = 5,
    top_k: int = 15,
    top_p: float = 1,
    temperature: float = 1,
    text_split_method: str = "cut5",
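
For API users, the new `top_k` default applies unless the request overrides it. A hedged request sketch, assuming this is the project's api_v2 service on its default port 9880 with a POST `/tts` endpoint; the `text`, `text_lang`, and `ref_audio_path` fields are assumed from the full `TTS_Request` model, which is only partially shown in this hunk:

```python
import requests

payload = {
    "text": "Hello world",            # text to synthesize
    "text_lang": "en",
    "ref_audio_path": "ref.wav",      # path visible to the server
    "prompt_text": "",                # optional
    "prompt_lang": "en",
    "top_k": 15,                      # the new default shown in this diff
    "top_p": 1,
    "temperature": 1,
    "text_split_method": "cut5",
}
resp = requests.post("http://127.0.0.1:9880/tts", json=payload)
with open("out.wav", "wb") as f:
    f.write(resp.content)
```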

View File

@ -19,7 +19,7 @@ torchaudio
modelscope
sentencepiece
transformers>=4.43,<=4.50
peft
peft<0.18.0
chardet
PyYAML
psutil