mirror of
https://github.com/RVC-Boss/GPT-SoVITS.git
synced 2025-10-08 16:00:01 +08:00
恢复make batch的位置
This commit is contained in:
parent
3bfb20763d
commit
6591e86df3
@ -598,30 +598,7 @@ class TTS:
|
|||||||
tuple[int, np.ndarray]: sampling rate and audio data.
|
tuple[int, np.ndarray]: sampling rate and audio data.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def make_batch(batch_texts):
    """Preprocess a list of texts into one model-ready batch.

    For every text, extract phonemes, BERT features and the normalized
    text via the shared text preprocessor; texts that yield no phonemes
    are skipped. The surviving samples are packed by ``self.to_batch``
    (no bucket splitting) and the first — and only — resulting batch is
    returned, or ``None`` when nothing survived preprocessing.

    NOTE(review): this is a closure — ``text_lang``, ``no_prompt_text``,
    ``batch_size`` and ``batch_threshold`` come from the enclosing
    method's scope.
    """
    print(i18n("############ 提取文本Bert特征 ############"))
    samples = []
    for txt in tqdm(batch_texts):
        phones, bert_features, norm_text = (
            self.text_preprocessor.segment_and_extract_feature_for_text(txt, text_lang)
        )
        # A None phoneme sequence means the text produced nothing usable.
        if phones is None:
            continue
        samples.append(
            {
                "phones": phones,
                "bert_features": bert_features,
                "norm_text": norm_text,
            }
        )
    if not samples:
        return None
    batches, _ = self.to_batch(
        samples,
        prompt_data=self.prompt_cache if not no_prompt_text else None,
        batch_size=batch_size,
        threshold=batch_threshold,
        split_bucket=False,
        device=self.configs.device,
        precision=self.precision,
    )
    return batches[0]
|
|
||||||
|
|
||||||
# 直接给全体套一个torch.no_grad()
|
# 直接给全体套一个torch.no_grad()
|
||||||
with torch.no_grad():
|
with torch.no_grad():
|
||||||
@ -720,6 +697,30 @@ class TTS:
|
|||||||
data.append([])
|
data.append([])
|
||||||
data[-1].append(texts[i])
|
data[-1].append(texts[i])
|
||||||
|
|
||||||
|
def make_batch(batch_texts):
    """Turn a list of texts into a single preprocessed batch.

    Each text is run through the text preprocessor to obtain phonemes,
    BERT features and normalized text; entries whose phoneme extraction
    fails (returns ``None``) are dropped. The collected samples are then
    handed to ``self.to_batch`` with bucket splitting disabled, and the
    single batch it produces is returned. Returns ``None`` if every
    text was dropped.

    NOTE(review): relies on the enclosing method's locals
    (``text_lang``, ``no_prompt_text``, ``batch_size``,
    ``batch_threshold``) — it is a nested closure, not a method.
    """
    collected = []
    print(i18n("############ 提取文本Bert特征 ############"))
    for piece in tqdm(batch_texts):
        extraction = self.text_preprocessor.segment_and_extract_feature_for_text(piece, text_lang)
        phones, bert_features, norm_text = extraction
        if phones is None:
            # Nothing usable came out of this text; skip it.
            continue
        sample = {
            "phones": phones,
            "bert_features": bert_features,
            "norm_text": norm_text,
        }
        collected.append(sample)
    if len(collected) == 0:
        return None
    batch_list, _ = self.to_batch(
        collected,
        prompt_data=self.prompt_cache if not no_prompt_text else None,
        batch_size=batch_size,
        threshold=batch_threshold,
        split_bucket=False,
        device=self.configs.device,
        precision=self.precision,
    )
    return batch_list[0]
|
||||||
|
|
||||||
|
|
||||||
t2 = ttime()
|
t2 = ttime()
|
||||||
|
Loading…
x
Reference in New Issue
Block a user