Merge branch 'main' into 添加推理页面的引导音频转写功能 (add reference-audio transcription to the inference page)

This commit is contained in:
刘悦 2024-01-28 20:21:53 +08:00 committed by GitHub
commit 79be4a1ee4
10 changed files with 51 additions and 58 deletions

Docker/download.py (new file)

@@ -0,0 +1,7 @@
# Download damo ASR-related models
from modelscope import snapshot_download
model_dir = snapshot_download('damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch')
model_dir = snapshot_download('damo/speech_fsmn_vad_zh-cn-16k-common-pytorch')
model_dir = snapshot_download('damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch')
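For context, `snapshot_download` returns the local directory the snapshot was fetched into, so the script could also log or reuse those paths. A minimal sketch, assuming modelscope's optional `cache_dir` override (not used by the script above):

```python
# Hedged sketch: cache_dir is an assumed optional argument of modelscope's
# snapshot_download; omit it to fall back to the default cache directory.
from modelscope import snapshot_download

asr_dir = snapshot_download(
    'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
    cache_dir='/workspace/models',  # assumption: pin downloads to a known path
)
print("ASR model downloaded to:", asr_dir)  # return value is the local model directory
```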

Dockerfile

@@ -2,7 +2,7 @@
FROM cnstark/pytorch:2.0.1-py3.9.17-cuda11.8.0-ubuntu20.04
LABEL maintainer="breakstring@hotmail.com"
LABEL version="dev-20240123.03"
LABEL version="dev-20240127"
LABEL description="Docker image for GPT-SoVITS"
@@ -18,27 +18,19 @@ RUN apt-get update && \
WORKDIR /workspace
COPY . /workspace
# install python packages
RUN pip install -r requirements.txt
# Download models
RUN chmod +x /workspace/Docker/download.sh && /workspace/Docker/download.sh
# These packages should be installed from requirements.txt, but due to funasr and modelscope issues, install the dependencies manually below for now
RUN pip install --no-cache-dir torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers jieba psutil PyYAML
# modelscope and funasr versions are pinned here; the damo_asr models are downloaded by them later
RUN pip install --no-cache-dir modelscope~=1.10.0 torchaudio sentencepiece funasr~=0.8.7
# Download damo ASR-related models
RUN python /workspace/Docker/download.py
# Disabled for now; let the container download these by itself
# Clone damo_asr
#WORKDIR /workspace/tools/damo_asr/models
#RUN git clone --depth 1 https://www.modelscope.cn/iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch && \
# (cd speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch && git lfs pull)
#RUN git clone --depth 1 https://www.modelscope.cn/iic/speech_fsmn_vad_zh-cn-16k-common-pytorch.git speech_fsmn_vad_zh-cn-16k-common-pytorch && \
# (cd speech_fsmn_vad_zh-cn-16k-common-pytorch && git lfs pull)
#RUN git clone --depth 1 https://www.modelscope.cn/iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git punc_ct-transformer_zh-cn-common-vocab272727-pytorch && \
# (cd punc_ct-transformer_zh-cn-common-vocab272727-pytorch && git lfs pull)
# Download nltk-related data
RUN python -m nltk.downloader averaged_perceptron_tagger
RUN python -m nltk.downloader cmudict
#RUN parallel --will-cite -a /workspace/Docker/damo.sha256 "echo -n {} | sha256sum -c"
#WORKDIR /workspace
EXPOSE 9870
EXPOSE 9871
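Not part of the build itself, but a quick way to confirm the two NLTK resources above actually landed in the image is `nltk.data.find`, which raises `LookupError` when a resource is absent; a minimal sketch:

```python
# Sanity-check sketch: nltk.data.find raises LookupError if the resource
# is not on NLTK's search path inside the container.
import nltk

for resource in ("taggers/averaged_perceptron_tagger", "corpora/cmudict"):
    try:
        print(resource, "->", nltk.data.find(resource))
    except LookupError:
        print(resource, "is missing; rerun the corresponding nltk.downloader step")
```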

GPT_SoVITS/AR/data/data_module.py

@@ -41,7 +41,7 @@ class Text2SemanticDataModule(LightningDataModule):
# pad_val=self.config['data']['pad_val'])
def train_dataloader(self):
batch_size = self.config["train"]["batch_size"]
batch_size = max(min(self.config["train"]["batch_size"], len(self._train_dataset) // 4), 1)  # prevent checkpoints from never being saved
sampler = DistributedBucketSampler(self._train_dataset, batch_size=batch_size)
return DataLoader(
self._train_dataset,
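The new line clamps the configured batch size to at most a quarter of the dataset and at least 1, so tiny datasets still complete enough steps per epoch for checkpoints to be written. A standalone sketch of the arithmetic:

```python
# Standalone sketch of the clamp in train_dataloader above.
def clamp_batch_size(configured: int, dataset_len: int) -> int:
    return max(min(configured, dataset_len // 4), 1)

print(clamp_batch_size(32, 1000))  # 32 -> large dataset, configured value wins
print(clamp_batch_size(32, 40))    # 10 -> clamped to len(dataset) // 4
print(clamp_batch_size(32, 2))     # 1  -> floor keeps training runnable
```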

GPT_SoVITS/inference_webui.py

@@ -283,17 +283,26 @@ def nonen_get_bert_inf(text, language):
return bert
#i18n("不切"),i18n("凑五句一切"),i18n("凑50字一切"),i18n("按中文句号。切"),i18n("按英文句号.切")
splits = {",","。","?","!",",",".","?","!","~",":",":","—","…",}
def get_first(text):
pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]"
text = re.split(pattern, text)[0].strip()
return text
def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,how_to_cut=i18n("不切")):
t0 = ttime()
prompt_text = prompt_text.strip("\n")
prompt_language, text = prompt_language, text.strip("\n")
if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "."
text = text.strip("\n")
if (len(get_first(text)) < 4): text += "。" if text_language != "en" else "."
zero_wav = np.zeros(
int(hps.data.sampling_rate * 0.3),
dtype=np.float16 if is_half == True else np.float32,
)
with torch.no_grad():
wav16k, sr = librosa.load(ref_wav_path, sr=16000)
if(wav16k.shape[0]>160000 or wav16k.shape[0]<48000):
raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
wav16k = torch.from_numpy(wav16k)
zero_wav_torch = torch.from_numpy(zero_wav)
if is_half == True:
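`get_first` builds a regex character class out of the `splits` set and keeps only the text before the first sentence-final mark; that is what the new `len(get_first(text)) < 4` guard measures. A self-contained sketch:

```python
# Self-contained sketch of get_first: the splits set becomes a regex
# character class, and everything before the first separator is returned.
import re

splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…"}

def get_first(text: str) -> str:
    pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]"
    return re.split(pattern, text)[0].strip()

print(get_first("你好。今天天气不错。"))  # -> 你好
print(get_first("short"))               # -> short (no separator, whole text returned)
```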
@@ -318,7 +327,7 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,
phones1, word2ph1, norm_text1 = clean_text_inf(prompt_text, prompt_language)
else:
phones1, word2ph1, norm_text1 = nonen_clean_text_inf(prompt_text, prompt_language)
if(how_to_cut==i18n("凑五句一切")):text=cut1(text)
if(how_to_cut==i18n("凑四句一切")):text=cut1(text)
elif(how_to_cut==i18n("凑50字一切")):text=cut2(text)
elif(how_to_cut==i18n("按中文句号。切")):text=cut3(text)
elif(how_to_cut==i18n("按英文句号.切")):text=cut4(text)
@@ -390,24 +399,6 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,
np.int16
)
splits = {
",",
"。",
"?",
"!",
",",
".",
"?",
"!",
"~",
":",
":",
"—",
"…",
} # the ellipsis is not included here
def split(todo_text):
todo_text = todo_text.replace("……", "。").replace("——", ",")
if todo_text[-1] not in splits:
@@ -430,7 +421,7 @@ def split(todo_text):
def cut1(inp):
inp = inp.strip("\n")
inps = split(inp)
split_idx = list(range(0, len(inps), 5))
split_idx = list(range(0, len(inps), 4))
split_idx[-1] = None
if len(split_idx) > 1:
opts = []
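With the step changed from 5 to 4, `cut1` now regroups the split sentences four at a time, the last slice absorbing any remainder. A standalone sketch of the regrouping (a pre-split sentence list stands in for `split()`):

```python
# Standalone sketch of cut1's regrouping logic; `sentences` stands in for
# the output of split().
def cut1_sketch(sentences):
    split_idx = list(range(0, len(sentences), 4))
    split_idx[-1] = None  # let the final slice run to the end of the list
    if len(split_idx) > 1:
        opts = ["".join(sentences[i:j]) for i, j in zip(split_idx, split_idx[1:])]
    else:
        opts = ["".join(sentences)]
    return "\n".join(opts)

print(cut1_sketch([f"句{i}。" for i in range(9)]))  # two chunks: 4 sentences + 5 (remainder absorbed)
```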
@@ -444,7 +435,6 @@ def cut1(inp):
def cut2(inp):
inp = inp.strip("\n")
inps = split(inp)
# print(inps)
if len(inps) < 2:
return inp
opts = []
@@ -517,6 +507,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
with gr.Row():
inp_ref = gr.Audio(label=i18n("请上传参考音频"), type="filepath")
whisper_button = gr.Button(i18n("faster_whisper转写音频内容到文本"))
inp_ref = gr.Audio(label=i18n("请上传3~10秒内参考音频超过会报错"), type="filepath")
prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="")
prompt_language = gr.Dropdown(
label=i18n("参考音频的语种"),choices=[i18n("中文"),i18n("英文"),i18n("日文")],value=i18n("中文")
@@ -530,7 +521,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
)
how_to_cut = gr.Radio(
label=i18n("怎么切"),
choices=[i18n("不切"),i18n("凑五句一切"),i18n("凑50字一切"),i18n("按中文句号。切"),i18n("按英文句号.切"),],
choices=[i18n("不切"),i18n("凑四句一切"),i18n("凑50字一切"),i18n("按中文句号。切"),i18n("按英文句号.切"),],
value=i18n("凑50字一切"),
interactive=True,
)
@@ -546,7 +537,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
gr.Markdown(value=i18n("文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。"))
with gr.Row():
text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"),value="")
button1 = gr.Button(i18n("凑五句一切"), variant="primary")
button1 = gr.Button(i18n("凑四句一切"), variant="primary")
button2 = gr.Button(i18n("凑50字一切"), variant="primary")
button3 = gr.Button(i18n("按中文句号。切"), variant="primary")
button4 = gr.Button(i18n("按英文句号.切"), variant="primary")

GPT_SoVITS/s1_train.py

@@ -44,9 +44,8 @@ class my_model_ckpt(ModelCheckpoint):
self.config = config
def on_train_epoch_end(self, trainer, pl_module):
if not self._should_skip_saving_checkpoint(
trainer
) and self._should_save_on_train_epoch_end(trainer):
# if not self._should_skip_saving_checkpoint(trainer) and self._should_save_on_train_epoch_end(trainer):
if self._should_save_on_train_epoch_end(trainer):
monitor_candidates = self._monitor_candidates(trainer)
if (
self._every_n_epochs >= 1
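The rewritten `on_train_epoch_end` drops the `_should_skip_saving_checkpoint` guard (which also suppresses saving during sanity checks and `fast_dev_run`), keeping only `_should_save_on_train_epoch_end`. A minimal sketch of the resulting pattern, assuming pytorch_lightning's private `ModelCheckpoint` helpers behave as they do in the diff:

```python
# Hedged sketch: _should_save_on_train_epoch_end, _monitor_candidates and
# _save_topk_checkpoint are pytorch_lightning ModelCheckpoint internals; the
# class name here is hypothetical.
from pytorch_lightning.callbacks import ModelCheckpoint

class SaveEveryEpochCheckpoint(ModelCheckpoint):
    def on_train_epoch_end(self, trainer, pl_module):
        # consider saving at the end of every training epoch, even ones the
        # base class would normally skip
        if self._should_save_on_train_epoch_end(trainer):
            monitor_candidates = self._monitor_candidates(trainer)
            self._save_topk_checkpoint(trainer, monitor_candidates)
```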

README.md

@@ -121,8 +121,9 @@ For UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally)
### Using Docker
#### docker-compose.yaml configuration
0. Regarding image tags: Due to rapid updates in the codebase and the slow process of packaging and testing images, please check [Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) for the currently packaged latest images and select as per your situation, or alternatively, build locally using a Dockerfile according to your own needs.
1. Environment Variables
- is_half: Controls half-precision/double-precision. This is typically the cause if the content under the directories 4-cnhubert/5-wav32k is not generated correctly during the "SSL extracting" step. Adjust to True or False based on your actual situation.
@@ -140,7 +141,7 @@ docker compose -f "docker-compose.yaml" up -d
As above, modify the corresponding parameters based on your actual situation, then run the following command:
```
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:dev-20240123.03
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx
```

docker-compose.yaml

@@ -2,7 +2,7 @@ version: '3.8'
services:
gpt-sovits:
image: breakstring/gpt-sovits:dev-20240123.03
image: breakstring/gpt-sovits:xxxxx # please change the image name and tag based on your environment
container_name: gpt-sovits-container
environment:
- is_half=False

docs/cn/README.md

@@ -110,7 +110,7 @@ brew install ffmpeg
### Using Docker
#### docker-compose.yaml configuration
0. Regarding image tags: since the codebase updates quickly while image packaging and testing are slow, please check [Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) for the latest packaged images and choose according to your situation, or build locally from the Dockerfile to suit your own needs.
1. Environment variables:
- is_half: Controls half/double precision. If the content under the 4-cnhubert/5-wav32k directories is not generated correctly during the "SSL extracting" step, this is usually the cause; adjust it to True or False according to your actual situation.
@@ -129,7 +129,7 @@ docker compose -f "docker-compose.yaml" up -d
As above, modify the corresponding parameters according to your actual situation, then run the following command:
```
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:dev-20240123.03
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx
```

docs/ja/README.md

@@ -106,8 +106,9 @@ brew install ffmpeg
### Using Docker
#### docker-compose.yaml configuration
0. Regarding image tags: since the codebase updates quickly while image packaging and testing are slow, please check [Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) for the currently packaged latest images and choose according to your situation, or build locally with the Dockerfile to suit your own needs.
1. Environment variables:
- `is_half`: Controls half/double precision. If the content under the `4-cnhubert/5-wav32k` directories is not generated correctly during the "SSL extracting" step, this is usually the cause; adjust it to True or False according to your actual situation.
@@ -124,7 +125,7 @@ docker compose -f "docker-compose.yaml" up -d
As above, modify the corresponding parameters based on your actual situation, then run the following command:
```
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:dev-20240123.03
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx
```

webui.py

@@ -16,7 +16,11 @@ if(os.path.exists(tmp)):
if(name=="jieba.cache"):continue
path="%s/%s"%(tmp,name)
delete=os.remove if os.path.isfile(path) else shutil.rmtree
delete(path)
try:
delete(path)
except Exception as e:
print(str(e))
pass
import site
site_packages_roots = []
for path in site.getsitepackages():
@@ -26,7 +30,6 @@ if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages"
#os.environ["OPENBLAS_NUM_THREADS"] = "4"
os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
os.environ["all_proxy"] = ""
for site_packages_root in site_packages_roots:
if os.path.exists(site_packages_root):
try:
@@ -38,7 +41,6 @@ for site_packages_root in site_packages_roots:
break
except PermissionError:
pass
from tools import my_utils
import traceback
import shutil
@@ -662,7 +664,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
with gr.Row():
if_label = gr.Checkbox(label=i18n("是否开启打标WebUI"),show_label=True)
path_list = gr.Textbox(
label=i18n("打标数据标注文件路径"),
label=i18n(".list标注文件的路径"),
value="D:\\RVC1006\\GPT-SoVITS\\raw\\xxx.list",
interactive=True,
)
@@ -688,7 +690,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
label=i18n("*训练集音频文件目录"),
# value=r"D:\RVC1006\GPT-SoVITS\raw\xxx",
interactive=True,
placeholder=i18n("训练集音频文件目录-拼接-list文件里波形对应的文件名不是全路径")
placeholder=i18n("填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名不是全路径")
)
gr.Markdown(value=i18n("1Aa-文本内容"))
with gr.Row():
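The updated placeholder spells out the path rule: the full audio path is this directory joined with the (non-absolute) file name stored in the .list file. A standalone sketch of that rule (the function name is illustrative):

```python
# Standalone sketch of the placeholder's path rule: full audio path =
# given directory + file name from the .list line (which is not a full path).
import os

def resolve_audio_path(audio_dir: str, name_in_list: str) -> str:
    return os.path.join(audio_dir, os.path.basename(name_in_list))

print(resolve_audio_path("/data/sliced", "vocal_0001.wav"))
# -> /data/sliced/vocal_0001.wav
```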