Mirror of https://github.com/RVC-Boss/GPT-SoVITS.git (synced 2025-10-15 21:26:51 +08:00)

commit 0373aa97e0 (parent 4d8e5dce38)
.github/build_windows_packages.ps1 (vendored): 25 changed lines
@@ -92,6 +92,30 @@ Get-ChildItem -Path $x64Path -Directory | Where-Object {
     }
 }
 
+$ffmpegUrl = "https://github.com/BtbN/FFmpeg-Builds/releases/download/latest/ffmpeg-n7.1-latest-win64-gpl-shared-7.1.zip"
+$zipPath = Join-Path $tmpDir "ffmpeg.zip"
+
+Invoke-WebRequest -Uri $ffmpegUrl -OutFile $zipPath
+
+Expand-Archive -Path $zipPath -DestinationPath $tmpDir -Force
+
+$extractedDir = Get-ChildItem -Path $tmpDir -Directory | Where-Object { $_.Name -match "^ffmpeg.*win64.*gpl.*shared" } | Select-Object -First 1
+
+if (-not $extractedDir) {
+    Write-Error "Can Not Find FFmpeg Folder"
+    exit 1
+}
+
+$runtimeDir = "$srcDir\runtime"
+New-Item -ItemType Directory -Force -Path $runtimeDir | Out-Null
+
+$src = Join-Path $extractedDir.FullName "bin"
+if (Test-Path $src) {
+    Move-Item -Path (Join-Path $src '*') -Destination $runtimeDir -Force
+}
+
+Write-Host "FFmpeg Downloaded and extracted to $runtimeDir"
+
 function DownloadAndUnzip($url, $targetRelPath) {
     $filename = Split-Path $url -Leaf
     $tmpZip = "$tmpDir\$filename"
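A quick way to sanity-check the relocated binaries (my own sketch, not part of the commit; the runtime path below simply mirrors $runtimeDir from the script):

    import subprocess
    from pathlib import Path

    # The shared GPL build ships ffmpeg.exe and its DLLs together in bin\,
    # and the script above moves all of them into runtime\, so the binary
    # should be runnable in place after packaging.
    ffmpeg = Path("runtime") / "ffmpeg.exe"   # assumed layout, per the script
    out = subprocess.run([str(ffmpeg), "-version"], capture_output=True, text=True, check=True)
    print(out.stdout.splitlines()[0])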
@@ -157,7 +181,6 @@ switch ($cuda) {
 }
 
 Write-Host "[INFO] Installing dependencies..."
-& ".\runtime\python.exe" -m pip install --pre torchcodec --index-url https://download.pytorch.org/whl/nightly/cpu
 & ".\runtime\python.exe" -m pip install -r extra-req.txt --no-deps --no-warn-script-location
 & ".\runtime\python.exe" -m pip install -r requirements.txt --no-warn-script-location
 
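The nightly-index install of torchcodec is dropped here; the package is instead pinned through requirements.txt (see the requirements.txt hunk at the end of this diff), so a single pip pass resolves it together with the rest of the dependencies.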
@@ -7,6 +7,10 @@ from .structs import T2SRequest, T2SResult
 from .t2s_engine import T2SEngine as T2SEngineTorch
 
 torch.set_grad_enabled(False)
+torch.backends.cudnn.benchmark = True
+torch.backends.cudnn.enabled = True
+torch.backends.cuda.matmul.allow_tf32 = True
+torch.backends.cudnn.allow_tf32 = True
 
 backends = ["torch_varlen"]
 if torch.cuda.is_available():
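For context (not stated in the diff itself): these are global, process-wide switches that trade a little float32 precision for speed on recent NVIDIA GPUs. A minimal standalone sketch of the same inference-time setup:

    import torch

    torch.set_grad_enabled(False)                    # inference only: no autograd bookkeeping
    torch.backends.cudnn.benchmark = True            # let cuDNN autotune conv algorithms
    torch.backends.cudnn.enabled = True
    torch.backends.cuda.matmul.allow_tf32 = True     # TF32 matmuls on Ampere+ (faster, 10-bit mantissa)
    torch.backends.cudnn.allow_tf32 = True           # same trade-off for cuDNN convolutions

    device = "cuda" if torch.cuda.is_available() else "cpu"
    x = torch.randn(1024, 1024, device=device)
    y = x @ x                                        # runs under the settings above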
@@ -184,7 +184,7 @@ class T2SEngine(T2SEngineProtocol):
             case "mtia":
                 torch.mtia.empty_cache()
             case "cpu":
-                gc.collect()
+                gc.collect(1)
 
         if request.use_cuda_graph and self.graphcache.is_applicable:
             self.graphcache.release_graph(session)
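gc.collect(1) asks the collector to scan only generations 0 and 1 rather than the whole heap, which is much cheaper per request than a full collection once the process holds many long-lived objects (models, caches). A tiny illustration, not from the repository:

    import gc

    freed_full  = gc.collect()    # default: full collection across all three generations
    freed_young = gc.collect(1)   # only the two youngest generations; cheaper, usually enough
    print(freed_full, freed_young)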
@@ -1,5 +1,6 @@
 import argparse
 import contextlib
+import gc
 import logging
 import os
 import re
@@ -966,10 +967,11 @@ def get_tts_wav(
         gr.Info(f"{infer_speed_avg:.2f} Token/s", title="Infer Speed")
         gr.Info(f"{rtf_value:.2f}", title="RTF")
 
-    yield opt_sr, (audio_opt_n * 32767).astype(np.int16)
-
     if torch.cuda.is_available():
         torch.cuda.empty_cache()
 
+    yield opt_sr, (audio_opt_n * 32767).astype(np.int16)
+    gc.collect()
+
 
 def split(todo_text):
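The reorder above empties the CUDA cache before the waveform is handed back, and the int16 conversion (audio * 32767) turns a float waveform in [-1, 1] into 16-bit PCM for Gradio. A compressed sketch of the resulting pattern; the names audio_opt_n and opt_sr are taken from the hunk, the rest is illustrative:

    import gc
    import numpy as np
    import torch

    def tts_generator():
        audio_opt_n, opt_sr = np.zeros(16000, dtype=np.float32), 32000  # stand-in waveform

        if torch.cuda.is_available():
            torch.cuda.empty_cache()            # release cached GPU blocks before yielding

        yield opt_sr, (audio_opt_n * 32767).astype(np.int16)  # float [-1, 1] -> int16 PCM
        gc.collect()                            # runs only if/when the caller resumes the generator

    sr, pcm = next(tts_generator())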
@@ -91,7 +91,7 @@ class G2PWOnnxConverter:
         sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
         sess_options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
         sess_options.intra_op_num_threads = 2 if torch.cuda.is_available() else 0
-        if "CUDAExecutionProvider" in onnxruntime.get_available_providers():
+        if "CUDAExecutionProvider" in onnxruntime.get_available_providers() and torch.cuda.is_available():
             self.session_g2pW = onnxruntime.InferenceSession(
                 os.path.join(uncompress_path, "g2pW.onnx"),
                 sess_options=sess_options,
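onnxruntime-gpu can report CUDAExecutionProvider as available even on a machine without a usable CUDA device or with missing CUDA/cuDNN DLLs, in which case a CUDA session can fail or fall back noisily; gating on torch.cuda.is_available() as well keeps such setups on the CPU provider. A standalone sketch of the same guard (the model path here is simplified):

    import onnxruntime
    import torch

    use_cuda = (
        "CUDAExecutionProvider" in onnxruntime.get_available_providers()
        and torch.cuda.is_available()
    )
    providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] if use_cuda else ["CPUExecutionProvider"]
    session = onnxruntime.InferenceSession("g2pW.onnx", providers=providers)  # model name from the hunk, path simplified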
README.md: 13 changed lines
@@ -45,12 +45,13 @@ Unseen speakers few-shot fine-tuning demo:
 
 ## Infer Speed
 
-| Device      | RTF   | Batch Size | Backend                     |
-| ----------- | ----- | ---------- | --------------------------- |
-| RTX 5090    | 0.05  | 1          | Flash Attn Varlen CUDAGraph |
-| Apple M4    | 0.21  | 1          | MLX Quantized Affined       |
-| RTX 4090    | 0.014 | 24         | Flash Attn Varlen CUDAGraph |
-| RTX 4060 Ti | 0.028 | 28         | Flash Attn Varlen CUDAGraph |
+| Device      | RTF   | TTFB   | Batch Size | Backend                     |
+| ----------- | ----- | ------ | ---------- | --------------------------- |
+| RTX 5090    | 0.05  | 150 ms | 1          | Flash Attn Varlen CUDAGraph |
+| RTX 4090    | 0.014 | UNK    | 24         | Flash Attn Varlen CUDAGraph |
+| RTX 4060 Ti | 0.07  | 460 ms | 1          | Flash Attn Varlen CUDAGraph |
+| RTX 4060 Ti | 0.028 | UNK    | 28         | Flash Attn Varlen CUDAGraph |
+| Apple M4    | 0.21  |        | 1          | MLX Quantized Affined       |
 
 **User guide: [简体中文](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e) | [English](https://rentry.co/GPT-SoVITS-guide#/)**
 
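For reading the table: RTF (real-time factor) is synthesis time divided by the duration of the audio produced, so an RTF of 0.05 means roughly 0.5 s of compute per 10 s of speech; TTFB is the time until the first audio chunk is returned, and UNK presumably marks unmeasured entries. These definitions are the conventional ones; the table itself only lists the numbers.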
@@ -2,5 +2,5 @@ set "SCRIPT_DIR=%~dp0"
 set "SCRIPT_DIR=%SCRIPT_DIR:~0,-1%"
 cd /d "%SCRIPT_DIR%"
 set "PATH=%SCRIPT_DIR%\runtime"
-runtime\python.exe -I webui.py zh_CN
+runtime\python.exe -s webui.py zh_CN
 pause
@@ -3,5 +3,5 @@ chcp 65001
 Set-Location $PSScriptRoot
 $runtimePath = Join-Path $PSScriptRoot "runtime"
 $env:PATH = "$runtimePath"
-& "$runtimePath\python.exe" -I "$PSScriptRoot\webui.py" zh_CN
+& "$runtimePath\python.exe" -s "$PSScriptRoot\webui.py" zh_CN
 pause
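Both launchers swap CPython's -I flag for -s. Isolated mode (-I) implies -E and -s and also keeps the script's directory off sys.path, which prevents webui.py from importing the modules that sit next to it; -s only keeps the user's site-packages out of the bundled runtime while leaving the script directory importable. A quick way to see the difference (my example, not from the repo):

    import sys

    # run as:  runtime\python.exe -s probe.py   versus   runtime\python.exe -I probe.py
    print("isolated:", sys.flags.isolated, " no_user_site:", sys.flags.no_user_site)
    print("first sys.path entries:", sys.path[:2])   # with -s the script directory is still first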
@@ -255,7 +255,6 @@ switch ($Device) {
 Write-Success "PyTorch Installed"
 
 Write-Info "Installing Python Dependencies From requirements.txt..."
-Invoke-Pip --pre torchcodec --index-url https://download.pytorch.org/whl/nightly/cpu
 Invoke-Pip -r extra-req.txt --no-deps
 Invoke-Pip -r requirements.txt
 Write-Success "Python Dependencies Installed"
@@ -13,6 +13,7 @@ peft
 py-cpuinfo
 pypinyin
 split-lang
 torchaudio
+torchcodec
 transformers
 tensorboard
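A minimal post-install check that the requirements-pinned torchcodec imports cleanly in the bundled runtime (my sketch, not part of the commit):

    from importlib.metadata import version

    import torchcodec  # should now come in via requirements.txt rather than the nightly index
    print(version("torchcodec"))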