import torch
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn

MAX_WAV_VALUE = 32768.0


def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    """
    PARAMS
    ------
    C: compression factor
    """
    return torch.log(torch.clamp(x, min=clip_val) * C)


def dynamic_range_decompression_torch(x, C=1):
    """
    PARAMS
    ------
    C: compression factor used to compress
    """
    return torch.exp(x) / C


def spectral_normalize_torch(magnitudes):
    output = dynamic_range_compression_torch(magnitudes)
    return output


def spectral_de_normalize_torch(magnitudes):
    output = dynamic_range_decompression_torch(magnitudes)
    return output
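# Illustrative note: with the default C=1, dynamic_range_compression_torch and
# dynamic_range_decompression_torch (and hence spectral_normalize_torch and
# spectral_de_normalize_torch) are inverses for inputs at or above clip_val,
# i.e. dynamic_range_decompression_torch(dynamic_range_compression_torch(x))
# should recover x.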
# Caches for mel filterbanks and Hann windows, keyed by dtype/device and the
# relevant STFT/mel parameters, so each configuration is built only once.
mel_basis = {}
hann_window = {}
def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
    if torch.min(y) < -1.2:
        print("min value is ", torch.min(y))
    if torch.max(y) > 1.2:
        print("max value is ", torch.max(y))

    global hann_window
    dtype_device = str(y.dtype) + "_" + str(y.device)
    # wnsize_dtype_device = str(win_size) + '_' + dtype_device
    key = "%s-%s-%s-%s-%s" % (dtype_device, n_fft, sampling_rate, hop_size, win_size)
    # if wnsize_dtype_device not in hann_window:
    if key not in hann_window:
        # hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
        hann_window[key] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

    y = torch.nn.functional.pad(
        y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect"
    )
    y = y.squeeze(1)
    # spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
    spec = torch.stft(
        y,
        n_fft,
        hop_length=hop_size,
        win_length=win_size,
        window=hann_window[key],
        center=center,
        pad_mode="reflect",
        normalized=False,
        onesided=True,
        return_complex=False,
    )

    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-8)
    return spec
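# Note: spectrogram_torch returns a linear-magnitude spectrogram with shape
# (batch, n_fft // 2 + 1, frames).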
def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
    global mel_basis
    dtype_device = str(spec.dtype) + "_" + str(spec.device)
    # fmax_dtype_device = str(fmax) + '_' + dtype_device
    key = "%s-%s-%s-%s-%s-%s" % (dtype_device, n_fft, num_mels, sampling_rate, fmin, fmax)
    # if fmax_dtype_device not in mel_basis:
    if key not in mel_basis:
        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
        # mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
        mel_basis[key] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
    # spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    spec = torch.matmul(mel_basis[key], spec)
    spec = spectral_normalize_torch(spec)
    return spec
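# Note: spec_to_mel_torch expects a linear-magnitude spectrogram such as the
# output of spectrogram_torch; applying it to that output should match calling
# mel_spectrogram_torch below directly on the waveform.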
def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
    if torch.min(y) < -1.2:
        print("min value is ", torch.min(y))
    if torch.max(y) > 1.2:
        print("max value is ", torch.max(y))

    global mel_basis, hann_window
    dtype_device = str(y.dtype) + "_" + str(y.device)
    # fmax_dtype_device = str(fmax) + '_' + dtype_device
    fmax_dtype_device = "%s-%s-%s-%s-%s-%s-%s-%s" % (
        dtype_device,
        n_fft,
        num_mels,
        sampling_rate,
        hop_size,
        win_size,
        fmin,
        fmax,
    )
    # wnsize_dtype_device = str(win_size) + '_' + dtype_device
    wnsize_dtype_device = fmax_dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

    y = torch.nn.functional.pad(
        y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect"
    )
    y = y.squeeze(1)

    spec = torch.stft(
        y,
        n_fft,
        hop_length=hop_size,
        win_length=win_size,
        window=hann_window[wnsize_dtype_device],
        center=center,
        pad_mode="reflect",
        normalized=False,
        onesided=True,
        return_complex=False,
    )

    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-8)

    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    spec = spectral_normalize_torch(spec)

    return spec
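if __name__ == "__main__":
    # Minimal usage sketch of the functions above. The STFT/mel hyperparameters
    # here (sr, n_fft, hop_size, win_size, num_mels, fmin, fmax) are illustrative
    # assumptions, not values taken from any specific GPT-SoVITS configuration.
    sr = 32000
    y = torch.rand(1, sr) * 2 - 1  # one second of dummy audio in [-1, 1], shape (batch, samples)

    spec = spectrogram_torch(y, n_fft=1024, sampling_rate=sr, hop_size=320, win_size=1024, center=False)
    mel = spec_to_mel_torch(spec, n_fft=1024, num_mels=100, sampling_rate=sr, fmin=0, fmax=None)
    mel_direct = mel_spectrogram_torch(
        y, n_fft=1024, num_mels=100, sampling_rate=sr, hop_size=320, win_size=1024, fmin=0, fmax=None, center=False
    )

    print(spec.shape)  # (batch, n_fft // 2 + 1, frames)
    print(mel.shape)  # (batch, num_mels, frames)
    print(torch.allclose(mel, mel_direct))  # the two-step and direct paths should agree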