Add Docker build PowerShell script for Windows

XXXXRT666 2025-05-27 00:23:44 +08:00
parent 6a2ab63e18
commit 89438d6001
4 changed files with 95 additions and 24 deletions

docker_build.ps1 (new file, 73 additions)

@@ -0,0 +1,73 @@
$ErrorActionPreference = "Stop"

$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
Set-Location $ScriptDir

if (-not (Get-Command "docker" -ErrorAction SilentlyContinue)) {
    Write-Host "Docker Not Found"
    exit 1
}

$Lite = $false
$CudaVersion = "12.6"

function Write-Help {
    Write-Host @"
Usage: powershell -File docker_build.ps1 [OPTIONS]

Options:
  --cuda 12.6|12.8    Specify the CUDA VERSION (REQUIRED)
  --lite              Build a Lite Image
  -h, --help          Show this help message and exit

Examples:
  powershell -File docker_build.ps1 --cuda 12.6 --lite
"@
}

if ($args.Count -eq 0) {
    Write-Help
    exit 0
}

for ($i = 0; $i -lt $args.Count; $i++) {
    switch ($args[$i]) {
        '--cuda' {
            $i++
            $val = $args[$i]
            if ($val -ne "12.6" -and $val -ne "12.8") {
                Write-Host "Error: Invalid CUDA_VERSION: $val"
                Write-Host "Choose From: [12.6, 12.8]"
                exit 1
            }
            $CudaVersion = $val
        }
        '--lite' {
            $Lite = $true
        }
        '-h' { Write-Help; exit 0 }
        '--help' { Write-Help; exit 0 }
        default {
            Write-Host "Unknown Argument: $($args[$i])"
            Write-Host "Use -h or --help to see available options."
            exit 1
        }
    }
}

$arch = (Get-CimInstance Win32_Processor).Architecture
$TargetPlatform = if ($arch -eq 9) { "linux/amd64" } else { "linux/arm64" }

if ($Lite) {
    $TorchBase = "lite"
} else {
    $TorchBase = "full"
}

docker build `
    --build-arg CUDA_VERSION=$CudaVersion `
    --build-arg LITE=$Lite `
    --build-arg TARGETPLATFORM=$TargetPlatform `
    --build-arg TORCH_BASE=$TorchBase `
    -t "$env:USERNAME/gpt-sovits:local" `
    .
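
For context, here is a minimal sketch of how the new script is expected to be used and what it ends up running; it is not part of the commit. It assumes an x64 host (Win32_Processor Architecture code 9), Docker available on PATH, and the Dockerfile sitting next to the script (the script changes into its own directory before building).

# Sketch: invoke the script as its own help text suggests
powershell -File docker_build.ps1 --cuda 12.8 --lite

# With those arguments it assembles approximately this command
# ($true is rendered as "True" when passed to a native command):
docker build `
    --build-arg CUDA_VERSION=12.8 `
    --build-arg LITE=True `
    --build-arg TARGETPLATFORM=linux/amd64 `
    --build-arg TORCH_BASE=lite `
    -t "$env:USERNAME/gpt-sovits:local" `
    .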

docker_build.sh

@@ -25,7 +25,7 @@ print_help() {
    echo "  -h, --help        Show this help message and exit"
    echo ""
    echo "Examples:"
-    echo "  bash docker_build.sh --cuda 12.6 --funasr --faster-whisper"
+    echo "  bash docker_build.sh --cuda 12.6"
}

# Show help if no arguments provided
install.sh

@@ -34,8 +34,8 @@ print_help() {
    echo "  -h, --help        Show this help message and exit"
    echo ""
    echo "Examples:"
-    echo "  bash install.sh --source HF --download-uvr5"
-    echo "  bash install.sh --source ModelScope"
+    echo "  bash install.sh --device CU128 --source HF --download-uvr5"
+    echo "  bash install.sh --device MPS --source ModelScope"
}

# Show help if no arguments provided
@@ -149,7 +149,6 @@ else
            echo "Installing, Please Wait..."
        fi
    done
-    conda install -c conda-forge -q -y
fi

echo "Installing ffmpeg and cmake..."

Subfix labeling WebUI (Python file)

@@ -88,7 +88,7 @@ class Subfix:

    @property
    def max_index(self):
-        return len(self.transcriptions_list)
+        return len(self.transcriptions_list) - 1

    def load_list(self, list_path: str):
        with open(list_path, mode="r", encoding="utf-8") as f:
@@ -126,7 +126,7 @@ class Subfix:
        checkboxs = []
        with LOCK:
            for i in range(index, index + self.batch_size):
-                if i <= self.max_index - 1:
+                if i <= self.max_index:
                    audios.append(gr.Audio(value=self.transcriptions_list[i][0]))
                    texts.append(gr.Textbox(value=self.transcriptions_list[i][3], label=self.i18n("Text") + f" {i}"))
                    languages.append(gr.Dropdown(value=self.transcriptions_list[i][2]))
@@ -140,10 +140,8 @@ class Subfix:

    def next_page(self, index: int):
        batch_size = self.batch_size
-        max_index = self.max_index - batch_size
-        if max_index <= 0:
-            max_index = 1
-        index = min(index + batch_size, max_index - 1)
+        max_index = max(self.max_index - batch_size + 1, 0)
+        index = min(index + batch_size, max_index)
        return gr.Slider(value=index), *self.change_index(index)

    def previous_page(self, index: int):
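
For illustration only (not part of the diff): with 25 transcriptions and batch_size = 10, max_index is now 24 (the last valid index). next_page therefore clamps the start index to max(24 - 10 + 1, 0) = 15, so the final page shows entries 15-24, whereas the old code clamped the start to (25 - 10) - 1 = 14 and the last entry could never be paged into view.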
@@ -153,7 +151,7 @@ class Subfix:

    def delete_audio(self, index, *selected):
        delete_index = [i + index for i, _ in enumerate(selected) if _]
-        delete_index = [i for i in delete_index if i < self.max_index - 1]
+        delete_index = [i for i in delete_index if i < self.max_index]
        for idx in delete_index[::-1]:
            self.transcriptions_list.pop(idx)
        self.save_list()
@@ -167,7 +165,8 @@ class Subfix:
        languages = input[len(input) // 2 :]
        if texts is None or languages is None:
            raise ValueError()
-        for idx in range(index, min(index + batch_size, self.max_index - 1)):
+        print(index, min(index + batch_size, self.max_index))
+        for idx in range(index, min(index + batch_size, self.max_index + 1)):
            self.transcriptions_list[idx][3] = texts[idx - index].strip().strip("\n")
            self.transcriptions_list[idx][2] = languages[idx - index]
        result = self.save_list()
@@ -178,7 +177,7 @@ class Subfix:
    def merge_audio(self, index, *selected):
        batch_size = self.batch_size
        merge_index = [i + index for i, _ in enumerate(selected) if _]
-        merge_index = [i for i in merge_index if i < self.max_index - 1]
+        merge_index = [i for i in merge_index if i < self.max_index]
        if len(merge_index) < 2:
            return *(gr.skip() for _ in range(batch_size * 3 + 1)), *(gr.Checkbox(False) for _ in range(batch_size))
        else:
@@ -211,7 +210,7 @@ class Subfix:
        self.batch_size = batch_size
        for i in range(index, index + batch_size):
            with gr.Row(equal_height=True):
-                if i <= self.max_index - 1:
+                if i <= self.max_index:
                    with gr.Column(scale=2, min_width=160):
                        textbox_tmp = gr.Textbox(
                            value=self.transcriptions_list[i][3],
@@ -281,7 +280,7 @@ class Subfix:
                    self.selections.append(selection_tmp)
        with gr.Row(equal_height=True):
            with gr.Column(scale=2, min_width=160):
-                self.close_button = gr.Button(value=i18n("关闭打标WebUI"), variant="stop")
+                self.close_button = gr.Button(value=i18n("保存并关闭打标WebUI"), variant="stop")
            with gr.Column(scale=1, min_width=160):
                self.previous_index_button2 = gr.Button(value=i18n("上一页"))
            with gr.Column(scale=1, min_width=160):
@@ -507,12 +506,12 @@ def main(list_path: str = "", i18n_lang="Auto", port=9871, share=False):
    with gr.Blocks(analytics_enabled=False) as app:
        subfix = Subfix(I18nAuto(i18n_lang))
        subfix.render(list_path=list_path)
-        if subfix.max_index > 0:
+        if subfix.max_index >= 0:
            timer = gr.Timer(0.1)
            timer.tick(
                fn=lambda: (
-                    gr.Slider(value=0, maximum=subfix.max_index),
+                    gr.Slider(value=0, maximum=subfix.max_index, step=1),
                    gr.Slider(value=10),
                    gr.Timer(active=False),
                ),