diff --git a/README.md b/README.md
index 958e314..c0eda03 100644
--- a/README.md
+++ b/README.md
@@ -197,8 +197,40 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
 - [ ] better sovits base model (enhanced audio quality)
 - [ ] model mix
+## (Optional) Command Line Operation Mode
+Use the command line to open the WebUI for UVR5:
+```
+python tools/uvr5/webui.py "<infer_device>" <is_half> <webui_port_uvr5>
+```
+If you cannot open a browser, follow the format below for UVR processing. This uses MDX-Net for audio processing:
+```
+python mdxnet.py --model <model_name> --input_root <input_root> --output_vocal <output_vocal_dir> --output_ins <output_ins_dir> --agg_level <agg_level> --format <audio_format> --device <device> --is_half_precision <is_half_precision>
+```
+This is how audio segmentation of the dataset is done using the command line:
+```
+python audio_slicer.py \
+    --input_path "<input_audio_path>" \
+    --output_root "<output_root>" \
+    --threshold <volume_threshold> \
+    --min_length <min_length> \
+    --min_interval <min_interval> \
+    --hop_size <hop_size>
+```
+This is how dataset ASR processing is done using the command line (Chinese only):
+```
+python tools/damo_asr/cmd-asr.py "<input_folder>"
+```
+ASR processing is performed through Faster Whisper (ASR annotation for languages other than Chinese)
+
+(no progress bars; GPU performance may cause time delays):
+```
+python ./tools/damo_asr/WhisperASR.py -i <input_folder> -o <output_folder> -f <output_filename> -l <language>
+```
+A custom save path for the output list file is supported.
 ## Credits
+
+
 Special thanks to the following projects and contributors:
 - [ar-vits](https://github.com/innnky/ar-vits)
diff --git a/requirements.txt b/requirements.txt
index 4ab0e05..fae6198 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,7 +9,7 @@ gradio_client==0.8.1
 ffmpeg-python
 onnxruntime
 tqdm
-funasr>=1.0.0
+funasr==1.0.0
 cn2an
 pypinyin
 pyopenjtalk
@@ -24,3 +24,4 @@ psutil
 jieba_fast
 jieba
 LangSegment
+Faster_Whisper
\ No newline at end of file
diff --git a/tools/damo_asr/WhisperASR.py b/tools/damo_asr/WhisperASR.py
new file mode 100644
index 0000000..3b0a946
--- /dev/null
+++ b/tools/damo_asr/WhisperASR.py
@@ -0,0 +1,42 @@
+import os
+import argparse
+os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
+from glob import glob
+from faster_whisper import WhisperModel
+
+def main(input_folder, output_folder, output_filename, language):
+    # Load the large-v3 Faster Whisper model on the GPU with FP16 inference.
+    model = WhisperModel("large-v3", device="cuda", compute_type="float16")
+
+    output_file = os.path.join(output_folder, output_filename)
+    if not os.path.exists(output_folder):
+        os.makedirs(output_folder)
+
+    with open(output_file, 'w', encoding='utf-8') as f:
+        for file in glob(os.path.join(input_folder, '**/*.wav'), recursive=True):
+            segments, _ = model.transcribe(file, beam_size=10, vad_filter=True,
+                                           vad_parameters=dict(min_silence_duration_ms=700), language=language)
+            # Concatenate every segment so longer clips are not truncated to the first segment.
+            text = "".join(segment.text for segment in segments)
+
+            # One annotation per line: wav_path|LANGUAGE|transcription
+            result_line = f"{file}|{language.upper()}|{text}\n"
+            f.write(result_line)
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_folder", type=str, required=True,
+                        help="Path to the folder containing WAV files.")
+    parser.add_argument("-o", "--output_folder", type=str, required=True, help="Output folder to store transcriptions.")
+    parser.add_argument("-f", "--output_filename", type=str, default="transcriptions.txt", help="Name of the output text file.")
+    parser.add_argument("-l", "--language", type=str, default='zh',
+                        help="Language of the audio files, e.g. 'zh' or 'en'.")
+
+    cmd = parser.parse_args()
+
+    input_folder = cmd.input_folder
+    output_folder = cmd.output_folder
+    output_filename = cmd.output_filename
+    language = cmd.language
+
+    main(input_folder, output_folder, output_filename, language)
\ No newline at end of file
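
The new `tools/damo_asr/WhisperASR.py` writes one annotation per line in the form `wav_path|LANGUAGE|text`. Below is a minimal sketch, not part of this diff, of how such an output file could be read back for spot-checking; the helper name `read_transcriptions` and the path `output/transcriptions.txt` are illustrative assumptions (the file name is only the script's default).
```
# Minimal sketch (not part of this diff) for reading back the
# "wav_path|LANGUAGE|text" lines produced by tools/damo_asr/WhisperASR.py.
def read_transcriptions(list_path):
    """Yield (wav_path, language, text) tuples from a WhisperASR output file."""
    with open(list_path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            # Split on the first two '|' only, in case the transcription itself contains '|'.
            wav_path, language, text = line.split("|", 2)
            yield wav_path, language, text

if __name__ == "__main__":
    # "output/transcriptions.txt" is an illustrative path, not one defined by the PR.
    for wav_path, language, text in read_transcriptions("output/transcriptions.txt"):
        print(wav_path, language, text)
```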