diff --git a/GPT_SoVITS/s2_train.py b/GPT_SoVITS/s2_train.py
index 917f762..50cb9cb 100644
--- a/GPT_SoVITS/s2_train.py
+++ b/GPT_SoVITS/s2_train.py
@@ -306,7 +306,7 @@ def train_and_evaluate(
         y_lengths,
         text,
         text_lengths,
-    ) in enumerate(tqdm(train_loader,position=rank+1,leave=(epoch==hps.train.epochs),postfix=f'epoch:{epoch}',disable=(rank!=0))):
+    ) in enumerate(tqdm(train_loader,position=rank,leave=(epoch==hps.train.epochs),postfix=f'Epoch:{epoch},Rank:{rank}',delay=1)):
         if torch.cuda.is_available():
             spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(
                 rank, non_blocking=True
diff --git a/GPT_SoVITS/utils.py b/GPT_SoVITS/utils.py
index 177eda1..bbd8a6a 100644
--- a/GPT_SoVITS/utils.py
+++ b/GPT_SoVITS/utils.py
@@ -18,7 +18,7 @@
 logging.getLogger("matplotlib").setLevel(logging.ERROR)
 
 MATPLOTLIB_FLAG = False
 
-logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
+# logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
 logger = logging
 
@@ -319,13 +319,13 @@ def check_git_hash(model_dir):
 
 def get_logger(model_dir, filename="train.log"):
     global logger
     logger = logging.getLogger(os.path.basename(model_dir))
-    logger.setLevel(logging.ERROR)
+    logger.setLevel(logging.DEBUG)
     formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
     if not os.path.exists(model_dir):
         os.makedirs(model_dir)
     h = logging.FileHandler(os.path.join(model_dir, filename))
-    h.setLevel(logging.ERROR)
+    h.setLevel(logging.DEBUG)
     h.setFormatter(formatter)
     logger.addHandler(h)
     return logger