diff --git a/finetune/accelerate_train_i2v.sh b/finetune/accelerate_train_i2v.sh
index ec3922e..fe6280c 100644
--- a/finetune/accelerate_train_i2v.sh
+++ b/finetune/accelerate_train_i2v.sh
@@ -6,7 +6,7 @@ export TOKENIZERS_PARALLELISM=false
 # Model Configuration
 MODEL_ARGS=(
     --model_path "THUDM/CogVideoX1.5-5B-I2V"
-    --model_name "cogvideox1.5-i2v"
+    --model_name "cogvideox1.5-i2v"  # ["cogvideox-i2v"]
     --model_type "i2v"
     --training_type "lora"
 )
@@ -23,7 +23,7 @@ DATA_ARGS=(
     --caption_column "prompt.txt"
     --video_column "videos.txt"
     --image_column "images.txt"
-    --train_resolution "80x768x1360"
+    --train_resolution "81x768x1360"
 )
 
 # Training Configuration
@@ -31,7 +31,7 @@ TRAIN_ARGS=(
     --train_epochs 10
     --batch_size 1
     --gradient_accumulation_steps 1
-    --mixed_precision "bf16"
+    --mixed_precision "bf16"  # ["no", "fp16"]
     --seed 42
 )
 
@@ -55,7 +55,7 @@ VALIDATION_ARGS=(
     --validation_steps 400
     --validation_prompts "prompts.txt"
     --validation_images "images.txt"
-    --gen_fps 15
+    --gen_fps 16
 )
 
 # Combine all arguments and launch training
diff --git a/finetune/accelerate_train_t2v.sh b/finetune/accelerate_train_t2v.sh
index 0d2b7f6..ce2c2bd 100644
--- a/finetune/accelerate_train_t2v.sh
+++ b/finetune/accelerate_train_t2v.sh
@@ -6,7 +6,7 @@ export TOKENIZERS_PARALLELISM=false
 # Model Configuration
 MODEL_ARGS=(
     --model_path "THUDM/CogVideoX1.5-5B"
-    --model_name "cogvideox1.5-t2v"
+    --model_name "cogvideox1.5-t2v"  # ["cogvideox-t2v"]
     --model_type "t2v"
     --training_type "lora"
 )
@@ -22,7 +22,7 @@ DATA_ARGS=(
     --data_root "/path/to/data/dir"
     --caption_column "prompt.txt"
     --video_column "videos.txt"
-    --train_resolution "80x768x1360"
+    --train_resolution "81x768x1360"
 )
 
 # Training Configuration
@@ -30,7 +30,7 @@ TRAIN_ARGS=(
     --train_epochs 10
     --batch_size 1
     --gradient_accumulation_steps 1
-    --mixed_precision "bf16"
+    --mixed_precision "bf16"  # ["no", "fp16"]
     --seed 42
 )
 
@@ -53,7 +53,7 @@ VALIDATION_ARGS=(
     --validation_dir "/path/to/validation/dir"
     --validation_steps 400
     --validation_prompts "prompts.txt"
-    --gen_fps 15
+    --gen_fps 16
 )
 
 # Combine all arguments and launch training
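
Both scripts end by flattening these argument arrays into a single accelerate launch call (the step the trailing "# Combine all arguments and launch training" comment refers to). A minimal sketch of that step follows; the entry-point name train.py is an assumption not confirmed by this diff, and the real scripts may pass additional arrays (output, checkpoint, system arguments) that this diff does not touch.

# Sketch only: combine the argument arrays shown above and hand them to
# accelerate. "train.py" is an assumed entry-point name; extra argument
# arrays not visible in this diff may also be appended in the real script.
accelerate launch train.py \
    "${MODEL_ARGS[@]}" \
    "${DATA_ARGS[@]}" \
    "${TRAIN_ARGS[@]}" \
    "${VALIDATION_ARGS[@]}"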