From 8a8144e1e4f430995d8303893ffc59dccdec8b45 Mon Sep 17 00:00:00 2001
From: chenxwh
Date: Sat, 21 Sep 2024 14:26:12 +0000
Subject: [PATCH 01/15] replicate

---
 cog.yaml   | 36 ++++++++++++++++++++++
 predict.py | 87 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 123 insertions(+)
 create mode 100644 cog.yaml
 create mode 100644 predict.py

diff --git a/cog.yaml b/cog.yaml
new file mode 100644
index 0000000..47824ea
--- /dev/null
+++ b/cog.yaml
@@ -0,0 +1,36 @@
+# Configuration for Cog ⚙️
+# Reference: https://cog.run/yaml
+
+build:
+  # set to true if your model requires a GPU
+  gpu: true
+
+  # a list of ubuntu apt packages to install
+  system_packages:
+    - "libgl1-mesa-glx"
+    - "libglib2.0-0"
+
+  # python version in the form '3.11' or '3.11.4'
+  python_version: "3.11"
+
+  # a list of packages in the format <package-name>==<version>
+  python_packages:
+    - diffusers>=0.30.3
+    - accelerate>=0.34.2
+    - transformers>=4.44.2
+    - numpy==1.26.0
+    - torch>=2.4.0
+    - torchvision>=0.19.0
+    - sentencepiece>=0.2.0
+    - SwissArmyTransformer>=0.4.12
+    - imageio>=2.35.1
+    - imageio-ffmpeg>=0.5.1
+    - openai>=1.45.0
+    - moviepy>=1.0.3
+    - pillow==9.5.0
+    - pydantic==1.10.7
+  run:
+    - curl -o /usr/local/bin/pget -L "https://github.com/replicate/pget/releases/download/v0.8.2/pget_linux_x86_64" && chmod +x /usr/local/bin/pget
+
+# predict.py defines how predictions are run on your model
+predict: "predict.py:Predictor"
diff --git a/predict.py b/predict.py
new file mode 100644
index 0000000..cadeee2
--- /dev/null
+++ b/predict.py
@@ -0,0 +1,87 @@
+# Prediction interface for Cog ⚙️
+# https://cog.run/python
+
+import os
+import subprocess
+import time
+import torch
+from diffusers import CogVideoXPipeline
+from diffusers.utils import export_to_video
+from cog import BasePredictor, Input, Path
+
+
+MODEL_CACHE = "model_cache"
+MODEL_URL = (
+    f"https://weights.replicate.delivery/default/THUDM/CogVideo/{MODEL_CACHE}.tar"
+)
+os.environ["HF_DATASETS_OFFLINE"] = "1"
+os.environ["TRANSFORMERS_OFFLINE"] = "1"
+os.environ["HF_HOME"] = MODEL_CACHE
+os.environ["TORCH_HOME"] = MODEL_CACHE
+os.environ["HF_DATASETS_CACHE"] = MODEL_CACHE
+os.environ["TRANSFORMERS_CACHE"] = MODEL_CACHE
+os.environ["HUGGINGFACE_HUB_CACHE"] = MODEL_CACHE
+
+
+def download_weights(url, dest):
+    start = time.time()
+    print("downloading url: ", url)
+    print("downloading to: ", dest)
+    subprocess.check_call(["pget", "-x", url, dest], close_fds=False)
+    print("downloading took: ", time.time() - start)
+
+
+class Predictor(BasePredictor):
+    def setup(self) -> None:
+        """Load the model into memory to make running multiple predictions efficient"""
+
+        if not os.path.exists(MODEL_CACHE):
+            download_weights(MODEL_URL, MODEL_CACHE)
+
+        # model_id: THUDM/CogVideoX-5b
+        self.pipe = CogVideoXPipeline.from_pretrained(
+            MODEL_CACHE,
+            torch_dtype=torch.bfloat16,
+        ).to("cuda")
+
+        self.pipe.enable_model_cpu_offload()
+        self.pipe.vae.enable_tiling()
+
+    def predict(
+        self,
+        prompt: str = Input(
+            description="Input prompt",
+            default="A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. 
The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical atmosphere of this unique musical performance.", + ), + num_inference_steps: int = Input( + description="Number of denoising steps", ge=1, le=500, default=50 + ), + guidance_scale: float = Input( + description="Scale for classifier-free guidance", ge=1, le=20, default=6 + ), + num_frames: int = Input( + description="Number of frames for the output video", default=49 + ), + seed: int = Input( + description="Random seed. Leave blank to randomize the seed", default=None + ), + ) -> Path: + """Run a single prediction on the model""" + + if seed is None: + seed = int.from_bytes(os.urandom(2), "big") + print(f"Using seed: {seed}") + + video = self.pipe( + prompt=prompt, + num_videos_per_prompt=1, + num_inference_steps=num_inference_steps, + num_frames=num_frames, + guidance_scale=guidance_scale, + generator=torch.Generator(device="cuda").manual_seed(seed), + ).frames[0] + + out_path = "/tmp/out.mp4" + + export_to_video(video, out_path, fps=8) + return Path(out_path) From bc514dc29e9bfce6625c5bb59b676262ef551808 Mon Sep 17 00:00:00 2001 From: chenxwh Date: Sat, 21 Sep 2024 14:32:12 +0000 Subject: [PATCH 02/15] replicate demo link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 802ad2f..2214dc3 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@

-Experience the CogVideoX-5B model online at 🤗 Huggingface Space or 🤖 ModelScope Space +Experience the CogVideoX-5B model online at 🤗 Huggingface Space or 🤖 ModelScope Space or Replicate

📚 View the paper and user guide From 4fb7ef15bf22713c20aa601f207445814550aa16 Mon Sep 17 00:00:00 2001 From: chenxwh Date: Sat, 21 Sep 2024 19:54:30 +0000 Subject: [PATCH 03/15] add i2v --- cog.yaml | 3 +- predict_i2v.py | 89 ++++++++++++++++++++++++++++++++++++ predict.py => predict_t2v.py | 0 3 files changed, 91 insertions(+), 1 deletion(-) create mode 100644 predict_i2v.py rename predict.py => predict_t2v.py (100%) diff --git a/cog.yaml b/cog.yaml index 47824ea..2de2ddb 100644 --- a/cog.yaml +++ b/cog.yaml @@ -33,4 +33,5 @@ build: - curl -o /usr/local/bin/pget -L "https://github.com/replicate/pget/releases/download/v0.8.2/pget_linux_x86_64" && chmod +x /usr/local/bin/pget # predict.py defines how predictions are run on your model -predict: "predict.py:Predictor" +predict: "predict_t2v.py:Predictor" +# predict: "predict_i2v.py:Predictor" diff --git a/predict_i2v.py b/predict_i2v.py new file mode 100644 index 0000000..5e45796 --- /dev/null +++ b/predict_i2v.py @@ -0,0 +1,89 @@ +# Prediction interface for Cog ⚙️ +# https://cog.run/python + +import os +import subprocess +import time +import torch +from diffusers import CogVideoXImageToVideoPipeline +from diffusers.utils import export_to_video, load_image +from cog import BasePredictor, Input, Path + + +MODEL_CACHE = "model_cache_i2v" +MODEL_URL = ( + f"https://weights.replicate.delivery/default/THUDM/CogVideo/{MODEL_CACHE}.tar" +) +os.environ["HF_DATASETS_OFFLINE"] = "1" +os.environ["TRANSFORMERS_OFFLINE"] = "1" +os.environ["HF_HOME"] = MODEL_CACHE +os.environ["TORCH_HOME"] = MODEL_CACHE +os.environ["HF_DATASETS_CACHE"] = MODEL_CACHE +os.environ["TRANSFORMERS_CACHE"] = MODEL_CACHE +os.environ["HUGGINGFACE_HUB_CACHE"] = MODEL_CACHE + + +def download_weights(url, dest): + start = time.time() + print("downloading url: ", url) + print("downloading to: ", dest) + subprocess.check_call(["pget", "-x", url, dest], close_fds=False) + print("downloading took: ", time.time() - start) + + +class Predictor(BasePredictor): + def setup(self) -> None: + """Load the model into memory to make running multiple predictions efficient""" + + if not os.path.exists(MODEL_CACHE): + download_weights(MODEL_URL, MODEL_CACHE) + + # model_id: THUDM/CogVideoX-5b-I2V + self.pipe = CogVideoXImageToVideoPipeline.from_pretrained( + MODEL_CACHE, torch_dtype=torch.bfloat16 + ).to("cuda") + + self.pipe.enable_model_cpu_offload() + self.pipe.vae.enable_tiling() + + def predict( + self, + prompt: str = Input( + description="Input prompt", default="Starry sky slowly rotating." + ), + image: Path = Input(description="Input image"), + num_inference_steps: int = Input( + description="Number of denoising steps", ge=1, le=500, default=50 + ), + guidance_scale: float = Input( + description="Scale for classifier-free guidance", ge=1, le=20, default=6 + ), + num_frames: int = Input( + description="Number of frames for the output video", default=49 + ), + seed: int = Input( + description="Random seed. 
Leave blank to randomize the seed", default=None + ), + ) -> Path: + """Run a single prediction on the model""" + + if seed is None: + seed = int.from_bytes(os.urandom(2), "big") + print(f"Using seed: {seed}") + + img = load_image(image=str(image)) + + video = self.pipe( + prompt=prompt, + image=img, + num_videos_per_prompt=1, + num_inference_steps=num_inference_steps, + num_frames=num_frames, + guidance_scale=guidance_scale, + generator=torch.Generator(device="cuda").manual_seed(seed), + ).frames[0] + + out_path = "/tmp/out.mp4" + + export_to_video(video, out_path, fps=8) + return Path(out_path) diff --git a/predict.py b/predict_t2v.py similarity index 100% rename from predict.py rename to predict_t2v.py From 3e5841dbbbf52e21771d7e348b2fd0018d530b2a Mon Sep 17 00:00:00 2001 From: sculmh <33420450+sculmh@users.noreply.github.com> Date: Mon, 23 Sep 2024 16:44:27 +0800 Subject: [PATCH 04/15] fix: unexpected num_frames during v2v inference --- inference/cli_demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inference/cli_demo.py b/inference/cli_demo.py index 9f5263e..6e1dbf3 100644 --- a/inference/cli_demo.py +++ b/inference/cli_demo.py @@ -133,7 +133,7 @@ def generate_video( video=video, # The path of the video to be used as the background of the video num_videos_per_prompt=num_videos_per_prompt, num_inference_steps=num_inference_steps, - num_frames=49, + # num_frames=49, use_dynamic_cfg=True, guidance_scale=guidance_scale, generator=torch.Generator().manual_seed(seed), # Set the seed for reproducibility From b459bd2b00f40570ef51611878ae0ae0a2bf66d8 Mon Sep 17 00:00:00 2001 From: sculmh <33420450+sculmh@users.noreply.github.com> Date: Mon, 23 Sep 2024 16:54:59 +0800 Subject: [PATCH 05/15] Update cli_demo.py --- inference/cli_demo.py | 1 - 1 file changed, 1 deletion(-) diff --git a/inference/cli_demo.py b/inference/cli_demo.py index 6e1dbf3..c575feb 100644 --- a/inference/cli_demo.py +++ b/inference/cli_demo.py @@ -133,7 +133,6 @@ def generate_video( video=video, # The path of the video to be used as the background of the video num_videos_per_prompt=num_videos_per_prompt, num_inference_steps=num_inference_steps, - # num_frames=49, use_dynamic_cfg=True, guidance_scale=guidance_scale, generator=torch.Generator().manual_seed(seed), # Set the seed for reproducibility From f7f8c35d9f8ec060a3e170a03a84b347c60b6445 Mon Sep 17 00:00:00 2001 From: sculmh <33420450+sculmh@users.noreply.github.com> Date: Mon, 23 Sep 2024 19:32:02 +0800 Subject: [PATCH 06/15] Update cli_demo.py --- inference/cli_demo.py | 1 + 1 file changed, 1 insertion(+) diff --git a/inference/cli_demo.py b/inference/cli_demo.py index c575feb..9f5263e 100644 --- a/inference/cli_demo.py +++ b/inference/cli_demo.py @@ -133,6 +133,7 @@ def generate_video( video=video, # The path of the video to be used as the background of the video num_videos_per_prompt=num_videos_per_prompt, num_inference_steps=num_inference_steps, + num_frames=49, use_dynamic_cfg=True, guidance_scale=guidance_scale, generator=torch.Generator().manual_seed(seed), # Set the seed for reproducibility From 51162346b697fc4de3689aa91f62d8a484c3690e Mon Sep 17 00:00:00 2001 From: sculmh <33420450+sculmh@users.noreply.github.com> Date: Mon, 23 Sep 2024 19:34:31 +0800 Subject: [PATCH 07/15] Update cli_demo.py --- inference/cli_demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inference/cli_demo.py b/inference/cli_demo.py index 9f5263e..6e1dbf3 100644 --- a/inference/cli_demo.py +++ b/inference/cli_demo.py @@ -133,7 +133,7 @@ def 
generate_video( video=video, # The path of the video to be used as the background of the video num_videos_per_prompt=num_videos_per_prompt, num_inference_steps=num_inference_steps, - num_frames=49, + # num_frames=49, use_dynamic_cfg=True, guidance_scale=guidance_scale, generator=torch.Generator().manual_seed(seed), # Set the seed for reproducibility From 11e0cce3c6b245a15c7df35abbf19310da445086 Mon Sep 17 00:00:00 2001 From: Chenxi Date: Wed, 25 Sep 2024 12:30:10 +0100 Subject: [PATCH 08/15] Rename cog.yaml to tools/replicate/cog.yaml --- cog.yaml => tools/replicate/cog.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename cog.yaml => tools/replicate/cog.yaml (100%) diff --git a/cog.yaml b/tools/replicate/cog.yaml similarity index 100% rename from cog.yaml rename to tools/replicate/cog.yaml From e027ebd69f08f9a04cc0c0cf66c89f17469d952f Mon Sep 17 00:00:00 2001 From: Chenxi Date: Wed, 25 Sep 2024 12:30:56 +0100 Subject: [PATCH 09/15] Rename predict_i2v.py to tools/replicate/predict_i2v.py --- predict_i2v.py => tools/replicate/predict_i2v.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename predict_i2v.py => tools/replicate/predict_i2v.py (100%) diff --git a/predict_i2v.py b/tools/replicate/predict_i2v.py similarity index 100% rename from predict_i2v.py rename to tools/replicate/predict_i2v.py From a2359dade4b3e88751dcfdb379bad29ebf2b72db Mon Sep 17 00:00:00 2001 From: Chenxi Date: Wed, 25 Sep 2024 12:31:11 +0100 Subject: [PATCH 10/15] Rename predict_t2v.py to tools/replicate/predict_t2v.py --- predict_t2v.py => tools/replicate/predict_t2v.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename predict_t2v.py => tools/replicate/predict_t2v.py (100%) diff --git a/predict_t2v.py b/tools/replicate/predict_t2v.py similarity index 100% rename from predict_t2v.py rename to tools/replicate/predict_t2v.py From 898a7a8d9f7b267718b6205119957f18d8fe981a Mon Sep 17 00:00:00 2001 From: Chenxi Date: Wed, 25 Sep 2024 12:31:30 +0100 Subject: [PATCH 11/15] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 880d8b9..6e8a37a 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@

-Experience the CogVideoX-5B model online at 🤗 Huggingface Space or 🤖 ModelScope Space or Replicate +Experience the CogVideoX-5B model online at 🤗 Huggingface Space or 🤖 ModelScope Space

📚 View the paper and user guide From 59b36555d068807da0b59fdcbf3ecdda93a57f47 Mon Sep 17 00:00:00 2001 From: Chenxi Date: Wed, 25 Sep 2024 12:38:48 +0100 Subject: [PATCH 12/15] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 6e8a37a..e3b7ce9 100644 --- a/README.md +++ b/README.md @@ -22,6 +22,7 @@ Experience the CogVideoX-5B model online at \ + --ring_degree 2 --use_cfg_parallel --height 480 --width 720 --num_frames 9 \ + --prompt 'A small dog.' + +""" + +import time +import torch +import torch.distributed +from diffusers import AutoencoderKLTemporalDecoder +from xfuser import xFuserCogVideoXPipeline, xFuserArgs +from xfuser.config import FlexibleArgumentParser +from xfuser.core.distributed import ( + get_world_group, + get_data_parallel_rank, + get_data_parallel_world_size, + get_runtime_state, + is_dp_last_group, +) +from diffusers.utils import export_to_video + + +def main(): + parser = FlexibleArgumentParser(description="xFuser Arguments") + args = xFuserArgs.add_cli_args(parser).parse_args() + engine_args = xFuserArgs.from_cli_args(args) + + # Check if ulysses_degree is valid + num_heads = 30 + if engine_args.ulysses_degree > 0 and num_heads % engine_args.ulysses_degree != 0: + raise ValueError( + f"ulysses_degree ({engine_args.ulysses_degree}) must be a divisor of the number of heads ({num_heads})" + ) + + engine_config, input_config = engine_args.create_config() + local_rank = get_world_group().local_rank + + pipe = xFuserCogVideoXPipeline.from_pretrained( + pretrained_model_name_or_path=engine_config.model_config.model, + engine_config=engine_config, + torch_dtype=torch.bfloat16, + ) + if args.enable_sequential_cpu_offload: + pipe.enable_model_cpu_offload(gpu_id=local_rank) + pipe.vae.enable_tiling() + else: + device = torch.device(f"cuda:{local_rank}") + pipe = pipe.to(device) + + torch.cuda.reset_peak_memory_stats() + start_time = time.time() + + output = pipe( + height=input_config.height, + width=input_config.width, + num_frames=input_config.num_frames, + prompt=input_config.prompt, + num_inference_steps=input_config.num_inference_steps, + generator=torch.Generator(device="cuda").manual_seed(input_config.seed), + guidance_scale=6, + ).frames[0] + + end_time = time.time() + elapsed_time = end_time - start_time + peak_memory = torch.cuda.max_memory_allocated(device=f"cuda:{local_rank}") + + parallel_info = ( + f"dp{engine_args.data_parallel_degree}_cfg{engine_config.parallel_config.cfg_degree}_" + f"ulysses{engine_args.ulysses_degree}_ring{engine_args.ring_degree}_" + f"tp{engine_args.tensor_parallel_degree}_" + f"pp{engine_args.pipefusion_parallel_degree}_patch{engine_args.num_pipeline_patch}" + ) + if is_dp_last_group(): + world_size = get_data_parallel_world_size() + resolution = f"{input_config.width}x{input_config.height}" + output_filename = f"results/cogvideox_{parallel_info}_{resolution}.mp4" + export_to_video(output, output_filename, fps=8) + print(f"output saved to {output_filename}") + + if get_world_group().rank == get_world_group().world_size - 1: + print(f"epoch time: {elapsed_time:.2f} sec, memory: {peak_memory/1e9} GB") + get_runtime_state().destory_distributed_env() + + +if __name__ == "__main__": + main() diff --git a/tools/parallel_inference/run.sh b/tools/parallel_inference/run.sh new file mode 100644 index 0000000..7f9d5a8 --- /dev/null +++ b/tools/parallel_inference/run.sh @@ -0,0 +1,51 @@ +set -x + +export PYTHONPATH=$PWD:$PYTHONPATH + +# Select the model type +# The model is downloaded to a 
specified location on disk,
+# or you can simply use the model's ID on Hugging Face,
+# which will then be downloaded to the default cache path on Hugging Face.
+
+export MODEL_TYPE="CogVideoX"
+# Configuration for different model types
+# script, model_id, inference_step
+declare -A MODEL_CONFIGS=(
+    ["CogVideoX"]="parallel_inference_xdit.py /cfs/dit/CogVideoX-2b 20"
+)
+
+if [[ -v MODEL_CONFIGS[$MODEL_TYPE] ]]; then
+    IFS=' ' read -r SCRIPT MODEL_ID INFERENCE_STEP <<< "${MODEL_CONFIGS[$MODEL_TYPE]}"
+    export SCRIPT MODEL_ID INFERENCE_STEP
+else
+    echo "Invalid MODEL_TYPE: $MODEL_TYPE"
+    exit 1
+fi
+
+mkdir -p ./results
+
+# task args
+if [ "$MODEL_TYPE" = "CogVideoX" ]; then
+  TASK_ARGS="--height 480 --width 720 --num_frames 9"
+fi
+
+# CogVideoX asserts sp_degree == ulysses_degree*ring_degree <= 2. Also, do not set the pipefusion degree.
+if [ "$MODEL_TYPE" = "CogVideoX" ]; then
+N_GPUS=4
+PARALLEL_ARGS="--ulysses_degree 2 --ring_degree 1"
+CFG_ARGS="--use_cfg_parallel"
+fi
+
+
+torchrun --nproc_per_node=$N_GPUS ./$SCRIPT \
+--model $MODEL_ID \
+$PARALLEL_ARGS \
+$TASK_ARGS \
+$PIPEFUSION_ARGS \
+$OUTPUT_ARGS \
+--num_inference_steps $INFERENCE_STEP \
+--warmup_steps 0 \
+--prompt "A small dog." \
+$CFG_ARGS \
+$PARALLLEL_VAE \
+$COMPILE_FLAG

From 3304ea72756863483d158167ef023894873103f7 Mon Sep 17 00:00:00 2001
From: Xibo Sun
Date: Thu, 26 Sep 2024 14:12:26 +0800
Subject: [PATCH 14/15] fix the example scripts in parallel_inference_xdit.py

---
 .../parallel_inference_xdit.py | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/tools/parallel_inference/parallel_inference_xdit.py b/tools/parallel_inference/parallel_inference_xdit.py
index e10f385..e4caf33 100644
--- a/tools/parallel_inference/parallel_inference_xdit.py
+++ b/tools/parallel_inference/parallel_inference_xdit.py
@@ -7,16 +7,20 @@
 https://github.com/xdit-project/xDiT/blob/main/examples/cogvideox_example.py
 By using this code, the inference process is parallelized on multiple GPUs, and thus speeded up.
-You can also use the run.sh file in the same folder to automate running this
-code for batch generation of videos.
-
 Usage:
 1. pip install xfuser
-2. run the following command to generate video
-torchrun --nproc_per_node=4 cogvideox_xdit.py --model <cogvideox-model-path> \
-    --ring_degree 2 --use_cfg_parallel --height 480 --width 720 --num_frames 9 \
+2. mkdir results
+3. run the following command to generate video
+torchrun --nproc_per_node=4 parallel_inference_xdit.py \
+    --model <cogvideox-model-path> --ulysses_degree 1 --ring_degree 2 \
+    --use_cfg_parallel --height 480 --width 720 --num_frames 9 \
     --prompt 'A small dog.'
+You can also use the run.sh file in the same folder to automate running this
+code for batch generation of videos, by running:
+
+sh ./run.sh
+
 """

 import time

From d53b5272d15079ebfa731569e5816c30926c02c6 Mon Sep 17 00:00:00 2001
From: therealfish
Date: Thu, 26 Sep 2024 10:07:27 -0400
Subject: [PATCH 15/15] Update requirements.txt

ModuleNotFoundError: No module named 'skvideo'
added: scikit-video latest
---
 requirements.txt | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 02c0987..a12fc67 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,4 +11,5 @@ imageio>=2.35.1
 imageio-ffmpeg>=0.5.1
 openai>=1.45.0
 moviepy>=1.0.3
-pillow==9.5.0
\ No newline at end of file
+pillow==9.5.0
+scikit-video
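
For reference, a minimal way to exercise the Replicate packaging added in these patches is Cog's command-line interface. The sketch below is illustrative and not part of the patch series: it assumes Cog is installed, that the commands are run from the directory containing cog.yaml (tools/replicate/ after patches 08-10), and that the weight tarballs behind MODEL_URL are reachable; the prompt strings and input.jpg are placeholder values.

    # Text-to-video: predict_t2v.py:Predictor is the default entry point in cog.yaml
    cog predict -i prompt="A panda playing an acoustic guitar in a bamboo forest"

    # Image-to-video: switch the predict: line in cog.yaml to "predict_i2v.py:Predictor",
    # then pass the conditioning image using Cog's @-file syntax
    cog predict -i prompt="Starry sky slowly rotating." -i image=@input.jpg

On the first run, setup() fetches the weight tarball with pget into the local model cache, so the initial prediction takes considerably longer than subsequent ones.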