Merge pull request #334 from chenxwh/main

Add Replicate demo and API
Yuxuan.Zhang 2024-09-26 12:52:50 +08:00 committed by GitHub
commit dcb6795b20
4 changed files with 214 additions and 0 deletions

README.md

@@ -22,6 +22,7 @@ Experience the CogVideoX-5B model online at <a href="https://huggingface.co/spac
## Project Updates
- 🔥🔥 **News**: ```2024/9/25```: The CogVideoX web demo is now available on Replicate. Try the text-to-video model **CogVideoX-5B** here [![Replicate](https://replicate.com/chenxwh/cogvideox-t2v/badge)](https://replicate.com/chenxwh/cogvideox-t2v) and the image-to-video model **CogVideoX-5B-I2V** here [![Replicate](https://replicate.com/chenxwh/cogvideox-i2v/badge)](https://replicate.com/chenxwh/cogvideox-i2v) (a minimal API-call sketch follows this update list).
- 🔥🔥 **News**: ```2024/9/19```: We have open-sourced the CogVideoX series image-to-video model **CogVideoX-5B-I2V**.
This model can take an image as a background input and, guided by a text prompt, generate a video, offering greater
controllability. With this, the CogVideoX series models now support three tasks: text-to-video generation, video
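
The hosted demo can also be called programmatically. Below is a minimal sketch using the official `replicate` Python client; the input names mirror the `Input` fields declared in `predict_t2v.py` added by this PR, while the example prompt and the exact type of the returned value (URL string vs. file-like object, depending on client version) are assumptions rather than guarantees.

```python
# Minimal sketch: invoking the hosted CogVideoX-5B text-to-video model.
# Assumes `pip install replicate` and REPLICATE_API_TOKEN set in the environment.
import replicate

output = replicate.run(
    "chenxwh/cogvideox-t2v",  # model behind the badge above
    input={
        "prompt": "A panda strumming a tiny guitar in a bamboo forest",  # example prompt
        "num_inference_steps": 50,  # defaults taken from predict_t2v.py
        "guidance_scale": 6,
        "num_frames": 49,
    },
)
print(output)  # typically a URL (or file-like object) pointing to the generated MP4
```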

tools/replicate/cog.yaml Normal file

@@ -0,0 +1,37 @@
# Configuration for Cog ⚙️
# Reference: https://cog.run/yaml

build:
  # set to true if your model requires a GPU
  gpu: true

  # a list of ubuntu apt packages to install
  system_packages:
    - "libgl1-mesa-glx"
    - "libglib2.0-0"

  # python version in the form '3.11' or '3.11.4'
  python_version: "3.11"

  # a list of packages in the format <package-name>==<version>
  python_packages:
    - diffusers>=0.30.3
    - accelerate>=0.34.2
    - transformers>=4.44.2
    - numpy==1.26.0
    - torch>=2.4.0
    - torchvision>=0.19.0
    - sentencepiece>=0.2.0
    - SwissArmyTransformer>=0.4.12
    - imageio>=2.35.1
    - imageio-ffmpeg>=0.5.1
    - openai>=1.45.0
    - moviepy>=1.0.3
    - pillow==9.5.0
    - pydantic==1.10.7

  run:
    - curl -o /usr/local/bin/pget -L "https://github.com/replicate/pget/releases/download/v0.8.2/pget_linux_x86_64" && chmod +x /usr/local/bin/pget

# predict.py defines how predictions are run on your model
predict: "predict_t2v.py:Predictor"
# predict: "predict_i2v.py:Predictor"
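
Only one `predict` entry can be active at a time: as configured above, Cog builds the text-to-video predictor, and swapping in the commented line selects the image-to-video predictor instead. With Cog installed and a GPU available, either predictor can be exercised locally from `tools/replicate/` with the usual CLI invocation, e.g. `cog predict -i prompt="..."` (adding `-i image=@your_image.jpg` for the I2V variant); the model weights are fetched on first run by the `pget` binary installed in the `run:` step. The prompt and file name here are placeholders, not part of this PR.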

tools/replicate/predict_i2v.py Normal file

@@ -0,0 +1,89 @@
# Prediction interface for Cog ⚙️
# https://cog.run/python

import os
import subprocess
import time

import torch
from diffusers import CogVideoXImageToVideoPipeline
from diffusers.utils import export_to_video, load_image
from cog import BasePredictor, Input, Path

MODEL_CACHE = "model_cache_i2v"
MODEL_URL = (
    f"https://weights.replicate.delivery/default/THUDM/CogVideo/{MODEL_CACHE}.tar"
)
os.environ["HF_DATASETS_OFFLINE"] = "1"
os.environ["TRANSFORMERS_OFFLINE"] = "1"
os.environ["HF_HOME"] = MODEL_CACHE
os.environ["TORCH_HOME"] = MODEL_CACHE
os.environ["HF_DATASETS_CACHE"] = MODEL_CACHE
os.environ["TRANSFORMERS_CACHE"] = MODEL_CACHE
os.environ["HUGGINGFACE_HUB_CACHE"] = MODEL_CACHE


def download_weights(url, dest):
    start = time.time()
    print("downloading url: ", url)
    print("downloading to: ", dest)
    subprocess.check_call(["pget", "-x", url, dest], close_fds=False)
    print("downloading took: ", time.time() - start)


class Predictor(BasePredictor):
    def setup(self) -> None:
        """Load the model into memory to make running multiple predictions efficient"""
        if not os.path.exists(MODEL_CACHE):
            download_weights(MODEL_URL, MODEL_CACHE)

        # model_id: THUDM/CogVideoX-5b-I2V
        self.pipe = CogVideoXImageToVideoPipeline.from_pretrained(
            MODEL_CACHE, torch_dtype=torch.bfloat16
        ).to("cuda")

        self.pipe.enable_model_cpu_offload()
        self.pipe.vae.enable_tiling()

    def predict(
        self,
        prompt: str = Input(
            description="Input prompt", default="Starry sky slowly rotating."
        ),
        image: Path = Input(description="Input image"),
        num_inference_steps: int = Input(
            description="Number of denoising steps", ge=1, le=500, default=50
        ),
        guidance_scale: float = Input(
            description="Scale for classifier-free guidance", ge=1, le=20, default=6
        ),
        num_frames: int = Input(
            description="Number of frames for the output video", default=49
        ),
        seed: int = Input(
            description="Random seed. Leave blank to randomize the seed", default=None
        ),
    ) -> Path:
        """Run a single prediction on the model"""
        if seed is None:
            seed = int.from_bytes(os.urandom(2), "big")
        print(f"Using seed: {seed}")

        img = load_image(image=str(image))

        video = self.pipe(
            prompt=prompt,
            image=img,
            num_videos_per_prompt=1,
            num_inference_steps=num_inference_steps,
            num_frames=num_frames,
            guidance_scale=guidance_scale,
            generator=torch.Generator(device="cuda").manual_seed(seed),
        ).frames[0]

        out_path = "/tmp/out.mp4"

        export_to_video(video, out_path, fps=8)
        return Path(out_path)
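
The predictor above additionally takes an `image` input. A minimal sketch of calling the published `chenxwh/cogvideox-i2v` model through the Replicate Python client is shown below; the local file name and prompt are placeholders, and passing an open file handle relies on the client uploading it for you, which may vary with client version.

```python
# Minimal sketch: invoking the hosted CogVideoX-5B-I2V image-to-video model.
# Assumes `pip install replicate`, REPLICATE_API_TOKEN, and a local input image.
import replicate

with open("background.jpg", "rb") as image_file:  # placeholder file name
    output = replicate.run(
        "chenxwh/cogvideox-i2v",  # model behind the I2V badge in the README update
        input={
            "image": image_file,
            "prompt": "Starry sky slowly rotating.",  # default prompt from predict_i2v.py
            "num_inference_steps": 50,
            "guidance_scale": 6,
            "num_frames": 49,
        },
    )
print(output)  # typically a URL (or file-like object) pointing to the generated MP4
```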

tools/replicate/predict_t2v.py Normal file

@@ -0,0 +1,87 @@
# Prediction interface for Cog ⚙️
# https://cog.run/python

import os
import subprocess
import time

import torch
from diffusers import CogVideoXPipeline
from diffusers.utils import export_to_video
from cog import BasePredictor, Input, Path

MODEL_CACHE = "model_cache"
MODEL_URL = (
    f"https://weights.replicate.delivery/default/THUDM/CogVideo/{MODEL_CACHE}.tar"
)
os.environ["HF_DATASETS_OFFLINE"] = "1"
os.environ["TRANSFORMERS_OFFLINE"] = "1"
os.environ["HF_HOME"] = MODEL_CACHE
os.environ["TORCH_HOME"] = MODEL_CACHE
os.environ["HF_DATASETS_CACHE"] = MODEL_CACHE
os.environ["TRANSFORMERS_CACHE"] = MODEL_CACHE
os.environ["HUGGINGFACE_HUB_CACHE"] = MODEL_CACHE


def download_weights(url, dest):
    start = time.time()
    print("downloading url: ", url)
    print("downloading to: ", dest)
    subprocess.check_call(["pget", "-x", url, dest], close_fds=False)
    print("downloading took: ", time.time() - start)


class Predictor(BasePredictor):
    def setup(self) -> None:
        """Load the model into memory to make running multiple predictions efficient"""
        if not os.path.exists(MODEL_CACHE):
            download_weights(MODEL_URL, MODEL_CACHE)

        # model_id: THUDM/CogVideoX-5b
        self.pipe = CogVideoXPipeline.from_pretrained(
            MODEL_CACHE,
            torch_dtype=torch.bfloat16,
        ).to("cuda")

        self.pipe.enable_model_cpu_offload()
        self.pipe.vae.enable_tiling()

    def predict(
        self,
        prompt: str = Input(
            description="Input prompt",
            default="A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical atmosphere of this unique musical performance.",
        ),
        num_inference_steps: int = Input(
            description="Number of denoising steps", ge=1, le=500, default=50
        ),
        guidance_scale: float = Input(
            description="Scale for classifier-free guidance", ge=1, le=20, default=6
        ),
        num_frames: int = Input(
            description="Number of frames for the output video", default=49
        ),
        seed: int = Input(
            description="Random seed. Leave blank to randomize the seed", default=None
        ),
    ) -> Path:
        """Run a single prediction on the model"""
        if seed is None:
            seed = int.from_bytes(os.urandom(2), "big")
        print(f"Using seed: {seed}")

        video = self.pipe(
            prompt=prompt,
            num_videos_per_prompt=1,
            num_inference_steps=num_inference_steps,
            num_frames=num_frames,
            guidance_scale=guidance_scale,
            generator=torch.Generator(device="cuda").manual_seed(seed),
        ).frames[0]

        out_path = "/tmp/out.mp4"

        export_to_video(video, out_path, fps=8)
        return Path(out_path)