Mirror of https://github.com/THUDM/CogVideo.git
commit 4d94efa6d4 (parent 484aacf5f7)

    rewrite predictor
cog.yaml (4 changed lines)
@@ -22,10 +22,10 @@ build:
 
   run:
     - "mkdir -p /sharefs/cogview-new; cd /sharefs/cogview-new; wget https://models.nmb.ai/cogvideo/cogvideo-stage1.tar.gz -O - | tar xz"
     - "cd /sharefs/cogview-new; wget https://models.nmb.ai/cogvideo/cogvideo-stage2.tar.gz -O - | tar xz"
-    #- "cd /sharefs/cogview-new; wget https://models.nmb.ai/cogview2/cogview2-dsr.tar.gz -O - | tar xz"
     - "mkdir -p /root/.icetk_models; wget -O /root/.icetk_models/ice_text.model https://models.nmb.ai/cogvideo/ice_text.model"
     - "mkdir -p /root/.tcetk_models; wget -O /root/.icetk_models/ice_image.pt https://models.nmb.ai/cogvideo/ice_image.pt"
+    #- "cd /sharefs/cogview-new; wget https://models.nmb.ai/cogview2/cogview2-dsr.tar.gz -O - | tar xz"
 
 predict: "predict.py:Predictor"
 image: "r8.im/nightmareai/cogvideo"
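Each run: entry executes once at image-build time, so the checkpoints are baked into the Docker image; the change above only moves the commented-out CogView2 super-resolution download below the icetk downloads. The wget-pipe-tar idiom streams each archive straight into extraction without keeping the tarball on disk. A rough stdlib equivalent of one step, for illustration only (URL and destination taken from the lines above):

    import os
    import tarfile
    import urllib.request

    dest = "/sharefs/cogview-new"
    os.makedirs(dest, exist_ok=True)  # mkdir -p
    url = "https://models.nmb.ai/cogvideo/cogvideo-stage1.tar.gz"
    with urllib.request.urlopen(url) as resp:
        # streaming mode "r|gz" reads the compressed stream directly,
        # like "wget -O - | tar xz", without writing the archive to disk
        with tarfile.open(fileobj=resp, mode="r|gz") as tar:
            tar.extractall(dest)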
predict.py (186 changed lines)
@@ -2,7 +2,7 @@ import os
 from random import randint
 import subprocess
 import tempfile
-import glob
+import random
 import typing
 from typing_extensions import Self
 from deep_translator import GoogleTranslator
@@ -468,18 +468,12 @@ class Predictor(BasePredictor):
 
         self.generate_frame_num = 5
 
-        invalid_slices = [slice(tokenizer.num_image_tokens, None)]
-        self.strategy_cogview2 = CoglmStrategy(invalid_slices,
-                                               temperature=1.0, top_k=16)
-        self.strategy_cogvideo = CoglmStrategy(invalid_slices,
-                                               temperature=args.temperature, top_k=args.top_k,
-                                               temperature2=args.coglm_temperature2)
-
 
+    @torch.no_grad()
     def predict(
         self,
         prompt: str = Input(description="Prompt"),
-        seed: int = Input(description="Seed (leave empty to use a random seed)", default=None, le=(2**32 - 1), ge=0),
+        seed: int = Input(description="Seed (-1 to use a random seed)", default=-1, le=(2**32 - 1), ge=-1),
         translate: bool = Input(
             description="Translate prompt from English to Simplified Chinese (required if not entering Chinese text)",
             default=True,
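The seed input switches from an optional integer (default=None) to a -1 sentinel with ge=-1, so the field stays a plain bounded int in Cog's input schema while still meaning "pick one for me" when left alone. A minimal sketch of the pattern (the helper name is ours, not from this commit):

    from random import randint

    def resolve_seed(seed: int) -> int:
        # -1 is the "no seed supplied" sentinel: draw a real seed so the
        # run stays reproducible once the chosen value is recorded.
        if seed == -1:
            seed = randint(0, 2**32)
        return seed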
@@ -491,53 +485,52 @@ class Predictor(BasePredictor):
     ) -> typing.Iterator[Path]:
         if translate:
             prompt = self.translator.translate(prompt.strip())
 
+        if seed == -1:
+            seed = randint(0, 2**32)
+
+        self.args.seed = seed
+        self.args.use_guidance_stage1 = use_guidance
+        self.prompt = prompt
+        self.args.both_stages = both_stages
+
+        yield [Path(file) for file in self.run()]
+
+        logging.debug("complete, exiting")
+        return
+
+    def run(self):
+        invalid_slices = [slice(tokenizer.num_image_tokens, None)]
+        strategy_cogview2 = CoglmStrategy(invalid_slices,
+                                          temperature=1.0, top_k=16)
+        strategy_cogvideo = CoglmStrategy(invalid_slices,
+                                          temperature=self.args.temperature, top_k=self.args.top_k,
+                                          temperature2=self.args.coglm_temperature2)
+
+        torch.manual_seed(self.args.seed)
+        random.seed(self.args.seed)
         workdir = tempfile.mkdtemp()
         os.makedirs(f"{workdir}/output/stage1", exist_ok=True)
         os.makedirs(f"{workdir}/output/stage2", exist_ok=True)
 
-        if seed is None:
-            seed = randint(0, 2**32)
-
-        self.args.seed = seed
-        self.args.use_guidance_stage1 = use_guidance
-        with torch.no_grad():
-            move_start_time = time.time()
-            logging.debug("moving stage 2 model to cpu")
-            self.model_stage2 = self.model_stage2.cpu()
-            torch.cuda.empty_cache()
-            logging.debug("moving stage 1 model to cuda")
-            self.model_stage1 = self.model_stage1.cuda()
-            logging.debug("moving in model1 takes time: {:.2f}".format(time.time()-move_start_time))
-
-            parent_given_tokens = self.process_stage1(self.model_stage1, prompt, duration=4.0, video_raw_text=prompt, video_guidance_text="视频",
-                                                      image_text_suffix=" 高清摄影",
-                                                      outputdir=f'{workdir}/output/stage1', batch_size=1)
-
-            logging.debug("moving stage 1 model to cpu")
-            self.model_stage1 = self.model_stage1.cpu()
-            torch.cuda.empty_cache()
-
-            yield Path(f"{workdir}/output/stage1/0.gif")
-
-            if both_stages:
-                move_start_time = time.time()
-                logging.debug("moving stage 2 model to cuda")
-                self.model_stage2 = self.model_stage2.cuda()
-                logging.debug("moving in model2 takes time: {:.2f}".format(time.time()-move_start_time))
-                self.process_stage2(self.model_stage2, prompt, duration=2.0, video_raw_text=prompt+" 视频",
-                                    video_guidance_text="视频", parent_given_tokens=parent_given_tokens,
-                                    outputdir=f'{workdir}/output/stage2',
-                                    gpu_rank=0, gpu_parallel_size=1)
-                yield Path(f"{workdir}/output/stage2/0.gif")
-        return
+        move_start_time = time.time()
+        logging.debug("moving stage 2 model to cpu")
+        self.model_stage2 = self.model_stage2.cpu()
+        torch.cuda.empty_cache()
+        logging.debug("moving stage 1 model to cuda")
+        self.model_stage1 = self.model_stage1.cuda()
+        logging.debug("moving in model1 takes time: {:.2f}".format(time.time()-move_start_time))
 
-    def process_stage1(self, model, seq_text, duration, video_raw_text=None, video_guidance_text="视频", image_text_suffix="", outputdir=None, batch_size=1):
         process_start_time = time.time()
         args = self.args
         use_guide = args.use_guidance_stage1
-        if video_raw_text is None:
-            video_raw_text = seq_text
+        batch_size = 1
+        seq_text = self.prompt
+        video_raw_text = self.prompt
+        duration=4.0
+        video_guidance_text="视频"
+        image_text_suffix=" 高清摄影"
+        outputdir=f'{workdir}/output/stage1'
         mbz = args.stage1_max_inference_batch_size if args.stage1_max_inference_batch_size > 0 else args.max_inference_batch_size
         assert batch_size < mbz or batch_size % mbz == 0
         frame_len = 400
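The predict() body moves into a new run() generator: predict() now only resolves the seed, stashes the inputs on self, and yields whatever run() produces, while run() seeds torch and random and shuffles the two checkpoints so that only one stage occupies the GPU at a time. A minimal sketch of that eviction order, assuming two ordinary torch modules:

    import torch

    def activate(needed: torch.nn.Module, idle: torch.nn.Module) -> None:
        # Evict the idle stage and release its cached blocks *before*
        # loading the other one, or both may briefly coexist in VRAM.
        idle.cpu()
        torch.cuda.empty_cache()
        needed.cuda()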
@@ -553,13 +546,13 @@ class Predictor(BasePredictor):
         for tim in range(max(batch_size // mbz, 1)):
             start_time = time.time()
             output_list_1st.append(
-                my_filling_sequence(model, args,seq_1st.clone(),
+                my_filling_sequence(self.model_stage1, args,seq_1st.clone(),
                                     batch_size=min(batch_size, mbz),
                                     get_masks_and_position_ids=get_masks_and_position_ids_stage1,
                                     text_len=text_len_1st,
                                     frame_len=frame_len,
-                                    strategy=self.strategy_cogview2,
-                                    strategy2=self.strategy_cogvideo,
+                                    strategy=strategy_cogview2,
+                                    strategy2=strategy_cogvideo,
                                     log_text_attention_weights=1.4,
                                     enforce_no_swin=True,
                                     mode_stage1=True,
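With the stage code inlined into run(), the loose model parameter disappears: the sampling calls name self.model_stage1 / self.model_stage2 explicitly, and the CoglmStrategy objects become locals rebuilt on every run() rather than attributes created once in setup(). Rebuilding per call is cheap and would let a later per-request temperature input take effect without touching setup(). A sketch with the constructor arguments copied from this diff (the imports assume CogVideo's coglm_strategy module and the icetk tokenizer are on the path):

    from coglm_strategy import CoglmStrategy  # CogVideo's sampling strategy
    from icetk import icetk as tokenizer      # icetk text/image tokenizer

    def make_strategies(args):
        # mask out everything past the image-token vocabulary
        invalid_slices = [slice(tokenizer.num_image_tokens, None)]
        strategy_cogview2 = CoglmStrategy(invalid_slices, temperature=1.0, top_k=16)
        strategy_cogvideo = CoglmStrategy(invalid_slices,
                                          temperature=args.temperature, top_k=args.top_k,
                                          temperature2=args.coglm_temperature2)
        return strategy_cogview2, strategy_cogvideo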
@@ -600,12 +593,12 @@ class Predictor(BasePredictor):
             input_seq = seq[:min(batch_size, mbz)].clone() if tim == 0 else seq[mbz*tim:mbz*(tim+1)].clone()
             guider_seq2 = (guider_seq[:min(batch_size, mbz)].clone() if tim == 0 else guider_seq[mbz*tim:mbz*(tim+1)].clone()) if guider_seq is not None else None
             output_list.append(
-                my_filling_sequence(model, args,input_seq,
+                my_filling_sequence(self.model_stage1, args,input_seq,
                                     batch_size=min(batch_size, mbz),
                                     get_masks_and_position_ids=get_masks_and_position_ids_stage1,
                                     text_len=text_len, frame_len=frame_len,
-                                    strategy=self.strategy_cogview2,
-                                    strategy2=self.strategy_cogvideo,
+                                    strategy=strategy_cogview2,
+                                    strategy2=strategy_cogvideo,
                                     log_text_attention_weights=video_log_text_attention_weights,
                                     guider_seq=guider_seq2,
                                     guider_text_len=guider_text_len,
@@ -629,27 +622,37 @@ class Predictor(BasePredictor):
             for clip_i in range(len(imgs)):
                 # os.makedirs(output_dir_full_paths[clip_i], exist_ok=True)
                 my_save_multiple_images(imgs[clip_i], outputdir, subdir=f"frames/{clip_i}", debug=False)
-                os.system(f"gifmaker -i '{outputdir}'/frames/'{clip_i}'/0*.jpg -o '{outputdir}/{clip_i}.gif' -d 0.25")
+                out_filename = f'{outputdir}/{clip_i}.gif'
+                os.system(f"gifmaker -i '{outputdir}'/frames/'{clip_i}'/0*.jpg -o '{out_filename}' -d 0.25")
+                yield out_filename
             torch.save(save_tokens, os.path.join(outputdir, 'frame_tokens.pt'))
 
         logging.info("CogVideo Stage1 completed. Taken time {:.2f}\n".format(time.time() - process_start_time))
 
-        return save_tokens
+        logging.debug("moving stage 1 model to cpu")
+        self.model_stage1 = self.model_stage1.cpu()
+        torch.cuda.empty_cache()
 
-    def process_stage2(self, model, seq_text, duration, video_raw_text=None, video_guidance_text="视频", parent_given_tokens=None, conddir=None, outputdir=None, gpu_rank=0, gpu_parallel_size=1):
-        stage2_starttime = time.time()
-        args = self.args
-        use_guidance = args.use_guidance_stage2
-        if args.both_stages:
-            move_start_time = time.time()
-            logging.debug("moving stage-2 model to cuda")
-            model = model.cuda()
-            logging.debug("moving in stage-2 model takes time: {:.2f}".format(time.time()-move_start_time))
+        if not self.args.both_stages:
+            logging.info("only stage 1 selected, exiting")
+            return
+        gpu_rank=0
+        gpu_parallel_size=1
+        video_raw_text=self.prompt+" 视频"
+        duration=2.0
+        video_guidance_text="视频"
+        outputdir=f'{workdir}/output/stage2'
+        parent_given_tokens = save_tokens
+        stage2_starttime = time.time()
+        use_guidance = args.use_guidance_stage2
 
+        move_start_time = time.time()
+        logging.debug("moving stage-2 model to cuda")
+        self.model_stage2 = self.model_stage2.cuda()
+        logging.debug("moving in stage-2 model takes time: {:.2f}".format(time.time()-move_start_time))
 
         try:
-            if parent_given_tokens is None:
-                assert conddir is not None
-                parent_given_tokens = torch.load(os.path.join(conddir, 'frame_tokens.pt'), map_location='cpu')
             sample_num_allgpu = parent_given_tokens.shape[0]
             sample_num = sample_num_allgpu // gpu_parallel_size
             assert sample_num * gpu_parallel_size == sample_num_allgpu
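Both GIF writers now build the output filename, shell out to gifmaker, and yield the path, which is what turns run() into a generator: the quick stage-1 preview can reach the client while stage 2 is still sampling. (Note that the predict() hunk above yields run()'s results as a single list, which drains the generator before anything is returned; yielding item by item, as sketched here, is the fully streaming variant.) A minimal self-contained sketch of the shape, with a stub run():

    import typing
    from cog import BasePredictor, Path  # Cog's Path marks file outputs

    class StreamingPredictor(BasePredictor):
        def run(self) -> typing.Iterator[str]:
            # stand-in for the real two-stage pipeline: yield each GIF
            # path the moment its stage finishes
            yield "/tmp/output/stage1/0.gif"
            yield "/tmp/output/stage2/0.gif"

        def predict(self) -> typing.Iterator[Path]:
            # a generator-of-Path predict() lets Cog upload each file
            # as soon as it is yielded
            for gif in self.run():
                yield Path(gif)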
@@ -704,12 +707,12 @@ class Predictor(BasePredictor):
                 input_seq = seq[:min(generate_batchsize_total, mbz)].clone() if tim == 0 else seq[mbz*tim:mbz*(tim+1)].clone()
                 guider_seq2 = (guider_seq[:min(generate_batchsize_total, mbz)].clone() if tim == 0 else guider_seq[mbz*tim:mbz*(tim+1)].clone()) if guider_seq is not None else None
                 output_list.append(
-                    my_filling_sequence(model, args, input_seq,
+                    my_filling_sequence(self.model_stage2, args, input_seq,
                                         batch_size=min(generate_batchsize_total, mbz),
                                         get_masks_and_position_ids=get_masks_and_position_ids_stage2,
                                         text_len=text_len, frame_len=frame_len,
-                                        strategy=self.strategy_cogview2,
-                                        strategy2=self.strategy_cogvideo,
+                                        strategy=strategy_cogview2,
+                                        strategy2=strategy_cogvideo,
                                         log_text_attention_weights=video_log_text_attention_weights,
                                         mode_stage1=False,
                                         guider_seq=guider_seq2,
@@ -735,10 +738,10 @@ class Predictor(BasePredictor):
 
         enc_text = tokenizer.encode(seq_text)
         frame_num_per_sample = parent_given_tokens.shape[1]
         parent_given_tokens_2d = parent_given_tokens.reshape(-1, 400)
 
         logging.debug("moving stage 2 model to cpu")
         self.model_stage2 = self.model_stage2.cpu()
         torch.cuda.empty_cache()
 
         # use dsr if loaded
@@ -759,21 +762,30 @@ class Predictor(BasePredictor):
 
                 for sample_i in range(sample_num):
                     my_save_multiple_images(decoded_sr_videos[sample_i], outputdir,subdir=f"frames/{sample_i+sample_num*gpu_rank}", debug=False)
-                    os.system(f"gifmaker -i '{outputdir}'/frames/'{sample_i+sample_num*gpu_rank}'/0*.jpg -o '{outputdir}/{sample_i+sample_num*gpu_rank}.gif' -d 0.125")
+                    output_file = f'{outputdir}/{sample_i+sample_num*gpu_rank}.gif'
+                    os.system(f"gifmaker -i '{outputdir}'/frames/'{sample_i+sample_num*gpu_rank}'/0*.jpg -o '{output_file}' -d 0.125")
+                    yield output_file
 
                 logging.info("Direct super-resolution completed. Taken time {:.2f}\n".format(time.time() - dsr_starttime))
             else:
-                output_tokens = torch.cat(output_list, dim=0)[:, 1+text_len:]
-
-                imgs, sred_imgs, txts = [], [], []
-                for seq in output_tokens:
-                    decoded_imgs = [torch.nn.functional.interpolate(tokenizer.decode(image_ids=seq.tolist()[i*400: (i+1)*400]), size=(480, 480)) for i in range(total_frames)]
-                    imgs.append(decoded_imgs) # only the last image (target)
-
-                if outputdir is not None:
-                    for clip_i in range(len(imgs)):
-                        # os.makedirs(output_dir_full_paths[clip_i], exist_ok=True)
-                        my_save_multiple_images(imgs[clip_i], outputdir, subdir=f"frames/{clip_i}", debug=False)
-                        os.system(f"gifmaker -i '{outputdir}'/frames/'{clip_i}'/0*.jpg -o '{outputdir}/{clip_i}.gif' -d 0.25")
+                #imgs = [torch.nn.functional.interpolate(tokenizer.decode(image_ids=seq.tolist()), size=(480, 480)) for seq in output_tokens_merge]
+                #os.makedirs(outputdir, exist_ok=True)
+                #my_save_multiple_images(imgs, outputdir,subdir="frames", debug=False)
+                #os.system(f"gifmaker -i '{outputdir}'/frames/0*.jpg -o '{outputdir}/{str(float(duration))}_concat.gif' -d 0.2")
+
+                output_tokens = torch.cat(output_list, dim=0)[:, 1+text_len:]
+                decoded_videos = []
+
+                for sample_i in range(sample_num):
+                    decoded_imgs = []
+                    for frame_i in range(frame_num_per_sample):
+                        decoded_img = tokenizer.decode(image_ids=parent_given_tokens_2d[frame_i+sample_i*frame_num_per_sample][-3600:])
+                        decoded_imgs.append(torch.nn.functional.interpolate(decoded_img, size=(480, 480)))
+                    decoded_videos.append(decoded_imgs)
+
+                for sample_i in range(sample_num):
+                    my_save_multiple_images(decoded_videos[sample_i], outputdir,subdir=f"frames/{sample_i+sample_num*gpu_rank}", debug=False)
+                    output_file = f'{outputdir}/{sample_i+sample_num*gpu_rank}.gif'
+                    os.system(f"gifmaker -i '{outputdir}'/frames/'{sample_i+sample_num*gpu_rank}'/0*.jpg -o '{output_file}' -d 0.125")
+                    yield output_file
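When no super-resolution model is loaded, the new else branch decodes the stage-1 tokens directly: parent_given_tokens is reshaped so each row holds one frame of 400 image tokens (matching frame_len = 400 earlier in this diff), frames are addressed as frame_i + sample_i * frame_num_per_sample, and each decoded frame is interpolated up to 480×480 before the GIF is assembled. The bookkeeping, sketched with dummy tensors (shapes assumed from the reshape(-1, 400) above and the generate_frame_num = 5 in setup):

    import torch

    samples, frames, tokens_per_frame = 2, 5, 400
    parent_given_tokens = torch.zeros(samples, frames, tokens_per_frame, dtype=torch.long)

    # one frame of 400 image tokens per row, as in reshape(-1, 400) above
    parent_given_tokens_2d = parent_given_tokens.reshape(-1, tokens_per_frame)

    for sample_i in range(samples):
        for frame_i in range(frames):
            # matches the diff's frame_i + sample_i * frame_num_per_sample indexing
            row = parent_given_tokens_2d[frame_i + sample_i * frames]
            assert row.shape == (tokens_per_frame,)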