Merge branch 'main' into CogVideoX_dev

commit a5711370f9
Authored by zR on 2024-08-13 13:41:00 +08:00, committed by GitHub
3 changed files with 21 additions and 9 deletions

README.md

@@ -11,7 +11,7 @@
 🤗 Experience on <a href="https://huggingface.co/spaces/THUDM/CogVideoX" target="_blank">CogVideoX Huggingface Space</a>
 </p>
 <p align="center">
-📚 Check here to view <a href="https://arxiv.org/pdf/2408.06072" target="_blank">Paper</a>
+📚 Check here to view <a href="https://arxiv.org/abs/2408.06072" target="_blank">Paper</a>
 </p>
 <p align="center">
 👋 Join our <a href="resources/WECHAT.md" target="_blank">WeChat</a> and <a href="https://discord.gg/Ewaabk6s" target="_blank">Discord</a>
@@ -215,10 +215,14 @@ hands-on practice on text-to-video generation. *The original input is in Chinese
 🌟 If you find our work helpful, please leave us a star and cite our paper.
 ```
-@article{yang2024cogvideox,
+@misc{yang2024cogvideoxtexttovideodiffusionmodels,
 title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
-author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and JiaZheng Xu and Yuanming Yang and Xiaohan Zhang and Xiaotao Gu and Guanyu Feng and Da Yin and Wenyi Hong and Weihan Wang and Yean Cheng and Yuxuan Zhang and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
+author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and Jiazheng Xu and Yuanming Yang and Wenyi Hong and Xiaohan Zhang and Guanyu Feng and Da Yin and Xiaotao Gu and Yuxuan Zhang and Weihan Wang and Yean Cheng and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
 year={2024},
+eprint={2408.06072},
+archivePrefix={arXiv},
+primaryClass={cs.CV},
+url={https://arxiv.org/abs/2408.06072},
 }
 @article{hong2022cogvideo,
 title={CogVideo: Large-scale Pretraining for Text-to-Video Generation via Transformers},

README_ja.md

@@ -11,7 +11,7 @@
 🤗 <a href="https://huggingface.co/spaces/THUDM/CogVideoX" target="_blank">CogVideoX Huggingface Space</a> で体験
 </p>
 <p align="center">
-📚 <a href="https://arxiv.org/pdf/2408.06072" target="_blank">論文</a> をチェック
+📚 <a href="https://arxiv.org/abs/2408.06072" target="_blank">論文</a> をチェック
 </p>
 <p align="center">
 👋 <a href="resources/WECHAT.md" target="_blank">WeChat</a> と <a href="https://discord.gg/Ewaabk6s" target="_blank">Discord</a> に参加
@@ -211,10 +211,14 @@ CogVideoのデモは [https://models.aminer.cn/cogvideo](https://models.aminer.c
 🌟 私たちの仕事が役立つと思われた場合、ぜひスターを付けていただき、論文を引用してください。
 ```
-@article{yang2024cogvideox,
+@misc{yang2024cogvideoxtexttovideodiffusionmodels,
 title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
-author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and JiaZheng Xu and Yuanming Yang and Xiaohan Zhang and Xiaotao Gu and Guanyu Feng and Da Yin and Wenyi Hong and Weihan Wang and Yean Cheng and Yuxuan Zhang and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
+author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and Jiazheng Xu and Yuanming Yang and Wenyi Hong and Xiaohan Zhang and Guanyu Feng and Da Yin and Xiaotao Gu and Yuxuan Zhang and Weihan Wang and Yean Cheng and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
 year={2024},
+eprint={2408.06072},
+archivePrefix={arXiv},
+primaryClass={cs.CV},
+url={https://arxiv.org/abs/2408.06072},
 }
 @article{hong2022cogvideo,
 title={CogVideo: Large-scale Pretraining for Text-to-Video Generation via Transformers},

README_zh.md

@@ -12,7 +12,7 @@
 🤗 在 <a href="https://huggingface.co/spaces/THUDM/CogVideoX" target="_blank">CogVideoX Huggingface Space</a> 体验视频生成模型
 </p>
 <p align="center">
-📚 查看 <a href="https://arxiv.org/pdf/2408.06072" target="_blank">论文</a>
+📚 查看 <a href="https://arxiv.org/abs/2408.06072" target="_blank">论文</a>
 </p>
 <p align="center">
 👋 加入我们的 <a href="resources/WECHAT.md" target="_blank">微信</a> 和 <a href="https://discord.gg/Ewaabk6s" target="_blank">Discord</a>
@@ -188,10 +188,14 @@ CogVideo的demo网站在[https://models.aminer.cn/cogvideo](https://models.amine
 🌟 如果您发现我们的工作有所帮助，欢迎引用我们的文章，留下宝贵的stars
 ```
-@article{yang2024cogvideox,
+@misc{yang2024cogvideoxtexttovideodiffusionmodels,
 title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
-author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and JiaZheng Xu and Yuanming Yang and Xiaohan Zhang and Xiaotao Gu and Guanyu Feng and Da Yin and Wenyi Hong and Weihan Wang and Yean Cheng and Yuxuan Zhang and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
+author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and Jiazheng Xu and Yuanming Yang and Wenyi Hong and Xiaohan Zhang and Guanyu Feng and Da Yin and Xiaotao Gu and Yuxuan Zhang and Weihan Wang and Yean Cheng and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
 year={2024},
+eprint={2408.06072},
+archivePrefix={arXiv},
+primaryClass={cs.CV},
+url={https://arxiv.org/abs/2408.06072},
 }
 @article{hong2022cogvideo,
 title={CogVideo: Large-scale Pretraining for Text-to-Video Generation via Transformers},
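
For anyone refreshing a bibliography after this change, the sketch below shows one way to cite the updated entry from a LaTeX document. The `references.bib` file name and the surrounding document are illustrative assumptions, not part of this commit; only the citation key comes from the new `@misc` entry above.

```latex
% Minimal sketch: citing the updated CogVideoX entry.
% Assumes the @misc entry shown in this diff has been saved to
% references.bib (file name hypothetical).
\documentclass{article}
\begin{document}
Our experiments build on
CogVideoX~\cite{yang2024cogvideoxtexttovideodiffusionmodels}.
\bibliographystyle{plain}
\bibliography{references}
\end{document}
```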