From 43e7432ccb5c68c71fdc0598515e9471b5d1ba24 Mon Sep 17 00:00:00 2001
From: huangshiyu
Date: Tue, 13 Aug 2024 10:17:17 +0800
Subject: [PATCH] update paper link and citation in README.md

---
 README.md    | 10 +++++++---
 README_ja.md | 10 +++++++---
 README_zh.md | 10 +++++++---
 3 files changed, 21 insertions(+), 9 deletions(-)

diff --git a/README.md b/README.md
index 1307d1f..a14e15f 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@
 🤗 Experience on CogVideoX Huggingface Space

-📚 Check here to view Paper
+📚 Check here to view Paper

 👋 Join our WeChat and Discord
@@ -211,10 +211,14 @@ hands-on practice on text-to-video generation. *The original input is in Chinese
 🌟 If you find our work helpful, please leave us a star and cite our paper.
 
 ```
-@article{yang2024cogvideox,
+@misc{yang2024cogvideoxtexttovideodiffusionmodels,
       title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
-      author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and JiaZheng Xu and Yuanming Yang and Xiaohan Zhang and Xiaotao Gu and Guanyu Feng and Da Yin and Wenyi Hong and Weihan Wang and Yean Cheng and Yuxuan Zhang and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
+      author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and Jiazheng Xu and Yuanming Yang and Wenyi Hong and Xiaohan Zhang and Guanyu Feng and Da Yin and Xiaotao Gu and Yuxuan Zhang and Weihan Wang and Yean Cheng and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
       year={2024},
+      eprint={2408.06072},
+      archivePrefix={arXiv},
+      primaryClass={cs.CV},
+      url={https://arxiv.org/abs/2408.06072},
 }
 @article{hong2022cogvideo,
   title={CogVideo: Large-scale Pretraining for Text-to-Video Generation via Transformers},
diff --git a/README_ja.md b/README_ja.md
index 398de54..40c97e4 100644
--- a/README_ja.md
+++ b/README_ja.md
@@ -10,7 +10,7 @@
 🤗 Experience on CogVideoX Huggingface Space

-📚 Check the Paper
+📚 Check the Paper

 👋 Join WeChat and Discord
@@ -209,10 +209,14 @@ The CogVideo demo can be experienced at [https://models.aminer.cn/cogvideo](https://models.aminer.c
 🌟 If you find our work helpful, please give us a star and cite our paper.
 
 ```
-@article{yang2024cogvideox,
+@misc{yang2024cogvideoxtexttovideodiffusionmodels,
       title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
-      author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and JiaZheng Xu and Yuanming Yang and Xiaohan Zhang and Xiaotao Gu and Guanyu Feng and Da Yin and Wenyi Hong and Weihan Wang and Yean Cheng and Yuxuan Zhang and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
+      author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and Jiazheng Xu and Yuanming Yang and Wenyi Hong and Xiaohan Zhang and Guanyu Feng and Da Yin and Xiaotao Gu and Yuxuan Zhang and Weihan Wang and Yean Cheng and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
       year={2024},
+      eprint={2408.06072},
+      archivePrefix={arXiv},
+      primaryClass={cs.CV},
+      url={https://arxiv.org/abs/2408.06072},
 }
 @article{hong2022cogvideo,
   title={CogVideo: Large-scale Pretraining for Text-to-Video Generation via Transformers},
diff --git a/README_zh.md b/README_zh.md
index 8a9d6d0..b7ad3c3 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -11,7 +11,7 @@
 🤗 Experience the video generation model on CogVideoX Huggingface Space

-📚 View the Paper
+📚 View the Paper

 👋 Join our WeChat and Discord
@@ -183,10 +183,14 @@ The CogVideo demo site is at [https://models.aminer.cn/cogvideo](https://models.amine
 🌟 If you find our work helpful, feel free to cite our paper and leave us your valuable stars
 
 ```
-@article{yang2024cogvideox,
+@misc{yang2024cogvideoxtexttovideodiffusionmodels,
       title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
-      author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and JiaZheng Xu and Yuanming Yang and Xiaohan Zhang and Xiaotao Gu and Guanyu Feng and Da Yin and Wenyi Hong and Weihan Wang and Yean Cheng and Yuxuan Zhang and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
+      author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and Jiazheng Xu and Yuanming Yang and Wenyi Hong and Xiaohan Zhang and Guanyu Feng and Da Yin and Xiaotao Gu and Yuxuan Zhang and Weihan Wang and Yean Cheng and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
       year={2024},
+      eprint={2408.06072},
+      archivePrefix={arXiv},
+      primaryClass={cs.CV},
+      url={https://arxiv.org/abs/2408.06072},
 }
 @article{hong2022cogvideo,
   title={CogVideo: Large-scale Pretraining for Text-to-Video Generation via Transformers},