From b57797c0ce1a35f04a6ff3d3028cb1697aa9c5bc Mon Sep 17 00:00:00 2001
From: huangshiyu
Date: Fri, 16 Aug 2024 01:40:30 +0800
Subject: [PATCH] update citation

---
 README.md    | 13 +++++--------
 README_ja.md | 13 +++++--------
 README_zh.md | 13 +++++--------
 3 files changed, 15 insertions(+), 24 deletions(-)

diff --git a/README.md b/README.md
index becdc42..2c65094 100644
--- a/README.md
+++ b/README.md
@@ -215,14 +215,11 @@ hands-on practice on text-to-video generation. *The original input is in Chinese
 🌟 If you find our work helpful, please leave us a star and cite our paper.
 
 ```
-@misc{yang2024cogvideoxtexttovideodiffusionmodels,
-      title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
-      author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and Jiazheng Xu and Yuanming Yang and Wenyi Hong and Xiaohan Zhang and Guanyu Feng and Da Yin and Xiaotao Gu and Yuxuan Zhang and Weihan Wang and Yean Cheng and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
-      year={2024},
-      eprint={2408.06072},
-      archivePrefix={arXiv},
-      primaryClass={cs.CV},
-      url={https://arxiv.org/abs/2408.06072},
+@article{yang2024cogvideox,
+  title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
+  author={Yang, Zhuoyi and Teng, Jiayan and Zheng, Wendi and Ding, Ming and Huang, Shiyu and Xu, Jiazheng and Yang, Yuanming and Hong, Wenyi and Zhang, Xiaohan and Feng, Guanyu and others},
+  journal={arXiv preprint arXiv:2408.06072},
+  year={2024}
 }
 @article{hong2022cogvideo,
   title={CogVideo: Large-scale Pretraining for Text-to-Video Generation via Transformers},
diff --git a/README_ja.md b/README_ja.md
index 142bf75..e817528 100644
--- a/README_ja.md
+++ b/README_ja.md
@@ -211,14 +211,11 @@ CogVideoのデモは [https://models.aminer.cn/cogvideo](https://models.aminer.c
 🌟 私たちの仕事が役立つと思われた場合、ぜひスターを付けていただき、論文を引用してください。
 
 ```
-@misc{yang2024cogvideoxtexttovideodiffusionmodels,
-      title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
-      author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and Jiazheng Xu and Yuanming Yang and Wenyi Hong and Xiaohan Zhang and Guanyu Feng and Da Yin and Xiaotao Gu and Yuxuan Zhang and Weihan Wang and Yean Cheng and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
-      year={2024},
-      eprint={2408.06072},
-      archivePrefix={arXiv},
-      primaryClass={cs.CV},
-      url={https://arxiv.org/abs/2408.06072},
+@article{yang2024cogvideox,
+  title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
+  author={Yang, Zhuoyi and Teng, Jiayan and Zheng, Wendi and Ding, Ming and Huang, Shiyu and Xu, Jiazheng and Yang, Yuanming and Hong, Wenyi and Zhang, Xiaohan and Feng, Guanyu and others},
+  journal={arXiv preprint arXiv:2408.06072},
+  year={2024}
 }
 @article{hong2022cogvideo,
   title={CogVideo: Large-scale Pretraining for Text-to-Video Generation via Transformers},
diff --git a/README_zh.md b/README_zh.md
index 6fd4373..cf70471 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -188,14 +188,11 @@ CogVideo的demo网站在[https://models.aminer.cn/cogvideo](https://models.amine
 🌟 如果您发现我们的工作有所帮助，欢迎引用我们的文章，留下宝贵的stars
 
 ```
-@misc{yang2024cogvideoxtexttovideodiffusionmodels,
-      title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
-      author={Zhuoyi Yang and Jiayan Teng and Wendi Zheng and Ming Ding and Shiyu Huang and Jiazheng Xu and Yuanming Yang and Wenyi Hong and Xiaohan Zhang and Guanyu Feng and Da Yin and Xiaotao Gu and Yuxuan Zhang and Weihan Wang and Yean Cheng and Ting Liu and Bin Xu and Yuxiao Dong and Jie Tang},
-      year={2024},
-      eprint={2408.06072},
-      archivePrefix={arXiv},
-      primaryClass={cs.CV},
-      url={https://arxiv.org/abs/2408.06072},
+@article{yang2024cogvideox,
+  title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
+  author={Yang, Zhuoyi and Teng, Jiayan and Zheng, Wendi and Ding, Ming and Huang, Shiyu and Xu, Jiazheng and Yang, Yuanming and Hong, Wenyi and Zhang, Xiaohan and Feng, Guanyu and others},
+  journal={arXiv preprint arXiv:2408.06072},
+  year={2024}
 }
 @article{hong2022cogvideo,
   title={CogVideo: Large-scale Pretraining for Text-to-Video Generation via Transformers},