BestWishYsh committed
Commit efce5a0 · verified · 1 Parent(s): 542066c

Update constants.py

Files changed (1): constants.py +16 -6
constants.py CHANGED
@@ -70,20 +70,30 @@ LEADERBORAD_INTRODUCTION = """
 
 SUBMIT_INTRODUCTION = """# Submission Guidelines
 1. Fill in *'Model Name'* if it is your first time to submit your result **or** Fill in *'Revision Model Name'* if you want to update your result.
-2. Fill in your home page to *'Model Link'*.
+2. Fill in your home page to *'Model Link'* and your team name to *'Your Team Name'*.
 3. After evaluation, follow the guidance in the [github repository](https://github.com/PKU-YuanGroup/OpenS2V-Nexus) to obtain `model_name.json` and upload it here.
 4. Click the *'Submit Eval'* button.
 5. Click *'Refresh'* to obtain the updated leaderboard.
 """
 
-TABLE_INTRODUCTION = """In the table below, we summarize each task performance of all the models.
-We use Aesthetic, Motion, FaceSim, GmeScore, NexusScore, and NaturalScore as the primary evaluation metric for each tasks.
+TABLE_INTRODUCTION = """In the table below, we use six dimensions as the primary evaluation metrics for each task.
+1. Visual Quality: Aesthetics.
+2. Motion Amplitude: Motion.
+3. Text Relevance: GmeScore.
+4. Subject Consistency: FaceSim and NexusScore.
 """
 
-TABLE_INTRODUCTION_HUMAN = """In the table below, we summarize each task performance of all the models.
-We use Aesthetic, Motion, FaceSim, GmeScore, and NaturalScore as the primary evaluation metric for each tasks.
+TABLE_INTRODUCTION_HUMAN = """In the table below, we use six dimensions as the primary evaluation metrics for each task.
+1. Visual Quality: Aesthetics.
+2. Motion Amplitude: Motion.
+3. Text Relevance: GmeScore.
+4. Subject Consistency: FaceSim.
 """
 
 CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
-CITATION_BUTTON_TEXT = r"""@article{
+CITATION_BUTTON_TEXT = r"""@article{yuan2025opens2v,
+title={OpenS2V-Nexus: A Detailed Benchmark and Million-Scale Dataset for Subject-to-Video Generation},
+author={Yuan, Shenghai and He, Xianyi and Deng, Yufan and Ye, Yang and Huang, Jinfa and Ma, Chongyang and Luo, Jiebo and Yuan, Li},
+journal={arXiv preprint arXiv:2505.20292},
+year={2025}
 }"""