ynhe committed on
Commit 6198401 · verified · 1 Parent(s): 2f6f4bb

Update app.py

Files changed (1)
  1. app.py +35 -24
app.py CHANGED
@@ -11,12 +11,7 @@ from datasets import config
 hf_token = os.environ['hf_token'] # make sure your token is set in the environment
 
 local_dir = "VBench_sampled_video" # local folder path
-# dataset = load_dataset("Vchitect/VBench_sampled_video")
-# print(os.listdir("~/.cache/huggingface/datasets/Vchitect___VBench_sampled_video/"))
-# root = "~/.cache/huggingface/datasets/Vchitect___VBench_sampled_video/"
-# print(config.HF_DATASETS_CACHE)
-# root = config.HF_DATASETS_CACHE
-# print(root)
+
 def print_directory_contents(path, indent=0):
     # print the contents of the current directory
     try:
@@ -32,10 +27,10 @@ def print_directory_contents(path, indent=0):
 os.makedirs(local_dir, exist_ok=True)
 hf_api = HfApi(endpoint="https://huggingface.co", token=hf_token)
 hf_api = HfApi(token=hf_token)
-repo_id = "Vchitect/VBench_sampled_video"
+repo_id = "Vchitect/VBench-2.0_sampled_videos"
 
 model_names=[]
-for i in hf_api.list_repo_tree('Vchitect/VBench_sampled_video',repo_type='dataset'):
+for i in hf_api.list_repo_tree('Vchitect/VBench-2.0_sampled_videos',repo_type='dataset'):
     model_name = i.path
     if '.git' not in model_name and '.md' not in model_name:
         model_names.append(model_name)
@@ -51,8 +46,24 @@ with open("videos_by_dimension.json") as f:
 # with open("all_videos.json") as f:
 #     all_videos = json.load(f)
 
-types = ['appearance_style', 'color', 'temporal_style', 'spatial_relationship', 'temporal_flickering', 'scene', 'multiple_objects', 'object_class', 'human_action', 'overall_consistency', 'subject_consistency']
-
+types =['Material',
+'Human_Anatomy',
+'Complex_Plot',
+'Multi-View_Consistency',
+'Motion_Rationality',
+'Human_Clothes',
+'Instance_Preservation',
+'Motion_Order_Understanding',
+'Dynamic_Attribute',
+'Dynamic_Spatial_Relationship',
+'Thermotics',
+'Human_Interaction',
+'Camera_Motion',
+'Diversity',
+'Composition',
+'Complex_Landscape',
+'Human_Identity',
+'Mechanics']
 def get_video_path_local(model_name, type, prompt):
     if 'Show-1' in model_name:
         video_path_subfolder = os.path.join(model_name, type, 'super2')
@@ -61,8 +72,8 @@ def get_video_path_local(model_name, type, prompt):
     else:
         video_path_subfolder = os.path.join(model_name, type)
 
-    if model_name == 'cogvideo':
-        prompt = prompt.replace(".mp4",".gif")
+    # if model_name == 'cogvideo':
+    #     prompt = prompt.replace(".mp4",".gif")
 
     try:
         return hf_api.hf_hub_download(
@@ -121,15 +132,15 @@ def record_user_feedback_a(model_name1, model_name2, type, prompt):
     # 0 means model A better, 1 means model B better, -1 means tie;
     hf_api.hf_hub_download(
         repo_id = "Vchitect/VBench_human_annotation",
-        filename = "arena_feedback.csv",
+        filename = "VBench2_arena_feedback.csv",
         repo_type = "dataset",
         local_dir = './'
     )
-    with open("arena_feedback.csv",'a') as f:
+    with open("VBench2_arena_feedback.csv",'a') as f:
         f.write(f"{model_name1}\t{model_name2}\t{type}\t{prompt}\t{0}\n")
     hf_api.upload_file(
-        path_or_fileobj="arena_feedback.csv",
-        path_in_repo="arena_feedback.csv",
+        path_or_fileobj="VBench2_arena_feedback.csv",
+        path_in_repo="VBench2_arena_feedback.csv",
         repo_id="Vchitect/VBench_human_annotation",
         token=hf_token,
         repo_type="dataset",
@@ -141,15 +152,15 @@ def record_user_feedback_b(model_name1, model_name2, type, prompt):
     # 0 means model A better, 1 means model B better, -1 means tie;
     hf_api.hf_hub_download(
         repo_id = "Vchitect/VBench_human_annotation",
-        filename = "arena_feedback.csv",
+        filename = "VBench2_arena_feedback.csv",
         repo_type = "dataset",
        local_dir = './'
     )
-    with open("arena_feedback.csv",'a') as f:
+    with open("VBench2_arena_feedback.csv",'a') as f:
         f.write(f"{model_name1}\t{model_name2}\t{type}\t{prompt}\t{1}\n")
     hf_api.upload_file(
-        path_or_fileobj="arena_feedback.csv",
-        path_in_repo="arena_feedback.csv",
+        path_or_fileobj="VBench2_arena_feedback.csv",
+        path_in_repo="VBench2_arena_feedback.csv",
         repo_id="Vchitect/VBench_human_annotation",
         token=hf_token,
         repo_type="dataset",
@@ -161,15 +172,15 @@ def record_user_feedback_tie(model_name1, model_name2, type, prompt):
     # 0 means model A better, 1 means model B better, -1 means tie;
     hf_api.hf_hub_download(
         repo_id = "Vchitect/VBench_human_annotation",
-        filename = "arena_feedback.csv",
+        filename = "VBench2_arena_feedback.csv",
         repo_type = "dataset",
         local_dir = './'
     )
-    with open("arena_feedback.csv",'a') as f:
+    with open("VBench2_arena_feedback.csv",'a') as f:
         f.write(f"{model_name1}\t{model_name2}\t{type}\t{prompt}\t{-1}\n")
     hf_api.upload_file(
-        path_or_fileobj="arena_feedback.csv",
-        path_in_repo="arena_feedback.csv",
+        path_or_fileobj="VBench2_arena_feedback.csv",
+        path_in_repo="VBench2_arena_feedback.csv",
        repo_id="Vchitect/VBench_human_annotation",
         token=hf_token,
         repo_type="dataset",
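
After this change, the three record_user_feedback_* functions all perform the same round trip against the renamed CSV: download VBench2_arena_feedback.csv from the Vchitect/VBench_human_annotation dataset repo, append one tab-separated row, and upload the file back. The following is a minimal sketch of that shared pattern using the same huggingface_hub calls already present in app.py; the consolidated helper record_user_feedback and its vote parameter are illustrative, not part of the commit.

# Sketch only (not part of the commit): the shared download-append-upload
# pattern behind record_user_feedback_a / _b / _tie after this change.
import os
from huggingface_hub import HfApi

hf_api = HfApi(token=os.environ['hf_token'])
FEEDBACK_CSV = "VBench2_arena_feedback.csv"

def record_user_feedback(model_name1, model_name2, type, prompt, vote):
    # vote: 0 = model A better, 1 = model B better, -1 = tie (as in app.py)
    # 1) fetch the current feedback CSV from the annotation dataset repo
    hf_api.hf_hub_download(
        repo_id="Vchitect/VBench_human_annotation",
        filename=FEEDBACK_CSV,
        repo_type="dataset",
        local_dir="./",
    )
    # 2) append the new vote locally as one tab-separated row
    with open(FEEDBACK_CSV, "a") as f:
        f.write(f"{model_name1}\t{model_name2}\t{type}\t{prompt}\t{vote}\n")
    # 3) push the updated CSV back to the dataset repo
    # (token is taken from the HfApi instance created above)
    hf_api.upload_file(
        path_or_fileobj=FEEDBACK_CSV,
        path_in_repo=FEEDBACK_CSV,
        repo_id="Vchitect/VBench_human_annotation",
        repo_type="dataset",
    )

Calling record_user_feedback(a, b, t, p, -1) would reproduce record_user_feedback_tie; the existing per-vote functions remain in app.py and only their target filename changed in this commit.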