mirageco committed on
Commit 2d29ab2
1 Parent(s): 318071f

Apply 8 categories from FinBen paper

Files changed (3)
  1. app.py +118 -44
  2. src/about.py +35 -35
  3. src/display/utils.py +9 -16
app.py CHANGED
@@ -64,20 +64,29 @@ leaderboard_df = original_df.copy()
 def update_table(
     hidden_df: pd.DataFrame,
     columns_info: list,
-    columns_eval: list,
-    columns_metadata: list,
-    columns_popularity: list,
-    columns_revision: list,
+    columns_IE: list,
+    columns_TA: list,
+    columns_QA: list,
+    columns_TG: list,
+    columns_RM: list,
+    columns_FO: list,
+    columns_DM: list,
+    columns_spanish: list,
+    columns_other: list,
     type_query: list,
     precision_query: list,
     size_query: list,
     show_deleted: bool,
     query: str,
 ):
+    # Combine all column selections
+    selected_columns = (
+        columns_info + columns_IE + columns_TA + columns_QA + columns_TG +
+        columns_RM + columns_FO + columns_DM + columns_spanish + columns_other
+    )
+    # Filter models based on queries
     filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
     filtered_df = filter_queries(query, filtered_df)
-    # Combine all column selections
-    selected_columns = columns_info + columns_eval + columns_metadata + columns_popularity + columns_revision
     df = select_columns(filtered_df, selected_columns)
     return df

@@ -91,13 +100,18 @@ def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
         AutoEvalColumn.model_type_symbol.name,
         AutoEvalColumn.model.name,
     ]
+
+    # Ensure no duplicates when never_hidden and displayed_by_default are both True
+    unique_columns = set(always_here_cols + columns)
+
     # We use COLS to maintain sorting
     filtered_df = df[
-        always_here_cols + [c for c in COLS if c in df.columns and c in columns]
+        [c for c in COLS if c in df.columns and c in unique_columns]
     ]
     return filtered_df


+
 def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
     final_df = []
     if query != "":
@@ -138,7 +152,7 @@ def filter_models(
     return filtered_df

 def uncheck_all():
-    return [], [], [], [], []
+    return [], [], [], [], [], [], [], [], [], []

 demo = gr.Blocks(css=custom_css)
 with demo:
@@ -164,32 +178,67 @@ with demo:
                 label="Model Information",
                 interactive=True,
             )
-        with gr.Tab("Evaluation Scores"):
-            shown_columns_eval = gr.CheckboxGroup(
-                choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Evaluation Scores"],
-                value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Evaluation Scores"],
-                label="Evaluation Scores",
+        with gr.Tab("Information Extraction (IE)"):
+            shown_columns_IE = gr.CheckboxGroup(
+                choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Information Extraction (IE)"],
+                value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Information Extraction (IE)"],
+                label="Information Extraction (IE)",
+                interactive=True,
+            )
+        with gr.Tab("Textual Analysis (TA)"):
+            shown_columns_TA = gr.CheckboxGroup(
+                choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Textual Analysis (TA)"],
+                value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Textual Analysis (TA)"],
+                label="Textual Analysis (TA)",
+                interactive=True,
+            )
+        with gr.Tab("Question Answering (QA)"):
+            shown_columns_QA = gr.CheckboxGroup(
+                choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Question Answering (QA)"],
+                value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Question Answering (QA)"],
+                label="Question Answering (QA)",
+                interactive=True,
+            )
+        with gr.Tab("Text Generation (TG)"):
+            shown_columns_TG = gr.CheckboxGroup(
+                choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Text Generation (TG)"],
+                value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Text Generation (TG)"],
+                label="Text Generation (TG)",
+                interactive=True,
+            )
+        with gr.Tab("Risk Management (RM)"):
+            shown_columns_RM = gr.CheckboxGroup(
+                choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Risk Management (RM)"],
+                value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Risk Management (RM)"],
+                label="Risk Management (RM)",
                 interactive=True,
             )
-        with gr.Tab("Model Metadata"):
-            shown_columns_metadata = gr.CheckboxGroup(
-                choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Model Metadata"],
-                value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Model Metadata"],
-                label="Model Metadata",
+        with gr.Tab("Forecasting (FO)"):
+            shown_columns_FO = gr.CheckboxGroup(
+                choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Forecasting (FO)"],
+                value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Forecasting (FO)"],
+                label="Forecasting (FO)",
                 interactive=True,
             )
-        with gr.Tab("Popularity Metrics"):
-            shown_columns_popularity = gr.CheckboxGroup(
-                choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Popularity Metrics"],
-                value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Popularity Metrics"],
-                label="Popularity Metrics",
+        with gr.Tab("Decision-Making (DM)"):
+            shown_columns_DM = gr.CheckboxGroup(
+                choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Decision-Making (DM)"],
+                value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Decision-Making (DM)"],
+                label="Decision-Making (DM)",
                 interactive=True,
             )
-        with gr.Tab("Revision and Availability"):
-            shown_columns_revision = gr.CheckboxGroup(
-                choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Revision and Availability"],
-                value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Revision and Availability"],
-                label="Revision and Availability",
+        with gr.Tab("Spanish"):
+            shown_columns_spanish = gr.CheckboxGroup(
+                choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Spanish"],
+                value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Spanish"],
+                label="Spanish",
+                interactive=True,
+            )
+        with gr.Tab("Other"):
+            shown_columns_other = gr.CheckboxGroup(
+                choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Other"],
+                value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Other"],
+                label="Other",
                 interactive=True,
             )
         with gr.Row():
@@ -199,10 +248,16 @@ with demo:
             inputs=[],
             outputs=[
                 shown_columns_info,
-                shown_columns_eval,
-                shown_columns_metadata,
-                shown_columns_popularity,
-                shown_columns_revision
+                shown_columns_IE,
+                shown_columns_TA,
+                shown_columns_QA,
+                shown_columns_TG,
+                shown_columns_RM,
+                shown_columns_FO,
+                shown_columns_DM,
+                shown_columns_spanish,
+                shown_columns_other,
+
             ],
         )
         with gr.Row():
@@ -236,16 +291,17 @@ with demo:
         leaderboard_table = gr.Dataframe(
             value=leaderboard_df[
                 [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
-                + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default]
+                + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.never_hidden]
             ],
             headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden]
-            + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
+            + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.never_hidden],
             datatype=TYPES,
             elem_id="leaderboard-table",
             interactive=False,
             visible=True,
         )

+
         # Dummy leaderboard for handling the case when the user uses backspace key
         hidden_leaderboard_table_for_search = gr.Dataframe(
             value=original_df[COLS],
@@ -258,10 +314,15 @@ with demo:
             inputs=[
                 hidden_leaderboard_table_for_search,
                 shown_columns_info,
-                shown_columns_eval,
-                shown_columns_metadata,
-                shown_columns_popularity,
-                shown_columns_revision,
+                shown_columns_IE,
+                shown_columns_TA,
+                shown_columns_QA,
+                shown_columns_TG,
+                shown_columns_RM,
+                shown_columns_FO,
+                shown_columns_DM,
+                shown_columns_spanish,
+                shown_columns_other,
                 filter_columns_type,
                 filter_columns_precision,
                 filter_columns_size,
@@ -271,8 +332,16 @@ with demo:
             outputs=leaderboard_table,
         )
         for selector in [
-            shown_columns_info, shown_columns_eval, shown_columns_metadata,
-            shown_columns_popularity, shown_columns_revision,
+            shown_columns_info,
+            shown_columns_IE,
+            shown_columns_TA,
+            shown_columns_QA,
+            shown_columns_TG,
+            shown_columns_RM,
+            shown_columns_FO,
+            shown_columns_DM,
+            shown_columns_spanish,
+            shown_columns_other,
             filter_columns_type, filter_columns_precision,
             filter_columns_size, deleted_models_visibility
         ]:
@@ -281,10 +350,15 @@ with demo:
                 inputs=[
                     hidden_leaderboard_table_for_search,
                     shown_columns_info,
-                    shown_columns_eval,
-                    shown_columns_metadata,
-                    shown_columns_popularity,
-                    shown_columns_revision,
+                    shown_columns_IE,
+                    shown_columns_TA,
+                    shown_columns_QA,
+                    shown_columns_TG,
+                    shown_columns_RM,
+                    shown_columns_FO,
+                    shown_columns_DM,
+                    shown_columns_spanish,
+                    shown_columns_other,
                     filter_columns_type,
                     filter_columns_precision,
                     filter_columns_size,
src/about.py CHANGED
@@ -7,46 +7,46 @@ class Task:
     benchmark: str
     metric: str
     col_name: str
+    category: str


 # Select your tasks here
 # ---------------------------------------------------
 class Tasks(Enum):
-    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
-    task0 = Task("FPB", "F1", "FPB")
-    task2 = Task("FiQA-SA", "F1", "FiQA-SA")
-    task3 = Task("TSA", "RMSE", "TSA")
-    task4 = Task("Headlines", "AvgF1", "Headlines")
-    task5 = Task("FOMC", "F1", "FOMC")
-    task7 = Task("FinArg-ACC", "MicroF1", "FinArg-ACC")
-    task8 = Task("FinArg-ARC", "MicroF1", "FinArg-ARC")
-    task9 = Task("MultiFin", "MicroF1", "Multifin")
-    task10 = Task("MA", "MicroF1", "MA")
-    task11 = Task("MLESG", "MicroF1", "MLESG")
-    task12 = Task("NER", "EntityF1", "NER")
-    task13 = Task("FINER-ORD", "EntityF1", "FINER-ORD")
-    task14 = Task("FinRED", "F1", "FinRED")
-    task15 = Task("SC", "F1", "SC")
-    task16 = Task("CD", "F1", "CD")
-    task17 = Task("FinQA", "EmAcc", "FinQA")
-    task18 = Task("TATQA", "EmAcc", "TATQA")
-    task19 = Task("ConvFinQA", "EmAcc", "ConvFinQA")
-    task20 = Task("FNXL", "EntityF1", "FNXL")
-    task21 = Task("FSRL", "EntityF1", "FSRL")
-    task22 = Task("EDTSUM", "Rouge-1", "EDTSUM")
-    task25 = Task("ECTSUM", "Rouge-1", "ECTSUM")
-    task28 = Task("BigData22", "Acc", "BigData22")
-    task30 = Task("ACL18", "Acc", "ACL18")
-    task32 = Task("CIKM18", "Acc", "CIKM18")
-    task34 = Task("German", "MCC", "German")
-    task36 = Task("Australian", "MCC", "Australian")
-    task38 = Task("LendingClub", "MCC", "LendingClub")
-    task40 = Task("ccf", "MCC", "ccf")
-    task42 = Task("ccfraud", "MCC", "ccfraud")
-    task44 = Task("polish", "MCC", "polish")
-    task46 = Task("taiwan", "MCC", "taiwan")
-    task48 = Task("portoseguro", "MCC", "portoseguro")
-    task50 = Task("travelinsurance", "MCC", "travelinsurance")
+    task0 = Task("FPB", "F1", "FPB", category="Spanish")
+    task2 = Task("FiQA-SA", "F1", "FiQA-SA", category="Textual Analysis (TA)")
+    task3 = Task("TSA", "RMSE", "TSA", category="Textual Analysis (TA)")
+    task4 = Task("Headlines", "AvgF1", "Headlines", category="Textual Analysis (TA)")
+    task5 = Task("FOMC", "F1", "FOMC", category="Forecasting (FO)")
+    task7 = Task("FinArg-ACC", "MicroF1", "FinArg-ACC", category="Textual Analysis (TA)")
+    task8 = Task("FinArg-ARC", "MicroF1", "FinArg-ARC", category="Textual Analysis (TA)")
+    task9 = Task("MultiFin", "MicroF1", "Multifin", category="Textual Analysis (TA)")
+    task10 = Task("MA", "MicroF1", "MA", category="Textual Analysis (TA)")
+    task11 = Task("MLESG", "MicroF1", "MLESG", category="Textual Analysis (TA)")
+    task12 = Task("NER", "EntityF1", "NER", category="Information Extraction (IE)")
+    task13 = Task("FINER-ORD", "EntityF1", "FINER-ORD", category="Information Extraction (IE)")
+    task14 = Task("FinRED", "F1", "FinRED", category="Information Extraction (IE)")
+    task15 = Task("SC", "F1", "SC", category="Spanish")
+    task16 = Task("CD", "F1", "CD", category="Spanish")
+    task17 = Task("FinQA", "EmAcc", "FinQA", category="Question Answering (QA)")
+    task18 = Task("TATQA", "EmAcc", "TATQA", category="Question Answering (QA)")
+    task19 = Task("ConvFinQA", "EmAcc", "ConvFinQA", category="Question Answering (QA)")
+    task20 = Task("FNXL", "EntityF1", "FNXL", category="Information Extraction (IE)")
+    task21 = Task("FSRL", "EntityF1", "FSRL", category="Information Extraction (IE)")
+    task22 = Task("EDTSUM", "Rouge-1", "EDTSUM", category="Text Generation (TG)")
+    task25 = Task("ECTSUM", "Rouge-1", "ECTSUM", category="Text Generation (TG)")
+    task28 = Task("BigData22", "Acc", "BigData22", category="Risk Management (RM)")
+    task30 = Task("ACL18", "Acc", "ACL18", category="Decision-Making (DM)")
+    task32 = Task("CIKM18", "Acc", "CIKM18", category="Decision-Making (DM)")
+    task34 = Task("German", "MCC", "German", category="Decision-Making (DM)")
+    task36 = Task("Australian", "MCC", "Australian", category="Decision-Making (DM)")
+    task38 = Task("LendingClub", "MCC", "LendingClub", category="Risk Management (RM)")
+    task40 = Task("ccf", "MCC", "ccf", category="Risk Management (RM)")
+    task42 = Task("ccfraud", "MCC", "ccfraud", category="Risk Management (RM)")
+    task44 = Task("polish", "MCC", "polish", category="Risk Management (RM)")
+    task46 = Task("taiwan", "MCC", "taiwan", category="Risk Management (RM)")
+    task48 = Task("portoseguro", "MCC", "portoseguro", category="Risk Management (RM)")
+    task50 = Task("travelinsurance", "MCC", "travelinsurance", category="Risk Management (RM)")


 NUM_FEWSHOT = 0 # Change with your few shot
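
With the category field added above, every Tasks member must now name the category its column belongs to. For reference, a minimal sketch of the full Task dataclass implied by this hunk; the @dataclass decorator and class header sit above the hunk and are assumed from the surrounding template, and the inline comments restate the deleted task_key/metric_key note:

from dataclasses import dataclass

@dataclass
class Task:
    benchmark: str  # task key in the results json
    metric: str     # metric key in the results json
    col_name: str   # name displayed in the leaderboard
    category: str   # FinBen category tab this column is grouped under
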
src/display/utils.py CHANGED
@@ -27,26 +27,19 @@ auto_eval_column_dict = []
 # Model Information
 auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, category="Model Information", never_hidden=True)])
 auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, category="Model Information", never_hidden=True)])
+auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True, category="Model Information")])
 auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False, category="Model Information")])
 auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False, category="Model Information")])
+auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, category="Model Information", hidden=True)])
+auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False, category="Model Information")])
+auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False, category="Model Information")])
+auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False, category="Model Information")])
+auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False, category="Model Information")])
+auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False, category="Model Information")])
+auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, category="Model Information", hidden=False)])

-# Evaluation Scores
-auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True, category="Evaluation Scores")])
 for task in Tasks:
-    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True, category="Evaluation Scores")])
-
-# Model Metadata
-auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, category="Model Metadata", hidden=True)])
-auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False, category="Model Metadata")])
-auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False, category="Model Metadata")])
-auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False, category="Model Metadata")])
-
-# Popularity Metrics
-auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False, category="Popularity Metrics")])
-
-# Revision and Availability
-auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False, category="Revision and Availability")])
-auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, category="Revision and Availability", hidden=False)])
+    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True, category=task.value.category)])

 # We use make_dataclass to dynamically fill the scores from Tasks
 AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
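
The appends above pass a category keyword into ColumnContent, whose definition sits above this hunk and is not part of the diff. A minimal sketch of what that dataclass plausibly looks like after this commit; the field names follow the calls in the diff, while the field order of the defaults and their values are assumptions:

from dataclasses import dataclass

@dataclass
class ColumnContent:
    name: str                   # header shown in the leaderboard table
    type: str                   # Gradio column dtype: "str", "number", "markdown", "bool"
    displayed_by_default: bool  # pre-checked in its category's CheckboxGroup
    hidden: bool = False        # never offered as a selectable column
    never_hidden: bool = False  # always shown (model type symbol and model name)
    category: str = ""          # tab label used to group columns (assumed default)
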