mirageco committed on
Commit 318071f
1 Parent(s): 75fb8ba

Move buttons under the select columns window into categories

Files changed (2):
  1. app.py +87 -31
  2. src/display/utils.py +26 -18
app.py CHANGED
@@ -63,16 +63,22 @@ leaderboard_df = original_df.copy()
 # Searching and filtering
 def update_table(
     hidden_df: pd.DataFrame,
-    columns: list,
+    columns_info: list,
+    columns_eval: list,
+    columns_metadata: list,
+    columns_popularity: list,
+    columns_revision: list,
     type_query: list,
-    precision_query: str,
+    precision_query: list,
     size_query: list,
     show_deleted: bool,
     query: str,
 ):
     filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
     filtered_df = filter_queries(query, filtered_df)
-    df = select_columns(filtered_df, columns)
+    # Combine all column selections
+    selected_columns = columns_info + columns_eval + columns_metadata + columns_popularity + columns_revision
+    df = select_columns(filtered_df, selected_columns)
     return df


@@ -131,6 +137,8 @@ def filter_models(

     return filtered_df

+def uncheck_all():
+    return [], [], [], [], []

 demo = gr.Blocks(css=custom_css)
 with demo:
@@ -148,20 +156,54 @@ with demo:
                     elem_id="search-bar",
                 )
             with gr.Row():
-                shown_columns = gr.CheckboxGroup(
-                    choices=[
-                        c.name
-                        for c in fields(AutoEvalColumn)
-                        if not c.hidden and not c.never_hidden
-                    ],
-                    value=[
-                        c.name
-                        for c in fields(AutoEvalColumn)
-                        if c.displayed_by_default and not c.hidden and not c.never_hidden
+                with gr.Accordion("Select columns to show"):
+                    with gr.Tab("Model Information"):
+                        shown_columns_info = gr.CheckboxGroup(
+                            choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Model Information"],
+                            value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Model Information"],
+                            label="Model Information",
+                            interactive=True,
+                        )
+                    with gr.Tab("Evaluation Scores"):
+                        shown_columns_eval = gr.CheckboxGroup(
+                            choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Evaluation Scores"],
+                            value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Evaluation Scores"],
+                            label="Evaluation Scores",
+                            interactive=True,
+                        )
+                    with gr.Tab("Model Metadata"):
+                        shown_columns_metadata = gr.CheckboxGroup(
+                            choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Model Metadata"],
+                            value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Model Metadata"],
+                            label="Model Metadata",
+                            interactive=True,
+                        )
+                    with gr.Tab("Popularity Metrics"):
+                        shown_columns_popularity = gr.CheckboxGroup(
+                            choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Popularity Metrics"],
+                            value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Popularity Metrics"],
+                            label="Popularity Metrics",
+                            interactive=True,
+                        )
+                    with gr.Tab("Revision and Availability"):
+                        shown_columns_revision = gr.CheckboxGroup(
+                            choices=[c.name for c in fields(AutoEvalColumn) if c.category == "Revision and Availability"],
+                            value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.category == "Revision and Availability"],
+                            label="Revision and Availability",
+                            interactive=True,
+                        )
+                    with gr.Row():
+                        uncheck_all_button = gr.Button("Uncheck All")
+                        uncheck_all_button.click(
+                            uncheck_all,
+                            inputs=[],
+                            outputs=[
+                                shown_columns_info,
+                                shown_columns_eval,
+                                shown_columns_metadata,
+                                shown_columns_popularity,
+                                shown_columns_revision
                     ],
-                    label="Select columns to show",
-                    elem_id="column-select",
-                    interactive=True,
                 )
             with gr.Row():
                 deleted_models_visibility = gr.Checkbox(
@@ -191,12 +233,13 @@ with demo:
                     elem_id="filter-columns-size",
                 )

-            leaderboard_table = gr.components.Dataframe(
+            leaderboard_table = gr.Dataframe(
                 value=leaderboard_df[
                     [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
-                    + shown_columns.value
+                    + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default]
                 ],
-                headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
+                headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden]
+                + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
                 datatype=TYPES,
                 elem_id="leaderboard-table",
                 interactive=False,
@@ -204,7 +247,7 @@ with demo:
             )

             # Dummy leaderboard for handling the case when the user uses backspace key
-            hidden_leaderboard_table_for_search = gr.components.Dataframe(
+            hidden_leaderboard_table_for_search = gr.Dataframe(
                 value=original_df[COLS],
                 headers=COLS,
                 datatype=TYPES,
@@ -212,30 +255,43 @@ with demo:
             )
             search_bar.submit(
                 update_table,
-                [
+                inputs=[
                     hidden_leaderboard_table_for_search,
-                    shown_columns,
+                    shown_columns_info,
+                    shown_columns_eval,
+                    shown_columns_metadata,
+                    shown_columns_popularity,
+                    shown_columns_revision,
                     filter_columns_type,
                     filter_columns_precision,
                     filter_columns_size,
                     deleted_models_visibility,
                     search_bar,
                 ],
-                leaderboard_table,
+                outputs=leaderboard_table,
             )
-            for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size, deleted_models_visibility]:
+            for selector in [
+                shown_columns_info, shown_columns_eval, shown_columns_metadata,
+                shown_columns_popularity, shown_columns_revision,
+                filter_columns_type, filter_columns_precision,
+                filter_columns_size, deleted_models_visibility
+            ]:
                 selector.change(
                     update_table,
-                    [
+                    inputs=[
                         hidden_leaderboard_table_for_search,
-                        shown_columns,
+                        shown_columns_info,
+                        shown_columns_eval,
+                        shown_columns_metadata,
+                        shown_columns_popularity,
+                        shown_columns_revision,
                         filter_columns_type,
                         filter_columns_precision,
                         filter_columns_size,
                         deleted_models_visibility,
                         search_bar,
                     ],
-                    leaderboard_table,
+                    outputs=leaderboard_table,
                     queue=True,
                 )

@@ -253,7 +309,7 @@ with demo:
                     open=False,
                 ):
                     with gr.Row():
-                        finished_eval_table = gr.components.Dataframe(
+                        finished_eval_table = gr.Dataframe(
                             value=finished_eval_queue_df,
                             headers=EVAL_COLS,
                             datatype=EVAL_TYPES,
@@ -264,7 +320,7 @@ with demo:
                    open=False,
                 ):
                     with gr.Row():
-                        running_eval_table = gr.components.Dataframe(
+                        running_eval_table = gr.Dataframe(
                             value=running_eval_queue_df,
                             headers=EVAL_COLS,
                             datatype=EVAL_TYPES,
@@ -276,7 +332,7 @@ with demo:
                     open=False,
                 ):
                     with gr.Row():
-                        pending_eval_table = gr.components.Dataframe(
+                        pending_eval_table = gr.Dataframe(
                             value=pending_eval_queue_df,
                             headers=EVAL_COLS,
                             datatype=EVAL_TYPES,
@@ -342,4 +398,4 @@ with demo:
 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=1800)
 scheduler.start()
-demo.queue(default_concurrency_limit=40).launch()
+demo.queue(default_concurrency_limit=40).launch()
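The wiring pattern this diff introduces is easier to see in isolation, so here is a stripped-down, runnable sketch of the same idea: CheckboxGroups grouped in tabs, their selections concatenated into one column list, and an "Uncheck All" button that resets every group. The dataframe, column names, and handler names below are toy placeholders for illustration, not the leaderboard's real schema.

import gradio as gr
import pandas as pd

df = pd.DataFrame({"Model": ["m1", "m2"], "Average ⬆️": [71.2, 68.9], "Hub ❤️": [120, 45]})

def update_table(hidden_df, cols_info, cols_eval):
    # Concatenate the per-category selections, as update_table does above
    selected = cols_info + cols_eval
    return hidden_df[selected] if selected else hidden_df

def uncheck_all():
    # One empty list per CheckboxGroup, mirroring uncheck_all in app.py
    return [], []

with gr.Blocks() as demo:
    with gr.Accordion("Select columns to show"):
        with gr.Tab("Model Information"):
            cols_info = gr.CheckboxGroup(choices=["Model"], value=["Model"], interactive=True)
        with gr.Tab("Evaluation Scores"):
            cols_eval = gr.CheckboxGroup(choices=["Average ⬆️", "Hub ❤️"], value=["Average ⬆️"], interactive=True)
        with gr.Row():
            uncheck_all_button = gr.Button("Uncheck All")
            uncheck_all_button.click(uncheck_all, inputs=[], outputs=[cols_info, cols_eval])

    hidden_table = gr.Dataframe(value=df, visible=False)  # full copy used as the filter source
    table = gr.Dataframe(value=df[["Model", "Average ⬆️"]])

    for selector in [cols_info, cols_eval]:
        selector.change(update_table, inputs=[hidden_table, cols_info, cols_eval], outputs=table)

demo.launch()

Because every group and filter re-triggers the same update_table through the selector loop, adding a sixth category only requires a new CheckboxGroup, a new parameter on update_table, and one more entry in each inputs list.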
src/display/utils.py CHANGED
@@ -17,30 +17,38 @@ class ColumnContent:
     name: str
     type: str
     displayed_by_default: bool
+    category: str = ""  # New attribute to hold the category
     hidden: bool = False
     never_hidden: bool = False

 ## Leaderboard columns
 auto_eval_column_dict = []
-# Init
-auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
-auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
-#Scores
-auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
+
+# Model Information
+auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, category="Model Information", never_hidden=True)])
+auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, category="Model Information", never_hidden=True)])
+auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False, category="Model Information")])
+auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False, category="Model Information")])
+
+# Evaluation Scores
+auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True, category="Evaluation Scores")])
 for task in Tasks:
-    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
-# Model information
-auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
-auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
-auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
-auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
-auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
-auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
-auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
-auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
-auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
-
-# We use make dataclass to dynamically fill the scores from Tasks
+    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True, category="Evaluation Scores")])
+
+# Model Metadata
+auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, category="Model Metadata", hidden=True)])
+auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False, category="Model Metadata")])
+auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False, category="Model Metadata")])
+auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False, category="Model Metadata")])
+
+# Popularity Metrics
+auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False, category="Popularity Metrics")])
+
+# Revision and Availability
+auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False, category="Revision and Availability")])
+auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, category="Revision and Availability", hidden=False)])
+
+# We use make_dataclass to dynamically fill the scores from Tasks
 AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

 ## For the queue columns in the submission tab
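For reference, the new category attribute is what lets app.py build each tab's CheckboxGroup from a single filter over the column definitions. Below is a minimal, self-contained sketch of that mechanism; the fields() helper and the shortened column list here are simplified stand-ins for illustration, not the template's actual implementation.

from dataclasses import dataclass, make_dataclass

@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    category: str = ""
    hidden: bool = False
    never_hidden: bool = False

# Same [attr_name, attr_type, default_instance] triplets that make_dataclass consumes above
auto_eval_column_dict = [
    ["model", ColumnContent, ColumnContent("Model", "markdown", True, category="Model Information", never_hidden=True)],
    ["average", ColumnContent, ColumnContent("Average ⬆️", "number", True, category="Evaluation Scores")],
    ["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False, category="Popularity Metrics")],
]
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

def fields(raw_class):
    # Simplified stand-in: collect the ColumnContent defaults attached to the class
    return [v for v in raw_class.__dict__.values() if isinstance(v, ColumnContent)]

# One CheckboxGroup per category is then a filtered comprehension, as in app.py
eval_choices = [c.name for c in fields(AutoEvalColumn) if c.category == "Evaluation Scores"]
print(eval_choices)  # ['Average ⬆️']

A column added without a matching category string would not appear in any tab, so the five category names in src/display/utils.py and the five tabs in app.py have to stay in sync.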