Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
Alina Lozowski
committed on
Commit
•
5a5efd9
1
Parent(s):
e79731f
Rename MAINTAINERS_HIGHLIGHT to OFFICIAL_PROVIDERS
Browse files- app.py +1 -1
- src/display/utils.py +4 -4
- src/envs.py +1 -1
- src/leaderboard/filter_models.py +2 -2
app.py
CHANGED
@@ -215,7 +215,7 @@ def init_leaderboard(dataframe):
|
|
215 |
),
|
216 |
ColumnFilter(AutoEvalColumn.moe.name, type="boolean", label="MoE", default=False),
|
217 |
ColumnFilter(AutoEvalColumn.not_flagged.name, type="boolean", label="Flagged", default=True),
|
218 |
-
ColumnFilter(AutoEvalColumn.maintainers_highlight.name, type="boolean", label="Show only maintainer's highlight", default=False),
|
219 |
],
|
220 |
bool_checkboxgroup_label="Hide models",
|
221 |
interactive=False,
|
|
|
215 |
),
|
216 |
ColumnFilter(AutoEvalColumn.moe.name, type="boolean", label="MoE", default=False),
|
217 |
ColumnFilter(AutoEvalColumn.not_flagged.name, type="boolean", label="Flagged", default=True),
|
218 |
+
ColumnFilter(AutoEvalColumn.official_providers.name, type="boolean", label="Show only official providers", default=False),
|
219 |
],
|
220 |
bool_checkboxgroup_label="Hide models",
|
221 |
interactive=False,
|
src/display/utils.py
CHANGED
@@ -6,13 +6,13 @@ import logging
|
|
6 |
from datetime import datetime
|
7 |
import pandas as pd
|
8 |
|
9 |
-
from src.envs import MAINTAINERS_HIGHLIGHT_REPO
|
10 |
|
11 |
# Configure logging
|
12 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
13 |
|
14 |
-
dataset = load_dataset(MAINTAINERS_HIGHLIGHT_REPO)
|
15 |
-
maintainers_highlight = dataset["train"][0]["CURATED_SET"]
|
16 |
|
17 |
# Convert ISO 8601 dates to datetime objects for comparison
|
18 |
def parse_iso8601_datetime(date_str):
|
@@ -120,7 +120,7 @@ auto_eval_column_dict.append(["submission_date", ColumnContent, ColumnContent("S
|
|
120 |
auto_eval_column_dict.append(["upload_to_hub", ColumnContent, ColumnContent("Upload To Hub Date", "bool", False, hidden=False)])
|
121 |
|
122 |
auto_eval_column_dict.append(["use_chat_template", ColumnContent, ColumnContent("Chat Template", "bool", False)])
|
123 |
-
auto_eval_column_dict.append(["maintainers_highlight", ColumnContent, ColumnContent("Maintainer's Highlight", "bool", False, hidden=True)])
|
124 |
|
125 |
# fullname structure: <user>/<model_name>
|
126 |
auto_eval_column_dict.append(["fullname", ColumnContent, ColumnContent("fullname", "str", False, dummy=True)])
|
|
|
6 |
from datetime import datetime
|
7 |
import pandas as pd
|
8 |
|
9 |
+
from src.envs import OFFICIAL_PROVIDERS_REPO
|
10 |
|
11 |
# Configure logging
|
12 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
13 |
|
14 |
+
dataset = load_dataset(OFFICIAL_PROVIDERS_REPO)
|
15 |
+
official_providers = dataset["train"][0]["CURATED_SET"]
|
16 |
|
17 |
# Convert ISO 8601 dates to datetime objects for comparison
|
18 |
def parse_iso8601_datetime(date_str):
|
|
|
120 |
auto_eval_column_dict.append(["upload_to_hub", ColumnContent, ColumnContent("Upload To Hub Date", "bool", False, hidden=False)])
|
121 |
|
122 |
auto_eval_column_dict.append(["use_chat_template", ColumnContent, ColumnContent("Chat Template", "bool", False)])
|
123 |
+
auto_eval_column_dict.append(["official_providers", ColumnContent, ColumnContent("Official Providers", "bool", False, hidden=True)])
|
124 |
|
125 |
# fullname structure: <user>/<model_name>
|
126 |
auto_eval_column_dict.append(["fullname", ColumnContent, ColumnContent("fullname", "str", False, dummy=True)])
|
src/envs.py
CHANGED
@@ -8,7 +8,7 @@ REPO_ID = "open-llm-leaderboard/open_llm_leaderboard"
|
|
8 |
QUEUE_REPO = "open-llm-leaderboard/requests"
|
9 |
AGGREGATED_REPO = "open-llm-leaderboard/contents"
|
10 |
VOTES_REPO = "open-llm-leaderboard/votes"
|
11 |
-
MAINTAINERS_HIGHLIGHT_REPO = "open-llm-leaderboard/maintainers-highlight"
|
12 |
|
13 |
HF_HOME = os.getenv("HF_HOME", ".")
|
14 |
|
|
|
8 |
QUEUE_REPO = "open-llm-leaderboard/requests"
|
9 |
AGGREGATED_REPO = "open-llm-leaderboard/contents"
|
10 |
VOTES_REPO = "open-llm-leaderboard/votes"
|
11 |
+
OFFICIAL_PROVIDERS_REPO = "open-llm-leaderboard/official-providers"
|
12 |
|
13 |
HF_HOME = os.getenv("HF_HOME", ".")
|
14 |
|
src/leaderboard/filter_models.py
CHANGED
@@ -19,8 +19,8 @@ DO_NOT_SUBMIT_MODELS = [
|
|
19 |
def flag_models(leaderboard_data: list[dict]):
|
20 |
"""Flags models based on external criteria or flagged status."""
|
21 |
for model_data in leaderboard_data:
|
22 |
-
# Skip flagging if maintainers highlight is True
|
23 |
-
if model_data.get(AutoEvalColumn.maintainers_highlight.name, False):
|
24 |
model_data[AutoEvalColumn.not_flagged.name] = True
|
25 |
continue
|
26 |
|
|
|
19 |
def flag_models(leaderboard_data: list[dict]):
|
20 |
"""Flags models based on external criteria or flagged status."""
|
21 |
for model_data in leaderboard_data:
|
22 |
+
# Skip flagging if official providers is True
|
23 |
+
if model_data.get(AutoEvalColumn.official_providers.name, False):
|
24 |
model_data[AutoEvalColumn.not_flagged.name] = True
|
25 |
continue
|
26 |
|