Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
Clémentine
committed on
Commit
•
5e9f7f7
1
Parent(s):
5822157
date
Browse files
app.py
CHANGED
@@ -42,6 +42,7 @@ def get_dataframe_from_results(eval_results, split):
|
|
42 |
local_df = local_df.rename_column("score", "Average score (%)")
|
43 |
for i in [1, 2, 3]:
|
44 |
local_df = local_df.rename_column(f"score_level{i}", f"Level {i} score (%)")
|
|
|
45 |
df = pd.DataFrame(local_df)
|
46 |
df = df.sort_values(by=["Average score (%)"], ascending=False)
|
47 |
|
@@ -63,7 +64,7 @@ gold_results = {split: {row["task_id"]: row for row in gold_dataset[split]} for
|
|
63 |
def restart_space():
|
64 |
api.restart_space(repo_id=LEADERBOARD_PATH, token=TOKEN)
|
65 |
|
66 |
-
TYPES = ["markdown", "number", "number", "number", "number", "str", "str"]
|
67 |
|
68 |
def add_new_eval(
|
69 |
val_or_test: str,
|
@@ -155,6 +156,7 @@ def add_new_eval(
|
|
155 |
"score_level1": scores[1]/num_questions[1],
|
156 |
"score_level2": scores[2]/num_questions[2],
|
157 |
"score_level3": scores[3]/num_questions[3],
|
|
|
158 |
}
|
159 |
eval_results[val_or_test] = eval_results[val_or_test].add_item(eval_entry)
|
160 |
print(eval_results)
|
|
|
42 |
local_df = local_df.rename_column("score", "Average score (%)")
|
43 |
for i in [1, 2, 3]:
|
44 |
local_df = local_df.rename_column(f"score_level{i}", f"Level {i} score (%)")
|
45 |
+
local_df = local_df.rename_column("date", "Submission date")
|
46 |
df = pd.DataFrame(local_df)
|
47 |
df = df.sort_values(by=["Average score (%)"], ascending=False)
|
48 |
|
|
|
64 |
def restart_space():
|
65 |
api.restart_space(repo_id=LEADERBOARD_PATH, token=TOKEN)
|
66 |
|
67 |
+
TYPES = ["markdown", "number", "number", "number", "number", "str", "str", "str"]
|
68 |
|
69 |
def add_new_eval(
|
70 |
val_or_test: str,
|
|
|
156 |
"score_level1": scores[1]/num_questions[1],
|
157 |
"score_level2": scores[2]/num_questions[2],
|
158 |
"score_level3": scores[3]/num_questions[3],
|
159 |
+
"date": datetime.datetime.today().strftime('%Y-%m-%d')
|
160 |
}
|
161 |
eval_results[val_or_test] = eval_results[val_or_test].add_item(eval_entry)
|
162 |
print(eval_results)
|