Spaces:
Running
Running
sanjeev-kumar-4
committed on
Commit
•
7d08741
1
Parent(s):
12fdafc
Enabled 'Embedding Dimensions' and 'Max Tokens' in all the tabs across task and language
Browse files- refresh.py +7 -7
refresh.py
CHANGED
@@ -238,7 +238,7 @@ def get_external_model_results():
|
|
238 |
}
|
239 |
models_to_run = EXTERNAL_MODELS
|
240 |
|
241 |
-
pbar = tqdm(models_to_run, desc="Fetching external model results")
|
242 |
for model in pbar:
|
243 |
pbar.set_description(f"Fetching external model results for {model!r}")
|
244 |
try:
|
@@ -596,7 +596,7 @@ def get_mteb_average(task_dict: dict) -> tuple[Any, dict]:
|
|
596 |
for task_category, task_category_list in task_dict.items():
|
597 |
DATA_TASKS[task_category] = add_rank(
|
598 |
DATA_OVERALL[
|
599 |
-
["Model", "Model Size (Million Parameters)", "Memory Usage (GB, fp32)"] + task_category_list
|
600 |
]
|
601 |
)
|
602 |
DATA_TASKS[task_category] = DATA_TASKS[task_category][
|
@@ -663,9 +663,9 @@ def refresh_leaderboard() -> tuple[list, dict]:
|
|
663 |
data_task_category = get_mteb_data(
|
664 |
tasks=[task_category], datasets=task_category_list
|
665 |
)
|
666 |
-
data_task_category.drop(
|
667 |
-
columns=["Embedding Dimensions", "Max Tokens"], inplace=True
|
668 |
-
)
|
669 |
boards_data[board]["data_tasks"][task_category] = data_task_category
|
670 |
all_data_tasks.append(data_task_category)
|
671 |
if board == "bright_long":
|
@@ -777,8 +777,8 @@ if __name__ == "__main__":
|
|
777 |
print("Done calculating, saving...")
|
778 |
# save them so that the leaderboard can use them. They're quite complex though
|
779 |
# but we can't use pickle files because of git-lfs.
|
780 |
-
write_out_results(all_data_tasks, "all_data_tasks")
|
781 |
-
write_out_results(boards_data, "boards_data")
|
782 |
|
783 |
# to load them use
|
784 |
# all_data_tasks = load_results("all_data_tasks")
|
|
|
238 |
}
|
239 |
models_to_run = EXTERNAL_MODELS
|
240 |
|
241 |
+
pbar = tqdm(models_to_run[:10], desc="Fetching external model results")
|
242 |
for model in pbar:
|
243 |
pbar.set_description(f"Fetching external model results for {model!r}")
|
244 |
try:
|
|
|
596 |
for task_category, task_category_list in task_dict.items():
|
597 |
DATA_TASKS[task_category] = add_rank(
|
598 |
DATA_OVERALL[
|
599 |
+
["Model", "Model Size (Million Parameters)", "Memory Usage (GB, fp32)", "Embedding Dimensions", "Max Tokens"] + task_category_list
|
600 |
]
|
601 |
)
|
602 |
DATA_TASKS[task_category] = DATA_TASKS[task_category][
|
|
|
663 |
data_task_category = get_mteb_data(
|
664 |
tasks=[task_category], datasets=task_category_list
|
665 |
)
|
666 |
+
# data_task_category.drop(
|
667 |
+
# columns=["Embedding Dimensions", "Max Tokens"], inplace=True
|
668 |
+
# )
|
669 |
boards_data[board]["data_tasks"][task_category] = data_task_category
|
670 |
all_data_tasks.append(data_task_category)
|
671 |
if board == "bright_long":
|
|
|
777 |
print("Done calculating, saving...")
|
778 |
# save them so that the leaderboard can use them. They're quite complex though
|
779 |
# but we can't use pickle files because of git-lfs.
|
780 |
+
write_out_results(all_data_tasks, "all_data_tasks_temp2")
|
781 |
+
write_out_results(boards_data, "boards_data_temp2")
|
782 |
|
783 |
# to load them use
|
784 |
# all_data_tasks = load_results("all_data_tasks")
|