Aaron Mueller committed
Commit d162626 · 1 Parent(s): 31b9f7c
src/leaderboard/read_evals.py CHANGED
@@ -56,7 +56,6 @@ class EvalResult:
 
         def _get_task_results(task):
             # We average all scores of a given metric (not all metrics are present in all files)
-            print(data["results"].items())
             accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
             if accs.size == 0 or any([acc is None for acc in accs]):
                 return None
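
For context: _get_task_results averages a given task's metric over whichever result entries actually contain it, and the deleted line was a stray debugging print of the raw results dict. Below is a minimal, self-contained sketch of the helper's behavior, assuming the non-None case reduces to np.mean; the Task stand-in and the example data layout are hypothetical, not taken from this repo.

import numpy as np
from dataclasses import dataclass

# Hypothetical stand-in for the task objects consumed by _get_task_results.
@dataclass
class Task:
    benchmark: str
    metric: str

def get_task_results(data, task):
    # Pull the task's metric from every results entry whose key matches the
    # benchmark name; entries that lack the metric contribute None.
    accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
    # No matching entry, or a matching entry missing the metric: no score.
    if accs.size == 0 or any([acc is None for acc in accs]):
        return None
    return np.mean(accs)  # assumed reduction; the hunk above does not show it

# Example data layout (invented for illustration):
data = {"results": {"blimp": {"acc": 0.81}, "ewok": {"acc": 0.74}}}
print(get_task_results(data, Task("blimp", "acc")))  # 0.81
print(get_task_results(data, Task("glue", "acc")))   # None

Because keys are matched by equality, each task averages over at most the entries sharing its benchmark name, and the None guard keeps partially populated result files from skewing the mean.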
src/submission/submit.py CHANGED
@@ -47,19 +47,25 @@ def add_new_eval(
     out_message = ""
 
     # Is the model info correctly filled?
+    print("Made it before 1")
     try:
         model_info = API.model_info(repo_id=model_id, revision=revision)
     except Exception:
         out_message += styled_warning("Could not get your model information. The leaderboard entry will not have a link to its HF repo.") + "<br>"
+    print("Made it after 1")
 
     modelcard_OK, error_msg = check_model_card(model_name)
     if not modelcard_OK:
         out_message += styled_warning(error_msg) + "<br>"
+
+    print("Made it after 2")
 
     predictions_OK, error_msg = is_valid_predictions(predictions)
     if not predictions_OK:
         return styled_error(error_msg) + "<br>"
 
+    print("Made it after 3")
+
     # Seems good, creating the eval
     print("Adding new eval")
 
@@ -74,6 +80,8 @@ def add_new_eval(
         "private": False,
     }
 
+    print("Made it after 4")
+
     # Check for duplicate submission
     if f"{model_name}_{revision}_{track}" in REQUESTED_MODELS:
         return styled_error("A model with this name has been already submitted.")
@@ -83,6 +91,8 @@ def add_new_eval(
     os.makedirs(OUT_DIR, exist_ok=True)
     out_path = f"{OUT_DIR}/{model_path}_{revision}_eval_request_False_{track}.json"
 
+    print("Made it after 5")
+
     with open(out_path, "w") as f:
         f.write(json.dumps(eval_entry))
 
@@ -95,6 +105,8 @@ def add_new_eval(
         commit_message=f"Add {model_name} to eval queue",
     )
 
+    print("Made it after 6")
+
     # Remove the local file
     os.remove(out_path)
 
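
The prints added throughout add_new_eval are checkpoint breadcrumbs: they bracket each validation and I/O step so the Space logs show how far a submission got before it failed or hung. Below is a hedged sketch of the same tracing built on the standard logging module instead of bare print calls; the step names, logger name, and format string are illustrative choices, not part of this commit.

import logging

logging.basicConfig(level=logging.INFO, format="%(asctime)s %(name)s: %(message)s")
logger = logging.getLogger("submit")

def trace(step: str) -> None:
    # One breadcrumb per checkpoint, mirroring the "Made it ..." prints.
    logger.info("checkpoint: %s", step)

trace("before model_info")   # stands in for print("Made it before 1")
# ... API.model_info(...), check_model_card(...), etc. would run here ...
trace("after model_info")    # stands in for print("Made it after 1")

The timestamps make it possible to tell which two checkpoints a hang sits between, which bare prints do not give you.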