Aaron Mueller committed on
Commit 2b71798 · 1 Parent(s): 70dcf65
src/display/utils.py CHANGED
@@ -23,7 +23,6 @@ class ColumnContent:
 ## Leaderboard columns
 auto_eval_column_dict = []
 # Init
-auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
 auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
 #Scores
 auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
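This file follows the layout of the Hugging Face demo-leaderboard template, where ColumnContent is a small dataclass and auto_eval_column_dict is later materialised into an AutoEvalColumn class via make_dataclass. The sketch below is an assumption based on that template, not code from this commit (the field names, the frozen flag, and AutoEvalColumn itself are not shown in the diff); it illustrates that dropping the model_type_symbol entry simply removes the "T" column from the generated class.

from dataclasses import dataclass, make_dataclass

# Assumed ColumnContent definition, based on the leaderboard template.
# frozen=True keeps instances hashable so they can serve as field defaults below.
@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

auto_eval_column_dict = []
# Init -- after this commit, no "T" model_type_symbol column is appended here
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
# Scores
auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])

# The template typically turns the [attr_name, type, default] triples into a
# frozen dataclass whose class attributes are the ColumnContent defaults above.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print(AutoEvalColumn.model.name)    # "Model"
print(AutoEvalColumn.average.name)  # "Average ⬆️"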
src/leaderboard/read_evals.py CHANGED
@@ -144,6 +144,7 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
     """From the path of the results folder root, extract all needed info for results"""
     model_result_filepaths = []
 
+    print("Here 1")
     print(results_path)
     for root, _, files in os.walk(results_path):
         # We should only have json files in model results
@@ -160,6 +161,7 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
         for file in files:
             model_result_filepaths.append(os.path.join(root, file))
 
+    print("Here 2")
     print(model_result_filepaths)
     eval_results = {}
     for model_result_filepath in model_result_filepaths:
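For orientation, here is a minimal sketch of where the two debug prints sit inside get_raw_eval_results, assuming the usual demo-leaderboard structure of this file; everything other than the os.walk collection and the two prints is an assumption, and the JSON-to-EvalResult parsing is elided.

import os

def get_raw_eval_results(results_path: str, requests_path: str) -> list:
    """From the path of the results folder root, extract all needed info for results"""
    model_result_filepaths = []

    print("Here 1")  # debug print added in this commit
    print(results_path)
    for root, _, files in os.walk(results_path):
        # We should only have json files in model results
        if len(files) == 0 or any(not f.endswith(".json") for f in files):
            continue
        for file in files:
            model_result_filepaths.append(os.path.join(root, file))

    print("Here 2")  # debug print added in this commit
    print(model_result_filepaths)

    eval_results = {}
    for model_result_filepath in model_result_filepaths:
        # In the template, each JSON file is parsed into an EvalResult here and
        # merged into eval_results keyed by its eval name (elided in this sketch).
        pass

    return list(eval_results.values())

The two prints bracket the file-discovery walk: the first reports the results_path being searched, the second the JSON result files that were actually collected.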