Adam Jirkovsky committed
Commit 3ec72c6 · 1 Parent(s): a97954f

List uploaded eval results

Files changed (2)
  1. app.py +0 -3
  2. src/leaderboard/read_evals.py +1 -0
app.py CHANGED
@@ -33,15 +33,12 @@ from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval
 from captcha.image import ImageCaptcha
 from PIL import Image
-from os import listdir
 import random, string
 
 
 original_df = None
 leaderboard_df = None
 
-print(listdir(EVAL_REQUESTS_PATH))
-print(listdir(EVAL_RESULTS_PATH))
 
 def restart_space():
     API.restart_space(repo_id=REPO_ID, token=TOKEN)
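
For reference, the removed lines did nothing more than dump two directory listings once at import time. A minimal sketch, assuming EVAL_REQUESTS_PATH and EVAL_RESULTS_PATH name local directories synced from the eval-queue and eval-results data (the Space defines the real constants in its config; the values below are placeholders):

    # Sketch of the import-time debug output removed from app.py.
    # EVAL_REQUESTS_PATH / EVAL_RESULTS_PATH stand in for the Space's own constants;
    # the paths below are placeholders, not the actual configured values.
    import os
    from os import listdir

    EVAL_REQUESTS_PATH = "./eval-queue"    # placeholder: locally synced request files
    EVAL_RESULTS_PATH = "./eval-results"   # placeholder: locally synced result files

    for path in (EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH):
        # Each removed line simply printed one directory listing at startup.
        print(listdir(path) if os.path.isdir(path) else f"{path} not present locally")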
src/leaderboard/read_evals.py CHANGED
@@ -166,6 +166,7 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
             files = [files[-1]]
 
         for file in files:
+            print(file)
             model_result_filepaths.append(os.path.join(root, file))
 
     eval_results = []
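
The new print(file) sits inside the loop that collects per-model result files. Below is a minimal, self-contained sketch of that discovery walk, assuming the usual leaderboard layout of one results directory per model containing JSON files; the function name and simplified sorting are illustrative, reconstructed from the visible hunk context rather than copied from the full upstream function:

    import os

    def list_result_files(results_path: str) -> list[str]:
        """Walk the results tree and report every JSON file that will be parsed."""
        model_result_filepaths = []
        for root, _dirs, files in os.walk(results_path):
            files = [f for f in files if f.endswith(".json")]
            if not files:
                continue
            # Sort by filename and keep every candidate; the upstream code may fall
            # back to only the newest file (files = [files[-1]]) when the timestamp
            # embedded in the name cannot be parsed.
            files.sort()
            for file in files:
                print(file)  # debug line added by this commit: names each result file picked up
                model_result_filepaths.append(os.path.join(root, file))
        return model_result_filepaths

Run against the Space's local eval-results cache, this would print one line per uploaded result file before any parsing happens, which matches the commit's stated intent of listing uploaded eval results.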