xinchen9 committed · verified
Commit 0c78756 · 1 Parent(s): d4bee2c

[Update] Comment out lines 37-57

Files changed (1):
  app.py  (+29, -29)
app.py CHANGED
@@ -27,40 +27,40 @@ from src.display.utils import (
     Precision
 )
 from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
-from src.populate import get_evaluation_queue_df, get_leaderboard_df
-from src.submission.submit import add_new_eval
+# from src.populate import get_evaluation_queue_df, get_leaderboard_df
+# from src.submission.submit import add_new_eval
 from PIL import Image
 from dummydatagen import dummy_data_for_plot, create_metric_plot_obj_1, dummydf
 import copy
 
 
-def restart_space():
-    API.restart_space(repo_id=REPO_ID)
-
-try:
-    print(EVAL_REQUESTS_PATH)
-    snapshot_download(
-        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
-except Exception:
-    restart_space()
-try:
-    print(EVAL_RESULTS_PATH)
-    snapshot_download(
-        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
-except Exception:
-    restart_space()
-
-
-raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
-leaderboard_df = original_df.copy()
-
-(
-    finished_eval_queue_df,
-    running_eval_queue_df,
-    pending_eval_queue_df,
-) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
+# def restart_space():
+#     API.restart_space(repo_id=REPO_ID)
+
+# try:
+#     print(EVAL_REQUESTS_PATH)
+#     snapshot_download(
+#         repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
+#     )
+# except Exception:
+#     restart_space()
+# try:
+#     print(EVAL_RESULTS_PATH)
+#     snapshot_download(
+#         repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
+#     )
+# except Exception:
+#     restart_space()
+
+
+# raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
+# leaderboard_df = original_df.copy()
+
+# (
+#     finished_eval_queue_df,
+#     running_eval_queue_df,
+#     pending_eval_queue_df,
+# ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
 
 
 # Searching and filtering
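
For reference, the block being commented out was the Space's startup sync: it mirrored the evaluation queue and results datasets locally with `snapshot_download`, restarted the Space if a download failed, and then built the leaderboard and queue DataFrames. Below is a minimal standalone sketch of that pattern, assuming the `src.envs` constants and `src.populate` helpers from this repo, and assuming `COLS`, `BENCHMARK_COLS`, and `EVAL_COLS` are exported by `src.display.utils` (as in the standard leaderboard template); it is a sketch of the disabled flow, not the file as committed.

```python
from huggingface_hub import snapshot_download

# Assumed exports; the diff hunk only shows part of this import list.
from src.display.utils import BENCHMARK_COLS, COLS, EVAL_COLS
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df


def restart_space():
    # Restart the Space so a failed dataset download is retried on the next boot.
    API.restart_space(repo_id=REPO_ID)


def sync_dataset(repo_id: str, local_dir: str) -> None:
    # Mirror a Hub dataset into a local directory; on any failure, restart the Space.
    try:
        snapshot_download(
            repo_id=repo_id,
            local_dir=local_dir,
            repo_type="dataset",
            tqdm_class=None,
            etag_timeout=30,
            token=TOKEN,
        )
    except Exception:
        restart_space()


# Pull the evaluation queue and results datasets before building any tables.
sync_dataset(QUEUE_REPO, EVAL_REQUESTS_PATH)
sync_dataset(RESULTS_REPO, EVAL_RESULTS_PATH)

# Build the leaderboard DataFrame and the finished/running/pending queue views.
raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
leaderboard_df = original_df.copy()
(
    finished_eval_queue_df,
    running_eval_queue_df,
    pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
```

With this block disabled, the app presumably relies on the `dummydatagen` helpers that remain imported above rather than on live Hub data.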