Spaces:
Runtime error
Runtime error
Aaron Mueller
committed on
Commit
•
31b9f7c
1
Parent(s):
1bbb1d0
edit upload logic
Browse files
- app.py +4 -6
- src/submission/submit.py +0 -1
app.py
CHANGED
@@ -64,8 +64,6 @@ def init_leaderboard(dataframe, track):
|
|
64 |
if dataframe is None or dataframe.empty:
|
65 |
raise ValueError("Leaderboard DataFrame is empty or None.")
|
66 |
# filter for correct track
|
67 |
-
print(dataframe["Track"])
|
68 |
-
print(track)
|
69 |
dataframe = dataframe.loc[dataframe["Track"] == track]
|
70 |
return Leaderboard(
|
71 |
value=dataframe,
|
@@ -97,7 +95,8 @@ def process_json(temp_file):
|
|
97 |
except Exception as e:
|
98 |
raise gr.Error(f"Error processing file: {str(e)}")
|
99 |
|
100 |
-
|
|
|
101 |
|
102 |
|
103 |
demo = gr.Blocks(css=custom_css)
|
@@ -116,7 +115,7 @@ with demo:
|
|
116 |
with gr.TabItem("π About", elem_id="llm-benchmark-tab-table", id=4):
|
117 |
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
|
118 |
|
119 |
-
with gr.TabItem("
|
120 |
with gr.Column():
|
121 |
with gr.Row():
|
122 |
gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
|
@@ -174,11 +173,10 @@ with demo:
|
|
174 |
|
175 |
predictions_data = gr.State()
|
176 |
upload_button = gr.UploadButton(label="Upload predictions", file_types=[".json", ".gz"], file_count="single")
|
177 |
-
json_display = gr.JSON()
|
178 |
upload_button.upload(
|
179 |
fn=process_json,
|
180 |
inputs=upload_button,
|
181 |
-
outputs=
|
182 |
api_name="upload_json"
|
183 |
)
|
184 |
|
|
|
64 |
if dataframe is None or dataframe.empty:
|
65 |
raise ValueError("Leaderboard DataFrame is empty or None.")
|
66 |
# filter for correct track
|
|
|
|
|
67 |
dataframe = dataframe.loc[dataframe["Track"] == track]
|
68 |
return Leaderboard(
|
69 |
value=dataframe,
|
|
|
95 |
except Exception as e:
|
96 |
raise gr.Error(f"Error processing file: {str(e)}")
|
97 |
|
98 |
+
gr.Markdown("Upload successful!")
|
99 |
+
return data
|
100 |
|
101 |
|
102 |
demo = gr.Blocks(css=custom_css)
|
|
|
115 |
with gr.TabItem("π About", elem_id="llm-benchmark-tab-table", id=4):
|
116 |
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
|
117 |
|
118 |
+
with gr.TabItem("πΆ Submit", elem_id="llm-benchmark-tab-table", id=5):
|
119 |
with gr.Column():
|
120 |
with gr.Row():
|
121 |
gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
|
|
|
173 |
|
174 |
predictions_data = gr.State()
|
175 |
upload_button = gr.UploadButton(label="Upload predictions", file_types=[".json", ".gz"], file_count="single")
|
|
|
176 |
upload_button.upload(
|
177 |
fn=process_json,
|
178 |
inputs=upload_button,
|
179 |
+
outputs=predictions_data,
|
180 |
api_name="upload_json"
|
181 |
)
|
182 |
|
src/submission/submit.py
CHANGED
@@ -28,7 +28,6 @@ def add_new_eval(
|
|
28 |
REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
|
29 |
|
30 |
out_message = ""
|
31 |
-
print(predictions) # debugging
|
32 |
|
33 |
user_name = ""
|
34 |
model_path = model_name
|
|
|
28 |
REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
|
29 |
|
30 |
out_message = ""
|
|
|
31 |
|
32 |
user_name = ""
|
33 |
model_path = model_name
|