Adam Jirkovsky committed · 4ac8818 · Parent(s): 50e453b

Add submission checks

Files changed:
- app.py: +26 -21
- src/submission/submit.py: +18 -6
app.py
CHANGED
@@ -342,26 +342,31 @@ with demo:
     #upload_button = gr.UploadButton("Upload json", file_types=['.json'])
     #upload_button.upload(validate_upload, upload_button, file_input)
 
-    with gr.
-    … (old lines 346-364, also removed; their content was not rendered in the extracted diff view)
+    with gr.Column():
+        pass
+    with gr.Column():
+        with gr.Group():
+            captcha_correct = gr.State(False)
+            text = gr.State("")
+            image, text.value = generate_captcha()
+            captcha_img = gr.Image(
+                image,
+                label="Prove your humanity",
+                interactive=False,
+                show_download_button=False,
+                show_fullscreen_button=False,
+                show_share_button=False,
+            )
+            captcha_input = gr.Textbox(placeholder="Enter the text in the image above", show_label=False, container=False)
+            check_button = gr.Button("Validate", interactive=True)
+            captcha_result = gr.Markdown()
+            check_button.click(
+                fn = validate_captcha,
+                inputs = [captcha_input, text, captcha_img],
+                outputs = [captcha_correct, captcha_result, text, captcha_img],
+            )
+    with gr.Column():
+        pass
 
     submit_button = gr.Button("Submit Eval", interactive=True)
     submission_result = gr.Markdown()
@@ -375,7 +380,7 @@ with demo:
             contact_email,
             captcha_correct,
         ],
-        outputs = [submission_result
+        outputs = [submission_result],
     )
 
     with gr.Row():
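The generate_captcha and validate_captcha helpers wired up above are referenced but not shown in this commit. Purely to illustrate the contract implied by the check_button.click call (inputs [captcha_input, text, captcha_img], outputs [captcha_correct, captcha_result, text, captcha_img]), here is a minimal hypothetical sketch; it is an assumption, not the Space's actual implementation:

import random
import string

from PIL import Image, ImageDraw


def generate_captcha(length: int = 6):
    # Return a (PIL image, solution text) pair with random characters.
    solution = "".join(random.choices(string.ascii_uppercase + string.digits, k=length))
    img = Image.new("RGB", (200, 70), "white")
    draw = ImageDraw.Draw(img)
    draw.text((20, 25), solution, fill="black")  # default bitmap font, fine for a sketch
    for _ in range(200):  # light pixel noise so the text is not trivially parsed
        draw.point((random.randrange(200), random.randrange(70)), fill="gray")
    return img, solution


def validate_captcha(user_input, expected, _current_image):
    # Compare the answer, then always hand back a fresh challenge.
    # The return order matches the outputs list in app.py:
    # (captcha_correct, captcha_result, text, captcha_img)
    ok = user_input.strip().upper() == str(expected).strip().upper()
    new_image, new_solution = generate_captcha()
    message = "Correct!" if ok else "Wrong text, please try again."
    return ok, message, new_solution, new_image

Regenerating the challenge on every attempt keeps a failed guess from being retried against the same image.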
src/submission/submit.py
CHANGED
@@ -33,7 +33,19 @@ def add_new_eval(
 ):
     try:
         if not captcha_ok:
-            return styled_error("Please prove you are a human!")
+            return styled_error("Please prove you are a human!")
+
+        if not eval_name:
+            return styled_error("Please provide a model name.")
+
+        if not precision:
+            return styled_error("Please select precision.")
+
+        if not contact_email:
+            return styled_error("Please provide your contact email.")
+
+        if not upload:
+            return styled_error("Please upload a results file.")
 
         with open(upload, mode="r") as f:
             data = json.load(f)
@@ -59,11 +71,11 @@ def add_new_eval(
                 continue
             if k not in BENCHMARK_COL_IDS:
                 print(f"Missing: {k}")
-                return styled_error(f'Missing: {k}')
+                return styled_error(f'Missing: {k}')
 
         if len(BENCHMARK_COL_IDS) != len(ret) - 4:
             print(f"Missing columns")
-            return styled_error(f'Missing
+            return styled_error(f'Missing result entries')
 
         # TODO add complex validation
         #print(results.keys())
@@ -87,7 +99,7 @@ def add_new_eval(
 
         if ret['eval_name'] in existing_eval_names:
             print(f"Model name {ret['eval_name']} is used!")
-            return styled_error(f"Model name {ret['eval_name']} is used!")
+            return styled_error(f"Model name {ret['eval_name']} is used!")
 
         out_path = f"{OUT_DIR}/{eval_name}_eval_request.json"
 
@@ -206,7 +218,7 @@ def add_new_eval(
         """
         return styled_message(
             "Your results have been successfully submitted. They will be added to the leaderboard upon verification."
-        )
+        )
 
     except Exception as e:
-        return styled_error(f"An error occurred: {e}")
+        return styled_error(f"An error occurred: {e}")
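The new checks in add_new_eval follow a guard-clause pattern: each empty field returns a styled_error before the uploaded JSON is ever opened. The sketch below restates that pattern as a standalone helper; check_required and the inline styled_error stand-in are illustrative names, not code from this repository:

def styled_error(message: str) -> str:
    # Stand-in for the Space's own error-formatting helper.
    return f"<p style='color: red'>{message}</p>"


REQUIRED = [
    ("eval_name", "Please provide a model name."),
    ("precision", "Please select precision."),
    ("contact_email", "Please provide your contact email."),
    ("upload", "Please upload a results file."),
]


def check_required(**fields):
    # Return the first validation error, or None if every required field is set.
    for name, message in REQUIRED:
        if not fields.get(name):
            return styled_error(message)
    return None


# Example: a submission without an email is rejected before any file I/O happens.
print(check_required(eval_name="my-model", precision="bfloat16",
                     contact_email="", upload="results.json"))

Keeping the checks as early returns mirrors the commit itself: the surrounding try/except still catches anything unexpected and reports it through styled_error.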