Aaron Mueller committed · fb22d4b
Parent: 11f8328

testing now

src/submission/submit.py: +2 -36
src/submission/submit.py
CHANGED

@@ -18,11 +18,7 @@ def add_new_eval(
     model_name: str,
     preds_path: str,
     track: str,
-    base_model: str,
     revision: str,
-    precision: str,
-    weight_type: str,
-    model_type: str,
 ):
     global REQUESTED_MODELS
     global USERS_TO_SUBMISSION_DATES
@@ -35,12 +31,8 @@ def add_new_eval(
     user_name = model_name.split("/")[0]
     model_path = model_name.split("/")[1]
 
-    precision = precision.split(" ")[0]
     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
 
-    if model_type is None or model_type == "":
-        return styled_error("Please select a model type.")
-
     if preds_path is None or preds_path == "":
         return styled_error("Please enter a URL where your predictions file can be downloaded.")
 
@@ -51,31 +43,12 @@ def add_new_eval(
     if revision == "":
         revision = "main"
 
-    # Is the model on the hub?
-    if weight_type in ["Delta", "Adapter"]:
-        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
-        if not base_model_on_hub:
-            return styled_error(f'Base model "{base_model}" {error}')
-
-    if not weight_type == "Adapter":
-        model_on_hub, error, _ = is_model_on_hub(model_name=model_name, revision=revision, token=TOKEN, test_tokenizer=True)
-        if not model_on_hub:
-            return styled_error(f'Model "{model_name}" {error}')
-
     # Is the model info correctly filled?
     try:
         model_info = API.model_info(repo_id=model_name, revision=revision)
     except Exception:
         return styled_error("Could not get your model information. Please fill it up properly.")
 
-    model_size = get_model_size(model_info=model_info, precision=precision)
-
-    # Were the model card and license filled?
-    try:
-        license = model_info.cardData["license"]
-    except Exception:
-        return styled_error("Please select a license for your model")
-
     modelcard_OK, error_msg = check_model_card(model_name)
     if not modelcard_OK:
         return styled_error(error_msg)
@@ -87,27 +60,20 @@ def add_new_eval(
         "model_name": model_name,
         "preds_path": preds_path,
         "track": track,
-        "base_model": base_model,
         "revision": revision,
-        "precision": precision,
-        "weight_type": weight_type,
         "status": "PENDING",
         "submitted_time": current_time,
-        "model_type": model_type,
-        "likes": model_info.likes,
-        "params": model_size,
-        "license": license,
         "private": False,
     }
 
     # Check for duplicate submission
-    if f"{model_name}_{revision}_{
+    if f"{model_name}_{revision}_{track}" in REQUESTED_MODELS:
         return styled_warning("This model has been already submitted.")
 
     print("Creating eval file")
     OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
     os.makedirs(OUT_DIR, exist_ok=True)
-    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{
+    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{track}.json"
 
     with open(out_path, "w") as f:
         f.write(json.dumps(eval_entry))
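Reviewer note: with precision and weight_type gone, both the duplicate check and the output filename are now keyed on the track. A minimal sketch of the new key scheme follows; REQUESTED_MODELS and the example values are assumptions for illustration, not part of this diff:

# Sketch only: illustrates the dedup key format introduced by this commit.
# REQUESTED_MODELS here is a stand-in; how it is populated is outside this diff.
REQUESTED_MODELS = {"someuser/some-model_main_strict"}

model_name, revision, track = "someuser/some-model", "main", "strict"
if f"{model_name}_{revision}_{track}" in REQUESTED_MODELS:
    print("This model has been already submitted.")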