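"""Handle new model submissions to the leaderboard's evaluation queue."""
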
import json
import os
from datetime import datetime, timezone
from src.display.formatting import styled_error, styled_message, styled_warning
from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
from src.submission.check_validity import (
    already_submitted_models,
    check_model_card,
    get_model_size,
    is_model_on_hub,
    is_valid_predictions,
)
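
# Caches of already-submitted requests, filled lazily on the first call to add_new_eval.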
REQUESTED_MODELS = None
USERS_TO_SUBMISSION_DATES = None


def add_new_eval(
    model_name: str,
    model_id: str,
    revision: str,
    track: str,
    predictions: dict,
):
    global REQUESTED_MODELS
    global USERS_TO_SUBMISSION_DATES
    if not REQUESTED_MODELS:
        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)

    out_message = ""
    print(predictions)  # debugging
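
    # Split an "org/model" name into the org (user) part and the bare model name.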
    user_name = ""
    model_path = model_name
    if "/" in model_name:
        user_name = model_name.split("/")[0]
        model_path = model_name.split("/")[1]
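
    # Timestamp the submission in UTC (ISO 8601).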
    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    if track is None:
        return styled_error("Please select a track.")

    # Does the model actually exist?
    if revision == "":
        revision = "main"

    # Is the model info correctly filled?
    try:
        API.model_info(repo_id=model_id, revision=revision)
    except Exception:
        out_message += styled_warning("Could not get your model information. The leaderboard entry will not have a link to its HF repo.") + "<br>"

    modelcard_OK, error_msg = check_model_card(model_name)
    if not modelcard_OK:
        out_message += styled_warning(error_msg) + "<br>"

    predictions_OK, error_msg = is_valid_predictions(predictions)
    if not predictions_OK:
        return styled_error(error_msg)

    # Seems good, creating the eval
    print("Adding new eval")

    eval_entry = {
        "model_name": model_name,
        "hf_repo": model_id,
        "revision": revision,
        "track": track,
        "predictions": predictions,
        "status": "PENDING",
        "submitted_time": current_time,
        "private": False,
    }

    # Check for duplicate submission
    if f"{model_name}_{revision}_{track}" in REQUESTED_MODELS:
        return styled_error("A model with this name has already been submitted.")

    print("Creating eval file")
    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
    os.makedirs(OUT_DIR, exist_ok=True)
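    # Request filename encodes model, revision and track; the literal "False" mirrors the entry's "private" flag.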
    out_path = f"{OUT_DIR}/{model_path}_{revision}_eval_request_False_{track}.json"
    with open(out_path, "w") as f:
        f.write(json.dumps(eval_entry))
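
    # Mirror the request file into the queue dataset repo, relative to the eval-queue/ root.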
    print("Uploading eval file")
    API.upload_file(
        path_or_fileobj=out_path,
        path_in_repo=out_path.split("eval-queue/")[1],
        repo_id=QUEUE_REPO,
        repo_type="dataset",
        commit_message=f"Add {model_name} to eval queue",
    )

    # Remove the local file
    os.remove(out_path)

    # Surface any accumulated warnings alongside the success message.
    return styled_message(
        out_message
        + "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the request to show in the PENDING list."
    )
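

# A minimal, hypothetical usage sketch (e.g. wired to a Gradio submit handler);
# the track name and predictions payload below are illustrative only:
#
#   add_new_eval(
#       model_name="my-org/my-model",
#       model_id="my-org/my-model",
#       revision="main",
#       track="example-track",
#       predictions={...},  # payload validated by is_valid_predictions
#   )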