import json
import os
import shutil
from datetime import datetime
from pathlib import Path

import jsonlines
import streamlit as st
from dotenv import load_dotenv
from huggingface_hub import HfApi, Repository, hf_hub_url, cached_download

from utils import http_post, validate_json
if Path(".env").is_file():
    load_dotenv(".env")
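
# Hub credentials and the AutoNLP backend endpoint are supplied via environment
# variables (or a local .env file); HF_TOKEN is used below to push to repos under
# the GEM-submissions org, so it needs write access there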
HF_TOKEN = os.getenv("HF_TOKEN")
AUTONLP_USERNAME = os.getenv("AUTONLP_USERNAME")
HF_AUTONLP_BACKEND_API = os.getenv("HF_AUTONLP_BACKEND_API")
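
# Local clone directories: one for each submission's dataset repo, one for the
# shared submission-logs repo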
LOCAL_REPO = "submission_repo"
LOGS_REPO = "submission-logs"
## TODO ##
# 1. Add check that fields are nested under `tasks` field correctly
# 2. Add check that names of tasks and datasets are valid
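
# Dataset card template for each submission repo; the YAML front matter tags the
# repo as a GEM prediction submission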
MARKDOWN = """---
benchmark: gem
type: prediction
submission_name: {submission_name}
tags:
- evaluation
- benchmark
---
# GEM Submission
Submission name: {submission_name}
"""

def generate_dataset_card(submission_name):
    """
    Generate dataset card for the submission
    """
    markdown = MARKDOWN.format(
        submission_name=submission_name,
    )
    with open(os.path.join(LOCAL_REPO, "README.md"), "w") as f:
        f.write(markdown)

def load_json(path):
    with open(path, "r") as f:
        return json.load(f)
# The GEM frontend requires the submission names to be unique, so here we
# download all submission names and use them as a check against the user
# submissions
scores_url = hf_hub_url("GEM-submissions/submission-scores", "scores.json", repo_type="dataset")
scores_filepath = cached_download(scores_url)
scores_data = load_json(scores_filepath)
submission_names = [score["submission_name"] for score in scores_data]
###########
### APP ###
###########
st.title("GEM Submissions")
st.markdown(
    """
    Welcome to the [GEM benchmark](https://gem-benchmark.com/)! GEM is a benchmark
    environment for Natural Language Generation with a focus on its Evaluation, both
    through human annotations and automated Metrics.

    GEM aims to:

    - measure NLG progress across many NLG tasks across languages.
    - audit data and models and present results via data cards and model robustness
      reports.
    - develop standards for evaluation of generated text using both automated and
      human metrics.

    Use this page to submit your system's predictions to the benchmark.
    """
)
with st.form(key="form"):
    # Flush local repo
    shutil.rmtree(LOCAL_REPO, ignore_errors=True)
    submission_errors = 0
    uploaded_file = st.file_uploader("Upload submission file", type=["json"])

    if uploaded_file:
        data = str(uploaded_file.read(), "utf-8")
        json_data = json.loads(data)
        if json_data["submission_name"] in submission_names:
            st.error("🙈 Submission name is already taken. Please rename your submission")
            submission_errors += 1
        else:
            is_valid, message = validate_json(json_data)
            if is_valid:
                st.success(message)
            else:
                st.error(message)
                submission_errors += 1
    with st.expander("Submission format"):
        st.markdown(
            """
            Please follow this JSON format for your `submission.json` file:

            ```json
            {
                "submission_name": "An identifying name of your system",
                "param_count": 123,  # The number of parameters your system has.
                "description": "An optional brief description of the system that will be shown on the results page",
                "tasks": {
                    "dataset_identifier": {
                        "values": ["output-0", "output-1", "..."],  # A list of system outputs.
                        "keys": ["gem_id-0", "gem_id-1", ...]  # A list of GEM IDs.
                    }
                }
            }
            ```

            Here, `dataset_identifier` is the identifier of the dataset followed by
            an identifier of the set the outputs were created from, for example
            `_validation` or `_test`. For example, the `mlsum_de` test set has the
            identifier `mlsum_de_test`. The `keys` field is needed to avoid
            accidental shuffling that will impact your metrics. Simply add a list of
            IDs from the `gem_id` column of each evaluation dataset in the same
            order as your values. Please see the sample submission below:
            """
        )
        with open("sample-submission.json", "r") as f:
            example_submission = json.load(f)
        st.json(example_submission)
    user_name = st.text_input("Enter your 🤗 Hub username.")
    submit_button = st.form_submit_button("Make Submission")
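
# On submission, the app (1) pushes the predictions to a fresh dataset repo under
# the GEM-submissions org, (2) triggers an evaluation job on the AutoNLP backend,
# and (3) appends the job metadata to the shared submission-logs dataset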
if submit_button and uploaded_file and submission_errors == 0:
    # `uploaded_file` is re-checked here so that a click without an uploaded file
    # cannot reach `json_data`, which is only defined after a successful upload
    with st.spinner("⏳ Preparing submission for evaluation ..."):
        submission_name = json_data["submission_name"]
        submission_name_formatted = submission_name.lower().replace(" ", "-").replace("/", "-")
        submission_time = str(int(datetime.now().timestamp()))
        # Create submission dataset under benchmarks ORG
        submission_repo_id = f"{user_name}__{submission_name_formatted}__{submission_time}"
        dataset_repo_url = f"https://huggingface.co/datasets/GEM-submissions/{submission_repo_id}"
        repo = Repository(
            local_dir=LOCAL_REPO,
            clone_from=dataset_repo_url,
            repo_type="dataset",
            private=False,
            use_auth_token=HF_TOKEN,
        )
        generate_dataset_card(submission_name)
        with open(f"{LOCAL_REPO}/submission.json", "w", encoding="utf-8") as f:
            json.dump(json_data, f)
        # TODO: add informative commit msg
        commit_url = repo.push_to_hub()
        if commit_url is not None:
            commit_sha = commit_url.split("/")[-1]
        else:
            commit_sha = repo.git_head_commit_url().split("/")[-1]
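        # Ask the AutoNLP backend to evaluate the freshly pushed submission
        # dataset against the GEM references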
        submission_id = submission_name + "__" + commit_sha + "__" + submission_time
        payload = {
            "username": AUTONLP_USERNAME,
            "dataset": "GEM/references",
            "task": 1,
            "model": "gem",
            "submission_dataset": f"GEM-submissions/{submission_repo_id}",
            "submission_id": submission_id,
            "col_mapping": {},
            "split": "test",
            "config": None,
        }
        json_resp = http_post(
            path="/evaluate/create", payload=payload, token=HF_TOKEN, domain=HF_AUTONLP_BACKEND_API
        ).json()
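        # Record the evaluation job in the private submission-logs dataset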
        logs_repo_url = f"https://huggingface.co/datasets/GEM-submissions/{LOGS_REPO}"
        logs_repo = Repository(
            local_dir=LOGS_REPO,
            clone_from=logs_repo_url,
            repo_type="dataset",
            private=True,
            use_auth_token=HF_TOKEN,
        )
        json_resp["submission_name"] = submission_name
        with jsonlines.open(f"{LOGS_REPO}/logs.jsonl") as r:
            lines = []
            for obj in r:
                lines.append(obj)
        lines.append(json_resp)
        with jsonlines.open(f"{LOGS_REPO}/logs.jsonl", mode="w") as writer:
            for job in lines:
                writer.write(job)
        logs_repo.push_to_hub(commit_message=f"Submission with job ID {json_resp['id']}")
        if json_resp["status"] == 1:
            st.success(
                f"✅ Submission {submission_name} was successfully submitted for evaluation with job ID {json_resp['id']}"
            )
            st.markdown(
                f"""
                Evaluation takes approximately 1-2 hours to complete, so grab a ☕ or 🍵 while you wait:

                * 📊 Click [here](https://huggingface.co/spaces/GEM/results) to view the results from your submission
                * 💾 Click [here]({dataset_repo_url}) to view your submission file on the Hugging Face Hub

                Please [contact the organisers](mailto:[email protected]) if you would like your submission and/or evaluation scores deleted.
                """
            )
        else:
            st.error(
                "🙈 Oh noes, there was an error submitting your submission! Please [contact the organisers](mailto:[email protected])"
            )
        # Flush local repos
        shutil.rmtree(LOCAL_REPO, ignore_errors=True)
        shutil.rmtree(LOGS_REPO, ignore_errors=True)