tweetner7 / get_model_list.py
import json
import os
import requests
import pandas as pd
dataset_link = "[`tweetner7`](https://huggingface.co/datasets/tner/tweetner7)"
metric_dir = 'metric_files'  # local cache for downloaded evaluation metrics
os.makedirs(metric_dir, exist_ok=True)
# helpers to render markdown links for the base language model and the fine-tuned model
def lm_link(_model): return f"[`{_model}`](https://huggingface.co/{_model})"
def model_link(_model, _type): return f"[`tner/{_model}-tweetner7-{_type}`](https://huggingface.co/tner/{_model}-tweetner7-{_type})"
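# For illustration, the markdown links these helpers render:
#   lm_link("roberta-large")           -> "[`roberta-large`](https://huggingface.co/roberta-large)"
#   model_link("roberta-large", "all") -> "[`tner/roberta-large-tweetner7-all`](https://huggingface.co/tner/roberta-large-tweetner7-all)"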
def download(_model, _type):
    """Load cached evaluation metrics for a fine-tuned model, downloading them on a cache miss."""
    url = f"https://huggingface.co/tner/{_model}-tweetner7-{_type}/raw/main/eval"
    filename = f"{metric_dir}/{_model}-{_type}.json"
    print(url, filename)
    try:
        # return the locally cached metrics if they exist
        with open(filename) as f:
            return json.load(f)
    except Exception:
        # otherwise fetch the raw eval files from the model repository
        tmp = {}
        for metric in ["metric.test_2021", "metric.test_2020", "metric_span.test_2021", "metric_span.test_2020"]:
            year = metric[-4:]
            _metric = json.loads(requests.get(f"{url}/{metric}.json").content)
            if '_span' in metric:
                # span-level metric (entity boundaries only, type ignored)
                tmp[f"Entity-Span F1 ({year})"] = round(100 * _metric["micro/f1"], 2)
            else:
                tmp[f"Micro F1 ({year})"] = round(100 * _metric["micro/f1"], 2)
                tmp[f"Macro F1 ({year})"] = round(100 * _metric["macro/f1"], 2)
                tmp.update({f"F1 ({year})/{k}": round(100 * v['f1'], 2) for k, v in _metric["per_entity_metric"].items()})
        # cache the flattened metrics for subsequent runs
        with open(filename, "w") as f:
            json.dump(tmp, f)
        return tmp
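# Example usage (kept as a comment so the script's behaviour is unchanged); the
# key names assume the flattening performed in download() above:
#
#   row = download("roberta-large", "all")
#   row["Micro F1 (2021)"]  # e.g. 65.75 -- illustrative value, not a reported score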
# base language models fine-tuned on tweetner7
lms = [
    "roberta-large",
    "roberta-base",
    "cardiffnlp/twitter-roberta-base-2019-90m",
    "cardiffnlp/twitter-roberta-base-dec2020",
    "cardiffnlp/twitter-roberta-base-dec2021",
    "vinai/bertweet-large",
    "vinai/bertweet-base",
    "bert-large",
    "bert-base"
]
types = [
    ["all", "continuous", "2021", "2020"],
    ["random"],
    [
        "selflabel2020",
        "selflabel2021",
        "2020-selflabel2020-all",
        "2020-selflabel2021-all",
        "selflabel2020-continuous",
        "selflabel2021-continuous"
    ]
]
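# Illustrative shape of each table printed below (column names follow the keys
# assembled in the loop; the values are placeholders, not reported results):
#
#   | Model (link) | Data | Language Model | Micro F1 (2021) | Macro F1 (2021) | ... |
#   |--------------|------|----------------|-----------------|-----------------|-----|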
for tt in types:
    metrics = []
    for t in tt:
        for lm in lms:
            # self-labelling variants were only trained with roberta-large
            if 'selflabel' in t and lm != "roberta-large":
                continue
            _lm_link = lm_link(lm)
            lm = os.path.basename(lm)
            _model_link = model_link(lm, t)
            __metric = {
                "Model (link)": _model_link,
                "Data": dataset_link,
                "Language Model": _lm_link
            }
            __metric.update(download(lm, t))
            metrics.append(__metric)
    # render one markdown table per group of fine-tuning configurations
    df = pd.DataFrame(metrics)
    print(tt)
    print(df.to_markdown(index=False))
    print()
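# Not part of the original script: if the tables should be written to disk rather
# than printed, a minimal sketch (placed inside the loop above, assuming write
# access to the working directory) would be:
#
#   with open("model_list.md", "a") as f:
#       f.write(df.to_markdown(index=False) + "\n\n")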