Update readme.py
--- a/readme.py
+++ b/readme.py
@@ -21,22 +21,9 @@ bib = """
 
 def get_readme(model_name: str,
                metric: Dict,
-               config: Dict):
+               config: Dict,
+               extra_desc: str = ''):
     language_model = config['model']
-    dataset = None
-    dataset_alias = "custom"
-    if config["dataset"] is not None:
-        dataset = sorted([i for i in config["dataset"]])
-        dataset_alias = ','.join(dataset)
-    config_text = "\n".join([f" - {k}: {v}" for k, v in config.items()])
-    ci_micro = '\n'.join([f' - {k}%: {v}' for k, v in metric["micro/f1_ci"].items()])
-    ci_macro = '\n'.join([f' - {k}%: {v}' for k, v in metric["micro/f1_ci"].items()])
-    per_entity_metric = '\n'.join([f'- {k}: {v["f1"]}' for k, v in metric['per_entity_metric'].items()])
-    if dataset is None:
-        dataset_link = 'custom'
-    else:
-        dataset = [dataset] if type(dataset) is str else dataset
-        dataset_link = ','.join([f"[{d}](https://huggingface.co/datasets/{d})" for d in dataset])
     return f"""---
 datasets:
 - cardiffnlp/tweet_topic_multi
@@ -73,7 +60,8 @@ widget:
 ---
 # {model_name}
 
-This model is a fine-tuned version of [{language_model}](https://huggingface.co/{language_model}) on the [tweet_topic_multi](https://huggingface.co/datasets/cardiffnlp/tweet_topic_multi).
+This model is a fine-tuned version of [{language_model}](https://huggingface.co/{language_model}) on the [tweet_topic_multi](https://huggingface.co/datasets/cardiffnlp/tweet_topic_multi). {extra_desc}
+Fine-tuning script can be found [here](https://huggingface.co/datasets/cardiffnlp/tweet_topic_multi/blob/main/lm_finetuning.py). It achieves the following results on the test_2021 set:
 
 - F1 (micro): {metric['test/eval_f1']}
 - F1 (macro): {metric['test/eval_f1_macro']}