Update lm_finetuning.py
lm_finetuning.py  CHANGED  (+10 -0)
@@ -36,6 +36,9 @@ from datasets import load_dataset, load_metric
 from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
 from ray import tune
 
+from readme import get_readme
+
+
 logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
 
 PARALLEL = bool(int(os.getenv("PARALLEL", 1)))
@@ -232,6 +235,13 @@ def main():
     tokenizer.push_to_hub(opt.model_alias, **args)
     if os.path.exists(summary_file):
         shutil.copy2(summary_file, opt.model_alias)
+    extra_desc = f"This model is fine-tuned on `{opt.split_train}` split and validated on `{opt.split_test}` split of tweet_topic."
+    readme = get_readme(
+        model_name=opt.model_alias,
+        metric=f"{opt.model_alias}/{summary_file}",
+        language_model=opt.model,
+        extra_desc=extra_desc
+    )
     os.system(
         f"cd {opt.model_alias} && git lfs install && git add . && git commit -m 'model update' && git push && cd ../")
     shutil.rmtree(f"{opt.model_alias}")  # clean up the cloned repo
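For reference, `get_readme` comes from a project-local `readme.py` module whose contents are not part of this commit. A minimal sketch of a compatible helper, inferred only from the call site above; the JSON metric-file format and the card layout are assumptions, not the project's actual implementation:

import json


def get_readme(model_name: str, metric: str, language_model: str, extra_desc: str = "") -> str:
    # Assumption: `metric` points at the JSON summary file copied into the cloned repo.
    with open(metric) as f:
        results = json.load(f)
    metric_lines = "\n".join(f"- {k}: {v}" for k, v in results.items())
    return (
        f"# {model_name}\n\n"
        f"This model is a fine-tuned version of "
        f"[{language_model}](https://huggingface.co/{language_model}). {extra_desc}\n\n"
        f"## Evaluation results\n{metric_lines}\n"
    )

Note that the hunk only builds the `readme` string; the step that writes it out (e.g. as a README.md inside `opt.model_alias`) is not visible in this diff.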
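The unchanged context lines push the cloned repo with a single `os.system` shell chain, which discards the exit status of each git step. An equivalent sketch using `subprocess` instead (not part of the commit; shown only because it makes failures raise rather than pass silently into the `shutil.rmtree` cleanup):

import subprocess


def push_repo(repo_dir: str) -> None:
    # check=True raises CalledProcessError if any git step fails,
    # instead of silently continuing to the cleanup.
    for cmd in (["git", "lfs", "install"],
                ["git", "add", "."],
                ["git", "commit", "-m", "model update"],
                ["git", "push"]):
        subprocess.run(cmd, cwd=repo_dir, check=True)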