Update lm_finetuning.py
lm_finetuning.py (+15 −11)

The train/validation/test splits are no longer hardcoded: they are passed through new required --split-train, --split-validation, and --split-test flags, and the label count is derived from the selected training split.
@@ -1,13 +1,14 @@
-
+'''
 wandb offline
 export WANDB_DISABLED='true'
 export RAY_RESULTS='ray_results'
-python lm_finetuning.py -m "roberta-large" -c "ckpt/roberta_large" --push-to-hub --hf-organization "cardiffnlp" -a "
+python lm_finetuning.py -m "roberta-large" -c "ckpt/roberta_large" --push-to-hub --hf-organization "cardiffnlp" -a "roberta-large-tweet-topic-multi" --split-train "train_all" --split-valid "validation_2021" --split-test "test_2021"
 python lm_finetuning.py -m "roberta-base" -c "ckpt/roberta_base" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-2019-90m-tweet-topic-multi"
 python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-2019-90m" -c "ckpt/twitter-roberta-base-2019-90m" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-2019-90m-tweet-topic-multi"
 python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-dec2020" -c "ckpt/twitter-roberta-base-dec2020"
 python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-dec2021" -c "ckpt/twitter-roberta-base-dec2021"
-
+'''
+
 import argparse
 import json
 import logging
@@ -65,6 +66,9 @@ def main():
     parser = argparse.ArgumentParser(description='Fine-tuning language model.')
     parser.add_argument('-m', '--model', help='transformer LM', default='roberta-base', type=str)
     parser.add_argument('-d', '--dataset', help='', default='cardiffnlp/tweet_topic_multi', type=str)
+    parser.add_argument('--split-train', help='', required=True, type=str)
+    parser.add_argument('--split-validation', help='', required=True, type=str)
+    parser.add_argument('--split-test', help='', required=True, type=str)
     parser.add_argument('-l', '--seq-length', help='', default=128, type=int)
     parser.add_argument('--random-seed', help='', default=42, type=int)
     parser.add_argument('--eval-step', help='', default=50, type=int)
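The three split names are required, so every invocation must name its splits explicitly. One detail worth noting: the usage block above passes --split-valid, which argparse resolves to --split-validation because unambiguous option prefixes are accepted by default. A minimal standalone sketch of just the new flags:

    import argparse

    # Only the three new options, to show prefix resolution; the real parser
    # also defines -m/--model, -d/--dataset, and the other training flags.
    parser = argparse.ArgumentParser()
    parser.add_argument('--split-train', required=True, type=str)
    parser.add_argument('--split-validation', required=True, type=str)
    parser.add_argument('--split-test', required=True, type=str)
    opt = parser.parse_args(['--split-train', 'train_all',
                             '--split-valid', 'validation_2021',
                             '--split-test', 'test_2021'])
    print(opt.split_train, opt.split_validation, opt.split_test)
    # -> train_all validation_2021 test_2021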
@@ -86,7 +90,7 @@ def main():
     tokenizer = AutoTokenizer.from_pretrained(opt.model, local_files_only=not network)
     model = AutoModelForSequenceClassification.from_pretrained(
         opt.model,
-        num_labels=len(dataset[
+        num_labels=len(dataset[opt.split_train][0]['label']),
         local_files_only=not network,
         problem_type="multi_label_classification"
     )
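Here the label count comes from inspecting one example of the chosen training split. A hedged sketch of what that line computes, assuming the cardiffnlp/tweet_topic_multi schema where each example stores its labels as a fixed-length multi-hot vector:

    from datasets import load_dataset

    # Length of one example's multi-hot label vector = number of classes.
    dataset = load_dataset('cardiffnlp/tweet_topic_multi')
    num_labels = len(dataset['train_all'][0]['label'])
    print(num_labels)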
@@ -106,11 +110,11 @@ def main():
             eval_steps=opt.eval_step,
             seed=opt.random_seed
         ),
-        train_dataset=tokenized_datasets[
-        eval_dataset=tokenized_datasets[
+        train_dataset=tokenized_datasets[opt.split_train],
+        eval_dataset=tokenized_datasets[opt.split_validation],
         compute_metrics=compute_metric_search,
         model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
-            opt.model, return_dict=True, num_labels=dataset[
+            opt.model, return_dict=True, num_labels=dataset[opt.split_train].features['label'].num_classes)
     )
     # parameter search
     if PARALLEL:
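This Trainer feeds the Ray-backed parameter search guarded by the PARALLEL flag (RAY_RESULTS is exported in the usage block above). A hedged sketch of such a call; the backend matches the script's setup, but the direction and trial count are illustrative, not the script's actual settings:

    # `trainer` is the Trainer constructed in the hunk above.
    best_run = trainer.hyperparameter_search(
        direction='maximize',  # assumes compute_metric_search returns a score to maximize
        backend='ray',
        n_trials=10)           # illustrative
    print(best_run.hyperparameters)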
@@ -145,7 +149,7 @@ def main():
     # evaluation
     model = AutoModelForSequenceClassification.from_pretrained(
         best_model_path,
-        num_labels=dataset[
+        num_labels=dataset[opt.split_train].features['label'].num_classes,
         local_files_only=not network)
     trainer = Trainer(
         model=model,
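Both reloads now take num_labels from the dataset schema rather than from a single example. Whether the multi-label 'label' feature exposes num_classes at the top level (as the line above assumes) or on its inner feature depends on how the dataset declares it; a defensive sketch covering both cases:

    from datasets import load_dataset

    dataset = load_dataset('cardiffnlp/tweet_topic_multi')
    label_feature = dataset['train_all'].features['label']
    num_classes = getattr(label_feature, 'num_classes', None)
    if num_classes is None:
        # e.g. a Sequence(ClassLabel) feature keeps num_classes on .feature
        num_classes = label_feature.feature.num_classes
    print(num_classes)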
@@ -154,11 +158,11 @@ def main():
             evaluation_strategy="no",
             seed=opt.random_seed
         ),
-        train_dataset=tokenized_datasets[
-        eval_dataset=tokenized_datasets[
+        train_dataset=tokenized_datasets[opt.split_train],
+        eval_dataset=tokenized_datasets[opt.split_test],
         compute_metrics=compute_metric_all,
         model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
-            opt.model, return_dict=True, num_labels=dataset[
+            opt.model, return_dict=True, num_labels=dataset[opt.split_train].features['label'].num_classes)
     )
     summary_file = pj(opt.output_dir, opt.summary_file)
     if not opt.skip_eval:
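With eval_dataset now pointing at the test split, this final Trainer scores the best checkpoint and the metrics land in summary_file (pj is presumably os.path.join). A hedged sketch of that last step:

    import json

    # `trainer` is the evaluation Trainer above; its eval_dataset is the test split.
    result = trainer.evaluate()  # applies compute_metric_all
    with open(summary_file, 'w') as f:
        json.dump(result, f)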