init
- training_scripts/finetune_t5.py +81 -115
- training_scripts/requirements.txt +6 -0
- training_scripts/script.sh +6 -0
training_scripts/finetune_t5.py
CHANGED
@@ -6,18 +6,17 @@ python finetune_t5.py --dataset-name ja --model-alias mt5-small-tweet-topic-ja -
 import json
 import logging
 import os
-import multiprocessing
 import argparse
 import gc
 from typing import List, Set
 from shutil import copyfile
 from statistics import mean
+from itertools import product

 import torch
 import transformers
 from datasets import load_dataset
 from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments, pipeline
-from ray import tune, init
 from huggingface_hub import Repository


@@ -60,6 +59,7 @@ def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]):
         scores.append(f1)
     return {'f1': mean(scores)}

+
 def train(
         model_name: str,
         model_low_cpu_mem_usage: bool,
@@ -70,47 +70,26 @@ def train(
         dataset_split_train: str,
         dataset_split_validation: str,
         dataset_split_test: str,
-        search_range_lr: List,
-        search_range_epoch: List,
-        search_list_batch: List,
+        lr: List,
+        epoch: int,
+        batch: List,
         down_sample_train: int,
         down_sample_validation: int,
         random_seed: int,
         use_auth_token: bool,
-        n_trials: int,
-        eval_step: int,
-        parallel_cpu: bool,
         output_dir: str,
-        ray_result_dir: str,
         model_alias: str,
         model_organization: str,
+        skip_train: bool = False,
+        skip_test: bool = False,
+        skip_upload: bool = False,
+        eval_steps: float = 0.25,
         eval_batch_size: int = 16):
     """Fine-tune seq2seq model."""
-    logging.info(
-        f'[CONFIG]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name}), \n\t *Num of Trial: {n_trials}'
-    )
+    logging.info(f'[CONFIG]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
     # set up the output directory
     if output_dir is None:
         output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
-    ray_result_dir = ray_result_dir
-    if ray_result_dir is None:
-        ray_result_dir = f'ray/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
-
-    # define search space
-    search_range_lr = [1e-6, 1e-4] if search_range_lr is None else search_range_lr
-    assert len(search_range_lr) == 2, f'`search_range_lr` should contain [min_lr, max_lr]: {search_range_lr}'
-    search_range_epoch = [2, 6] if search_range_epoch is None else search_range_epoch
-    assert len(search_range_epoch) == 2, f'`search_range_epoch` should contain [min_epoch, max_epoch]: {search_range_epoch}'
-    search_list_batch = [64, 128] if search_list_batch is None else search_list_batch
-    search_space = {
-        'learning_rate': tune.loguniform(search_range_lr[0], search_range_lr[1]),
-        'num_train_epochs': tune.choice(list(range(search_range_epoch[0], search_range_epoch[1]))),
-        'per_device_train_batch_size': tune.choice(search_list_batch)
-    }
-    resources_per_trial = {'cpu': multiprocessing.cpu_count() if parallel_cpu else 1, 'gpu': torch.cuda.device_count()}
-    init(ignore_reinit_error=True, num_cpus=resources_per_trial['cpu'])
-    logging.info(f'[RESOURCE]\n{json.dumps(resources_per_trial, indent=4)}')
-
     # dataset process
     tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
     dataset_split = {
@@ -156,69 +135,52 @@ def train(
         generation_decode = decode_tokens(generation_token_id)
         return get_f1_score(references_decode, generation_decode)

-    if not
-        ...
-            # grid search
-            best_run = trainer.hyperparameter_search(
-                hp_space=lambda x: search_space,
-                local_dir=ray_result_dir,
-                direction='maximize',
-                backend='ray',
-                n_trials=n_trials,
-                resources_per_trial=resources_per_trial
+    if not skip_train:
+        lr = [1e-6, 1e-4] if lr is None else lr
+        batch = [64] if batch is None else batch
+        for n, (lr_tmp, batch_tmp) in enumerate(product(lr, batch)):
+            logging.info(f"[TRAIN {n}/{len(lr) * len(batch)}] lr: {lr_tmp}, batch: {batch_tmp}")
+            trainer = Seq2SeqTrainer(
+                args=Seq2SeqTrainingArguments(
+                    num_train_epochs=epoch,
+                    learning_rate=lr_tmp,
+                    output_dir=f'{output_dir}/runs',
+                    evaluation_strategy='steps',
+                    eval_steps=eval_steps,
+                    per_device_eval_batch_size=eval_batch_size,
+                    seed=random_seed,
+                    per_device_train_batch_size=batch_tmp,
+                ),
+                data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=load_model(
+                    model_name=model_name,
+                    use_auth_token=use_auth_token,
+                    low_cpu_mem_usage=model_low_cpu_mem_usage)),
+                train_dataset=tokenized_dataset['train_ds'],
+                eval_dataset=tokenized_dataset['validation_ds'],
+                compute_metrics=compute_metric,
             )
-            with open(f'{output_dir}/model/hyperparameters.json', 'w') as f:
+            os.makedirs(f'{output_dir}/model_{n}', exist_ok=True)
+            best_run = trainer.train()
+            trainer.save_model(f'{output_dir}/model_{n}')
+            tokenizer.save_pretrained(f'{output_dir}/model_{n}')
+            # grid search
+            with open(f'{output_dir}/model_{n}/hyperparameters.json', 'w') as f:
                 json.dump(best_run.hyperparameters, f)
-        ...
-        logging.info(f'fine-tuning with the best config')
-        with open(f'{output_dir}/model/hyperparameters.json') as f:
-            best_hyperparameters = json.load(f)
-        for n, v in best_hyperparameters.items():
-            setattr(trainer.args, n, v)
-        setattr(trainer, 'train_dataset', tokenized_dataset['train'])
-        setattr(trainer.args, 'evaluation_strategy', 'no')
-        trainer.train()
-        trainer.save_model(f'{output_dir}/model')
-        tokenizer.save_pretrained(f'{output_dir}/model')
-        logging.info(f'model saved at {output_dir}/model')
-        del trainer
-        gc.collect()
-        torch.cuda.empty_cache()
+            del trainer
+            gc.collect()
+            torch.cuda.empty_cache()
+            logging.info(f'model saved at {output_dir}/model_{n}')
     else:
         logging.info('skip hyperparameter search & model training (already done)')

     # get metric on the test set
-    if
+    if not skip_test:
         logging.info('run evaluation on test set')
         if not os.path.exists(f'{output_dir}/model/prediction_test.txt'):
             pipe = pipeline(
                 'text2text-generation',
                 model=f'{output_dir}/model',
-                device='cuda:0' if
+                device='cuda:0' if torch.cuda.is_available() else 'cpu',
             )
             input_data = [i[dataset_column_text] for i in dataset_instance[dataset_split_test]]
             output = pipe(input_data, batch_size=eval_batch_size)
@@ -237,8 +199,9 @@ def train(
     with open(f'{output_dir}/model/evaluation_metrics.json', 'w') as f:
         json.dump(eval_metric, f)

-    if
-    assert
+    if not skip_upload:
+        assert model_alias is not None and model_organization is not None,\
+            'model_organization must be specified when model_alias is specified'
         logging.info('uploading to huggingface')
         args = {'use_auth_token': use_auth_token, 'organization': model_organization}
         model = load_model(model_name=f'{output_dir}/model')
@@ -289,42 +252,45 @@ if __name__ == '__main__':
     parser.add_argument('--dataset-split-train', default='train', type=str)
     parser.add_argument('--dataset-split-validation', default='validation', type=str)
     parser.add_argument('--dataset-split-test', default='test', type=str)
-    parser.add_argument('--
-    parser.add_argument('--
-    parser.add_argument('--
+    parser.add_argument('--lr', nargs='+', default=None, type=float)
+    parser.add_argument('--epoch', default=5, type=int)
+    parser.add_argument('--batch', nargs='+', default=None, type=int)
     parser.add_argument('--down-sample-train', default=None, type=int)
     parser.add_argument('--down-sample-validation', default=2000, type=int)
     parser.add_argument('--random-seed', default=42, type=int)
     parser.add_argument('--use-auth-token', action='store_true')
-    parser.add_argument('--
-    parser.add_argument('--eval-step', default=100, type=int)
-    parser.add_argument('--parallel-cpu', action='store_true')
+    parser.add_argument('--eval-steps', default=100, type=int)
     parser.add_argument('--output-dir', default=None, type=str)
-    parser.add_argument('--ray-result-dir', default=None, type=str)
     parser.add_argument('--model-alias', default=None, type=str)
     parser.add_argument('--model-organization', default=None, type=str)
+    parser.add_argument('--skip-train', action='store_true')
+    parser.add_argument('--skip-test', action='store_true')
+    parser.add_argument('--skip-upload', action='store_true')
     opt = parser.parse_args()

-    train(
-        ...
+    train(
+        model_name=opt.model_name,
+        model_low_cpu_mem_usage=opt.low_cpu_mem_usage,
+        dataset=opt.dataset,
+        dataset_name=opt.dataset_name,
+        dataset_column_label=opt.dataset_column_label,
+        dataset_column_text=opt.dataset_column_text,
+        dataset_split_train=opt.dataset_split_train,
+        dataset_split_validation=opt.dataset_split_validation,
+        dataset_split_test=opt.dataset_split_test,
+        lr=opt.lr,
+        epoch=opt.epoch,
+        batch=opt.batch,
+        down_sample_train=opt.down_sample_train,
+        down_sample_validation=opt.down_sample_validation,
+        random_seed=opt.random_seed,
+        use_auth_token=opt.use_auth_token,
+        n_trials=opt.n_trials,
+        eval_steps=opt.eval_steps,
+        output_dir=opt.output_dir,
+        model_alias=opt.model_alias,
+        model_organization=opt.model_organization,
+        skip_train=opt.skip_train,
+        skip_test=opt.skip_test,
+        skip_upload=opt.skip_upload
+    )
training_scripts/requirements.txt
ADDED
@@ -0,0 +1,6 @@
+ray
+ray[tune]
+torch
+datasets
+transformers
+huggingface_hub
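
No versions are pinned, so pip install -r training_scripts/requirements.txt pulls the latest release of each package; ray and ray[tune] remain listed even though this commit drops the ray-based search.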
training_scripts/script.sh
CHANGED
@@ -0,0 +1,6 @@
+python finetune_t5.py --dataset-name ja --model-alias mt5-small-tweet-topic-ja --model-organization cardiffnlp
+python finetune_t5.py --dataset-name gr --model-alias mt5-small-tweet-topic-gr --model-organization cardiffnlp
+python finetune_t5.py --dataset-name es --model-alias mt5-small-tweet-topic-es --model-organization cardiffnlp
+python finetune_t5.py --dataset-name en --model-alias mt5-small-tweet-topic-en --model-organization cardiffnlp
+
+
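
Each line fine-tunes one language-specific checkpoint; since none of the --skip-* flags are passed, every run also evaluates on the test split and uploads the model, which asserts that --model-alias and --model-organization are set. Pushing to the cardiffnlp organization presumably also requires --use-auth-token or a cached huggingface-cli login.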