init

- training_scripts/finetune_t5.py  +238 -222
- training_scripts/script.sh  +5 -1
training_scripts/finetune_t5.py
CHANGED
@@ -24,6 +24,9 @@ from huggingface_hub import Repository
 
 os.environ['TOKENIZERS_PARALLELISM'] = 'false'  # turn-off the warning message
 os.environ['WANDB_DISABLED'] = 'true'  # disable wandb
+_LR = [1e-6, 1e-5, 1e-4]
+_BATCH = 32
+_EPOCH = 5
 
 
 def load_model(
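The three new module-level constants replace the script's previous CLI-driven hyperparameter grid: batch size and epoch count are now fixed, and only the learning rate is swept. A minimal sketch of the behavioural change (the `product` loop mirrors the removed code further down; the values are the ones defined above):

```python
from itertools import product

_LR = [1e-6, 1e-5, 1e-4]
_BATCH = 32
_EPOCH = 5

# before: full grid over lr x batch x epoch (old defaults were lr=[1e-6, 1e-5, 1e-4], batch=[32], epoch=[3, 5])
for n, (lr, batch, epoch) in enumerate(product(_LR, [32], [3, 5])):
    print(f"[TRAIN {n}] lr: {lr}, batch: {batch}, epoch: {epoch}")

# after: learning-rate sweep only, with fixed batch size and epoch count
for n, lr in enumerate(_LR):
    print(f"[TRAIN {n}/{len(_LR)}] lr: {lr}, batch: {_BATCH}, epoch: {_EPOCH}")
```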
@@ -44,23 +47,7 @@ def load_model(
     else:
         raise ValueError(f'unsupported model type: {config.model_type}')
     param = {'config': config, 'use_auth_token': use_auth_token, 'low_cpu_mem_usage': low_cpu_mem_usage}
-
-    return model
-
-
-def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]) -> Dict[str, float]:
-    scores = []
-    for g, r in zip(references, predictions):
-        tp = len(set(g).intersection(set(r)))
-        fp = len([_g for _g in g if _g not in r])
-        fn = len([_r for _r in r if _r not in g])
-        if tp == 0:
-            f1 = 0
-        else:
-            f1 = 2 * tp / (2 * tp + fp + fn)
-        scores.append(f1)
-    return {'f1': mean(scores)}
-
+    return model_class(model_name, **param)
 
 def train(
         model_name: str,
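The `+` line above also fixes a latent bug: the removed code ended `load_model` with `return model`, but no `model` variable was ever assigned. The top of `load_model` sits outside this hunk, so the following is only a sketch of its presumed shape, assuming `model_class` is bound to the appropriate `from_pretrained` classmethod once the config's `model_type` has been checked (the branch logic and class names here are assumptions, not shown in the diff):

```python
import transformers

def load_model(model_name: str, use_auth_token: bool = False, low_cpu_mem_usage: bool = False):
    # sketch: resolve the model class from the checkpoint config
    config = transformers.AutoConfig.from_pretrained(model_name, use_auth_token=use_auth_token)
    if config.model_type == 't5':
        model_class = transformers.T5ForConditionalGeneration.from_pretrained
    elif config.model_type == 'mt5':
        model_class = transformers.MT5ForConditionalGeneration.from_pretrained
    else:
        raise ValueError(f'unsupported model type: {config.model_type}')
    param = {'config': config, 'use_auth_token': use_auth_token, 'low_cpu_mem_usage': low_cpu_mem_usage}
    # model_class is the bound classmethod, so this call loads the checkpoint
    return model_class(model_name, **param)
```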
@@ -69,201 +56,245 @@ def train(
         dataset_name: str,
         dataset_column_label: str,
         dataset_column_text: str,
-        dataset_split_train: str,
-        dataset_split_validation: str,
-        dataset_split_test: str,
-        lr: List,
-        epoch: List,
-        batch: List,
-        down_sample_train: int,
-        down_sample_validation: int,
         random_seed: int,
-        use_auth_token: bool,
-        output_dir: str,
-        model_alias: str,
-        model_organization: str,
-        skip_train: bool = False,
-        skip_test: bool = False,
-        skip_upload: bool = False,
-        batch_eval: int = None):
+        use_auth_token: bool):
     """Fine-tune seq2seq model."""
     logging.info(f'[CONFIG]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
-
-    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
-    # dataset process
-    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
-    dataset_split = {
-        'train': [dataset_split_train, down_sample_train],
-        'validation': [dataset_split_validation, down_sample_validation]
-    }
-    dataset_instance = load_dataset(dataset, dataset_name, use_auth_token=use_auth_token)
-    tokenized_dataset = {}
-    for s, (s_dataset, down_sample) in dataset_split.items():
-        tokenized_dataset[s] = []
-        dataset_tmp = dataset_instance[s_dataset]
-        dataset_tmp.shuffle(random_seed)
-        for i in dataset_tmp:
-            model_inputs = tokenizer(i[dataset_column_text], truncation=True)
-            model_inputs['labels'] = tokenizer(text_target=i[dataset_column_label], truncation=True)['input_ids']
-            tokenized_dataset[s].append(model_inputs)
-
-        if down_sample is not None and len(dataset_tmp) > down_sample:
-            tokenized_dataset[f'{s}_ds'] = []
-            dataset_tmp = dataset_tmp.select(list(range(down_sample)))
-            for i in dataset_tmp:
-                model_inputs = tokenizer(i[dataset_column_text], truncation=True)
-                model_inputs['labels'] = tokenizer(text_target=i[dataset_column_label], truncation=True)['input_ids']
-                tokenized_dataset[f'{s}_ds'].append(model_inputs)
-        else:
-            tokenized_dataset[f'{s}_ds'] = tokenized_dataset[s]
+    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
 
-
-
-
-
-
-
-
-    def compute_metric(eval_pred):
-        predictions, reference_token_ids = eval_pred
-        # format reference
-        references_decode = decode_tokens(reference_token_ids)
-        # format prediction
-        logit, loss = predictions
-        generation_token_id = logit.argmax(-1)
-        generation_token_id[logit.min(-1) == -100] = -100
-        generation_decode = decode_tokens(generation_token_id)
-        return get_f1_score(references_decode, generation_decode)
-
-    if not skip_train:
-        lr = [1e-6, 1e-5, 1e-4] if lr is None else lr
-        batch = [32] if not batch else batch
-        epoch = [3, 5] if not epoch else epoch
-        batch_eval = min(batch) if not batch_eval else batch_eval
-        for n, (lr_tmp, batch_tmp, epoch_tmp) in enumerate(product(lr, batch, epoch)):
-            logging.info(f"[TRAIN {n}/{len(lr) * len(batch) * len(epoch)}] lr: {lr_tmp}, batch: {batch_tmp}")
-            output_dir_tmp = f"{output_dir}/model_lr_{lr_tmp}_batch_{batch_tmp}_epoch_{epoch_tmp}"
-            if os.path.exists(f"{output_dir_tmp}/eval_results.json"):
-                continue
-            model = load_model(
-                model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
-            )
-            trainer = Seq2SeqTrainer(
-                model=model,
-                args=Seq2SeqTrainingArguments(
-                    num_train_epochs=epoch_tmp,
-                    learning_rate=lr_tmp,
-                    output_dir=output_dir_tmp,
-                    evaluation_strategy="no",
-                    per_device_eval_batch_size=batch_eval,
-                    seed=random_seed,
-                    per_device_train_batch_size=batch_tmp,
-                ),
-                data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=model),
-                train_dataset=tokenized_dataset['train_ds'],
-                eval_dataset=tokenized_dataset['validation_ds'],
-                compute_metrics=compute_metric,
-            )
-            # train
-            result = trainer.train()
-            trainer.log_metrics("train", result.metrics)
-            trainer.save_metrics("train", result.metrics)
-            # evaluate
-            metrics = trainer.evaluate()
-            trainer.log_metrics("eval", metrics)
-            trainer.save_metrics("eval", metrics)
-            # clean up memory
-            trainer.save_model()
-            trainer.save_state()
-            del trainer
-            del model
-            gc.collect()
-            torch.cuda.empty_cache()
-            # cuda.get_current_device().reset()
-
-        model_score = []
-        for eval_file in glob(f"{output_dir}/model_*/eval_results.json"):
-            with open(eval_file) as f:
-                results = json.load(f)
-            model_score.append([os.path.dirname(eval_file), results['eval_loss'], results['eval_f1']])
-        logging.info("Search Result")
-        for i in model_score:
-            logging.info(i)
-        max_metric = max(model_score, key=lambda x: x[2])
-        if len([i for i in model_score if i[2] == max_metric]) > 1:
-            best_model = sorted(model_score, key=lambda x: x[1])[0][0]
-        else:
-            best_model = sorted(model_score, key=lambda x: x[2])[-1][0]
-        copy_tree(best_model, f'{output_dir}/best_model')
-        tokenizer.save_pretrained(f'{output_dir}/best_model')
-    else:
-        logging.info('skip hyperparameter search & model training (already done)')
-
-    # get metric on the test set
-    if not skip_test:
-        logging.info('run evaluation on test set')
-        if not os.path.exists(f'{output_dir}/best_model/prediction_test.txt'):
-            pipe = pipeline(
-                'text2text-generation',
-                model=f'{output_dir}/best_model',
-                device='cuda:0' if torch.cuda.is_available() else 'cpu',
-            )
-            input_data = [i[dataset_column_text] for i in dataset_instance[dataset_split_test]]
-            output = pipe(input_data, batch_size=batch_eval)
-            output = [i['generated_text'] for i in output]
-            with open(f'{output_dir}/best_model/prediction_test.txt', 'w') as f:
-                f.write('\n'.join(output))
-        with open(f'{output_dir}/best_model/prediction_test.txt') as f:
-            output = [set(i.split(',')) for i in f.read().split('\n')]
-        dataset_tmp = dataset_instance[dataset_split_test]
-        label_list = dataset_tmp[dataset_column_label]
-        _references = [
-            set([_l for __i, _l in zip(_i[dataset_column_label], label_list) if __i == 1]) for _i in dataset_tmp
-        ]
-        eval_metric = get_f1_score(_references, output)
-        eval_metric[f'f1/{dataset}/{dataset_name}'] = eval_metric.pop('f1')
-        logging.info(json.dumps(eval_metric, indent=4))
-        with open(f'{output_dir}/best_model/evaluation_metrics.json', 'w') as f:
-            json.dump(eval_metric, f)
-
-    if not skip_upload:
-        assert model_alias is not None and model_organization is not None,\
-            'model_organization must be specified when model_alias is specified'
-        logging.info('uploading to huggingface')
-        args = {'use_auth_token': use_auth_token, 'organization': model_organization}
-        model = load_model(model_name=f'{output_dir}/best_model')
-        model.push_to_hub(model_alias, **args)
-        tokenizer.push_to_hub(model_alias, **args)
-        repo = Repository(model_alias, f'{model_organization}/{model_alias}')
-        if os.path.exists(f'{output_dir}/best_model/prediction_test.txt'):
-            copyfile(f'{output_dir}/best_model/prediction_test.txt', f'{model_alias}/prediction_test.txt')
-        if os.path.exists(f'{output_dir}/best_model/evaluation_metrics.json'):
-            copyfile(f'{output_dir}/best_model/evaluation_metrics.json', f'{model_alias}/evaluation_metrics.json')
-        sample = [i[dataset_column_text] for i in dataset_instance[dataset_split_train]]
-        sample = [i for i in sample if "'" not in i and '"' not in i][:3]
-        widget = '\n'.join([f"- text: '{t}'\n  example_title: example {_n + 1}" for _n, t in enumerate(sample)])
-        with open(f'{model_alias}/README.md', 'w') as f:
-            f.write(f"""
----
-widget:
-{widget}
----
-
-# {model_organization}/{model_alias}
-
-This is [{model_name}](https://huggingface.co/{model_name}) fine-tuned on [{dataset} ({dataset_name})](https://huggingface.co/datasets/{dataset}).
-
-### Usage
-
-```python
-from transformers import pipeline
-
-pipe = pipeline('text2text-generation', model='{model_organization}/{model_alias}')
-output = pipe('{sample[0]}')
-```
-""")
-        repo.push_to_hub()
+    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
+    dataset_instance = load_dataset(dataset, dataset_name, split="train", use_auth_token=use_auth_token)
+    tokenized_dataset = []
+    for d in dataset_instance:
+        model_inputs = tokenizer(d[dataset_column_text], truncation=True)
+        model_inputs['labels'] = tokenizer(text_target=d[dataset_column_label], truncation=True)['input_ids']
+        tokenized_dataset.append(model_inputs)
+
+    for n, lr_tmp in enumerate(_LR):
+        logging.info(f"[TRAIN {n}/{len(_LR)}] lr: {lr_tmp}")
+        output_dir_tmp = f"{output_dir}/model_lr_{lr_tmp}"
+        if os.path.exists(f"{output_dir_tmp}/eval_results.json"):
+            continue
+        model = load_model(
+            model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
+        )
+        trainer = Seq2SeqTrainer(
+            model=model,
+            args=Seq2SeqTrainingArguments(
+                num_train_epochs=_EPOCH,
+                learning_rate=lr_tmp,
+                output_dir=output_dir_tmp,
+                save_strategy="epoch",
+                evaluation_strategy="no",
+                seed=random_seed,
+                per_device_train_batch_size=_BATCH,
+            ),
+            data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=model),
+            train_dataset=tokenized_dataset,
+        )
+        # train
+        result = trainer.train()
+        trainer.log_metrics("train", result.metrics)
+        trainer.save_metrics("train", result.metrics)
+        # clean up memory
+        trainer.save_model()
+        trainer.save_state()
+        del trainer
+        del model
+        gc.collect()
+        torch.cuda.empty_cache()
+#
+#
+# def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]) -> float:
+#     scores = []
+#     for g, r in zip(references, predictions):
+#         tp = len(set(g).intersection(set(r)))
+#         fp = len([_g for _g in g if _g not in r])
+#         fn = len([_r for _r in r if _r not in g])
+#         if tp == 0:
+#             f1 = 0
+#         else:
+#             f1 = 2 * tp / (2 * tp + fp + fn)
+#         scores.append(f1)
+#     return mean(scores)
+#
+#
+#
+#
+# def evaluate(
+#         model_path: str,
+#         batch_eval: int,
+#         dataset_column_text: str,
+#         dataset_instance,
+#         dataset_split_test: str,
+#         dataset_column_label: str,
+# ):
+#     prediction_file = f'{model_path}/prediction.{dataset_name}.{}.txt'
+#     input_data = [i[dataset_column_text] for i in dataset_instance[dataset_split_test]]
+#     if not os.path.exists():
+#         pipe = pipeline(
+#             'text2text-generation',
+#             model=model_path,
+#             device='cuda:0' if torch.cuda.is_available() else 'cpu',
+#         )
+#         output = pipe(input_data, batch_size=batch_eval)
+#         output = [i['generated_text'] for i in output]
+#         with open(f'{model_path}/prediction_test.txt', 'w') as f:
+#             f.write('\n'.join(output))
+#     with open(f'{model_path}/prediction_test.txt') as f:
+#         output = [set(i.split(',')) for i in f.read().split('\n')]
+#     dataset_tmp = dataset_instance[dataset_split_test]
+#     label_list = dataset_tmp[dataset_column_label]
+#     _references = [
+#         set([_l for __i, _l in zip(_i[dataset_column_label], label_list) if __i == 1]) for _i in dataset_tmp
+#     ]
+#     eval_metric = get_f1_score(_references, output)
+#     eval_metric[f'f1/{dataset}/{dataset_name}'] = eval_metric.pop('f1')
+#     logging.info(json.dumps(eval_metric, indent=4))
+#     with open(f'{model_path}/evaluation_metrics.json', 'w') as f:
+#         json.dump(eval_metric, f)
+#
+#
+# def train(
+#         model_name: str,
+#         model_low_cpu_mem_usage: bool,
+#         dataset: str,
+#         dataset_name: str,
+#         dataset_column_label: str,
+#         dataset_column_text: str,
+#         dataset_split_train: str,
+#         dataset_split_validation: str,
+#         dataset_split_test: str,
+#         lr: List,
+#         epoch: List,
+#         batch: List,
+#         down_sample_train: int,
+#         down_sample_validation: int,
+#         random_seed: int,
+#         use_auth_token: bool,
+#         output_dir: str,
+#         model_alias: str,
+#         model_organization: str,
+#         skip_train: bool = False,
+#         skip_test: bool = False,
+#         skip_upload: bool = False,
+#         batch_eval: int = None):
+#     """Fine-tune seq2seq model."""
+#     logging.info(f'[CONFIG]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
+#     if not output_dir:
+#         output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
+#     # dataset process
+#     tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
+#     dataset_split = {
+#         'train': dataset_split_train,
+#         'validation': dataset_split_validation
+#     }
+#     dataset_instance = load_dataset(dataset, dataset_name, use_auth_token=use_auth_token)
+#     tokenized_dataset = {}
+#     for s, s_dataset in zip(['train', 'validation'], [dataset_split_train, dataset_split_validation):
+#         tokenized_dataset[s] = []
+#         for i in dataset_instance[s_dataset]:
+#             model_inputs = tokenizer(i[dataset_column_text], truncation=True)
+#             model_inputs['labels'] = tokenizer(text_target=i[dataset_column_label], truncation=True)['input_ids']
+#             tokenized_dataset[s].append(model_inputs)
+#
+#     if not skip_train:
+#         lr = [1e-6, 1e-5, 1e-4] if lr is None else lr
+#         batch = [32] if not batch else batch
+#         epoch = [3, 5] if not epoch else epoch
+#         batch_eval = min(batch) if not batch_eval else batch_eval
+#         for n, (lr_tmp, batch_tmp, epoch_tmp) in enumerate(product(lr, batch, epoch)):
+#             logging.info(f"[TRAIN {n}/{len(lr) * len(batch) * len(epoch)}] lr: {lr_tmp}, batch: {batch_tmp}")
+#             output_dir_tmp = f"{output_dir}/model_lr_{lr_tmp}_batch_{batch_tmp}_epoch_{epoch_tmp}"
+#             if os.path.exists(f"{output_dir_tmp}/eval_results.json"):
+#                 continue
+#             model = load_model(
+#                 model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
+#             )
+#             trainer = Seq2SeqTrainer(
+#                 model=model,
+#                 args=Seq2SeqTrainingArguments(
+#                     num_train_epochs=epoch_tmp,
+#                     learning_rate=lr_tmp,
+#                     output_dir=output_dir_tmp,
+#                     evaluation_strategy="no",
+#                     seed=random_seed,
+#                     per_device_train_batch_size=batch_tmp,
+#                 ),
+#                 data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=model),
+#                 train_dataset=tokenized_dataset['train_ds'],
+#             )
+#             # train
+#             result = trainer.train()
+#             trainer.log_metrics("train", result.metrics)
+#             trainer.save_metrics("train", result.metrics)
+#             # clean up memory
+#             trainer.save_model()
+#             trainer.save_state()
+#             del trainer
+#             del model
+#             gc.collect()
+#             torch.cuda.empty_cache()
+#
+#         model_score = []
+#         for eval_file in glob(f"{output_dir}/model_*/eval_results.json"):
+#             with open(eval_file) as f:
+#                 results = json.load(f)
+#             model_score.append([os.path.dirname(eval_file), results['eval_loss'], results['eval_f1']])
+#         logging.info("Search Result")
+#         for i in model_score:
+#             logging.info(i)
+#         max_metric = max(model_score, key=lambda x: x[2])
+#         if len([i for i in model_score if i[2] == max_metric]) > 1:
+#             best_model = sorted(model_score, key=lambda x: x[1])[0][0]
+#         else:
+#             best_model = sorted(model_score, key=lambda x: x[2])[-1][0]
+#         copy_tree(best_model, f'{output_dir}/best_model')
+#         tokenizer.save_pretrained(f'{output_dir}/best_model')
+#     else:
+#         logging.info('skip hyperparameter search & model training (already done)')
+#
+#     # get metric on the test set
+#     if not skip_test:
+#         logging.info('run evaluation on test set')
+#     if not skip_upload:
+#         assert model_alias is not None and model_organization is not None,\
+#             'model_organization must be specified when model_alias is specified'
+#         logging.info('uploading to huggingface')
+#         args = {'use_auth_token': use_auth_token, 'organization': model_organization}
+#         model = load_model(model_name=f'{output_dir}/best_model')
+#         model.push_to_hub(model_alias, **args)
+#         tokenizer.push_to_hub(model_alias, **args)
+#         repo = Repository(model_alias, f'{model_organization}/{model_alias}')
+#         if os.path.exists(f'{output_dir}/best_model/prediction_test.txt'):
+#             copyfile(f'{output_dir}/best_model/prediction_test.txt', f'{model_alias}/prediction_test.txt')
+#         if os.path.exists(f'{output_dir}/best_model/evaluation_metrics.json'):
+#             copyfile(f'{output_dir}/best_model/evaluation_metrics.json', f'{model_alias}/evaluation_metrics.json')
+#         sample = [i[dataset_column_text] for i in dataset_instance[dataset_split_train]]
+#         sample = [i for i in sample if "'" not in i and '"' not in i][:3]
+#         widget = '\n'.join([f"- text: '{t}'\n  example_title: example {_n + 1}" for _n, t in enumerate(sample)])
+#         with open(f'{model_alias}/README.md', 'w') as f:
+#             f.write(f"""
+# ---
+# widget:
+# {widget}
+# ---
+#
+# # {model_organization}/{model_alias}
+#
+# This is [{model_name}](https://huggingface.co/{model_name}) fine-tuned on [{dataset} ({dataset_name})](https://huggingface.co/datasets/{dataset}).
+#
+# ### Usage
+#
+# ```python
+# from transformers import pipeline
+#
+# pipe = pipeline('text2text-generation', model='{model_organization}/{model_alias}')
+# output = pipe('{sample[0]}')
+# ```
+# """)
+#         repo.push_to_hub()
+#
 
 if __name__ == '__main__':
     # arguments
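This hunk strips `train()` down to a single-split training loop: validation and test handling, down-sampling, `compute_metric`, best-model selection, and the Hub upload are all removed (most of it retained as the commented-out block for reference), and checkpoints are simply saved per epoch via `save_strategy="epoch"`. The set-based F1 that the old code used for model selection is worth keeping runnable; a self-contained version of the same logic as the commented-out `get_f1_score` above:

```python
from statistics import mean
from typing import List, Set


def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]) -> float:
    """Average per-example set F1 between reference and predicted label sets."""
    scores = []
    for g, r in zip(references, predictions):
        tp = len(set(g).intersection(set(r)))      # labels common to both sets
        fp = len([_g for _g in g if _g not in r])  # labels only in the reference set
        fn = len([_r for _r in r if _r not in g])  # labels only in the prediction set
        # F1 = 2*tp / (2*tp + fp + fn); fp and fn enter symmetrically,
        # so the swapped naming in the original does not change the value
        scores.append(0 if tp == 0 else 2 * tp / (2 * tp + fp + fn))
    return mean(scores)


# example: one shared label out of {'sports', 'news'} vs {'sports'} gives 2/3
print(get_f1_score([{'sports', 'news'}], [{'sports'}]))  # 0.666...
```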
@@ -302,21 +333,6 @@ if __name__ == '__main__':
         dataset_name=opt.dataset_name,
         dataset_column_label=opt.dataset_column_label,
         dataset_column_text=opt.dataset_column_text,
-        dataset_split_train=opt.dataset_split_train,
-        dataset_split_validation=opt.dataset_split_validation,
-        dataset_split_test=opt.dataset_split_test,
-        lr=opt.lr,
-        epoch=opt.epoch,
-        batch=opt.batch,
-        batch_eval=opt.batch_eval,
-        down_sample_train=opt.down_sample_train,
-        down_sample_validation=opt.down_sample_validation,
         random_seed=opt.random_seed,
         use_auth_token=opt.use_auth_token,
-        output_dir=opt.output_dir,
-        model_alias=opt.model_alias,
-        model_organization=opt.model_organization,
-        skip_train=opt.skip_train,
-        skip_test=opt.skip_test,
-        skip_upload=opt.skip_upload
     )
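The parser definition itself sits between these hunks and is not shown, so the remaining flags cannot all be read off the diff. A hypothetical sketch consistent with the surviving `train(...)` call and the flags exercised in `script.sh` (`-m`, `--dataset-name`, `--low-cpu-mem-usage`, `--use-auth-token`); all defaults below are placeholders, not the script's actual values:

```python
import argparse

parser = argparse.ArgumentParser(description='Fine-tune seq2seq model on tweet-topic data.')
parser.add_argument('-m', '--model-name', default='google/mt5-small', type=str)
parser.add_argument('--low-cpu-mem-usage', action='store_true')
parser.add_argument('--dataset', default='cardiffnlp/tweet_topic_multilingual', type=str)  # hypothetical default
parser.add_argument('--dataset-name', default='en', type=str)
parser.add_argument('--dataset-column-label', default='label_name_flatten', type=str)  # hypothetical default
parser.add_argument('--dataset-column-text', default='text', type=str)  # hypothetical default
parser.add_argument('--random-seed', default=42, type=int)
parser.add_argument('--use-auth-token', action='store_true')
opt = parser.parse_args()
```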
training_scripts/script.sh
CHANGED
@@ -9,7 +9,6 @@ python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --datase
 
 # mix
 python finetune_t5.py --dataset-name mix --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-mix --model-organization cardiffnlp --use-auth-token
-python finetune_t5.py --dataset-name mix_2022 --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-mix-2022 --model-organization cardiffnlp --use-auth-token
 
 # single
 python finetune_t5.py --dataset-name es --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-es --model-organization cardiffnlp --use-auth-token --skip-train --skip-test
@@ -17,4 +16,9 @@ python finetune_t5.py --dataset-name en --low-cpu-mem-usage --model-alias mt5-sm
 python finetune_t5.py --dataset-name ja --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-ja --model-organization cardiffnlp --use-auth-token
 python finetune_t5.py --dataset-name gr --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-gr --model-organization cardiffnlp --use-auth-token
 
+# continuous
+python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name es --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-es --model-organization cardiffnlp --use-auth-token
+python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name en --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-en --model-organization cardiffnlp --use-auth-token
+python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name ja --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-ja --model-organization cardiffnlp --use-auth-token
+python finetune_t5.py -m cardiffnlp/mt5-small-tweet-topic-multi-en-2022 --dataset-name gr --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-multi-en-2022-gr --model-organization cardiffnlp --use-auth-token
 
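The new `# continuous` block passes an already fine-tuned English checkpoint via `-m`, so each run continues fine-tuning `cardiffnlp/mt5-small-tweet-topic-multi-en-2022` on a second language instead of starting from the base mT5 model. Once pushed, the resulting models are used the way the generated README suggests; for example (alias taken from the script above, the input tweet is a placeholder):

```python
from transformers import pipeline

# one of the continued fine-tuning checkpoints produced by the script
pipe = pipeline('text2text-generation', model='cardiffnlp/mt5-small-tweet-topic-multi-en-2022-es')
output = pipe('Últimas noticias del partido de esta noche')  # placeholder tweet
print(output[0]['generated_text'])  # comma-separated topic labels
```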