""" Fine-tune T5 on topic classification (multi-label multi-class classification)
```
python finetune_t5.py --dataset-name ja --model-alias mt5-small-tweet-topic-ja --model-organization cardiffnlp --low-cpu-mem-usage
```
"""
import json
import logging
import os
import argparse
import gc
from glob import glob
from typing import List, Set, Dict
from shutil import copyfile
from statistics import mean
from itertools import product
from distutils.dir_util import copy_tree
import torch
import transformers
from datasets import load_dataset
from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments, pipeline
from huggingface_hub import Repository
os.environ['TOKENIZERS_PARALLELISM'] = 'false'  # turn off tokenizer parallelism warnings
os.environ['WANDB_DISABLED'] = 'true' # disable wandb
def load_model(
model_name: str,
use_auth_token: bool = False,
low_cpu_mem_usage: bool = False) -> transformers.PreTrainedModel:
"""Load language model from huggingface model hub."""
# config & tokenizer
config = transformers.AutoConfig.from_pretrained(model_name, use_auth_token=use_auth_token)
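    # pick the conditional-generation class that matches the checkpoint architecture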
if config.model_type == 't5': # T5 model requires T5ForConditionalGeneration class
model_class = transformers.T5ForConditionalGeneration.from_pretrained
elif config.model_type == 'mt5':
model_class = transformers.MT5ForConditionalGeneration.from_pretrained
elif config.model_type == 'bart':
model_class = transformers.BartForConditionalGeneration.from_pretrained
elif config.model_type == 'mbart':
model_class = transformers.MBartForConditionalGeneration.from_pretrained
else:
raise ValueError(f'unsupported model type: {config.model_type}')
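    # 'model_class' is the bound 'from_pretrained' loader of the selected class, so calling it loads the weights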
param = {'config': config, 'use_auth_token': use_auth_token, 'low_cpu_mem_usage': low_cpu_mem_usage}
model = model_class(model_name, **param)
return model
def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]) -> Dict[str, float]:
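    """Compute example-level F1 between reference and predicted label sets, averaged over examples."""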
scores = []
    for g, r in zip(references, predictions):
        tp = len(set(g).intersection(set(r)))
        fn = len([_g for _g in g if _g not in r])  # reference labels the prediction missed
        fp = len([_r for _r in r if _r not in g])  # predicted labels not in the reference
if tp == 0:
f1 = 0
else:
f1 = 2 * tp / (2 * tp + fp + fn)
scores.append(f1)
return {'f1': mean(scores)}
def train(
model_name: str,
model_low_cpu_mem_usage: bool,
dataset: str,
dataset_name: str,
dataset_column_label: str,
dataset_column_text: str,
dataset_split_train: str,
dataset_split_validation: str,
dataset_split_test: str,
lr: List,
epoch: List,
batch: List,
down_sample_train: int,
down_sample_validation: int,
random_seed: int,
use_auth_token: bool,
output_dir: str,
model_alias: str,
model_organization: str,
skip_train: bool = False,
skip_test: bool = False,
skip_upload: bool = False,
batch_eval: int = None):
"""Fine-tune seq2seq model."""
logging.info(f'[CONFIG]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
if not output_dir:
output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
# dataset process
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
dataset_split = {
'train': [dataset_split_train, down_sample_train],
'validation': [dataset_split_validation, down_sample_validation]
}
dataset_instance = load_dataset(dataset, dataset_name, use_auth_token=use_auth_token)
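    # tokenize each split; a down-sampled copy ('*_ds') is kept for the hyperparameter search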
tokenized_dataset = {}
for s, (s_dataset, down_sample) in dataset_split.items():
tokenized_dataset[s] = []
dataset_tmp = dataset_instance[s_dataset]
        dataset_tmp = dataset_tmp.shuffle(seed=random_seed)  # Dataset.shuffle is not in-place
for i in dataset_tmp:
model_inputs = tokenizer(i[dataset_column_text], truncation=True)
model_inputs['labels'] = tokenizer(text_target=i[dataset_column_label], truncation=True)['input_ids']
tokenized_dataset[s].append(model_inputs)
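        # build a fixed-size random subset when the split is larger than the down-sample budget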
if down_sample is not None and len(dataset_tmp) > down_sample:
tokenized_dataset[f'{s}_ds'] = []
dataset_tmp = dataset_tmp.select(list(range(down_sample)))
for i in dataset_tmp:
model_inputs = tokenizer(i[dataset_column_text], truncation=True)
model_inputs['labels'] = tokenizer(text_target=i[dataset_column_label], truncation=True)['input_ids']
tokenized_dataset[f'{s}_ds'].append(model_inputs)
else:
tokenized_dataset[f'{s}_ds'] = tokenized_dataset[s]
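    # validation metric: greedy argmax over the raw logits (no autoregressive generation),
    # decoded into comma-separated label sets and scored with the F1 above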
def compute_metric(eval_pred) -> Dict[str, float]: # for parameter search
def decode_tokens(token_ids) -> List[Set[str]]:
return [
set(tokenizer.decode(list(filter(lambda x: x != -100, r)), skip_special_tokens=True).split(',')) for r
in token_ids
]
predictions, reference_token_ids = eval_pred
# format reference
references_decode = decode_tokens(reference_token_ids)
# format prediction
        logit, _ = predictions  # only the logits are used; the second element of the tuple is ignored
        generation_token_id = logit.argmax(-1)
        # positions the trainer padded with -100 carry -100 in every vocabulary slot; keep them masked
        generation_token_id[logit.min(-1) == -100] = -100
generation_decode = decode_tokens(generation_token_id)
return get_f1_score(references_decode, generation_decode)
if not skip_train:
lr = [1e-6, 1e-5, 1e-4] if lr is None else lr
batch = [32] if not batch else batch
epoch = [3, 5] if not epoch else epoch
batch_eval = min(batch) if not batch_eval else batch_eval
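        # grid search over every (learning rate, batch size, epoch) combination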
for n, (lr_tmp, batch_tmp, epoch_tmp) in enumerate(product(lr, batch, epoch)):
logging.info(f"[TRAIN {n}/{len(lr) * len(batch) * len(epoch)}] lr: {lr_tmp}, batch: {batch_tmp}")
output_dir_tmp = f"{output_dir}/model_lr_{lr_tmp}_batch_{batch_tmp}_epoch_{epoch_tmp}"
if os.path.exists(f"{output_dir_tmp}/eval_results.json"):
continue
model = load_model(
model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
)
trainer = Seq2SeqTrainer(
model=model,
args=Seq2SeqTrainingArguments(
num_train_epochs=epoch_tmp,
learning_rate=lr_tmp,
output_dir=output_dir_tmp,
evaluation_strategy="no",
per_device_eval_batch_size=batch_eval,
seed=random_seed,
per_device_train_batch_size=batch_tmp,
),
data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=model),
train_dataset=tokenized_dataset['train_ds'],
eval_dataset=tokenized_dataset['validation_ds'],
compute_metrics=compute_metric,
)
# train
result = trainer.train()
trainer.log_metrics("train", result.metrics)
trainer.save_metrics("train", result.metrics)
# evaluate
metrics = trainer.evaluate()
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
            # save the checkpoint, then release memory before the next run
trainer.save_model()
trainer.save_state()
del trainer
del model
gc.collect()
torch.cuda.empty_cache()
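        # collect the validation scores of every finished run and pick the best checkpoint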
model_score = []
for eval_file in glob(f"{output_dir}/model_*/eval_results.json"):
with open(eval_file) as f:
results = json.load(f)
model_score.append([os.path.dirname(eval_file), results['eval_loss'], results['eval_f1']])
logging.info("Search Result")
for i in model_score:
logging.info(i)
        best_f1 = max(model_score, key=lambda x: x[2])[2]
        top_models = [i for i in model_score if i[2] == best_f1]
        if len(top_models) > 1:
            # break ties on F1 by the lowest validation loss among the tied runs
            best_model = sorted(top_models, key=lambda x: x[1])[0][0]
        else:
            best_model = top_models[0][0]
copy_tree(best_model, f'{output_dir}/best_model')
tokenizer.save_pretrained(f'{output_dir}/best_model')
else:
logging.info('skip hyperparameter search & model training (already done)')
# get metric on the test set
if not skip_test:
logging.info('run evaluation on test set')
if not os.path.exists(f'{output_dir}/best_model/prediction_test.txt'):
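            # run generation once and cache the predictions as plain text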
pipe = pipeline(
'text2text-generation',
model=f'{output_dir}/best_model',
device='cuda:0' if torch.cuda.is_available() else 'cpu',
)
input_data = [i[dataset_column_text] for i in dataset_instance[dataset_split_test]]
output = pipe(input_data, batch_size=batch_eval)
output = [i['generated_text'] for i in output]
with open(f'{output_dir}/best_model/prediction_test.txt', 'w') as f:
f.write('\n'.join(output))
with open(f'{output_dir}/best_model/prediction_test.txt') as f:
output = [set(i.split(',')) for i in f.read().split('\n')]
dataset_tmp = dataset_instance[dataset_split_test]
        # the label column holds a comma-separated string of topic names (the same format used as the training target)
        _references = [set(i[dataset_column_label].split(',')) for i in dataset_tmp]
eval_metric = get_f1_score(_references, output)
eval_metric[f'f1/{dataset}/{dataset_name}'] = eval_metric.pop('f1')
logging.info(json.dumps(eval_metric, indent=4))
with open(f'{output_dir}/best_model/evaluation_metrics.json', 'w') as f:
json.dump(eval_metric, f)
if not skip_upload:
        assert model_alias is not None and model_organization is not None, \
            'both model_alias and model_organization must be specified to upload the model'
logging.info('uploading to huggingface')
args = {'use_auth_token': use_auth_token, 'organization': model_organization}
model = load_model(model_name=f'{output_dir}/best_model')
model.push_to_hub(model_alias, **args)
tokenizer.push_to_hub(model_alias, **args)
        repo = Repository(model_alias, clone_from=f'{model_organization}/{model_alias}')
if os.path.exists(f'{output_dir}/best_model/prediction_test.txt'):
copyfile(f'{output_dir}/best_model/prediction_test.txt', f'{model_alias}/prediction_test.txt')
if os.path.exists(f'{output_dir}/best_model/evaluation_metrics.json'):
copyfile(f'{output_dir}/best_model/evaluation_metrics.json', f'{model_alias}/evaluation_metrics.json')
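        # pick a few quote-free training texts as model-card widget examples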
sample = [i[dataset_column_text] for i in dataset_instance[dataset_split_train]]
        sample = [i for i in sample if "'" not in i and '"' not in i][:3]  # skip texts containing quotes so the widget YAML stays valid
widget = '\n'.join([f"- text: '{t}'\n example_title: example {_n + 1}" for _n, t in enumerate(sample)])
with open(f'{model_alias}/README.md', 'w') as f:
f.write(f"""
---
widget:
{widget}
---
# {model_organization}/{model_alias}
This is [{model_name}](https://huggingface.co/{model_name}) fine-tuned on [{dataset} ({dataset_name})](https://huggingface.co/datasets/{dataset}).
### Usage
```python
from transformers import pipeline
pipe = pipeline('text2text-generation', model='{model_organization}/{model_alias}')
output = pipe('{sample[0]}')
```
""")
repo.push_to_hub()
if __name__ == '__main__':
# arguments
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
parser = argparse.ArgumentParser(description='Seq2Seq LM Fine-tuning on topic classification.')
parser.add_argument('-m', '--model-name', default='google/mt5-small', type=str)
parser.add_argument('--low-cpu-mem-usage', action='store_true')
parser.add_argument('-d', '--dataset', default='cardiffnlp/tweet_topic_multilingual', type=str)
parser.add_argument('--dataset-name', default='ja', type=str)
parser.add_argument('--dataset-column-label', default='label_name_flatten', type=str)
parser.add_argument('--dataset-column-text', default='text', type=str)
parser.add_argument('--dataset-split-train', default='train', type=str)
parser.add_argument('--dataset-split-validation', default='validation', type=str)
parser.add_argument('--dataset-split-test', default='test', type=str)
parser.add_argument('--lr', nargs='+', default=None, type=float)
parser.add_argument('--epoch', nargs='+', default=None, type=int)
parser.add_argument('--batch', nargs='+', default=None, type=int)
parser.add_argument('--batch-eval', type=int, default=None)
parser.add_argument('--down-sample-train', default=None, type=int)
parser.add_argument('--down-sample-validation', default=2000, type=int)
parser.add_argument('--random-seed', default=42, type=int)
parser.add_argument('--use-auth-token', action='store_true')
parser.add_argument('--output-dir', default=None, type=str)
parser.add_argument('--model-alias', default=None, type=str)
parser.add_argument('--model-organization', default=None, type=str)
parser.add_argument('--skip-train', action='store_true')
parser.add_argument('--skip-test', action='store_true')
parser.add_argument('--skip-upload', action='store_true')
opt = parser.parse_args()
train(
model_name=opt.model_name,
model_low_cpu_mem_usage=opt.low_cpu_mem_usage,
dataset=opt.dataset,
dataset_name=opt.dataset_name,
dataset_column_label=opt.dataset_column_label,
dataset_column_text=opt.dataset_column_text,
dataset_split_train=opt.dataset_split_train,
dataset_split_validation=opt.dataset_split_validation,
dataset_split_test=opt.dataset_split_test,
lr=opt.lr,
epoch=opt.epoch,
batch=opt.batch,
batch_eval=opt.batch_eval,
down_sample_train=opt.down_sample_train,
down_sample_validation=opt.down_sample_validation,
random_seed=opt.random_seed,
use_auth_token=opt.use_auth_token,
output_dir=opt.output_dir,
model_alias=opt.model_alias,
model_organization=opt.model_organization,
skip_train=opt.skip_train,
skip_test=opt.skip_test,
skip_upload=opt.skip_upload
)