""" Fine-tune T5 on topic classification (multi-label multi-class classification)
```
python finetune_t5.py --dataset-name ja --model-alias mt5-small-tweet-topic-ja --model-organization cardiffnlp
```
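Pass --skip-train/--skip-test/--skip-upload to skip a stage; --model-alias and --model-organization name the Hugging Face Hub repository the fine-tuned model is pushed to.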
"""
import json
import logging
import os
import argparse
import gc
from typing import List, Set
from shutil import copyfile
from statistics import mean
from itertools import product
import torch
import transformers
from datasets import load_dataset
from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments, pipeline
from huggingface_hub import Repository
os.environ['TOKENIZERS_PARALLELISM'] = 'false'  # disable the tokenizers parallelism warning
os.environ['WANDB_DISABLED'] = 'true'  # disable wandb logging
def load_model(
model_name: str,
use_auth_token: bool = False,
low_cpu_mem_usage: bool = False) -> transformers.PreTrainedModel:
"""Load language model from huggingface model hub."""
    # resolve the conditional-generation class for the checkpoint's model type
config = transformers.AutoConfig.from_pretrained(model_name, use_auth_token=use_auth_token)
if config.model_type == 't5': # T5 model requires T5ForConditionalGeneration class
model_class = transformers.T5ForConditionalGeneration.from_pretrained
elif config.model_type == 'mt5':
model_class = transformers.MT5ForConditionalGeneration.from_pretrained
elif config.model_type == 'bart':
model_class = transformers.BartForConditionalGeneration.from_pretrained
elif config.model_type == 'mbart':
model_class = transformers.MBartForConditionalGeneration.from_pretrained
else:
raise ValueError(f'unsupported model type: {config.model_type}')
param = {'config': config, 'use_auth_token': use_auth_token, 'low_cpu_mem_usage': low_cpu_mem_usage}
model = model_class(model_name, **param)
return model
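# Hedged usage example: `load_model('google/mt5-small')` resolves model_type
# 'mt5' from the checkpoint config and returns an MT5ForConditionalGeneration.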
def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]):
    """Example-based (sample-averaged) F1 over predicted and gold label sets."""
    scores = []
    for g, r in zip(references, predictions):
        tp = len(set(g).intersection(set(r)))
        fp = len([_r for _r in r if _r not in g])  # predicted labels missing from the gold set
        fn = len([_g for _g in g if _g not in r])  # gold labels the model failed to predict
        if tp == 0:
            f1 = 0  # also avoids 0/0 when both label sets are empty
        else:
            f1 = 2 * tp / (2 * tp + fp + fn)
        scores.append(f1)
    return {'f1': mean(scores)}
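# Worked example: gold {'sports', 'news'} vs. predicted {'sports'} gives
# tp=1, fp=0, fn=1, so F1 = 2*1 / (2*1 + 0 + 1) = 2/3 ~= 0.667.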
def train(
model_name: str,
model_low_cpu_mem_usage: bool,
dataset: str,
dataset_name: str,
dataset_column_label: str,
dataset_column_text: str,
dataset_split_train: str,
dataset_split_validation: str,
dataset_split_test: str,
lr: List,
epoch: int,
batch: List,
down_sample_train: int,
down_sample_validation: int,
random_seed: int,
use_auth_token: bool,
output_dir: str,
model_alias: str,
model_organization: str,
skip_train: bool = False,
skip_test: bool = False,
skip_upload: bool = False,
eval_steps: float = 0.25,
eval_batch_size: int = 16):
"""Fine-tune seq2seq model."""
logging.info(f'[CONFIG]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
# set up the output directory
if output_dir is None:
output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
# dataset process
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
dataset_split = {
'train': [dataset_split_train, down_sample_train],
'validation': [dataset_split_validation, down_sample_validation]
}
dataset_instance = load_dataset(dataset, dataset_name, use_auth_token=use_auth_token)
tokenized_dataset = {}
for s, (s_dataset, down_sample) in dataset_split.items():
tokenized_dataset[s] = []
        dataset_tmp = dataset_instance[s_dataset]
        # `shuffle` returns a new dataset rather than shuffling in place
        dataset_tmp = dataset_tmp.shuffle(seed=random_seed)
for i in dataset_tmp:
model_inputs = tokenizer(i[dataset_column_text], truncation=True)
model_inputs['labels'] = tokenizer(text_target=i[dataset_column_label], truncation=True)['input_ids']
tokenized_dataset[s].append(model_inputs)
        if down_sample is not None and len(dataset_tmp) > down_sample:
            # keep a down-sampled copy (suffix `_ds`) for the grid search
            tokenized_dataset[f'{s}_ds'] = []
dataset_tmp = dataset_tmp.select(list(range(down_sample)))
for i in dataset_tmp:
model_inputs = tokenizer(i[dataset_column_text], truncation=True)
model_inputs['labels'] = tokenizer(text_target=i[dataset_column_label], truncation=True)['input_ids']
tokenized_dataset[f'{s}_ds'].append(model_inputs)
else:
tokenized_dataset[f'{s}_ds'] = tokenized_dataset[s]
def compute_metric(eval_pred): # for parameter search
def decode_tokens(token_ids) -> List[Set[str]]:
return [
set(tokenizer.decode(list(filter(lambda x: x != -100, r)), skip_special_tokens=True).split(',')) for r
in token_ids
]
predictions, reference_token_ids = eval_pred
# format reference
references_decode = decode_tokens(reference_token_ids)
# format prediction
logit, loss = predictions
generation_token_id = logit.argmax(-1)
generation_token_id[logit.min(-1) == -100] = -100
generation_decode = decode_tokens(generation_token_id)
return get_f1_score(references_decode, generation_decode)
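    # Note: compute_metric scores per-position argmax over the teacher-forced
    # logits instead of running autoregressive generation; it is a cheap proxy
    # for the grid search, while the test evaluation below uses the
    # text2text-generation pipeline for actual generation.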
if not skip_train:
lr = [1e-6, 1e-4] if lr is None else lr
batch = [64] if batch is None else batch
for n, (lr_tmp, batch_tmp) in enumerate(product(lr, batch)):
logging.info(f"[TRAIN {n}/{len(lr) * len(batch)}] lr: {lr_tmp}, batch: {batch_tmp}")
trainer = Seq2SeqTrainer(
args=Seq2SeqTrainingArguments(
num_train_epochs=epoch,
learning_rate=lr_tmp,
output_dir=f'{output_dir}/runs',
evaluation_strategy='steps',
eval_steps=eval_steps,
per_device_eval_batch_size=eval_batch_size,
seed=random_seed,
per_device_train_batch_size=batch_tmp,
),
data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=load_model(
model_name=model_name,
use_auth_token=use_auth_token,
low_cpu_mem_usage=model_low_cpu_mem_usage)),
train_dataset=tokenized_dataset['train_ds'],
eval_dataset=tokenized_dataset['validation_ds'],
compute_metrics=compute_metric,
)
os.makedirs(f'{output_dir}/model_{n}', exist_ok=True)
best_run = trainer.train()
trainer.save_model(f'{output_dir}/model_{n}')
tokenizer.save_pretrained(f'{output_dir}/model_{n}')
# grid search
with open(f'{output_dir}/model_{n}/hyperparameters.json', 'w') as f:
json.dump(best_run.hyperparameters, f)
del trainer
gc.collect()
torch.cuda.empty_cache()
logging.info(f'model saved at {output_dir}/model_{n}')
else:
logging.info('skip hyperparameter search & model training (already done)')
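    # ------------------------------------------------------------------
    # Model selection (a minimal sketch, not part of the original flow):
    # the test and upload steps below load f'{output_dir}/model', which the
    # grid search above never creates. As a hedged assumption, we pick the
    # grid-search checkpoint with the best validation F1 and copy it there.
    # ------------------------------------------------------------------
    if not os.path.exists(f'{output_dir}/model'):
        from glob import glob  # local imports keep this sketch self-contained
        from shutil import copytree
        val_data = dataset_instance[dataset_split_validation]
        references = [set(i[dataset_column_label].split(',')) for i in val_data]
        input_data = [i[dataset_column_text] for i in val_data]
        best_f1, best_model_dir = -1.0, None
        for model_dir in sorted(glob(f'{output_dir}/model_*')):
            pipe = pipeline(
                'text2text-generation',
                model=model_dir,
                device='cuda:0' if torch.cuda.is_available() else 'cpu',
            )
            generation = pipe(input_data, batch_size=eval_batch_size)
            predictions = [set(i['generated_text'].split(',')) for i in generation]
            val_f1 = get_f1_score(references, predictions)['f1']
            logging.info(f'validation f1 ({model_dir}): {val_f1}')
            if val_f1 > best_f1:
                best_f1, best_model_dir = val_f1, model_dir
            del pipe
            gc.collect()
            torch.cuda.empty_cache()
        if best_model_dir is not None:
            copytree(best_model_dir, f'{output_dir}/model')
            logging.info(f'best model ({best_model_dir}, validation f1: {best_f1}) copied to {output_dir}/model')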
# get metric on the test set
if not skip_test:
logging.info('run evaluation on test set')
if not os.path.exists(f'{output_dir}/model/prediction_test.txt'):
pipe = pipeline(
'text2text-generation',
model=f'{output_dir}/model',
device='cuda:0' if torch.cuda.is_available() else 'cpu',
)
input_data = [i[dataset_column_text] for i in dataset_instance[dataset_split_test]]
output = pipe(input_data, batch_size=eval_batch_size)
output = [i['generated_text'] for i in output]
with open(f'{output_dir}/model/prediction_test.txt', 'w') as f:
f.write('\n'.join(output))
with open(f'{output_dir}/model/prediction_test.txt') as f:
output = [set(i.split(',')) for i in f.read().split('\n')]
        dataset_tmp = dataset_instance[dataset_split_test]
        # the label column holds a comma-separated string of label names,
        # matching the format the model is trained to generate
        _references = [set(_i[dataset_column_label].split(',')) for _i in dataset_tmp]
eval_metric = get_f1_score(_references, output)
logging.info(json.dumps(eval_metric, indent=4))
with open(f'{output_dir}/model/evaluation_metrics.json', 'w') as f:
json.dump(eval_metric, f)
if not skip_upload:
        assert model_alias is not None and model_organization is not None, \
            'both model_alias and model_organization must be specified to upload'
logging.info('uploading to huggingface')
args = {'use_auth_token': use_auth_token, 'organization': model_organization}
model = load_model(model_name=f'{output_dir}/model')
model.push_to_hub(model_alias, **args)
tokenizer.push_to_hub(model_alias, **args)
        repo = Repository(model_alias, clone_from=f'{model_organization}/{model_alias}')
copyfile(f'{output_dir}/model/hyperparameters.json', f'{model_alias}/hyperparameters.json')
if os.path.exists(f'{output_dir}/model/prediction_test.txt'):
copyfile(f'{output_dir}/model/prediction_test.txt', f'{model_alias}/prediction_test.txt')
if os.path.exists(f'{output_dir}/model/evaluation_metrics.json'):
copyfile(f'{output_dir}/model/evaluation_metrics.json', f'{model_alias}/evaluation_metrics.json')
sample = [i[dataset_column_text] for i in dataset_instance[dataset_split_train]]
        sample = [i for i in sample if "'" not in i and '"' not in i][:3]  # drop samples whose quotes would break the YAML widget
widget = '\n'.join([f"- text: '{t}'\n example_title: example {_n + 1}" for _n, t in enumerate(sample)])
with open(f'{model_alias}/README.md', 'w') as f:
f.write(f"""
---
widget:
{widget}
---
# {model_organization}/{model_alias}
This is [{model_name}](https://huggingface.co/{model_name}) fine-tuned on [{dataset} ({dataset_name})](https://huggingface.co/datasets/{dataset}).
### Usage
```python
from transformers import pipeline
pipe = pipeline('text2text-generation', model='{model_organization}/{model_alias}')
output = pipe('{sample[0]}')
```
""")
repo.push_to_hub()
if __name__ == '__main__':
# arguments
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
parser = argparse.ArgumentParser(description='Seq2Seq LM Fine-tuning on topic classification.')
parser.add_argument('-m', '--model-name', default='google/mt5-small', type=str)
parser.add_argument('--low-cpu-mem-usage', action='store_true')
parser.add_argument('-d', '--dataset', default='cardiffnlp/tweet_topic_multilingual', type=str)
parser.add_argument('--dataset-name', default='ja', type=str)
parser.add_argument('--dataset-column-label', default='label_name_flatten', type=str)
parser.add_argument('--dataset-column-text', default='text', type=str)
parser.add_argument('--dataset-split-train', default='train', type=str)
parser.add_argument('--dataset-split-validation', default='validation', type=str)
parser.add_argument('--dataset-split-test', default='test', type=str)
parser.add_argument('--lr', nargs='+', default=None, type=float)
parser.add_argument('--epoch', default=5, type=int)
parser.add_argument('--batch', nargs='+', default=None, type=int)
parser.add_argument('--down-sample-train', default=None, type=int)
parser.add_argument('--down-sample-validation', default=2000, type=int)
parser.add_argument('--random-seed', default=42, type=int)
parser.add_argument('--use-auth-token', action='store_true')
parser.add_argument('--eval-steps', default=100, type=int)
parser.add_argument('--output-dir', default=None, type=str)
parser.add_argument('--model-alias', default=None, type=str)
parser.add_argument('--model-organization', default=None, type=str)
parser.add_argument('--skip-train', action='store_true')
parser.add_argument('--skip-test', action='store_true')
parser.add_argument('--skip-upload', action='store_true')
opt = parser.parse_args()
train(
model_name=opt.model_name,
model_low_cpu_mem_usage=opt.low_cpu_mem_usage,
dataset=opt.dataset,
dataset_name=opt.dataset_name,
dataset_column_label=opt.dataset_column_label,
dataset_column_text=opt.dataset_column_text,
dataset_split_train=opt.dataset_split_train,
dataset_split_validation=opt.dataset_split_validation,
dataset_split_test=opt.dataset_split_test,
lr=opt.lr,
epoch=opt.epoch,
batch=opt.batch,
down_sample_train=opt.down_sample_train,
down_sample_validation=opt.down_sample_validation,
random_seed=opt.random_seed,
use_auth_token=opt.use_auth_token,
eval_steps=opt.eval_steps,
output_dir=opt.output_dir,
model_alias=opt.model_alias,
model_organization=opt.model_organization,
skip_train=opt.skip_train,
skip_test=opt.skip_test,
skip_upload=opt.skip_upload
)