""" Fine-tune T5 on topic classification (multi-label multi-class classification) | |
``` | |
python finetune_t5.py --dataset-name ja --model-alias mt5-small-tweet-topic-ja --model-organization cardiffnlp --low-cpu-mem-usage | |
``` | |
""" | |
import json
import logging
import os
import argparse
import gc
from glob import glob
from typing import List, Set
from shutil import copyfile
from statistics import mean
from itertools import product
from distutils.dir_util import copy_tree

import torch
import transformers
from datasets import load_dataset
from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments, pipeline
from huggingface_hub import Repository

os.environ['TOKENIZERS_PARALLELISM'] = 'false'  # turn off the tokenizers parallelism warning
os.environ['WANDB_DISABLED'] = 'true'  # disable wandb logging
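# hyperparameter search space: train() sweeps these learning rates with a fixed
# batch size and epoch count (the commented-out pipeline below searches a full grid)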
_LR = [1e-6, 1e-5, 1e-4]
_BATCH = 32
_EPOCH = 5


def load_model(
        model_name: str,
        use_auth_token: bool = False,
        low_cpu_mem_usage: bool = False) -> transformers.PreTrainedModel:
    """Load a seq2seq language model from the Hugging Face model hub."""
    # resolve the conditional-generation class from the model config
    config = transformers.AutoConfig.from_pretrained(model_name, use_auth_token=use_auth_token)
    if config.model_type == 't5':  # T5 models require the T5ForConditionalGeneration class
        model_class = transformers.T5ForConditionalGeneration.from_pretrained
    elif config.model_type == 'mt5':
        model_class = transformers.MT5ForConditionalGeneration.from_pretrained
    elif config.model_type == 'bart':
        model_class = transformers.BartForConditionalGeneration.from_pretrained
    elif config.model_type == 'mbart':
        model_class = transformers.MBartForConditionalGeneration.from_pretrained
    else:
        raise ValueError(f'unsupported model type: {config.model_type}')
    param = {'config': config, 'use_auth_token': use_auth_token, 'low_cpu_mem_usage': low_cpu_mem_usage}
    return model_class(model_name, **param)
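

# Example usage of load_model (a minimal sketch; 'google/mt5-small' is the script's
# default checkpoint and is used here purely for illustration):
#   model = load_model('google/mt5-small', low_cpu_mem_usage=True)
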
def train(
        model_name: str,
        model_low_cpu_mem_usage: bool,
        dataset: str,
        dataset_name: str,
        dataset_column_label: str,
        dataset_column_text: str,
        random_seed: int,
        use_auth_token: bool):
    """Fine-tune seq2seq model."""
    logging.info(f'[CONFIG]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
    dataset_instance = load_dataset(dataset, dataset_name, split='train', use_auth_token=use_auth_token)
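    # tokenize each example individually: the input text is encoded as usual and the
    # label string (the flattened, comma-separated topic names) becomes the target sequence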
    tokenized_dataset = []
    for d in dataset_instance:
        model_inputs = tokenizer(d[dataset_column_text], truncation=True)
        model_inputs['labels'] = tokenizer(text_target=d[dataset_column_label], truncation=True)['input_ids']
        tokenized_dataset.append(model_inputs)
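    # learning-rate sweep: one full training run per value in _LR, skipping runs
    # whose checkpoint directory already contains eval results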
    for n, lr_tmp in enumerate(_LR):
        logging.info(f'[TRAIN {n}/{len(_LR)}] lr: {lr_tmp}')
        output_dir_tmp = f'{output_dir}/model_lr_{lr_tmp}'
        if os.path.exists(f'{output_dir_tmp}/eval_results.json'):
            continue
        model = load_model(
            model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
        )
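        # DataCollatorForSeq2Seq pads inputs and labels dynamically per batch and pads
        # labels with -100 by default, so padded positions are ignored by the loss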
        trainer = Seq2SeqTrainer(
            model=model,
            args=Seq2SeqTrainingArguments(
                num_train_epochs=_EPOCH,
                learning_rate=lr_tmp,
                output_dir=output_dir_tmp,
                save_strategy='epoch',
                evaluation_strategy='no',
                seed=random_seed,
                per_device_train_batch_size=_BATCH,
            ),
            data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=model),
            train_dataset=tokenized_dataset,
        )
        # train
        result = trainer.train()
        trainer.log_metrics('train', result.metrics)
        trainer.save_metrics('train', result.metrics)
        trainer.save_model()
        trainer.save_state()
        # clean up memory
        del trainer
        del model
        gc.collect()
        torch.cuda.empty_cache()
#
#
# def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]) -> float:
#     """Average per-example F1 between gold and predicted label sets."""
#     scores = []
#     for g, r in zip(references, predictions):
#         tp = len(set(g).intersection(set(r)))
#         fn = len([_g for _g in g if _g not in r])  # gold labels missed by the prediction
#         fp = len([_r for _r in r if _r not in g])  # predicted labels not in the gold set
#         if tp == 0:
#             f1 = 0
#         else:
#             f1 = 2 * tp / (2 * tp + fp + fn)
#         scores.append(f1)
#     return mean(scores)
#
#
# def evaluate(
#         model_path: str,
#         batch_eval: int,
#         dataset_column_text: str,
#         dataset_instance,
#         dataset_split_test: str,
#         dataset_column_label: str,
# ):
#     prediction_file = f'{model_path}/prediction_test.txt'
#     input_data = [i[dataset_column_text] for i in dataset_instance[dataset_split_test]]
#     if not os.path.exists(prediction_file):
#         pipe = pipeline(
#             'text2text-generation',
#             model=model_path,
#             device='cuda:0' if torch.cuda.is_available() else 'cpu',
#         )
#         output = pipe(input_data, batch_size=batch_eval)
#         output = [i['generated_text'] for i in output]
#         with open(prediction_file, 'w') as f:
#             f.write('\n'.join(output))
#     with open(prediction_file) as f:
#         output = [set(i.split(',')) for i in f.read().split('\n')]
#     # gold references use the same comma-separated label format as the training targets
#     dataset_tmp = dataset_instance[dataset_split_test]
#     _references = [set(_i[dataset_column_label].split(',')) for _i in dataset_tmp]
#     eval_metric = {'f1': get_f1_score(_references, output)}
#     logging.info(json.dumps(eval_metric, indent=4))
#     with open(f'{model_path}/evaluation_metrics.json', 'w') as f:
#         json.dump(eval_metric, f)
#
#
# def train(
#         model_name: str,
#         model_low_cpu_mem_usage: bool,
#         dataset: str,
#         dataset_name: str,
#         dataset_column_label: str,
#         dataset_column_text: str,
#         dataset_split_train: str,
#         dataset_split_validation: str,
#         dataset_split_test: str,
#         lr: List,
#         epoch: List,
#         batch: List,
#         down_sample_train: int,
#         down_sample_validation: int,
#         random_seed: int,
#         use_auth_token: bool,
#         output_dir: str,
#         model_alias: str,
#         model_organization: str,
#         skip_train: bool = False,
#         skip_test: bool = False,
#         skip_upload: bool = False,
#         batch_eval: int = None):
#     """Fine-tune seq2seq model (full pipeline: hyperparameter search, evaluation, upload)."""
#     logging.info(f'[CONFIG]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
#     if not output_dir:
#         output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
#     # dataset process
#     tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
#     dataset_split = {
#         'train': dataset_split_train,
#         'validation': dataset_split_validation
#     }
#     dataset_instance = load_dataset(dataset, dataset_name, use_auth_token=use_auth_token)
#     tokenized_dataset = {}
#     for s, s_dataset in dataset_split.items():
#         tokenized_dataset[s] = []
#         for i in dataset_instance[s_dataset]:
#             model_inputs = tokenizer(i[dataset_column_text], truncation=True)
#             model_inputs['labels'] = tokenizer(text_target=i[dataset_column_label], truncation=True)['input_ids']
#             tokenized_dataset[s].append(model_inputs)
#
#     if not skip_train:
#         lr = [1e-6, 1e-5, 1e-4] if lr is None else lr
#         batch = [32] if not batch else batch
#         epoch = [3, 5] if not epoch else epoch
#         batch_eval = min(batch) if not batch_eval else batch_eval
#         for n, (lr_tmp, batch_tmp, epoch_tmp) in enumerate(product(lr, batch, epoch)):
#             logging.info(f'[TRAIN {n}/{len(lr) * len(batch) * len(epoch)}] lr: {lr_tmp}, batch: {batch_tmp}, epoch: {epoch_tmp}')
#             output_dir_tmp = f'{output_dir}/model_lr_{lr_tmp}_batch_{batch_tmp}_epoch_{epoch_tmp}'
#             if os.path.exists(f'{output_dir_tmp}/eval_results.json'):
#                 continue
#             model = load_model(
#                 model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
#             )
#             trainer = Seq2SeqTrainer(
#                 model=model,
#                 args=Seq2SeqTrainingArguments(
#                     num_train_epochs=epoch_tmp,
#                     learning_rate=lr_tmp,
#                     output_dir=output_dir_tmp,
#                     evaluation_strategy='no',
#                     seed=random_seed,
#                     per_device_train_batch_size=batch_tmp,
#                 ),
#                 data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=model),
#                 train_dataset=tokenized_dataset['train'],
#             )
#             # train
#             result = trainer.train()
#             trainer.log_metrics('train', result.metrics)
#             trainer.save_metrics('train', result.metrics)
#             trainer.save_model()
#             trainer.save_state()
#             # clean up memory
#             del trainer
#             del model
#             gc.collect()
#             torch.cuda.empty_cache()
#
#         model_score = []
#         for eval_file in glob(f'{output_dir}/model_*/eval_results.json'):
#             with open(eval_file) as f:
#                 results = json.load(f)
#             model_score.append([os.path.dirname(eval_file), results['eval_loss'], results['eval_f1']])
#         logging.info('Search Result')
#         for i in model_score:
#             logging.info(i)
#         # pick the run with the best F1; if several runs tie, break the tie by the lowest loss
#         max_metric = max(model_score, key=lambda x: x[2])
#         if len([i for i in model_score if i[2] == max_metric[2]]) > 1:
#             best_model = sorted(model_score, key=lambda x: x[1])[0][0]
#         else:
#             best_model = sorted(model_score, key=lambda x: x[2])[-1][0]
#         copy_tree(best_model, f'{output_dir}/best_model')
#         tokenizer.save_pretrained(f'{output_dir}/best_model')
#     else:
#         logging.info('skip hyperparameter search & model training (already done)')
#
#     # get metric on the test set
#     if not skip_test:
#         logging.info('run evaluation on test set')
#         evaluate(
#             model_path=f'{output_dir}/best_model',
#             batch_eval=batch_eval,
#             dataset_column_text=dataset_column_text,
#             dataset_instance=dataset_instance,
#             dataset_split_test=dataset_split_test,
#             dataset_column_label=dataset_column_label,
#         )
#     if not skip_upload:
#         assert model_alias is not None and model_organization is not None, \
#             'model_alias and model_organization must be specified to upload'
#         logging.info('uploading to huggingface')
#         args = {'use_auth_token': use_auth_token, 'organization': model_organization}
#         model = load_model(model_name=f'{output_dir}/best_model')
#         model.push_to_hub(model_alias, **args)
#         tokenizer.push_to_hub(model_alias, **args)
#         repo = Repository(model_alias, f'{model_organization}/{model_alias}')
#         if os.path.exists(f'{output_dir}/best_model/prediction_test.txt'):
#             copyfile(f'{output_dir}/best_model/prediction_test.txt', f'{model_alias}/prediction_test.txt')
#         if os.path.exists(f'{output_dir}/best_model/evaluation_metrics.json'):
#             copyfile(f'{output_dir}/best_model/evaluation_metrics.json', f'{model_alias}/evaluation_metrics.json')
#         sample = [i[dataset_column_text] for i in dataset_instance[dataset_split_train]]
#         sample = [i for i in sample if '"' not in i and "'" not in i][:3]
#         widget = '\n'.join([f"- text: '{t}'\n  example_title: example {_n + 1}" for _n, t in enumerate(sample)])
#         with open(f'{model_alias}/README.md', 'w') as f:
#             f.write(f"""
# ---
# widget:
# {widget}
# ---
#
# # {model_organization}/{model_alias}
#
# This is [{model_name}](https://huggingface.co/{model_name}) fine-tuned on [{dataset} ({dataset_name})](https://huggingface.co/datasets/{dataset}).
#
# ### Usage
#
# ```python
# from transformers import pipeline
#
# pipe = pipeline('text2text-generation', model='{model_organization}/{model_alias}')
# output = pipe('{sample[0]}')
# ```
# """)
#         repo.push_to_hub()


if __name__ == '__main__':
    # arguments
    logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
    parser = argparse.ArgumentParser(description='Seq2Seq LM fine-tuning on topic classification.')
    parser.add_argument('-m', '--model-name', default='google/mt5-small', type=str)
    parser.add_argument('--low-cpu-mem-usage', action='store_true')
    parser.add_argument('-d', '--dataset', default='cardiffnlp/tweet_topic_multilingual', type=str)
    parser.add_argument('--dataset-name', default='ja', type=str)
    parser.add_argument('--dataset-column-label', default='label_name_flatten', type=str)
    parser.add_argument('--dataset-column-text', default='text', type=str)
    parser.add_argument('--dataset-split-train', default='train', type=str)
    parser.add_argument('--dataset-split-validation', default='validation', type=str)
    parser.add_argument('--dataset-split-test', default='test', type=str)
    parser.add_argument('--lr', nargs='+', default=None, type=float)
    parser.add_argument('--epoch', nargs='+', default=None, type=int)
    parser.add_argument('--batch', nargs='+', default=None, type=int)
    parser.add_argument('--batch-eval', type=int, default=None)
    parser.add_argument('--down-sample-train', default=None, type=int)
    parser.add_argument('--down-sample-validation', default=200, type=int)
    parser.add_argument('--random-seed', default=42, type=int)
    parser.add_argument('--use-auth-token', action='store_true')
    parser.add_argument('--eval-steps', default=100, type=int)
    parser.add_argument('--output-dir', default=None, type=str)
    parser.add_argument('--model-alias', default=None, type=str)
    parser.add_argument('--model-organization', default=None, type=str)
    parser.add_argument('--skip-train', action='store_true')
    parser.add_argument('--skip-test', action='store_true')
    parser.add_argument('--skip-upload', action='store_true')
    opt = parser.parse_args()
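    # NOTE: the active train() above only consumes a subset of the parsed arguments;
    # the remaining flags (splits, lr/epoch/batch grids, down-sampling, upload options)
    # belong to the fuller commented-out pipeline and are currently ignored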
    train(
        model_name=opt.model_name,
        model_low_cpu_mem_usage=opt.low_cpu_mem_usage,
        dataset=opt.dataset,
        dataset_name=opt.dataset_name,
        dataset_column_label=opt.dataset_column_label,
        dataset_column_text=opt.dataset_column_text,
        random_seed=opt.random_seed,
        use_auth_token=opt.use_auth_token,
    )