"""Fine-tune T5 on topic classification (multi-label, multi-class classification).

```
python finetune_t5.py --dataset-name ja --model-alias mt5-small-tweet-topic-ja --model-organization cardiffnlp --low-cpu-mem-usage
```
"""
|
import json
import logging
import os
import argparse
import gc
from glob import glob
from typing import List, Set
from shutil import copyfile
from statistics import mean
from distutils.dir_util import copy_tree

import torch
import transformers
from datasets import load_dataset
from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments, pipeline
from huggingface_hub import Repository

os.environ['TOKENIZERS_PARALLELISM'] = 'false'
os.environ['WANDB_DISABLED'] = 'true'
|
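# Hyper-parameters: learning rates to grid-search over, plus fixed batch size and epoch count.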
_LR = [1e-6, 1e-5, 1e-4]
_BATCH = 32
_EPOCH = 5
|
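# Map each English topic label to its Greek, Spanish, and Japanese counterparts so that
# labels generated in any of the dataset languages can be unified before scoring.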
_CLASS_MAP = {
    'Arts & Culture': ['Τέχνες & Πολιτισμός', 'Arte y cultura', 'アート&カルチャー'],
    'Business & Entrepreneurs': ['Επιχειρήσεις & Επιχειρηματίες', 'Negocios y emprendedores', 'ビジネス'],
    'Celebrity & Pop Culture': ['Διασημότητες & Ποπ κουλτούρα', 'Celebridades y cultura pop', '芸能'],
    'Diaries & Daily Life': ['Ημερολόγια & Καθημερινή ζωή', 'Diarios y vida diaria', '日常'],
    'Family': ['Οικογένεια', 'Familia', '家族'],
    'Fashion & Style': ['Μόδα & Στυλ', 'Moda y estilo', 'ファッション'],
    'Film, TV & Video': ['Ταινίες, τηλεόραση & βίντεο', 'Cine, televisión y video', '映画&ラジオ'],
    'Fitness & Health': ['Γυμναστική & Υεία', 'Estado físico y salud', 'フィットネス&健康'],
    'Food & Dining': ['Φαγητό & Δείπνο', 'Comida y comedor', '料理'],
    'Learning & Educational': ['Μάθηση & Εκπαίδευση', 'Aprendizaje y educación', '教育関連'],
    'News & Social Concern': ['Ειδήσεις & Κοινωνία', 'Noticias e interés social', '社会'],
    'Relationships': ['Σχέσεις', 'Relaciones', '人間関係'],
    'Science & Technology': ['Επιστήμη & Τεχνολογία', 'Ciencia y Tecnología', 'サイエンス'],
    'Youth & Student Life': ['Νεανική & Φοιτητική ζωή', 'Juventud y Vida Estudiantil', '学校'],
    'Music': ['Μουσική', 'Música', '音楽'],
    'Gaming': ['Παιχνίδια', 'Juegos', 'ゲーム'],
    'Sports': ['Αθλητισμός', 'Deportes', 'スポーツ'],
    'Travel & Adventure': ['Ταξίδια & Περιπέτεια', 'Viajes y aventuras', '旅行'],
    'Other Hobbies': ['Άλλα χόμπι', 'Otros pasatiempos', 'その他']
}
|
|
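# Supported seq2seq architectures: T5/mT5 and BART/mBART conditional-generation checkpoints.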
def load_model(
        model_name: str,
        use_auth_token: bool = False,
        low_cpu_mem_usage: bool = False) -> transformers.PreTrainedModel:
    """Load language model from huggingface model hub."""
    config = transformers.AutoConfig.from_pretrained(model_name, use_auth_token=use_auth_token)
    if config.model_type == 't5':
        model_class = transformers.T5ForConditionalGeneration.from_pretrained
    elif config.model_type == 'mt5':
        model_class = transformers.MT5ForConditionalGeneration.from_pretrained
    elif config.model_type == 'bart':
        model_class = transformers.BartForConditionalGeneration.from_pretrained
    elif config.model_type == 'mbart':
        model_class = transformers.MBartForConditionalGeneration.from_pretrained
    else:
        raise ValueError(f'unsupported model type: {config.model_type}')
    param = {'config': config, 'use_auth_token': use_auth_token, 'low_cpu_mem_usage': low_cpu_mem_usage}
    return model_class(model_name, **param)
|
|
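# Fine-tune one model per learning rate in _LR, saving a checkpoint after every epoch.
# A learning-rate directory that already contains final weights is skipped, so the run
# can be resumed after an interruption.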
def train(
        model_name: str,
        model_low_cpu_mem_usage: bool,
        dataset: str,
        dataset_name: str,
        dataset_column_label: str,
        dataset_column_text: str,
        random_seed: int,
        use_auth_token: bool):
    """Fine-tune seq2seq model."""
    logging.info(f'[TRAIN]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'

    # Tokenize the input text and the target label string for every training example.
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
    dataset_instance = load_dataset(dataset, dataset_name, split="train", use_auth_token=use_auth_token)
    tokenized_dataset = []
    for d in dataset_instance:
        model_inputs = tokenizer(d[dataset_column_text], truncation=True)
        model_inputs['labels'] = tokenizer(text_target=d[dataset_column_label], truncation=True)['input_ids']
        tokenized_dataset.append(model_inputs)

    for n, lr_tmp in enumerate(_LR):
        logging.info(f"[TRAIN {n}/{len(_LR)}] lr: {lr_tmp}")
        output_dir_tmp = f"{output_dir}/model_lr_{lr_tmp}"
        if os.path.exists(f"{output_dir_tmp}/pytorch_model.bin"):
            continue
        model = load_model(
            model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
        )
        trainer = Seq2SeqTrainer(
            model=model,
            args=Seq2SeqTrainingArguments(
                num_train_epochs=_EPOCH,
                learning_rate=lr_tmp,
                output_dir=output_dir_tmp,
                save_strategy="epoch",
                evaluation_strategy="no",
                seed=random_seed,
                per_device_train_batch_size=_BATCH,
            ),
            data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=model),
            train_dataset=tokenized_dataset.copy(),
        )

        trainer.train()
        # Release the model and free GPU memory before the next learning-rate run.
        del trainer
        del model
        gc.collect()
        torch.cuda.empty_cache()

    # Store the tokenizer alongside every saved checkpoint so each can be loaded standalone.
    for model_path in glob(f"{output_dir}/*/*"):
        tokenizer.save_pretrained(model_path)
|
|
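# Example-averaged F1 over label sets: per example, F1 is computed between the reference
# label set and the predicted label set, then averaged over the dataset.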
def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]) -> float:
    scores = []
    for g, p in zip(references, predictions):
        tp = len(set(g).intersection(set(p)))
        fp = len([_p for _p in p if _p not in g])
        fn = len([_g for _g in g if _g not in p])
        f1 = 0 if tp == 0 else 2 * tp / (2 * tp + fp + fn)
        scores.append(f1)
    return mean(scores)
|
|
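# Normalize labels in any dataset language to the English class names via _CLASS_MAP;
# labels that cannot be mapped are dropped.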
def unify_label(label: Set[str]) -> Set[str]:
    new_label = []
    for label_tmp in label:
        # Accept the English class name itself or any of its translations.
        label_en = [k for k, v in _CLASS_MAP.items() if label_tmp == k or label_tmp in v]
        if label_en:
            new_label.append(label_en[0])
    return set(new_label)
|
|
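# Generate predictions with a checkpoint and score them, caching both the raw predictions
# and the metric on disk so repeated runs reuse earlier results.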
def get_metric(
        prediction_file: str,
        metric_file: str,
        model_path: str,
        data: List[str],
        label: List[str]) -> float:
    if os.path.exists(metric_file):
        with open(metric_file) as f:
            eval_metric = json.load(f)
        return eval_metric['f1']
    if not os.path.exists(prediction_file):
        pipe = pipeline(
            'text2text-generation',
            model=model_path,
            device='cuda:0' if torch.cuda.is_available() else 'cpu',
        )
        output = pipe(data, batch_size=_BATCH)
        output = [i['generated_text'] for i in output]
        with open(prediction_file, 'w') as f:
            f.write('\n'.join(output))
    with open(prediction_file) as f:
        output = [unify_label(set(i.split(','))) for i in f.read().split('\n')]
    label = [unify_label(set(i.split(','))) for i in label]
    eval_metric = {'f1': get_f1_score(label, output)}
    logging.info(json.dumps(eval_metric, indent=4))
    with open(metric_file, 'w') as f:
        json.dump(eval_metric, f)
    return eval_metric['f1']
|
|
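# Score every saved checkpoint on the validation split and copy the best one to
# {output_dir}/best_model for the test and upload steps.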
def validate(
        model_name: str,
        dataset: str,
        dataset_name: str,
        dataset_column_text: str,
        use_auth_token: bool,
        dataset_column_label: str):
    logging.info(f'[VALIDATE]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
    dataset_instance = load_dataset(dataset, dataset_name, split='validation', use_auth_token=use_auth_token)
    label = [i[dataset_column_label] for i in dataset_instance]
    data = [i[dataset_column_text] for i in dataset_instance]
    model_score = []
    for model_path in glob(f"{output_dir}/*/*/pytorch_model.bin"):
        model_path = os.path.dirname(model_path)
        prediction_file = f"{model_path}/prediction.validate.{os.path.basename(dataset)}.{dataset_name}.txt"
        metric_file = f"{model_path}/metric.validate.{os.path.basename(dataset)}.{dataset_name}.json"
        metric = get_metric(
            prediction_file=prediction_file,
            metric_file=metric_file,
            model_path=model_path,
            label=label,
            data=data
        )
        model_score.append([model_path, metric])
    model_score = sorted(model_score, key=lambda x: x[1])
    logging.info('Validation Result')
    for k, v in model_score:
        logging.info(f'{k}: {v}')
    best_model = model_score[-1][0]
    best_model_path = f'{output_dir}/best_model'
    copy_tree(best_model, best_model_path)
|
|
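# Evaluate the selected best checkpoint on the test split; if no fine-tuned checkpoint
# exists locally, fall back to a directory named after the base model.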
def test(
        model_name: str,
        dataset: str,
        dataset_name: str,
        dataset_column_text: str,
        use_auth_token: bool,
        dataset_column_label: str):
    logging.info(f'[TEST]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
    dataset_instance = load_dataset(dataset, dataset_name, split='test', use_auth_token=use_auth_token)
    label = [i[dataset_column_label] for i in dataset_instance]
    data = [i[dataset_column_text] for i in dataset_instance]
    model_path = f'{output_dir}/best_model'
    if not os.path.exists(model_path):
        model_path = os.path.basename(model_name)

    prediction_file = f"{model_path}/prediction.{os.path.basename(dataset)}.{dataset_name}.txt"
    metric_file = f"{model_path}/metric.{os.path.basename(dataset)}.{dataset_name}.json"
    metric = get_metric(
        prediction_file=prediction_file,
        metric_file=metric_file,
        model_path=model_path,
        label=label,
        data=data
    )
    logging.info(f'Test Result: {metric}')
|
|
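# Push the best model and its tokenizer to the Hugging Face Hub under
# {model_organization}/{model_alias}, then write a model card whose widget examples are
# drawn from the validation split.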
def upload(
        model_name: str,
        dataset: str,
        dataset_name: str,
        dataset_column_text: str,
        use_auth_token: bool,
        model_alias: str,
        model_organization: str):
    assert model_alias is not None and model_organization is not None,\
        'both model_alias and model_organization must be specified to upload'
    logging.info('uploading to huggingface')
    output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
    args = {'use_auth_token': use_auth_token, 'organization': model_organization}
    model_path = f'{output_dir}/best_model'
    if not os.path.exists(model_path):
        model_path = os.path.basename(model_name)
    model = load_model(model_name=model_path)
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
    model.push_to_hub(model_alias, **args)
    tokenizer.push_to_hub(model_alias, **args)
    repo = Repository(model_alias, f'{model_organization}/{model_alias}')
    # Copy prediction and metric files from the local checkpoint into the cloned repo.
    for i in glob(f'{model_path}/*'):
        if not os.path.exists(f'{model_alias}/{os.path.basename(i)}'):
            copyfile(i, f'{model_alias}/{os.path.basename(i)}')
    # Pick a few validation examples without quote characters as widget examples.
    dataset_instance = load_dataset(dataset, dataset_name, split='validation', use_auth_token=use_auth_token)
    sample = [i[dataset_column_text] for i in dataset_instance]
    sample = [i for i in sample if "'" not in i and '"' not in i][:3]
    widget = '\n'.join([f"- text: '{t}'\n  example_title: example {_n + 1}" for _n, t in enumerate(sample)])
    with open(f'{model_alias}/README.md', 'w') as f:
        f.write(f"""---
widget:
{widget}
---

# {model_organization}/{model_alias}

This is [{model_name}](https://huggingface.co/{model_name}) fine-tuned on [{dataset} ({dataset_name})](https://huggingface.co/datasets/{dataset}).

### Usage

```python
from transformers import pipeline

pipe = pipeline('text2text-generation', model='{model_organization}/{model_alias}')
output = pipe('{sample[0]}')
```
""")
    repo.push_to_hub()
|
|
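# CLI entry point: run train -> validate -> test -> upload, each step skippable via flags.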
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
    parser = argparse.ArgumentParser(description='Seq2Seq LM Fine-tuning on topic classification.')
    parser.add_argument('-m', '--model-name', default='google/mt5-small', type=str)
    parser.add_argument('--low-cpu-mem-usage', action='store_true')
    parser.add_argument('-d', '--dataset', default='cardiffnlp/tweet_topic_multilingual', type=str)
    parser.add_argument('--dataset-name', default='ja', type=str)
    parser.add_argument('--dataset-column-label', default='label_name_flatten', type=str)
    parser.add_argument('--dataset-column-text', default='text', type=str)
    parser.add_argument('--random-seed', default=42, type=int)
    parser.add_argument('--use-auth-token', action='store_true')
    parser.add_argument('--model-alias', default=None, type=str)
    parser.add_argument('--model-organization', default=None, type=str)
    parser.add_argument('--skip-train', action='store_true')
    parser.add_argument('--skip-validate', action='store_true')
    parser.add_argument('--skip-test', action='store_true')
    parser.add_argument('--skip-upload', action='store_true')
    opt = parser.parse_args()

    if not opt.skip_train:
        train(
            model_name=opt.model_name,
            model_low_cpu_mem_usage=opt.low_cpu_mem_usage,
            dataset=opt.dataset,
            dataset_name=opt.dataset_name,
            dataset_column_label=opt.dataset_column_label,
            dataset_column_text=opt.dataset_column_text,
            random_seed=opt.random_seed,
            use_auth_token=opt.use_auth_token,
        )
    if not opt.skip_validate:
        validate(
            model_name=opt.model_name,
            dataset=opt.dataset,
            dataset_name=opt.dataset_name,
            dataset_column_label=opt.dataset_column_label,
            dataset_column_text=opt.dataset_column_text,
            use_auth_token=opt.use_auth_token
        )
    if not opt.skip_test:
        test(
            model_name=opt.model_name,
            dataset=opt.dataset,
            dataset_name=opt.dataset_name,
            dataset_column_label=opt.dataset_column_label,
            dataset_column_text=opt.dataset_column_text,
            use_auth_token=opt.use_auth_token
        )
    if not opt.skip_upload:
        upload(
            model_name=opt.model_name,
            dataset=opt.dataset,
            dataset_name=opt.dataset_name,
            dataset_column_text=opt.dataset_column_text,
            use_auth_token=opt.use_auth_token,
            model_alias=opt.model_alias,
            model_organization=opt.model_organization
        )