from dataclasses import dataclass, field |
|
import json |
|
import math |
|
import jsonlines |
|
import pathlib |
|
from multiprocessing import Pool |
|
from typing import Dict, Optional, Sequence |
|
|
|
import numpy as np |
|
import torch |
|
from torch.utils.data import Dataset |
|
import transformers |
|
from transformers import Trainer |
|
from transformers.trainer_pt_utils import LabelSmoother |
|
|
|
from fastchat.conversation import SeparatorStyle |
|
from fastchat.model.model_adapter import get_conversation_template |
|
|
|
IGNORE_TOKEN_ID = LabelSmoother.ignore_index  # -100; labels with this value are excluded from the loss
|
|
|
|
|
@dataclass |
|
class ModelArguments: |
|
model_name_or_path: Optional[str] = field(default="facebook/opt-125m") |
|
|
|
|
|
@dataclass |
|
class DataArguments: |
|
data_path: str = field( |
|
default=None, metadata={"help": "Path to the training data."} |
|
) |
|
lazy_preprocess: bool = False |
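    # Expected record format in data_path (inferred from SupervisedDataset and
    # apply_prompt_template below), one record per conversation:
    #   {
    #     "system": "optional system prompt",
    #     "conversations": [
    #       {"from": "human", "value": "..."},
    #       {"from": "gpt", "value": "..."}
    #     ]
    #   }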
|
|
|
|
|
@dataclass |
|
class TrainingArguments(transformers.TrainingArguments): |
|
cache_dir: Optional[str] = field(default=None) |
|
optim: str = field(default="adamw_torch") |
|
model_max_length: int = field( |
|
default=512, |
|
metadata={ |
|
"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)." |
|
}, |
|
) |
|
|
|
|
|
local_rank = None  # set from TrainingArguments in train(); used by rank0_print
|
|
|
|
|
def rank0_print(*args): |
|
if local_rank == 0: |
|
print(*args) |
|
|
|
|
|
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
    """Collect the state dict and dump it to disk."""
|
state_dict = trainer.model.state_dict() |
|
if trainer.args.should_save: |
|
cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()} |
|
del state_dict |
|
trainer._save(output_dir, state_dict=cpu_state_dict) |
|
|
|
|
|
def apply_prompt_template(sources, systems=None): |
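    """Render each conversation with the Vicuna chat template; return the prompt strings and the template object."""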
|
conv = get_conversation_template("vicuna") |
|
roles = {"human": conv.roles[0], "gpt": conv.roles[1]} |
|
conversations = [] |
|
for i, source in enumerate(sources): |
|
if roles[source[0]["from"]] != conv.roles[0]: |
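            # Skip the leading message when it does not come from the human role.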
|
source = source[1:] |
|
|
|
conv.messages = [] |
|
for j, sentence in enumerate(source): |
|
role = roles[sentence["from"]] |
|
            assert role == conv.roles[j % 2], f"unexpected role order in conversation {i}"
|
conv.append_message(role, sentence["value"]) |
|
if systems and systems[i]: |
|
conv.set_system_message(systems[i]) |
|
prompt = conv.get_prompt() |
|
conversations.append(prompt) |
|
return conversations, conv |
|
|
|
|
|
def tokenize_conversations(conversations, tokenizer): |
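    """Tokenize the prompts into fixed-length, right-padded tensors; targets start as a copy of input_ids."""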
|
input_ids = tokenizer( |
|
conversations, |
|
return_tensors="pt", |
|
padding="max_length", |
|
max_length=tokenizer.model_max_length, |
|
truncation=True, |
|
).input_ids |
|
targets = input_ids.clone() |
|
return input_ids, targets |
|
|
|
|
|
def mask_targets(conversations, targets, tokenizer, conv): |
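    """Mask everything in targets except the assistant replies with IGNORE_TOKEN_ID so loss is computed only on them."""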
|
sep = conv.sep + conv.roles[1] + ": " |
|
for conversation, target in zip(conversations, targets): |
|
total_len = int(target.ne(tokenizer.pad_token_id).sum()) |
|
|
|
turns = conversation.split(conv.sep2) |
|
cur_len = 0 |
|
target[:cur_len] = IGNORE_TOKEN_ID |
|
for i, turn in enumerate(turns): |
|
if turn == "": |
|
break |
|
turn_len = len(tokenizer(turn + conv.sep2).input_ids) |
|
|
|
parts = turn.split(sep) |
|
if len(parts) != 2: |
|
break |
|
parts[0] += sep |
|
instruction_len = len(tokenizer(parts[0]).input_ids) - 1 |
|
|
|
target[cur_len : cur_len + instruction_len] = IGNORE_TOKEN_ID |
|
cur_len += turn_len |
|
|
|
target[cur_len:] = IGNORE_TOKEN_ID |
|
|
|
        if False:  # Debug helper: decode the masked target to inspect the masking.
|
z = target.clone() |
|
z = torch.where(z == IGNORE_TOKEN_ID, tokenizer.unk_token_id, z) |
|
rank0_print(tokenizer.decode(z)) |
|
|
|
if cur_len < tokenizer.model_max_length: |
|
if cur_len != total_len: |
|
target[:] = IGNORE_TOKEN_ID |
|
rank0_print( |
|
f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}." |
|
f" (ignored)" |
|
) |
|
return targets |
|
|
|
|
|
def preprocess(sources, tokenizer: transformers.PreTrainedTokenizer, **kwargs) -> Dict: |
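    """Build input_ids, labels, and attention_mask for a batch of conversation sources."""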
|
    systems = kwargs.get("systems", None)
|
|
|
|
|
    # Datasets of up to 1000 conversations are preprocessed directly in the main process.
    if len(sources) <= 1000:
|
conversations, conv = apply_prompt_template(sources, systems) |
|
input_ids, targets = tokenize_conversations(conversations, tokenizer) |
|
targets = mask_targets(conversations, targets, tokenizer, conv) |
|
    else:  # Larger datasets: run each preprocessing stage in a pool worker process.
|
with Pool() as p: |
|
conversations, conv = p.apply_async( |
|
apply_prompt_template, (sources, systems) |
|
).get() |
|
input_ids, targets = p.apply_async( |
|
tokenize_conversations, (conversations, tokenizer) |
|
).get() |
|
targets = p.apply_async( |
|
mask_targets, (conversations, targets, tokenizer, conv) |
|
).get() |
|
p.close() |
|
p.join() |
|
|
|
return dict( |
|
input_ids=input_ids, |
|
labels=targets, |
|
attention_mask=input_ids.ne(tokenizer.pad_token_id), |
|
) |
|
|
|
|
|
class SupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning that preprocesses all examples up front."""
|
|
|
def __init__(self, raw_data, tokenizer: transformers.PreTrainedTokenizer): |
|
super(SupervisedDataset, self).__init__() |
|
|
|
rank0_print("Formatting inputs...") |
|
systems = [example.get("system", "") for example in raw_data] |
|
sources = [example["conversations"] for example in raw_data] |
|
|
|
data_dict = preprocess(sources, tokenizer, systems=systems) |
|
|
|
self.input_ids = data_dict["input_ids"] |
|
self.labels = data_dict["labels"] |
|
self.attention_mask = data_dict["attention_mask"] |
|
|
|
def __len__(self): |
|
return len(self.input_ids) |
|
|
|
def __getitem__(self, i) -> Dict[str, torch.Tensor]: |
|
return dict( |
|
input_ids=self.input_ids[i], |
|
labels=self.labels[i], |
|
attention_mask=self.attention_mask[i], |
|
) |
|
|
|
|
|
class LazySupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning that tokenizes examples lazily on first access."""
|
|
|
def __init__(self, raw_data, tokenizer: transformers.PreTrainedTokenizer): |
|
super(LazySupervisedDataset, self).__init__() |
|
self.tokenizer = tokenizer |
|
|
|
        rank0_print("Formatting inputs... skipped in lazy mode")
|
self.raw_data = raw_data |
|
self.cached_data_dict = {} |
|
|
|
def __len__(self): |
|
return len(self.raw_data) |
|
|
|
def __getitem__(self, i) -> Dict[str, torch.Tensor]: |
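        # Tokenize on first access and cache the result for later lookups.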
|
if i in self.cached_data_dict: |
|
return self.cached_data_dict[i] |
|
|
|
ret = preprocess( |
|
[self.raw_data[i]["conversations"]], |
|
self.tokenizer, |
|
systems=[self.raw_data[i].get("system", "")], |
|
) |
|
ret = dict( |
|
input_ids=ret["input_ids"][0], |
|
labels=ret["labels"][0], |
|
attention_mask=ret["attention_mask"][0], |
|
) |
|
self.cached_data_dict[i] = ret |
|
|
|
return ret |
|
|
|
|
|
def make_supervised_data_module( |
|
tokenizer: transformers.PreTrainedTokenizer, data_args, train_ratio=0.98 |
|
) -> Dict: |
|
"""Make dataset and collator for supervised fine-tuning.""" |
|
train_ratio = min(train_ratio, 1.0) |
|
dataset_cls = ( |
|
LazySupervisedDataset if data_args.lazy_preprocess else SupervisedDataset |
|
) |
|
rank0_print("Loading data...") |
|
data_path = data_args.data_path |
|
    if data_path.endswith(".json"):
        with open(data_path, "r") as f:
            raw_data = json.load(f)
    elif data_path.endswith(".jsonl"):
        with jsonlines.open(data_path, mode="r") as reader:
            raw_data = [item for item in reader]
    else:
        raise ValueError(f"Unsupported data file format: {data_path} (expected .json or .jsonl)")
|
|
|
|
|
    # Fixed seed so the train/eval split is deterministic across runs.
    np.random.seed(0)
|
perm = np.random.permutation(len(raw_data)) |
|
split = int(len(perm) * train_ratio) |
|
train_indices = perm[:split] |
|
if train_ratio < 1: |
|
eval_indices = perm[split:] |
|
else: |
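        # train_ratio == 1 leaves no held-out data; reuse the last 5% of the shuffled data so the
        # Trainer still gets a non-empty eval set (these examples overlap the training split).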
|
|
|
eval_indices = perm[-int(len(perm) * 0.05) :] |
|
train_raw_data = [raw_data[i] for i in train_indices] |
|
eval_raw_data = [raw_data[i] for i in eval_indices] |
|
rank0_print(f"#train {len(train_raw_data)}, #eval {len(eval_raw_data)}") |
|
|
|
train_dataset = dataset_cls(train_raw_data, tokenizer=tokenizer) |
|
eval_dataset = dataset_cls(eval_raw_data, tokenizer=tokenizer) |
|
return dict(train_dataset=train_dataset, eval_dataset=eval_dataset) |
|
|
|
|
|
def train(): |
|
global local_rank |
|
|
|
parser = transformers.HfArgumentParser( |
|
(ModelArguments, DataArguments, TrainingArguments) |
|
) |
|
model_args, data_args, training_args = parser.parse_args_into_dataclasses() |
|
local_rank = training_args.local_rank |
|
config = transformers.AutoConfig.from_pretrained( |
|
model_args.model_name_or_path, |
|
trust_remote_code=True, |
|
cache_dir=training_args.cache_dir, |
|
) |
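    # Apply linear RoPE position scaling when the requested max length exceeds the model's native context window.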
|
|
|
orig_ctx_len = getattr(config, "max_position_embeddings", None) |
|
if orig_ctx_len and training_args.model_max_length > orig_ctx_len: |
|
scaling_factor = float(math.ceil(training_args.model_max_length / orig_ctx_len)) |
|
config.rope_scaling = {"type": "linear", "factor": scaling_factor} |
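    # The KV cache is only useful for generation; disable it for training.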
|
config.use_cache = False |
|
model = transformers.AutoModelForCausalLM.from_pretrained( |
|
model_args.model_name_or_path, |
|
config=config, |
|
trust_remote_code=True, |
|
cache_dir=training_args.cache_dir, |
|
) |
|
|
|
model.tie_weights() |
|
|
|
tokenizer = transformers.AutoTokenizer.from_pretrained( |
|
model_args.model_name_or_path, |
|
config=config, |
|
trust_remote_code=True, |
|
cache_dir=training_args.cache_dir, |
|
model_max_length=training_args.model_max_length, |
|
padding_side="right", |
|
use_fast=False, |
|
) |
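    # Pad with the unk token; pad positions are dropped from the attention mask and loss during preprocessing.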
|
|
|
tokenizer.pad_token = tokenizer.unk_token |
|
    rank0_print(f"tokenizer length: {len(tokenizer)}")
|
model.resize_token_embeddings(len(tokenizer)) |
|
|
|
data_module = make_supervised_data_module( |
|
tokenizer=tokenizer, train_ratio=0.98, data_args=data_args |
|
) |
|
trainer = Trainer( |
|
model=model, tokenizer=tokenizer, args=training_args, **data_module |
|
) |
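    # Resume from the latest checkpoint if the output directory already contains one.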
|
|
|
if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")): |
|
trainer.train(resume_from_checkpoint=True) |
|
else: |
|
trainer.train() |
|
trainer.save_state() |
|
safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir) |
|
|
|
|
|
if __name__ == "__main__": |
|
train() |
|
|