# Fine-tune lightblue/karasu-1.1B into a UPOS (part-of-speech) tagger
src="lightblue/karasu-1.1B"
tgt="KoichiYasuoka/karasu-1.1B-upos"
import os
from transformers import AutoTokenizer,AutoConfig,LlamaForTokenClassification,DataCollatorForTokenClassification,TrainingArguments,Trainer
from tokenizers.normalizers import Replace

# Fetch the training treebank once
os.system("test -f ja_gsd_modern.conllu || curl -LO https://github.com/KoichiYasuoka/SuPar-UniDic/raw/main/suparunidic/suparmodels/ja_gsd_modern.conllu")
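
# ja_gsd_modern.conllu is in CoNLL-U format: each token is a line of 10
# tab-separated fields (ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC)
# and sentences are separated by blank lines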
class UPOSFileDataset(object):
  def __init__(self,conllu,tokenizer):
    self.conllu=open(conllu,"r",encoding="utf-8")
    self.tokenizer=tokenizer
    # Record the byte offset of every sentence so __getitem__ can seek to it
    self.seeks=[0]
    label=set(["SYM"])
    s=self.conllu.readline()
    while s!="":
      if s=="\n":
        self.seeks.append(self.conllu.tell())
      else:
        w=s.split("\t")
        if len(w)==10 and w[0].isdecimal():
          # The label is UPOS alone, or UPOS|FEATS when FEATS is non-empty
          label.add(w[3] if w[5]=="_" else w[3]+"|"+w[5])
      s=self.conllu.readline()
    # Three ids per label: plain, B- (first subword) and I- (the rest)
    lid={}
    for i,l in enumerate(sorted(label)):
      lid[l],lid["B-"+l],lid["I-"+l]=i*3,i*3+1,i*3+2
    self.label2id=lid
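  # e.g. {"ADJ":0,"B-ADJ":1,"I-ADJ":2,"ADP":3,...} (the exact label set
  # depends on the treebank)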
  def __call__(*args):
    # Merge the label sets of several UPOSFileDataset instances (self arrives
    # as the first element of args) and share one label2id among them all
    lid={l:i for i,l in enumerate(sorted(set(sum([list(t.label2id) for t in args],[]))))}
    for t in args:
      t.label2id=lid
    return lid
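  # e.g. lid=trainDS(trainDS,devDS) would retag both splits with a shared
  # label2id (devDS is hypothetical; this script trains on a single split)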
  def __del__(self):
    self.conllu.close()
  # One sentence per recorded offset interval
  __len__=lambda self:len(self.seeks)-1
  def __getitem__(self,i):
    self.conllu.seek(self.seeks[i])
    form,upos,sp=[],[],False
    while self.conllu.tell()<self.seeks[i+1]:
      w=self.conllu.readline().split("\t")
      if len(w)==10:
        # Prepend a space unless the previous token had SpaceAfter=No
        form.append(" "+w[1] if sp else w[1])
        if w[0].isdecimal():
          upos.append(w[3] if w[5]=="_" else w[3]+"|"+w[5])
        sp=w[9].find("SpaceAfter=No")<0
    v=self.tokenizer(form,add_special_tokens=False)
    # Re-use i as the growing id sequence, starting from CLS / "SYM"
    i,u=[self.tokenizer.cls_token_id],["SYM"]
    for x,y in zip(v["input_ids"],upos):
      if x!=[]:
        i+=x
        # A single subword keeps the plain label; longer words get B-/I- tags
        u+=[y] if len(x)==1 else ["B-"+y]+["I-"+y]*(len(x)-1)
    # Truncate to the model's maximum input length
    if len(i)<self.tokenizer.model_max_length-3:
      ids,upos=i,u
    else:
      ids=i[0:self.tokenizer.model_max_length-2]
      upos=u[0:self.tokenizer.model_max_length-2]
    return {"input_ids":ids,"labels":[self.label2id[t] for t in upos]}

# Llama's tokenizer has no CLS/SEP/PAD of its own, so reuse <s> and </s>
tkz=AutoTokenizer.from_pretrained(src,cls_token="<s>",sep_token="<s>",pad_token="</s>",model_max_length=2048)
# Map plain spaces to U+2581 so the SentencePiece model keeps word boundaries
tkz.backend_tokenizer.normalizer=Replace(" ","\u2581")
trainDS=UPOSFileDataset("ja_gsd_modern.conllu",tkz)
lid=trainDS.label2id
cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()})
arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=32,output_dir=tgt,overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1,save_safetensors=False)
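# DataCollatorForTokenClassification pads input_ids with the pad token and
# labels with -100, which the loss ignores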
# Load the pretrained weights under a freshly initialized token-classification head
trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=LlamaForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True),train_dataset=trainDS)
trn.train()
trn.save_model(tgt)
tkz.save_pretrained(tgt)
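
# Quick smoke test of the saved tagger (a minimal sketch; the example
# sentence and its output are illustrative only)
from transformers import pipeline
nlp=pipeline("token-classification",model=tgt,aggregation_strategy="simple")
print(nlp("国境の長いトンネルを抜けると雪国であった。"))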