#! /usr/bin/python3
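# Fine-tune the HPLT Belarusian encoder (hplt_bert_base_be) for UPOS
# tagging on the UD_Belarusian-HSE treebank, saving the result as
# KoichiYasuoka/ltgbert-base-belarusian-upos.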
src="hplt_bert_base_be"
tgt="KoichiYasuoka/ltgbert-base-belarusian-upos"
url="https://github.com/UniversalDependencies/UD_Belarusian-HSE"
import os
from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DataCollatorForTokenClassification,TrainingArguments,Trainer
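# Fetch the pretrained encoder tarball and the UD treebank (skipping either
# if already present), then copy the treebank's train/dev/test splits to
# train.conllu, dev.conllu and test.conllu in the working directory.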
os.system(f"test -d {src} || ( curl -L https://data.hplt-project.org/one/models/encoder/{src}.tar.gz | tar xvzf - )")
d=os.path.basename(url)
os.system(f"test -d {d} || git clone --depth=1 {url}")
os.system("for F in train dev test ; do cp "+d+"/*-$F.conllu $F.conllu ; done")
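# Dataset over one CoNLL-U file: a first pass indexes sentence byte offsets
# and builds the label set; __getitem__ re-reads a single sentence, tokenizes
# it, and assigns B-/I- prefixed labels to words split into several subtokens.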
class UPOSFileDataset(object):
  def __init__(self,conllu,tokenizer):
    self.conllu=open(conllu,"r",encoding="utf-8")
    self.tokenizer=tokenizer
    self.seeks=[0]
    label=set(["SYM"])
    # First pass: record the offset of every sentence boundary and collect
    # the label set (UPOS alone, or UPOS|FEATS when features are present).
    s=self.conllu.readline()
    while s!="":
      if s=="\n":
        self.seeks.append(self.conllu.tell())
      else:
        w=s.split("\t")
        if len(w)==10:
          if w[0].isdecimal():
            label.add(w[3] if w[5]=="_" else w[3]+"|"+w[5])
      s=self.conllu.readline()
    # Every label gets three ids: plain, B- (first subtoken), I- (the rest).
    lid={}
    for i,l in enumerate(sorted(label)):
      lid[l],lid["B-"+l],lid["I-"+l]=i*3,i*3+1,i*3+2
    self.label2id=lid
  def __call__(*args):
    # No explicit self: trainDS(devDS,testDS) passes all three datasets in
    # args, and their label2id maps are merged into one shared map.
    lid={l:i for i,l in enumerate(sorted(set(sum([list(t.label2id) for t in args],[]))))}
    for t in args:
      t.label2id=lid
    return lid
  def __del__(self):
    self.conllu.close()
  __len__=lambda self:len(self.seeks)-1
  def __getitem__(self,i):
    self.conllu.seek(self.seeks[i])
    form,upos,space=[],[],[True]
    while self.conllu.tell()<self.seeks[i+1]:
      w=self.conllu.readline().split("\t")
      if len(w)==10 and w[0].isdecimal():
        form.append(w[1])
        upos.append(w[3] if w[5]=="_" else w[3]+"|"+w[5])
        space.append(w[9].find("SpaceAfter=No")<0)
    v=self.tokenizer(form,add_special_tokens=False)
    i,u=[],[]
    for j,(x,y) in enumerate(zip(v["input_ids"],upos)):
      if x!=[]:
        if space[j]==False:
          # "âĸģ" is the byte-level rendering of the "▁" word-boundary marker;
          # strip it when the word is not preceded by a space (SpaceAfter=No).
          k=self.tokenizer.convert_ids_to_tokens(x[0])
          if k.startswith("âĸģ"):
            x[0]=self.tokenizer.convert_tokens_to_ids(k[3:])
        i+=x
        u+=[y] if len(x)==1 else ["B-"+y]+["I-"+y]*(len(x)-1)
    if len(i)<self.tokenizer.model_max_length-3:
      ids=[self.tokenizer.cls_token_id]+i+[self.tokenizer.sep_token_id]
      upos=["SYM"]+u+["SYM"]
    else:
      # Overlong sentences are truncated without adding special tokens.
      ids=i[0:self.tokenizer.model_max_length-2]
      upos=u[0:self.tokenizer.model_max_length-2]
    return {"input_ids":ids,"labels":[self.label2id[t] for t in upos]}
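# Load the source tokenizer; model_max_length=512 bounds the per-sentence
# truncation done in __getitem__ above.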
tkz=AutoTokenizer.from_pretrained(src,model_max_length=512)
trainDS=UPOSFileDataset("train.conllu",tkz)
devDS=UPOSFileDataset("dev.conllu",tkz)
testDS=UPOSFileDataset("test.conllu",tkz)
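# Unify the three datasets' label maps so the classifier head sees one
# consistent label2id (see UPOSFileDataset.__call__).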
lid=trainDS(devDS,testDS)
cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True,trust_remote_code=True)
arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=16,output_dir=tgt,overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1,save_safetensors=False)
trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=AutoModelForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True,trust_remote_code=True),train_dataset=trainDS)
trn.train()
trn.save_model(tgt)
tkz.save_pretrained(tgt)
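# Optional smoke test (a minimal sketch, not part of the original training
# recipe): reload the saved tagger and run it on one arbitrary sentence.
# Loading the custom LTG-BERT architecture from the output directory assumes
# its remote code was saved alongside the weights.
from transformers import TokenClassificationPipeline
mdl=AutoModelForTokenClassification.from_pretrained(tgt,trust_remote_code=True)
nlp=TokenClassificationPipeline(model=mdl,tokenizer=tkz)
print(nlp("Гэта добры дзень."))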