# Merge the DaNE+ annotations into the DaNE train/dev/test splits and write the
# updated DocBins back to disk.
import spacy
from spacy.tokens import Doc, DocBin
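# Paths to the original DaNE train/dev/test splits (stored as spaCy DocBin files)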
train_dane = "/Users/au561649/Github/DaCy/training/main/corpus/dane/train.spacy"
dev_dane = "/Users/au561649/Github/DaCy/training/main/corpus/dane/dev.spacy"
test_dane = "/Users/au561649/Github/DaCy/training/main/corpus/dane/test.spacy"
nlp = spacy.blank("da")
# train, dev, test = dane()
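# Load each split into a list of Docs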
train_docs = list(DocBin().from_disk(train_dane).get_docs(nlp.vocab))
dev_docs = list(DocBin().from_disk(dev_dane).get_docs(nlp.vocab))
test_docs = list(DocBin().from_disk(test_dane).get_docs(nlp.vocab))
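# Record which split each Doc belongs to via a custom extension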
Doc.set_extension("split", default=None)
for split, name in zip([train_docs, dev_docs, test_docs], ["train", "dev", "test"]):
    for doc in split:
        doc._.split = name
# text2doc = {}
# n_duplicates = 0  # all look like non-actual duplicates (e.g. "stk. 2")
# for i, doc in enumerate(test_docs + train_docs + dev_docs):
#     if doc.text in text2doc:
#         print(f"Duplicate found: {doc.text}")
#         print("split:", doc._.split)
#         n_duplicates += 1
#     text2doc[doc.text] = doc
# load daneplus
path_to_data = "/Users/au561649/Github/DaCy/training/dane_plus/train.spacy"
train_data = DocBin().from_disk(path_to_data)
daneplus_docs = list(train_data.get_docs(nlp.vocab))
text2doc = {}
# No duplicates: Prodigy removed them, which is problematic when reconstructing the
# documents, so we re-annotate the DaNE documents instead.
n_duplicates = 0
for doc in daneplus_docs:
    if doc.text in text2doc:
        print(f"Duplicate found: {doc.text}")
        n_duplicates += 1
    text2doc[doc.text] = doc
# Add the DaNE+ annotations to the DaNE documents
docs_to_fix = []
for doc in train_docs + dev_docs + test_docs:
    if doc.text in text2doc:
        _ents_to_add = text2doc[doc.text].ents
        ents_to_add = []
        for ent in _ents_to_add:
            char_span = doc.char_span(ent.start_char, ent.end_char, label=ent.label_)
            if char_span is None:
                # Tokenization differs between the corpora; collect for manual fixing below
                print(f"Entity could not be added: {ent.text}")
                docs_to_fix.append((doc, ent))
                continue
            ents_to_add.append(char_span)
        doc.ents = ents_to_add  # type: ignore
# Manual fixes for the six entities that could not be aligned above (due to differences
# in tokenization). The first document uses its second-to-last token as the entity span;
# the remaining five use the two tokens preceding their final token.
token_slices = [slice(-2, -1)] + [slice(-3, -1)] * 5
for (doc, ent), token_slice in zip(docs_to_fix, token_slices):
    ents = list(doc.ents)
    _ent = doc[token_slice]
    new_ent = doc.char_span(_ent.start_char, _ent.end_char, label=ent.label_)
    ents.append(new_ent)
    doc.ents = ents
    print("added", new_ent, "to", doc.text)
# Save the new documents
new_train = DocBin(docs=train_docs)
new_dev = DocBin(docs=dev_docs)
new_test = DocBin(docs=test_docs)
new_train.to_disk("train.spacy")
new_dev.to_disk("dev.spacy")
new_test.to_disk("test.spacy")
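# Optional sanity check (not part of the original pipeline; a minimal sketch assuming the
# files above were written to the current working directory): reload the saved DocBins and
# report how many documents and entities each split contains.
for name in ["train", "dev", "test"]:
    docs = list(DocBin().from_disk(f"{name}.spacy").get_docs(nlp.vocab))
    n_ents = sum(len(doc.ents) for doc in docs)
    print(f"{name}: {len(docs)} docs, {n_ents} entities")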