# nkjp-pos/creation_script.py
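"""Create the nkjp-pos train/test JSONL files.

Walks the NKJP million-word subcorpus, parses each document's
ann_morphosyntax.xml TEI annotation into token/POS-tag pairs, and writes the
sentences to data/train.jsonl and data/test.jsonl according to the
document-level split in data/split/.
"""
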
import json
import os
from collections import defaultdict
from pathlib import Path
from typing import List, Union, Tuple, Set, Any
from bs4 import BeautifulSoup, Tag
from tqdm import tqdm
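# Root of the 1M-word NKJP subcorpus ("PodkorpusMilionowy"): one subdirectory
# per document, each containing an ann_morphosyntax.xml file.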
NKJP_PATH = "NKJP-PodkorpusMilionowy-1.2"

def get_split() -> Tuple[Set[str], Set[str]]:
    """Load the document-level train/test split (one directory name per line)."""
    with open("data/split/train.txt", "r", encoding="utf-8") as f:
        train = [line.strip() for line in f]
    with open("data/split/test.txt", "r", encoding="utf-8") as f:
        test = [line.strip() for line in f]
    train_set = set(train)
    test_set = set(test)
    # The split files must not contain duplicate document names.
    assert len(train_set) == len(train)
    assert len(test_set) == len(test)
    return train_set, test_set

def parse_sentence(sentence_tag: Tag) -> dict[str, Any]:
    """Extract tokens and their disambiguated POS tags from one <s> element."""
    sentence = defaultdict(list)
    for seg in sentence_tag.find_all("seg"):  # one <seg> per token
        # Orthographic form of the token (exactly one <f name="orth"> expected).
        [f_orth] = seg.find_all("f", attrs={"name": "orth"})
        sentence["tokens"].append(f_orth.getText().strip())
        # Disambiguated interpretation ("lemma:tag"); the leading segment of
        # the tag is the grammatical class, i.e. the part of speech.
        [f_disamb] = seg.find_all("f", attrs={"name": "disamb"})
        sentence["pos_tags"].append(f_disamb.getText().strip().split(":")[1])
    assert len(sentence["tokens"]) == len(sentence["pos_tags"])
    return dict(sentence)
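
# Illustrative shape of a parse_sentence() result (values invented for
# demonstration; the tags come from the NKJP tagset):
# {"tokens": ["Ala", "ma", "kota"], "pos_tags": ["subst", "fin", "subst"]}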

def parse_tei_file(path: Path) -> List[dict[str, Union[List[str], str]]]:
    """Parse one ann_morphosyntax.xml file into a list of sentence examples."""
    with open(path, "r", encoding="utf-8") as tei:
        soup = BeautifulSoup(tei, "lxml")
    result = []
    for p in soup.find_all("p"):  # paragraph
        for s in p.find_all("s"):  # sentence
            example = parse_sentence(s)
            # Unique id: document directory name + sentence xml:id.
            example["id"] = f"{path.parent.name}_{s['xml:id']}"
            result.append(example)
    return result

if __name__ == "__main__":
    train_names, test_names = get_split()
    train, test = [], []
    # Each subdirectory of the corpus holds one document; route its sentences
    # to whichever split the document was assigned to.
    for entry in tqdm(list(os.scandir(NKJP_PATH))):
        if entry.is_dir():
            file_data = parse_tei_file(Path(entry.path) / "ann_morphosyntax.xml")
            if entry.name in train_names:
                train += file_data
            elif entry.name in test_names:
                test += file_data
            else:
                raise ValueError(f"Couldn't find document in splits: {entry.name}")
    # One JSON object per line; keep Polish characters unescaped.
    with open("data/train.jsonl", "w", encoding="utf-8") as f:
        for item in train:
            f.write(json.dumps(item, ensure_ascii=False) + "\n")
    with open("data/test.jsonl", "w", encoding="utf-8") as f:
        for item in test:
            f.write(json.dumps(item, ensure_ascii=False) + "\n")