Tasks: Other
Modalities: Text
Sub-tasks: part-of-speech
Languages: Polish
Size: 10K - 100K
Tags: structure-prediction
License:
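The conversion script below reads the morphosyntactic annotation layer (ann_morphosyntax.xml) of each document in the NKJP million-word subcorpus, extracts the tokens and their disambiguated part-of-speech tags sentence by sentence, and writes the predefined train/test split to data/train.jsonl and data/test.jsonl.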
import json
import os
from collections import defaultdict
from pathlib import Path
from typing import Any, List, Set, Tuple, Union

from bs4 import BeautifulSoup, Tag
from tqdm import tqdm
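
# Root of the NKJP 1.2 million-word subcorpus: one subdirectory per document,
# each containing the TEI annotation layers.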
NKJP_PATH = "NKJP-PodkorpusMilionowy-1.2"

def get_split() -> Tuple[Set[str], Set[str]]:
    # Load the predefined document-level split: one NKJP document name per line.
    with open("data/split/train.txt", "r") as f:
        train = [line.strip() for line in f]
    with open("data/split/test.txt", "r") as f:
        test = [line.strip() for line in f]
    train_set = set(train)
    test_set = set(test)
    # Guard against duplicate document names within a split file.
    assert len(train_set) == len(train)
    assert len(test_set) == len(test)
    return train_set, test_set

def parse_sentence(sentence_tag: Tag) -> dict[str, Any]:
    sentence = defaultdict(list)
    for seg in sentence_tag.find_all("seg"):  # one <seg> element per token
        # Surface form of the token; unpacking asserts exactly one match.
        [f_orth] = seg.find_all("f", attrs={"name": "orth"})
        sentence["tokens"].append(f_orth.getText().strip())
        # Disambiguated interpretation, formatted "lemma:pos:...", so the
        # second colon-separated field is the part-of-speech tag.
        [f_disamb] = seg.find_all("f", attrs={"name": "disamb"})
        sentence["pos_tags"].append(f_disamb.getText().strip().split(":")[1])
    assert len(sentence["tokens"]) == len(sentence["pos_tags"])
    return dict(sentence)
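
# For reference, the dict returned by parse_sentence looks like this
# (illustrative values, not taken from the corpus):
#   {"tokens": ["Ala", "ma", "kota"], "pos_tags": ["subst", "fin", "subst"]}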

def parse_tei_file(path: Path) -> List[dict[str, Union[List[str], str]]]:
    with open(path, "r") as tei:
        soup = BeautifulSoup(tei, "lxml")
    result = []
    # Paragraphs (<p>) group sentences (<s>); each sentence becomes one example.
    for p in soup.find_all("p"):
        for s in p.find_all("s"):
            example = parse_sentence(s)
            # Prefix the sentence ID with the document directory name so that
            # IDs stay unique across the whole corpus.
            example["id"] = f"{path.parent.name}_{s['xml:id']}"
            result.append(example)
    return result

train_names, test_names = get_split()
train, test = [], []
# Each subdirectory of the corpus holds one document and its annotation layers.
for entry in tqdm(list(os.scandir(NKJP_PATH))):
    if entry.is_dir():
        file_data = parse_tei_file(Path(entry.path) / "ann_morphosyntax.xml")
        if entry.name in train_names:
            train += file_data
        elif entry.name in test_names:
            test += file_data
        else:
            raise ValueError(f"Couldn't find document in splits: {entry.name}")
with open("data/train.jsonl", "w") as f:
for item in train:
f.write(json.dumps(item, ensure_ascii=False) + "\n")
with open("data/test.jsonl", "w") as f:
for item in test:
f.write(json.dumps(item, ensure_ascii=False) + "\n")
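
To sanity-check the output, the generated splits can be loaded back, for example with the Hugging Face `datasets` JSON loader. A minimal sketch, assuming the data/train.jsonl and data/test.jsonl paths produced above:

from datasets import load_dataset

# Load both JSONL splits produced by the conversion script above.
ds = load_dataset(
    "json",
    data_files={"train": "data/train.jsonl", "test": "data/test.jsonl"},
)
# Each example carries parallel token and POS-tag lists plus a sentence ID.
print(ds["train"][0])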