import csv
from typing import List, Generator, Tuple, Dict
import datasets
from datasets import DownloadManager
from datasets.info import SupervisedKeysData

_DESCRIPTION = """AspectEmo 1.0 dataset: Multi-Domain Corpus of Consumer Reviews for Aspect-Based
Sentiment Analysis"""
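
# A hedged note on the label inventory below (my reading of the tag names, not
# taken verbatim from the corpus documentation): the tags follow a BIO scheme
# over aspect terms, where `a_plus` / `a_minus` / `a_zero` / `a_amb` appear to
# mark positive, negative, neutral and ambiguous sentiment, and the `_m` / `_s`
# suffixes seem to distinguish moderate from strong polarity. Colon-joined
# entries are kept exactly as they occur in the source TSV files.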

_CLASSES = ['O',
            'B-a_plus_m',
            'B-a_minus_m',
            'B-a_zero',
            'B-a_minus_s',
            'B-a_plus_s',
            'B-a_amb',
            'B-a_minus_m:B-a_minus_m',
            'B-a_minus_m:B-a_minus_m:B-a_minus_m',
            'B-a_plus_m:B-a_plus_m',
            'B-a_plus_m:B-a_plus_m:B-a_plus_m',
            'B-a_zero:B-a_zero:B-a_zero',
            'B-a_zero:B-a_zero',
            'I-a_plus_m',
            'B-a_zero:B-a_plus_m',
            'B-a_minus_m:B-a_zero',
            'B-a_minus_s:B-a_minus_s:B-a_minus_s',
            'B-a_amb:B-a_amb',
            'I-a_minus_m',
            'B-a_minus_s:B-a_minus_s',
            'B-a_plus_s:B-a_plus_s:B-a_plus_s',
            'B-a_plus_m:B-a_plus_m:B-a_plus_m:B-a_plus_m:B-a_plus_m:B-a_plus_m',
            'B-a_plus_m:B-a_amb',
            'B-a_minus_m:B-a_plus_m',
            'B-a_amb:B-a_amb:B-a_amb',
            'I-a_zero',
            'B-a_plus_s:B-a_plus_s',
            'B-a_plus_m:B-a_plus_s',
            'B-a_plus_m:B-a_zero',
            'B-a_zero:B-a_zero:B-a_zero:B-a_zero:B-a_zero:B-a_zero',
            'B-a_zero:B-a_minus_m',
            'B-a_amb:B-a_plus_s',
            'B-a_zero:B-a_minus_s']

_URLS = {
    "train": "https://huggingface.co/datasets/clarin-pl/aspectemo/resolve/main/data/train.tsv",
    "validation": "https://huggingface.co/datasets/clarin-pl/aspectemo/resolve/main/data/val.tsv",
    "test": "https://huggingface.co/datasets/clarin-pl/aspectemo/resolve/main/data/test.tsv",
}


class AspectEmo(datasets.GeneratorBasedBuilder):
    """Builder for the AspectEmo corpus: token-level aspect sentiment tags over consumer reviews."""

    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "orth": datasets.Sequence(datasets.Value("string")),
                    "ctag": datasets.Sequence(datasets.Value("string")),
                    "sentiment": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=_CLASSES,
                            num_classes=len(_CLASSES),
                        )
                    ),
                }
            ),
            supervised_keys=SupervisedKeysData(input="orth", output="sentiment"),
            homepage="https://clarin-pl.eu/dspace/handle/11321/849",
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
        urls_to_download = _URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(
        self, filepath: str
    ) -> Generator[Tuple[str, Dict[str, List[str]]], None, None]:
        with open(filepath, "r", encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            next(reader, None)  # skip header
            # Rows of one document share the same id in the first column;
            # documents are separated by blank lines.
            id_, orth, ctag, sentiment = set(), [], [], []
            for line in reader:
                if not line:
                    # Blank separator line: emit the accumulated document.
                    assert len(id_) == 1
                    yield id_.pop(), {"orth": orth, "ctag": ctag, "sentiment": sentiment}
                    id_, orth, ctag, sentiment = set(), [], [], []
                else:
                    id_.add(line[0])
                    orth.append(line[1])
                    ctag.append(line[2])
                    sentiment.append(line[3])
            # Emit the last document if the file does not end with a blank line.
            if orth:
                assert len(id_) == 1
                yield id_.pop(), {"orth": orth, "ctag": ctag, "sentiment": sentiment}
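

# Illustrative usage (not part of the loader). A minimal sketch, assuming the
# dataset is published on the Hub as "clarin-pl/aspectemo"; newer `datasets`
# releases may additionally require `trust_remote_code=True` for script-based
# builders like this one.
if __name__ == "__main__":
    dataset = datasets.load_dataset("clarin-pl/aspectemo")
    # The "sentiment" feature is a Sequence of ClassLabel, so its names match _CLASSES.
    label_names = dataset["train"].features["sentiment"].feature.names
    sample = dataset["train"][0]
    # Print the first few tokens of the first training document with decoded tags.
    decoded = list(zip(sample["orth"], (label_names[i] for i in sample["sentiment"])))
    print(decoded[:10])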