Tasks: Token Classification
Modalities: Text
Sub-tasks: sentiment-classification
Languages: Polish
Size: 1K - 10K
License:
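
A minimal usage sketch, assuming the dataset is consumed through the standard datasets API (the repository path comes from _URLS in the loading script below):

from datasets import load_dataset

# Download and build all three splits via the loading script below.
dataset = load_dataset("clarin-pl/aspectemo")
example = dataset["train"][0]
print(example["orth"])       # tokens of the first review
print(example["ctag"])       # morphosyntactic tags
print(example["sentiment"])  # class indices into _CLASSES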
The dataset is built by the loading script below, aspectemo.py (4,112 bytes at commit 4796b54):
import csv
from typing import List, Generator, Tuple, Dict
import datasets
from datasets import DownloadManager
from datasets.info import SupervisedKeysData
_DESCRIPTION = """AspectEmo 1.0 dataset: Multi-Domain Corpus of Consumer Reviews for Aspect-Based
Sentiment Analysis"""
# BIO-style sentiment tags over aspect tokens; colon-joined entries combine
# multiple tags stacked on a single token.
_CLASSES = [
    'O',
    'B-a_plus_m',
    'B-a_minus_m',
    'B-a_zero',
    'B-a_minus_s',
    'B-a_plus_s',
    'B-a_amb',
    'B-a_minus_m:B-a_minus_m',
    'B-a_minus_m:B-a_minus_m:B-a_minus_m',
    'B-a_plus_m:B-a_plus_m',
    'B-a_plus_m:B-a_plus_m:B-a_plus_m',
    'B-a_zero:B-a_zero:B-a_zero',
    'B-a_zero:B-a_zero',
    'I-a_plus_m',
    'B-a_zero:B-a_plus_m',
    'B-a_minus_m:B-a_zero',
    'B-a_minus_s:B-a_minus_s:B-a_minus_s',
    'B-a_amb:B-a_amb',
    'I-a_minus_m',
    'B-a_minus_s:B-a_minus_s',
    'B-a_plus_s:B-a_plus_s:B-a_plus_s',
    'B-a_plus_m:B-a_plus_m:B-a_plus_m:B-a_plus_m:B-a_plus_m:B-a_plus_m',
    'B-a_plus_m:B-a_amb',
    'B-a_minus_m:B-a_plus_m',
    'B-a_amb:B-a_amb:B-a_amb',
    'I-a_zero',
    'B-a_plus_s:B-a_plus_s',
    'B-a_plus_m:B-a_plus_s',
    'B-a_plus_m:B-a_zero',
    'B-a_zero:B-a_zero:B-a_zero:B-a_zero:B-a_zero:B-a_zero',
    'B-a_zero:B-a_minus_m',
    'B-a_amb:B-a_plus_s',
    'B-a_zero:B-a_minus_s',
]
_URLS = {
    "train": "https://huggingface.co/datasets/clarin-pl/aspectemo/resolve/main/data/train.tsv",
    "validation": "https://huggingface.co/datasets/clarin-pl/aspectemo/resolve/main/data/val.tsv",
    "test": "https://huggingface.co/datasets/clarin-pl/aspectemo/resolve/main/data/test.tsv",
}
class AspectEmo(datasets.GeneratorBasedBuilder):
    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "orth": datasets.Sequence(datasets.Value("string")),
                    "ctag": datasets.Sequence(datasets.Value("string")),
                    "sentiment": datasets.Sequence(
                        datasets.features.ClassLabel(names=_CLASSES, num_classes=len(_CLASSES))
                    ),
                }
            ),
            supervised_keys=SupervisedKeysData(input="orth", output="sentiment"),
            homepage="https://clarin-pl.eu/dspace/handle/11321/849",
        )
    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]
    def _generate_examples(
        self, filepath: str
    ) -> Generator[Tuple[str, Dict[str, List[str]]], None, None]:
        # Each TSV row is: <sentence id> <token> <ctag> <sentiment tag>;
        # blank lines separate sentences.
        with open(filepath, "r", encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            next(reader, None)  # skip header
            id_, orth, ctag, sentiment = set(), [], [], []
            for line in reader:
                if not line:  # blank line ends the current sentence
                    assert len(id_) == 1  # every row in a block shares one id
                    yield id_.pop(), {"orth": orth, "ctag": ctag, "sentiment": sentiment}
                    id_, orth, ctag, sentiment = set(), [], [], []
                else:
                    id_.add(line[0])
                    orth.append(line[1])
                    ctag.append(line[2])
                    sentiment.append(line[3])
            if orth:  # emit the last sentence when the file lacks a trailing blank line
                assert len(id_) == 1
                yield id_.pop(), {"orth": orth, "ctag": ctag, "sentiment": sentiment}
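
A short decoding sketch, assuming the splits built by the script above. The Sequence feature wraps the ClassLabel declared in _info, so its int2str helper maps the stored class indices back to tag names:

from datasets import load_dataset

ds = load_dataset("clarin-pl/aspectemo", split="test")
labels = ds.features["sentiment"].feature  # inner ClassLabel from _info
for token, tag_id in zip(ds[0]["orth"], ds[0]["sentiment"]):
    print(f"{token}\t{labels.int2str(tag_id)}")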