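"""Hugging Face `datasets` loading script for NEREL, a Russian dataset with
nested named entities, relations and events (Loukachevitch et al., 2021)."""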
import datasets
import json
_NAME = 'NEREL'
_CITATION = '''
@article{loukachevitch2021nerel,
title={NEREL: A Russian Dataset with Nested Named Entities, Relations and Events},
author={Loukachevitch, Natalia and Artemova, Ekaterina and Batura, Tatiana and Braslavski, Pavel and Denisov, Ilia and Ivanov, Vladimir and Manandhar, Suresh and Pugachev, Alexander and Tutubalina, Elena},
journal={arXiv preprint arXiv:2108.13112},
year={2021}
}
'''.strip()
_DESCRIPTION = 'A Russian Dataset with Nested Named Entities, Relations and Events'
_HOMEPAGE = 'https://doi.org/10.48550/arXiv.2108.13112'
_VERSION = '1.1.0'


class NERELBuilder(datasets.GeneratorBasedBuilder):
    # Relative paths inside the dataset repository for each configuration.
    _DATA_URLS = {
        'train': 'data/train.jsonl',
        'test': 'data/test.jsonl',
        'dev': 'data/dev.jsonl',
    }
    _ENT_TYPES_URLS = {
        'ent_types': 'ent_types.jsonl'
    }
    _REL_TYPES_URLS = {
        'rel_types': 'rel_types.jsonl'
    }

    VERSION = datasets.Version(_VERSION)

    BUILDER_CONFIGS = [
        datasets.BuilderConfig('data',
                               version=VERSION,
                               description='Data'),
        datasets.BuilderConfig('ent_types',
                               version=VERSION,
                               description='Entity types list'),
        datasets.BuilderConfig('rel_types',
                               version=VERSION,
                               description='Relation types list')
    ]
    DEFAULT_CONFIG_NAME = 'data'

    def _info(self) -> datasets.DatasetInfo:
        if self.config.name == 'data':
            features = datasets.Features({
                'id': datasets.Value('int32'),
                'text': datasets.Value('string'),
                'entities': datasets.Sequence(datasets.Value('string')),
                'relations': datasets.Sequence(datasets.Value('string')),
                'links': datasets.Sequence(datasets.Value('string'))
            })
        elif self.config.name == 'ent_types':
            features = datasets.Features({
                'type': datasets.Value('string'),
                'link': datasets.Value('string')
            })
        else:  # 'rel_types'
            features = datasets.Features({
                'type': datasets.Value('string'),
                'arg1': datasets.Sequence(datasets.Value('string')),
                'arg2': datasets.Sequence(datasets.Value('string')),
            })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        # Download the files for the selected configuration and expose them as splits:
        # 'data' has train/test/dev, while the type-list configs each have a single split.
        if self.config.name == 'data':
            files = dl_manager.download(self._DATA_URLS)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={'filepath': files['train']},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={'filepath': files['test']},
                ),
                datasets.SplitGenerator(
                    name='dev',
                    gen_kwargs={'filepath': files['dev']},
                ),
            ]
        elif self.config.name == 'ent_types':
            files = dl_manager.download(self._ENT_TYPES_URLS)
            return [
                datasets.SplitGenerator(
                    name='ent_types',
                    gen_kwargs={'filepath': files['ent_types']},
                )
            ]
        else:
            files = dl_manager.download(self._REL_TYPES_URLS)
            return [
                datasets.SplitGenerator(
                    name='rel_types',
                    gen_kwargs={'filepath': files['rel_types']},
                )
            ]

    def _generate_examples(self, filepath):
        # Each line of the JSONL file is one JSON record; the line index is used as the example key.
        with open(filepath, encoding='utf-8') as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
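
# Usage sketch (illustrative, not part of the loading script). Assuming this script is
# hosted as a Hub dataset repository -- the repo id 'iluvvatar/NEREL' below is an
# assumption -- the three configurations can be loaded with `datasets.load_dataset`:
#
#   from datasets import load_dataset
#
#   data = load_dataset('iluvvatar/NEREL', 'data')            # splits: train, test, dev
#   ent_types = load_dataset('iluvvatar/NEREL', 'ent_types')  # entity type list
#   rel_types = load_dataset('iluvvatar/NEREL', 'rel_types')  # relation type list
#
# Recent versions of `datasets` may additionally require trust_remote_code=True when
# loading script-based datasets.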