# simplequestions-sparqltotext / simplequestions-sparqltotext.py
# (Hugging Face Hub page header captured during scraping — author: glecorve,
#  commit: b05524d "Inflate JSON dataset", size: 4.77 kB. Not part of the script.)
import os
import zipfile
import json
import base64
import datasets
# Optional dependency guard: `python-gitlab` is needed to retrieve the raw
# data files. The failure is reported but deliberately non-fatal, so the
# module can still be imported and inspected without the package installed.
# NOTE(review): `gitlab` is never referenced in this file's visible code —
# presumably used by an external retrieval step; confirm before removing.
try:
    import gitlab
except ImportError:
    print("ERROR: To be able to retrieve this dataset you need to install the `python-gitlab` package")
# BibTeX entry for citing this dataset.
# NOTE(review): the title ("Coqar: Question rewriting on coqa") does not match
# the `lecorve2022sparql2text` key — looks like a copy-paste from a sibling
# dataset script; confirm against the authors' AACL-IJCNLP 2022 paper.
_CITATION = """\
@inproceedings{lecorve2022sparql2text,
title={Coqar: Question rewriting on coqa},
author={Lecorv\'e, Gw\'enol\'e and Veyret, Morgan and Brabant, Quentin and Rojas-Barahona, Lina M.},
journal={Proceedings of the Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (AACL-IJCNLP)},
year={2022}
}
"""

# No homepage published for this dataset (empty string is passed to DatasetInfo).
_HOMEPAGE = ""

# Split name -> relative path of the JSON file; resolved and fetched by the
# datasets DownloadManager in _split_generators.
_URLS = {
    "train": "json/annotated_wd_data_train.json",
    "valid": "json/annotated_wd_data_valid.json",
    "test": "json/annotated_wd_data_test.json"
}

# Short description shown on the dataset page.
_DESCRIPTION = """\
SimpleQuestions-SPARQL2Text: Special version of SimpleQuestions with SPARQL queries formatted for the SPARQL-to-Text task
"""
class SimpleQuestions_SPARQL2Text(datasets.GeneratorBasedBuilder):
    """
    SimpleQuestions-SPARQL2Text: Special version of SimpleQuestions with
    SPARQL queries formatted for the SPARQL-to-Text task.

    Each example pairs a natural-language question (original and recased)
    with a SPARQL query and its verbalization, plus the subject / property /
    object triple in both natural-language and RDF form.
    """

    VERSION = datasets.Version("1.0.0")

    # Single source of truth for the example schema. Previously this list was
    # duplicated verbatim in _info() and _generate_examples(), which invited
    # the two copies drifting apart. Every feature is a plain string; fields
    # absent from a raw sample are filled with "" when yielding.
    _FIELDS = (
        "original_nl_question",
        "recased_nl_question",
        "sparql_query",
        "verbalized_sparql_query",
        "nl_subject",
        "nl_property",
        "nl_object",
        "nl_answer",
        "rdf_subject",
        "rdf_property",
        "rdf_object",
        "rdf_answer",
        "rdf_target",
    )

    def _info(self):
        """Return the DatasetInfo: description, string features, citation."""
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # All features share the same string type; the comprehension keeps
            # the declared order of _FIELDS.
            features=datasets.Features(
                {name: datasets.Value("string") for name in self._FIELDS}
            ),
            # Default (input, target) pair used when builder.as_dataset is
            # called with as_supervised=True.
            supervised_keys=("recased_nl_question", "verbalized_sparql_query"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split JSON files and return one SplitGenerator each."""
        # dl_manager is a datasets.download.DownloadManager; _URLS maps split
        # names to file paths, so `paths` has the same keys.
        paths = dl_manager.download_and_extract(_URLS)
        split_of = {
            "train": datasets.Split.TRAIN,
            "valid": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split,
                # NOTE(review): download_and_extract() has already extracted;
                # the extra extract() is a no-op on plain JSON files and is
                # kept only to preserve the original behavior exactly.
                gen_kwargs={
                    "filepath": dl_manager.extract(paths[key]),
                    "split": key,
                },
            )
            for key, split in split_of.items()
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) tuples, padding missing fields with ""."""
        # Explicit encoding: the questions contain non-ASCII characters, and
        # the previous bare open() depended on the platform's locale encoding.
        with open(filepath, "r", encoding="utf-8") as f:
            data = json.load(f)
        for key, sample in enumerate(data):
            example = {name: "" for name in self._FIELDS}
            example.update(sample)
            yield key, example