|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
|
|
# BibTeX entry for the ACL 2019 paper that introduced this dataset
# (constrained decoding for neural NLG over tree-structured MRs).
# NOTE: the blank lines inside the triple-quoted strings below are part of
# the literal values as written; do not collapse them.
_CITATION = """\

@inproceedings{balakrishnan-etal-2019-constrained,

    title = "Constrained Decoding for Neural {NLG} from Compositional Representations in Task-Oriented Dialogue",

    author = "Balakrishnan, Anusha  and

      Rao, Jinfeng  and

      Upasani, Kartikeya  and

      White, Michael  and

      Subba, Rajen",

    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",

    month = jul,

    year = "2019",

    address = "Florence, Italy",

    publisher = "Association for Computational Linguistics",

    url = "https://www.aclweb.org/anthology/P19-1080",

    doi = "10.18653/v1/P19-1080",

    pages = "831--844"

}

"""

# Short human-readable summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\

The Conversational Weather dataset is designed for generation of responses to weather queries based on a structured input data. The input allows specifying data attributes such as dates, times, locations, weather conditions, and errors, and also offers control over structure of response through discourse relations such as join, contrast, and justification.

"""

# Upstream repository hosting the raw data files.
_HOMEPAGE = "https://github.com/facebookresearch/TreeNLG"

_LICENSE = "CC-BY-NC-4.0"



# Per-config mapping of split name -> raw TSV download URL.
# Only one config ("default") exists; keys must match self.config.name.
_URLs = {

    'default': {

        'train': 'https://raw.githubusercontent.com/facebookresearch/TreeNLG/master/data/weather/train.tsv',

        'validation': 'https://raw.githubusercontent.com/facebookresearch/TreeNLG/master/data/weather/val.tsv',

        'test': 'https://raw.githubusercontent.com/facebookresearch/TreeNLG/master/data/weather/test.tsv'

    }

}
|
|
|
|
|
class ConversationalWeather(datasets.GeneratorBasedBuilder):
    """The Conversational Weather dataset is designed for generation of responses to weather queries
    based on a structured input data. The input allows specifying data attributes such as dates, times,
    locations, weather conditions, and errors, and also offers control over structure of response through
    discourse relations such as join, contrast, and justification."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the DatasetInfo: five string features plus card metadata.

        Feature columns mirror the TSV layout (see _generate_examples), with
        an additional GEM-style example id.
        """
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "data_id": datasets.Value("string"),
                "user_query": datasets.Value("string"),
                "tree_str_mr": datasets.Value("string"),
                "response": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split TSV files and return one SplitGenerator per split.

        Args:
            dl_manager: datasets.DownloadManager used to fetch the raw files.

        Returns:
            list[datasets.SplitGenerator] for train, test, and validation.
        """
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)
        # data_dir maps split name -> local file path; the original wrapped each
        # path in a single-argument os.path.join, which is a no-op.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir["test"],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir["validation"],
                    # gem_id uses "dev" for the validation split by convention.
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) tuples from one tab-separated split file.

        Each TSV row has exactly four columns:
        data_id, user_query, tree_str_mr (linearized tree MR), response.

        Args:
            filepath: path to the downloaded TSV file for this split.
            split: split tag ("train"/"test"/"dev") embedded in gem_id.

        Raises:
            ValueError: if a row does not have exactly four columns.
        """
        # Removed a leftover debug print(filepath) that also preceded the
        # docstring (making it a dead statement rather than a docstring).
        with open(filepath, encoding="utf-8") as f:
            csv_reader = csv.reader(f, delimiter="\t")
            for id_, row in enumerate(csv_reader):
                # Explicit check instead of `assert`, which is stripped
                # when Python runs with the -O flag.
                if len(row) != 4:
                    raise ValueError(
                        f"Expected 4 tab-separated fields but got {len(row)} "
                        f"in {filepath} at row {id_}"
                    )
                yield id_, {
                    "gem_id": f"{self.config.name}-{split}-{id_}",
                    "data_id": row[0],
                    "user_query": row[1],
                    "tree_str_mr": row[2],
                    "response": row[3],
                }
|
|