# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""English Wikipedia passage corpus for the XOR-TyDi retrieval task."""

import datasets

_CITATION = """\
@article{tydiqa,
  title   = {TyDi QA: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages},
  author  = {Jonathan H. Clark and Eunsol Choi and Michael Collins and Dan Garrette and Tom Kwiatkowski and Vitaly Nikolaev and Jennimaria Palomaki},
  journal = {TACL},
  year    = {2020}
}
"""

_DESCRIPTION = """\
The English Wikipedia 2019-02-01 passage dump used for the XOR-TyDi retrieval task, available at
https://archive.org/download/enwiki-20190201/enwiki-20190201-pages-articles-multistream.xml.bz2
"""

_URLS = {
    "corpus": {
        "en_wiki": "en_wiki.tsv",
    }
}
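
# The corpus file is expected to be a headerless TSV with one passage per line,
# laid out as docid<TAB>text<TAB>title (this matches how `_generate_examples`
# parses each row below).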


class XorTydi(datasets.GeneratorBasedBuilder):
    """English Wikipedia passage corpus used by the XOR-TyDi retrieval task."""

    VERSION = datasets.Version("1.1.0")

    # This dataset exposes a single configuration. Further sub-sets could be added to
    # BUILDER_CONFIGS (optionally with a custom datasets.BuilderConfig subclass to hold
    # extra options) and selected by name, e.g.
    #   data = datasets.load_dataset('my_dataset', 'first_domain')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="corpus", version=VERSION, description="en wiki passage corpus."),
    ]

    DEFAULT_CONFIG_NAME = "corpus"  # A default configuration is optional; define one only if it makes sense.

    def _info(self):
        # datasets.DatasetInfo documents the dataset and declares the name and type of each column.
        features = datasets.Features(
            {
                "docid": datasets.Value("string"),
                "title": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            homepage="https://github.com/ielab/xor-tydi",
            # The columns defined above and their types.
            features=features,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Downloads/extracts the data and defines the splits. If several configurations were
        # listed in BUILDER_CONFIGS, the one selected by the user would be self.config.name.
        # dl_manager is a datasets.DownloadManager: it accepts any nested list/dict of URLs
        # and returns the same structure with each URL replaced by the path to a local file.
        # Archives are extracted by default, and the path to the cached folder where they
        # were extracted is returned instead of the archive itself.
        urls = _URLS[self.config.name]["en_wiki"]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name="en_wiki",
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "en_wiki",
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        # Yields (key, example) tuples. The key exists for legacy reasons (tfds) and is not
        # important in itself, but it must be unique for each example.
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                # Each row is docid<TAB>text<TAB>title. Strip only the trailing newline so
                # that an empty trailing field does not break the three-way unpacking.
                text_id, text, title = line.rstrip("\n").split("\t")
                yield text_id, {"docid": text_id, "title": title, "text": text}
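

# An illustrative usage sketch. The dataset path below is a placeholder, not the real
# repository name: point it at this script, or at the Hub repo that hosts it together
# with en_wiki.tsv.
#
#   from datasets import load_dataset
#   corpus = load_dataset("path/to/this_script", "corpus", split="en_wiki")
#   print(corpus[0])  # {"docid": ..., "title": ..., "text": ...}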