holylovenia committed · commit 4a68f32 · verified · 1 parent: e4b2304

Upload xquadr.py with huggingface_hub

Files changed (1): xquadr.py (+215, -0)
xquadr.py ADDED
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Tasks, Licenses, TASK_TO_SCHEMA, SCHEMA_TO_FEATURES

_CITATION = """\
@inproceedings{roy-etal-2020-lareqa,
    title = "{LAR}e{QA}: Language-Agnostic Answer Retrieval from a Multilingual Pool",
    author = "Roy, Uma and
      Constant, Noah and
      Al-Rfou, Rami and
      Barua, Aditya and
      Phillips, Aaron and
      Yang, Yinfei",
    editor = "Webber, Bonnie and
      Cohn, Trevor and
      He, Yulan and
      Liu, Yang",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.emnlp-main.477",
    doi = "10.18653/v1/2020.emnlp-main.477",
    pages = "5919--5930",
}
"""
_DATASETNAME = "xquadr"

_DESCRIPTION = """\
XQuAD-R is a retrieval version of the XQuAD dataset (a cross-lingual extractive
QA dataset) that is part of the LAReQA benchmark. Like XQuAD, XQuAD-R is an
11-way parallel dataset, where each question (out of around 1200) appears in 11
different languages and has 11 parallel correct answers across the languages.
It is designed to include parallel QA pairs across languages, allowing
questions to be matched with answers from different languages. The span-tagging
task in XQuAD is converted into a retrieval task by breaking up each contextual
paragraph into sentences and treating each sentence as a possible target
answer. There are around 1000 candidate answers in each language.
"""

_HOMEPAGE = "https://github.com/google-research-datasets/lareqa"

_LANGUAGES = ["tha", "vie"]

_LICENSE = Licenses.CC_BY_SA_4_0.value

_LOCAL = False

_URLS = {
    "tha": "https://github.com/google-research-datasets/lareqa/raw/master/xquad-r/th.json",
    "vie": "https://github.com/google-research-datasets/lareqa/raw/master/xquad-r/vi.json",
}
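# A brief sketch (assumed from the source features and _generate_examples below)
# of how the raw th.json / vi.json files are laid out; they follow a SQuAD-style
# structure extended with pre-split sentences:
#   {"data": [{"title": ...,
#              "paragraphs": [{"context": ...,
#                              "sentences": ["...", ...],
#                              "sentence_breaks": [[start, end], ...],
#                              "qas": [{"id": ..., "question": ...,
#                                       "answers": [{"answer_start": ..., "text": ...}]}]}]}]}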

_SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING_RETRIEVAL]
_SEACROWD_SCHEMA = f"seacrowd_{TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]].lower()}"  # qa

_SOURCE_VERSION = "1.1.0"  # inside the dataset

_SEACROWD_VERSION = "2024.06.20"


class XquadRDataset(datasets.GeneratorBasedBuilder):
    """A retrieval version of the XQuAD dataset (a cross-lingual extractive QA dataset)"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = []
    for subset in _LANGUAGES:
        BUILDER_CONFIGS += [
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{subset}_source",
                version=SOURCE_VERSION,
                description=f"{_DATASETNAME} {subset} source schema",
                schema="source",
                subset_id=subset,
            ),
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{subset}_{_SEACROWD_SCHEMA}",
                version=SEACROWD_VERSION,
                description=f"{_DATASETNAME} {subset} SEACrowd schema",
                schema=_SEACROWD_SCHEMA,
                subset_id=subset,
            ),
        ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_{_LANGUAGES[0]}_source"

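    # For reference, the BUILDER_CONFIGS loop above yields "xquadr_tha_source",
    # "xquadr_vie_source", and, assuming TASK_TO_SCHEMA maps the retrieval QA task
    # to "QA" as the "# qa" note above suggests, "xquadr_tha_seacrowd_qa" and
    # "xquadr_vie_seacrowd_qa".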
    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "paragraphs": datasets.Sequence(
                        {
                            "context": datasets.Value("string"),
                            "qas": datasets.Sequence(
                                {
                                    "answers": datasets.Sequence(
                                        {
                                            "answer_start": datasets.Value("int32"),
                                            "text": datasets.Value("string"),
                                        }
                                    ),
                                    "id": datasets.Value("string"),
                                    "question": datasets.Value("string"),
                                }
                            ),
                            "sentence_breaks": datasets.Sequence(
                                datasets.Sequence(datasets.Value("int32"))
                            ),
                            "sentences": datasets.Sequence(datasets.Value("string")),
                        }
                    ),
                    "title": datasets.Value("string"),
                }
            )
        elif self.config.schema == _SEACROWD_SCHEMA:
            features = SCHEMA_TO_FEATURES[
                TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]]
            ]  # qa_features
            features["meta"] = {
                "title": datasets.Value("string"),
                "answers_start": datasets.Sequence(datasets.Value("int32")),
                "answers_text": datasets.Sequence(datasets.Value("string")),
            }

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        url = _URLS[self.config.subset_id]
        data_path = Path(dl_manager.download(url))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_path": data_path,
                },
            ),
        ]

    def _generate_examples(self, data_path: Path) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        with open(data_path, "r", encoding="utf-8") as file:
            data = json.load(file)

        key = 0
        for example in data["data"]:

            if self.config.schema == "source":
                yield key, example
                key += 1

            elif self.config.schema == _SEACROWD_SCHEMA:
                for paragraph in example["paragraphs"]:
                    # get sentence breaks (sentences' string stop index)
                    break_list = [breaks[1] for breaks in paragraph["sentence_breaks"]]
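                    # Illustrative example (hypothetical values): sentence_breaks such as
                    # [[0, 94], [95, 210]] give break_list == [94, 210]; an answer with
                    # answer_start == 120 fails "start < 94" but passes "start < 210",
                    # so the loop below maps it to paragraph["sentences"][1].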

                    for qa in paragraph["qas"]:
                        # get answers' string start index
                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]

                        # retrieve answers' relevant sentence
                        answers = []
                        for start in answer_starts:
                            for i, end in enumerate(break_list):
                                if start < end:
                                    answers.append(paragraph["sentences"][i])
                                    break

                        yield key, {
                            "id": str(key),
                            "question_id": qa["id"],
                            # "document_id": None,
                            "question": qa["question"],
                            "type": "retrieval",
                            "choices": [],  # escape multiple choice qa seacrowd test
                            "context": paragraph["context"],
                            "answer": answers,
                            "meta": {
                                "title": example["title"],
                                "answers_start": answer_starts,
                                "answers_text": [answer["text"] for answer in qa["answers"]],
                            },
                        }
                        key += 1
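
For reference, a loading script like this can normally be exercised directly with the datasets library. The sketch below is illustrative rather than part of the commit: the local path is an assumption, the seacrowd package must be installed (the script imports SEACrowdConfig and the schema constants from it), and recent datasets releases may additionally require trust_remote_code=True for script-based datasets.

    from datasets import load_dataset

    # "xquadr_tha_source" follows the naming defined in BUILDER_CONFIGS above;
    # "./xquadr.py" is an assumed local path to this script.
    dataset = load_dataset("./xquadr.py", name="xquadr_tha_source", trust_remote_code=True)

    # The source schema exposes SQuAD-style records with "title" and "paragraphs".
    print(dataset["train"][0]["title"])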