replaced readme with dataset script

- README.md: +0 -297
- phantom-wiki-v0.5.py: +180 -0

README.md (DELETED)
@@ -1,297 +0,0 @@
---
license: bsd-3-clause
dataset_name: phantom-wiki-v0.5
configs:
- config_name: text-corpus
  data_files:
  - split: depth_20_size_25_seed_1
    path: depth_20_size_25_seed_1/articles.json
  - split: depth_20_size_50_seed_1
    path: depth_20_size_50_seed_1/articles.json
  - split: depth_20_size_100_seed_1
    path: depth_20_size_100_seed_1/articles.json
  - split: depth_20_size_200_seed_1
    path: depth_20_size_200_seed_1/articles.json
  - split: depth_20_size_300_seed_1
    path: depth_20_size_300_seed_1/articles.json
  - split: depth_20_size_400_seed_1
    path: depth_20_size_400_seed_1/articles.json
  - split: depth_20_size_500_seed_1
    path: depth_20_size_500_seed_1/articles.json
  - split: depth_20_size_1000_seed_1
    path: depth_20_size_1000_seed_1/articles.json
  - split: depth_20_size_2500_seed_1
    path: depth_20_size_2500_seed_1/articles.json
  - split: depth_20_size_5000_seed_1
    path: depth_20_size_5000_seed_1/articles.json
  # - split: depth_20_size_7500_seed_1
  #   path: depth_20_size_7500_seed_1/articles.json
  - split: depth_20_size_10000_seed_1
    path: depth_20_size_10000_seed_1/articles.json
  - split: depth_20_size_25_seed_2
    path: depth_20_size_25_seed_2/articles.json
  - split: depth_20_size_50_seed_2
    path: depth_20_size_50_seed_2/articles.json
  - split: depth_20_size_100_seed_2
    path: depth_20_size_100_seed_2/articles.json
  - split: depth_20_size_200_seed_2
    path: depth_20_size_200_seed_2/articles.json
  - split: depth_20_size_300_seed_2
    path: depth_20_size_300_seed_2/articles.json
  - split: depth_20_size_400_seed_2
    path: depth_20_size_400_seed_2/articles.json
  - split: depth_20_size_500_seed_2
    path: depth_20_size_500_seed_2/articles.json
  - split: depth_20_size_1000_seed_2
    path: depth_20_size_1000_seed_2/articles.json
  - split: depth_20_size_2500_seed_2
    path: depth_20_size_2500_seed_2/articles.json
  - split: depth_20_size_5000_seed_2
    path: depth_20_size_5000_seed_2/articles.json
  - split: depth_20_size_7500_seed_2
    path: depth_20_size_7500_seed_2/articles.json
  - split: depth_20_size_10000_seed_2
    path: depth_20_size_10000_seed_2/articles.json
  - split: depth_20_size_25_seed_3
    path: depth_20_size_25_seed_3/articles.json
  - split: depth_20_size_50_seed_3
    path: depth_20_size_50_seed_3/articles.json
  - split: depth_20_size_100_seed_3
    path: depth_20_size_100_seed_3/articles.json
  - split: depth_20_size_200_seed_3
    path: depth_20_size_200_seed_3/articles.json
  - split: depth_20_size_300_seed_3
    path: depth_20_size_300_seed_3/articles.json
  - split: depth_20_size_400_seed_3
    path: depth_20_size_400_seed_3/articles.json
  - split: depth_20_size_500_seed_3
    path: depth_20_size_500_seed_3/articles.json
  - split: depth_20_size_1000_seed_3
    path: depth_20_size_1000_seed_3/articles.json
  - split: depth_20_size_2500_seed_3
    path: depth_20_size_2500_seed_3/articles.json
  - split: depth_20_size_5000_seed_3
    path: depth_20_size_5000_seed_3/articles.json
  - split: depth_20_size_7500_seed_3
    path: depth_20_size_7500_seed_3/articles.json
  - split: depth_20_size_10000_seed_3
    path: depth_20_size_10000_seed_3/articles.json

- config_name: question-answer
  data_files:
  - split: depth_20_size_25_seed_1
    path: depth_20_size_25_seed_1/questions.json
  - split: depth_20_size_50_seed_1
    path: depth_20_size_50_seed_1/questions.json
  - split: depth_20_size_100_seed_1
    path: depth_20_size_100_seed_1/questions.json
  - split: depth_20_size_200_seed_1
    path: depth_20_size_200_seed_1/questions.json
  - split: depth_20_size_300_seed_1
    path: depth_20_size_300_seed_1/questions.json
  - split: depth_20_size_400_seed_1
    path: depth_20_size_400_seed_1/questions.json
  - split: depth_20_size_500_seed_1
    path: depth_20_size_500_seed_1/questions.json
  - split: depth_20_size_1000_seed_1
    path: depth_20_size_1000_seed_1/questions.json
  - split: depth_20_size_2500_seed_1
    path: depth_20_size_2500_seed_1/questions.json
  - split: depth_20_size_5000_seed_1
    path: depth_20_size_5000_seed_1/questions.json
  # - split: depth_20_size_7500_seed_1
  #   path: depth_20_size_7500_seed_1/questions.json
  - split: depth_20_size_10000_seed_1
    path: depth_20_size_10000_seed_1/questions.json
  - split: depth_20_size_25_seed_2
    path: depth_20_size_25_seed_2/questions.json
  - split: depth_20_size_50_seed_2
    path: depth_20_size_50_seed_2/questions.json
  - split: depth_20_size_100_seed_2
    path: depth_20_size_100_seed_2/questions.json
  - split: depth_20_size_200_seed_2
    path: depth_20_size_200_seed_2/questions.json
  - split: depth_20_size_300_seed_2
    path: depth_20_size_300_seed_2/questions.json
  - split: depth_20_size_400_seed_2
    path: depth_20_size_400_seed_2/questions.json
  - split: depth_20_size_500_seed_2
    path: depth_20_size_500_seed_2/questions.json
  - split: depth_20_size_1000_seed_2
    path: depth_20_size_1000_seed_2/questions.json
  - split: depth_20_size_2500_seed_2
    path: depth_20_size_2500_seed_2/questions.json
  - split: depth_20_size_5000_seed_2
    path: depth_20_size_5000_seed_2/questions.json
  - split: depth_20_size_7500_seed_2
    path: depth_20_size_7500_seed_2/questions.json
  - split: depth_20_size_10000_seed_2
    path: depth_20_size_10000_seed_2/questions.json
  - split: depth_20_size_25_seed_3
    path: depth_20_size_25_seed_3/questions.json
  - split: depth_20_size_50_seed_3
    path: depth_20_size_50_seed_3/questions.json
  - split: depth_20_size_100_seed_3
    path: depth_20_size_100_seed_3/questions.json
  - split: depth_20_size_200_seed_3
    path: depth_20_size_200_seed_3/questions.json
  - split: depth_20_size_300_seed_3
    path: depth_20_size_300_seed_3/questions.json
  - split: depth_20_size_400_seed_3
    path: depth_20_size_400_seed_3/questions.json
  - split: depth_20_size_500_seed_3
    path: depth_20_size_500_seed_3/questions.json
  - split: depth_20_size_1000_seed_3
    path: depth_20_size_1000_seed_3/questions.json
  - split: depth_20_size_2500_seed_3
    path: depth_20_size_2500_seed_3/questions.json
  - split: depth_20_size_5000_seed_3
    path: depth_20_size_5000_seed_3/questions.json
  - split: depth_20_size_7500_seed_3
    path: depth_20_size_7500_seed_3/questions.json
  - split: depth_20_size_10000_seed_3
    path: depth_20_size_10000_seed_3/questions.json
---

TODO: add 7500 seed 1

# Dataset Card for Dataset Name

<!-- Provide a quick summary of the dataset. -->

This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).

## Dataset Details

The format is based on this dataset: https://huggingface.co/rag-datasets
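
With the configs above, any split can be loaded directly (a minimal sketch; the repo id is assumed from the download URLs used by the accompanying dataset script):

```python
from datasets import load_dataset

# Load one corpus split and the matching question-answer split.
corpus = load_dataset("ag2435/phantom-wiki", "text-corpus",
                      split="depth_20_size_50_seed_1")
questions = load_dataset("ag2435/phantom-wiki", "question-answer",
                         split="depth_20_size_50_seed_1")

print(corpus[0]["title"])
print(questions[0]["question"])
```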

### Dataset Description

<!-- Provide a longer summary of what this dataset is. -->

- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]

### Dataset Sources [optional]

<!-- Provide the basic links for the dataset. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the dataset is intended to be used. -->

### Direct Use

<!-- This section describes suitable use cases for the dataset. -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->

[More Information Needed]

## Dataset Structure

<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->

[More Information Needed]

## Dataset Creation

### Curation Rationale

<!-- Motivation for the creation of this dataset. -->

[More Information Needed]

### Source Data

<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->

#### Data Collection and Processing

<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->

[More Information Needed]

#### Who are the source data producers?

<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->

[More Information Needed]

### Annotations [optional]

<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->

#### Annotation process

<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->

[More Information Needed]

#### Who are the annotators?

<!-- This section describes the people or systems who created the annotations. -->

[More Information Needed]

#### Personal and Sensitive Information

<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users should be made aware of the risks, biases, and limitations of the dataset. More information is needed for further recommendations.

## Citation [optional]

<!-- If there is a paper or blog post introducing the dataset, the APA and BibTeX information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Dataset Card Authors [optional]

[More Information Needed]

## Dataset Card Contact

[More Information Needed]

phantom-wiki-v0.5.py (ADDED)
@@ -0,0 +1,180 @@
"""Dataset script for PhantomWiki v0.5.

Template: https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py
"""

import json

import datasets

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = "https://github.com/albertgong1/phantom-wiki"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method).
_URLS = {}
# Construct splits
SIZES = [
    25,
    100,
    200,
    300,
    400,
    500,
    1000,
    2500,
    5000,
    10000,
]
SPLITS = []
for depth in [20]:
    for size in SIZES:
        for seed in [1, 2, 3]:
            SPLITS.append(f"depth_{depth}_size_{size}_seed_{seed}")
# The config names here must match the names in BUILDER_CONFIGS below.
for filename, config in [
    ("articles.json", "text-corpus"),
    ("questions.json", "question-answer"),
    ("facts.pl", "database"),
]:
    _URLS[config] = {}
    for split in SPLITS:
        _URLS[config][split] = f"https://huggingface.co/datasets/ag2435/phantom-wiki/resolve/main/{split}/{filename}"
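# For example, the first generated split name is "depth_20_size_25_seed_1", so
# _URLS["text-corpus"]["depth_20_size_25_seed_1"] resolves to
# "https://huggingface.co/datasets/ag2435/phantom-wiki/resolve/main/depth_20_size_25_seed_1/articles.json".
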
class PhantomWiki(datasets.GeneratorBasedBuilder):
    """PhantomWiki v0.5"""

    VERSION = datasets.Version("0.5.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the datasets with configurable options,
    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig.
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('phantom-wiki-v0.5', 'text-corpus')
    # data = datasets.load_dataset('phantom-wiki-v0.5', 'question-answer')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="text-corpus", version=VERSION, description="This config contains the documents in the text corpus"),
        datasets.BuilderConfig(name="question-answer", version=VERSION, description="This config contains the question-answer pairs"),
        datasets.BuilderConfig(name="database", version=VERSION, description="This config contains the complete Prolog database"),
    ]

    # DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
    def _info(self):
        """This method specifies the datasets.DatasetInfo object, which contains information and typings for the dataset."""
        if self.config.name == "text-corpus":  # This is the name of the configuration selected in BUILDER_CONFIGS above
            features = datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "article": datasets.Value("string"),
                    # "facts": datasets.Value("string"),  # TODO
                }
            )
        elif self.config.name == "question-answer":
            # NOTE: for the available data types, see https://huggingface.co/docs/datasets/v2.5.2/en/package_reference/main_classes#datasets.Features
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "intermediate_answers": datasets.Value("string"),
                    "answer": datasets.Sequence(datasets.Value("string")),
                    "prolog": datasets.Features(
                        {
                            "query": datasets.Value("string"),
                            "answer": datasets.Value("string"),
                        }
                    ),
                    "template": datasets.Sequence(datasets.Value("string")),
                    "type": datasets.Value("int64"),  # this references the template type
                    "difficulty": datasets.Value("int64"),
                }
            )
        elif self.config.name == "database":
            features = datasets.Features(
                {
                    "content": datasets.Value("string"),
                }
            )
        else:
            raise ValueError(f"Unknown configuration name {self.config.name}")
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,  # defined above, since they differ between configurations
            # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """This method is tasked with downloading/extracting the data and defining the splits depending on the configuration.

        NOTE: If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name.
        """
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive.
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        splits = []
        for name, filepath in data_dir.items():
            splits.append(
                datasets.SplitGenerator(
                    name=name,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": filepath,
                        "split": name,
                    },
                )
            )
        return splits
    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
        if self.config.name in ("text-corpus", "question-answer"):
            # articles.json and questions.json each hold a JSON list of records
            # that already match the features declared in _info.
            with open(filepath, encoding="utf-8") as f:
                for key, data in enumerate(json.load(f)):
                    yield key, data
        elif self.config.name == "database":
            # NOTE: facts.pl is a plain-text Prolog file, not JSON, and our schema
            # expects a dictionary with a single key "content", so the whole file
            # is yielded as a single example.
            with open(filepath, encoding="utf-8") as f:
                yield 0, {"content": f.read()}
        else:
            raise ValueError(f"Unknown configuration name {self.config.name}")