innit commit
wikitext.py +252 -0
wikitext.py
ADDED
@@ -0,0 +1,252 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This is a modified version of https://github.com/huggingface/datasets/blob/master/datasets/wikitext/wikitext.py
# that returns Wiki pages instead of Wiki text line-by-line.
"""WikiText Dataset."""
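
# Illustration of the modification noted above: the upstream script yields one
# example per text line, while this version accumulates every line between
# consecutive top-level "= Title =" headings and yields one example per page,
# shaped like
#     {"page": "= Some Article =\n\n Some Article is ...\n\n = = Section = = ..."}
# where "Some Article" is a hypothetical title used only for illustration.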


import os

import datasets


_CITATION = """\
@misc{merity2016pointer,
    title={Pointer Sentinel Mixture Models},
    author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},
    year={2016},
    eprint={1609.07843},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified
Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike
License.
"""
_HOMEPAGE = "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/"
_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"
_DATA_URL = "https://s3.amazonaws.com/research.metamind.io/wikitext"


class WikitextConfig(datasets.BuilderConfig):
    """BuilderConfig for WikiText."""

    def __init__(self, data_url, **kwargs):
        """BuilderConfig for WikiText.

        Args:
            data_url: `string`, url to the dataset (word or raw level)
            **kwargs: keyword arguments forwarded to super.
        """
        super(WikitextConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url


class Wikitext(datasets.GeneratorBasedBuilder):
    """WikiText dataset builder that yields full Wikipedia pages instead of individual text lines."""

    VERSION = datasets.Version("0.1.0")
    BUILDER_CONFIGS = [
        WikitextConfig(
            name="wikitext-103-v1",
            data_url=_DATA_URL + "/" + "wikitext-103-v1.zip",
            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
        WikitextConfig(
            name="wikitext-2-v1",
            data_url=_DATA_URL + "/" + "wikitext-2-v1.zip",
            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
        WikitextConfig(
            name="wikitext-103-raw-v1",
            data_url=_DATA_URL + "/" + "wikitext-103-raw-v1.zip",
            description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
            "They should only be used for character level work or for creating newly derived datasets.",
        ),
        WikitextConfig(
            name="wikitext-2-raw-v1",
            data_url=_DATA_URL + "/" + "wikitext-2-raw-v1.zip",
            description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
            "They should only be used for character level work or for creating newly derived datasets.",
        ),
    ]
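
    # As a worked example of how data_url is assembled: the "wikitext-2-raw-v1"
    # config downloads
    # https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip,
    # which unpacks into a "wikitext-2-raw" directory holding wiki.train.raw,
    # wiki.valid.raw, and wiki.test.raw (see _split_generators below).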

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # Each example is one full Wikipedia page as a single string.
                    "page": datasets.Value("string")
                }
            ),
            # There is no natural (input, target) pair here, so no supervised keys.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager is a datasets.download.DownloadManager that can be used to
        # download and extract URLs.
        if self.config.name == "wikitext-103-v1":
            data_file = dl_manager.download_and_extract(self.config.data_url)
            data_dir = os.path.join(data_file, "wikitext-103")
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.tokens"), "split": "test"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.tokens"), "split": "train"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.tokens"), "split": "valid"},
                ),
            ]
        elif self.config.name == "wikitext-103-raw-v1":
            data_file = dl_manager.download_and_extract(self.config.data_url)
            data_dir = os.path.join(data_file, "wikitext-103-raw")
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.raw"), "split": "test"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.raw"), "split": "train"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.raw"), "split": "valid"},
                ),
            ]
        elif self.config.name == "wikitext-2-raw-v1":
            data_file = dl_manager.download_and_extract(self.config.data_url)
            data_dir = os.path.join(data_file, "wikitext-2-raw")
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.raw"), "split": "test"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.raw"), "split": "train"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.raw"), "split": "valid"},
                ),
            ]
        elif self.config.name == "wikitext-2-v1":
            data_file = dl_manager.download_and_extract(self.config.data_url)
            data_dir = os.path.join(data_file, "wikitext-2")
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.tokens"), "split": "test"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.tokens"), "split": "train"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.tokens"), "split": "valid"},
                ),
            ]

    def _generate_examples(self, data_file, split):
        """Yields examples, one per Wikipedia page."""
        with open(data_file, encoding="utf-8") as f:
            key = 0
            ret = []
            data = f.read().split("\n")
            for line in data:
                # Collapse section-level markup ("= = Section = =", "= = = Sub = = =")
                # so that only top-level page titles still match "= Title =" below.
                rline = line.replace("= = =", "===").replace("= =", "==").strip()
                if rline.startswith("= ") and rline.endswith(" ="):
                    # Reached a new page title: emit the page accumulated so far.
                    page = "\n".join(ret)
                    if page.strip():
                        yield key, {"page": page}
                        key += 1
                    ret = []
                ret.append(line)
            # Emit the last accumulated page.
            page = "\n".join(ret)
            yield key, {"page": page}
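
A minimal usage sketch, assuming the script above is saved locally as wikitext.py and a datasets release that still supports local loading scripts; the path and the printed slice length are illustrative:

import datasets

# Load one of the four configs defined above.
pages = datasets.load_dataset("./wikitext.py", "wikitext-2-raw-v1")

# Each example is a whole Wikipedia page, not a single line of text.
print(pages["train"][0]["page"][:200])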