conceptofmind committed
Commit d3084f4 · verified · 1 Parent(s): 37af255

Update megawika.py

Files changed (1)
  1. megawika.py +155 -82
megawika.py CHANGED
@@ -1,66 +1,106 @@
- import datasets
- import json
- import yaml
- import urllib.request

- _DESCRIPTION = """\
MegaWika is a multi- and crosslingual text dataset containing 30 million
Wikipedia passages with their scraped and cleaned web citations. The
passages span 50 Wikipedias in 50 languages, and the articles in which
- the passages were originally embedded are included for convenience."""

_CITATION = """\
@article{barham2023megawika,
  title={MegaWika: Millions of reports and their sources across 50 diverse languages},
-   author={Barham, Samuel and Weller, Orion and others},
  journal={INSERT ARXIV PREPRINT ID HERE},
  year={2023}
- }"""

- _HOMEPAGE = "https://huggingface.co/datasets/conceptofmind/MegaWika"
_LICENSE = "cc-by-sa-4.0"

- # Load the file paths for all the splits
- file_list_url = "https://huggingface.co/datasets/conceptofmind/MegaWika/raw/main/files.yml"
-
- def get_data_urls():
-     with urllib.request.urlopen(file_list_url) as f:
-         try:
-             fnames = yaml.safe_load(f)
-             return fnames['fnames']
-         except yaml.YAMLError as exc:
-             print("Error loading the file paths for the dataset splits. Aborting.")
-             return {}
-
- class MegaWikaConfig(datasets.BuilderConfig):
-     """BuilderConfig for MegaWika."""
-
-     def __init__(self, language=None, **kwargs):
-         """BuilderConfig for MegaWika.
-
-         Args:
-             language: The language of the dataset split
-             **kwargs: Keyword arguments forwarded to super.
-         """
-         super(MegaWikaConfig, self).__init__(**kwargs)
-         self.language = language

class MegaWika(datasets.GeneratorBasedBuilder):
-     """MegaWika dataset."""
-
-     # Get available languages from the data URLs
-     _DATA_URL = get_data_urls()
-     BUILDER_CONFIGS = [
-         MegaWikaConfig(
-             name=lang if lang != "all" else "default",
-             language=lang,
-             version=datasets.Version("1.0.0"),
-             description=f"MegaWika {lang} configuration"
-         )
-         for lang in ["all"] + list(_DATA_URL.keys())
-     ]
-
-     DEFAULT_CONFIG_NAME = "default"  # For the "all" configuration

    def _info(self):
        return datasets.DatasetInfo(
@@ -72,24 +112,32 @@ class MegaWika(datasets.GeneratorBasedBuilder):
                    "entries": datasets.features.Sequence(
                        {
                            "id": datasets.Value("string"),
                            "passage": {
                                "text": [datasets.Value("string")],
                                "parse": datasets.Value("string"),
                                "en_tokens": [datasets.Value("string")],
                                "lang_tokens": [datasets.Value("string")],
-                                 "en_lang_token_map": [[datasets.Value("int32")]]
                            },
                            "mt": {
                                "original": datasets.Value("string"),
                                "original_sents": [datasets.Value("string")],
                                "translation": datasets.Value("string"),
                                "translation_sents": [datasets.Value("string")],
-                                 "translation_probs": [[datasets.Value("string")]],
                                "repetitious_translation": datasets.Value("bool")
                            },
                            "source_lang": datasets.Value("string"),
                            "source_url": datasets.Value("string"),
                            "source_text": datasets.Value("string"),
                            "qa_pairs": datasets.Sequence(
                                {
                                    "question": datasets.Value("string"),
@@ -101,10 +149,10 @@ class MegaWika(datasets.GeneratorBasedBuilder):
                                            "argument": datasets.Value("string")
                                        }
                                    ),
-                                     "en_matches_in_source": [[datasets.Value("int32")]],
-                                     "en_match_in_passage": [datasets.Value("int32")],
-                                     "lang_matches_in_source": [[datasets.Value("int32")]],
-                                     "lang_match_in_passage": [datasets.Value("int32")],
                                    "passage": [datasets.Value("string")],
                                    "en_answer_tokens": [datasets.Value("string")],
                                    "match_disambiguated_question": datasets.Value("string"),
@@ -115,42 +163,45 @@ class MegaWika(datasets.GeneratorBasedBuilder):
                }
            ),
            supervised_keys=None,
-             homepage=_HOMEPAGE,
            citation=_CITATION,
-             license=_LICENSE
        )

    def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         if self.config.language == "all":
-             data_sources = self._DATA_URL
        else:
-             data_sources = {self.config.language: self._DATA_URL[self.config.language]}

        return [
            datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,  # Using TRAIN as default split
                gen_kwargs={
                    "filepaths": dl_manager.download(data_sources[lang])
                }
            )
-             for lang in data_sources
        ]

    def _get_qa_pair_list_features(self, qa_pair, feature_name):
-         """Helper method to extract QA pair features."""
-         if feature_name in qa_pair and qa_pair[feature_name]:
-             return qa_pair[feature_name]
-         elif feature_name.startswith('en'):
-             base_feature = '_'.join(feature_name.split('_')[1:])
-             if base_feature in qa_pair and qa_pair[base_feature]:
-                 return qa_pair[base_feature]
-         return []

    def _generate_examples(self, filepaths):
-         """Yields examples."""
        id_ = 0
        for filepath in filepaths:
            try:
                with open(filepath, "r", encoding="utf-8") as f:
                    for line in f:
@@ -166,11 +217,19 @@ class MegaWika(datasets.GeneratorBasedBuilder):
                                    "passage": {
                                        "text": entry['passage'].get("text", []),
                                        "parse": json.dumps(entry['passage'].get("parse", [{}])),
-                                         "en_tokens": list(entry['passage'].get("en_tokens", {}).values()),
                                        "lang_tokens": list(entry['passage'].get("lang_tokens", {}).values()),
                                        "en_lang_token_map": [
                                            (int(item[0]), int(item[1]))
-                                             for item in entry['passage'].get("en_lang_token_map", {}).items()
                                        ]
                                    },
                                    "mt": {
@@ -179,7 +238,7 @@ class MegaWika(datasets.GeneratorBasedBuilder):
                                        "translation": entry.get("translation", ""),
                                        "translation_sents": entry.get("translation_sents", []),
                                        "translation_probs": entry.get("translation_probs", [[]]),
-                                         "repetitious_translation": entry.get("repetitious_translation", False)
                                    },
                                    "source_lang": entry.get("source_lang", ""),
                                    "source_url": entry.get("source_url", ""),
@@ -190,20 +249,34 @@ class MegaWika(datasets.GeneratorBasedBuilder):
                                            "en_answer": qa_pair.get('en_answer', qa_pair.get('answer', "")),
                                            'lang_answer': qa_pair.get('lang_answer', ''),
                                            'frames': qa_pair.get('frames', []),
-                                             "en_matches_in_source": self._get_qa_pair_list_features(qa_pair, "en_matches_in_source"),
-                                             "en_match_in_passage": self._get_qa_pair_list_features(qa_pair, "en_match_in_passage"),
-                                             "lang_matches_in_source": self._get_qa_pair_list_features(qa_pair, "lang_matches_in_source"),
-                                             "lang_match_in_passage": self._get_qa_pair_list_features(qa_pair, "lang_match_in_passage"),
                                            "passage": qa_pair.get('passage', []),
                                            "en_answer_tokens": qa_pair.get('en_answer_tokens', qa_pair.get('answer_tokens', [])),
                                            "match_disambiguated_question": qa_pair.get('match_disambiguated_question', ""),
                                        }
-                                         for qa_pair in entry.get('qa_pairs', [])
                                    ]
                                }
-                                 for entry in example.get("entries", [])
                            ]
                        }
                        id_ += 1
-             except Exception as e:
-                 print(f"Error reading file {filepath}: {str(e)}")
+ # Copyright 2020 The HuggingFace Datasets Authors and
+ # the Johns Hopkins University (JHU) Human Language Technology
+ # Center of Excellence.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ This file provides a HuggingFace dataset loader implementation for
+ the JHU/HLTCOE MegaWika dataset.

MegaWika is a multi- and crosslingual text dataset containing 30 million
Wikipedia passages with their scraped and cleaned web citations. The
passages span 50 Wikipedias in 50 languages, and the articles in which
+ the passages were originally embedded are included for convenience. Where
+ a Wikipedia passage is in a non-English language, an automated English
+ translation is provided. Furthermore, nearly 130 million English
+ question/answer pairs were extracted from the passages, and FrameNet events
+ occurring in the passages are detected using the LOME FrameNet parser.
+ """

+
+ import csv
+ import json
+ import os
+ import re
+ import pathlib
+ from pathlib import Path
+ import yaml
+ from ast import literal_eval
+
+ import datasets
+
+ # import gzip
+ # try:
+ #     import lzma as xz
+ # except ImportError:
+ #     import pylzma as xz
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@article{barham2023megawika,
  title={MegaWika: Millions of reports and their sources across 50 diverse languages},
+   author={Barham, Samuel and Weller, Orion and
+           Yuan, Michelle and Murray, Kenton and
+           Yarmohammadi, Mahsa and Jiang, Zhengping and
+           Vashishtha, Siddharth and Martin, Alexander and
+           Liu, Anqi and White, Aaron Steven and
+           Boyd-Graber, Jordan and Van Durme, Benjamin
+   },
  journal={INSERT ARXIV PREPRINT ID HERE},
  year={2023}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ MegaWika is a multi- and crosslingual text dataset containing 30 million
+ Wikipedia passages with their scraped and cleaned web citations. The
+ passages span 50 Wikipedias in 50 languages, and the articles in which
+ the passages were originally embedded are included for convenience. Where
+ a Wikipedia passage is in a non-English language, an automated English
+ translation is provided. Furthermore, nearly 130 million English
+ question/answer pairs were extracted from the passages, and FrameNet events
+ occurring in the passages are detected using the LOME FrameNet parser.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/hltcoe/megawika"

_LICENSE = "cc-by-sa-4.0"

+ _URL = "https://huggingface.co/datasets/hltcoe/megawika"
+
+ # Load the file paths for all the splits (per language currently)
+
+ file_list_url = "https://huggingface.co/datasets/hltcoe/megawika/raw/main/files.yml"
+
+ import urllib.request
+ with urllib.request.urlopen(file_list_url) as f:
+     try:
+         fnames = yaml.safe_load(f)
+     except yaml.YAMLError as exc:
+         print("Error loading the file paths for the dataset splits. Aborting.")
+         exit(1)
+
+ _DATA_URL = fnames['fnames']
+
+ _VARIANTS = ["all"] + list(_DATA_URL.keys())
+

class MegaWika(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [datasets.BuilderConfig(name) for name in _VARIANTS]

    def _info(self):
        return datasets.DatasetInfo(

                    "entries": datasets.features.Sequence(
                        {
                            "id": datasets.Value("string"),
+
+                             # Wiki passage
                            "passage": {
                                "text": [datasets.Value("string")],
                                "parse": datasets.Value("string"),
                                "en_tokens": [datasets.Value("string")],
                                "lang_tokens": [datasets.Value("string")],
+                                 "en_lang_token_map": [[datasets.Value("int32")]]  # list of pairs
                            },
+
+                             # MT
                            "mt": {
                                "original": datasets.Value("string"),
                                "original_sents": [datasets.Value("string")],
                                "translation": datasets.Value("string"),
                                "translation_sents": [datasets.Value("string")],
+                                 "translation_probs": [[datasets.Value("float32")]],
                                "repetitious_translation": datasets.Value("bool")
                            },
+
+                             # Source document
                            "source_lang": datasets.Value("string"),
                            "source_url": datasets.Value("string"),
                            "source_text": datasets.Value("string"),
+
+                             # Question/answer pairs
                            "qa_pairs": datasets.Sequence(
                                {
                                    "question": datasets.Value("string"),

                                            "argument": datasets.Value("string")
                                        }
                                    ),
+                                     "en_matches_in_source": [[datasets.Value("int32")]],  # list of pair of int indices
+                                     "en_match_in_passage": [datasets.Value("int32")],  # pair of int indices
+                                     "lang_matches_in_source": [[datasets.Value("int32")]],  # list of pair of int indices
+                                     "lang_match_in_passage": [datasets.Value("int32")],  # pair of int indices
                                    "passage": [datasets.Value("string")],
                                    "en_answer_tokens": [datasets.Value("string")],
                                    "match_disambiguated_question": datasets.Value("string"),

                }
            ),
            supervised_keys=None,
+             homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
+         if self.config.name == "all":
+             data_sources = _DATA_URL
        else:
+             data_sources = {self.config.name: _DATA_URL[self.config.name]}

        return [
            datasets.SplitGenerator(
+                 name=lang,
                gen_kwargs={
                    "filepaths": dl_manager.download(data_sources[lang])
                }
            )
+             for lang
+             in data_sources
        ]

    def _get_qa_pair_list_features(self, qa_pair, feature_name):
+         res = []
+
+         if feature_name in qa_pair:
+             if qa_pair[feature_name]:
+                 return qa_pair[feature_name]
+         else:
+             if feature_name.startswith('en'):
+                 feature_name = '_'.join(feature_name.split('_')[1:])
+                 return self._get_qa_pair_list_features(qa_pair, feature_name)

+         return res
+
    def _generate_examples(self, filepaths):
+         """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        for filepath in filepaths:
+             # logger.info("Generating examples from = %s", filepath)
            try:
                with open(filepath, "r", encoding="utf-8") as f:
                    for line in f:

                                    "passage": {
                                        "text": entry['passage'].get("text", []),
                                        "parse": json.dumps(entry['passage'].get("parse", [{}])),
+                                         "en_tokens": list(entry['passage'].get(
+                                             "en_tokens",
+                                             {
+                                                 token: token
+                                                 for tokens in entry['passage'].get("tokens", {})
+                                                 for token in tokens
+                                             }
+                                         ).values()),
                                        "lang_tokens": list(entry['passage'].get("lang_tokens", {}).values()),
                                        "en_lang_token_map": [
                                            (int(item[0]), int(item[1]))
+                                             for item
+                                             in entry['passage'].get("en_lang_token_map", {}).items()
                                        ]
                                    },
                                    "mt": {

                                        "translation": entry.get("translation", ""),
                                        "translation_sents": entry.get("translation_sents", []),
                                        "translation_probs": entry.get("translation_probs", [[]]),
+                                         "repetitious_translation": entry.get("repetitious_translation", None)
                                    },
                                    "source_lang": entry.get("source_lang", ""),
                                    "source_url": entry.get("source_url", ""),

                                            "en_answer": qa_pair.get('en_answer', qa_pair.get('answer', "")),
                                            'lang_answer': qa_pair.get('lang_answer', ''),
                                            'frames': qa_pair.get('frames', []),
+                                             "en_matches_in_source": self._get_qa_pair_list_features(qa_pair, "en_matches_in_source"),
+                                             "en_match_in_passage": self._get_qa_pair_list_features(qa_pair, "en_match_in_passage"),
+                                             "lang_matches_in_source": self._get_qa_pair_list_features(qa_pair, "lang_matches_in_source"),
+                                             "lang_match_in_passage": self._get_qa_pair_list_features(qa_pair, "lang_match_in_passage"),
                                            "passage": qa_pair.get('passage', []),
                                            "en_answer_tokens": qa_pair.get('en_answer_tokens', qa_pair.get('answer_tokens', [])),
                                            "match_disambiguated_question": qa_pair.get('match_disambiguated_question', ""),
                                        }
+                                         for qa_pair
+                                         in entry.get('qa_pairs', [])
                                    ]
                                }
+                                 for entry
+                                 in example.get("entries", [])
                            ]
                        }
                        id_ += 1
+             except:
+                 print("Error reading file:", filepath)
+
+
+
+     # "entries": datasets.features.Sequence(
+     #     {
+     #         "qa_pairs": datasets.Sequence(
+     #             {
+     #                 "question": datasets.Value("string"),
+     #                 "answer": datasets.Value("string"),
+     #             }
+     #         )
+     #     }
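
Below is a minimal usage sketch for the updated loader; it is not part of the commit. It assumes the language keys in files.yml include "en", that each language is exposed as its own split (per name=lang in _split_generators), and that the installed datasets version still executes script-based loaders when trust_remote_code=True is passed.

from datasets import load_dataset

# Hypothetical usage sketch (not part of the commit): stream the English subset
# through this loader script. The repo id and the "en" config name are assumptions
# based on the language keys expected in files.yml.
ds = load_dataset("conceptofmind/MegaWika", "en", streaming=True, trust_remote_code=True)

# Each language is exposed as its own split (name=lang above), so English
# examples live under the "en" split.
example = next(iter(ds["en"]))
print(sorted(example.keys()))  # top-level fields declared in _info()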