Modalities: Text · Formats: JSON · Libraries: Datasets, Dask

Commit 235690a (parent: cf94da3) · orionweller committed

remove python file; use just data files
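With the loading script removed, the dataset is read straight from the JSONL data files (note the dev.jsonl → validation.jsonl renames further down, which match the split name the datasets library expects). Below is a minimal sketch of loading the English monolingual "section" slice via data_files, assuming the directory layout visible in this commit and the repo id taken from _URL in the script removed below (the actual repo id may differ):

from datasets import load_dataset

# Layout per this commit: mono/{section,iterative}/en/{train,validation,test}.jsonl
# ("mono" = monolingual; the removed script used "cl" for crosslingual).
data_files = {
    "train": "mono/section/en/train.jsonl",
    "validation": "mono/section/en/validation.jsonl",
    "test": "mono/section/en/test.jsonl",
}

# Assumption: repo id taken from _URL in the removed loader script.
ds = load_dataset("hltcoe/megawika", data_files=data_files)
print(ds)
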
megawika-report-generation.py DELETED
@@ -1,148 +0,0 @@
-# Copyright 2020 The HuggingFace Datasets Authors and
-# the Johns Hopkins University (JHU) Human Language Technology
-# Center of Excellence.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-This file provides a HuggingFace dataset loader implementation for
-the JHU/HLTCOE MegaWika dataset, specifically for a report-generation
-or multi-document summarization dataset built from the raw MegaWika data.
-
-MegaWika is a multi- and crosslingual text dataset containing 30 million
-Wikipedia passages with their scraped and cleaned web citations. The
-passages span 50 Wikipedias in 50 languages, and the articles in which
-the passages were originally embedded are included for convenience. Where
-a Wikipedia passage is in a non-English language, an automated English
-translation is provided.
-"""
-
-import json
-import os
-
-import datasets
-
-
-_CITATION = """\
-@misc{barham2023megawika,
-    title={MegaWika: Millions of reports and their sources across 50 diverse languages},
-    author={Samuel Barham and Orion Weller and Michelle Yuan and Kenton Murray and Mahsa Yarmohammadi and Zhengping Jiang and Siddharth Vashishtha and Alexander Martin and Anqi Liu and Aaron Steven White and Jordan Boyd-Graber and Benjamin Van Durme},
-    year={2023},
-    eprint={2307.07049},
-    archivePrefix={arXiv},
-    primaryClass={cs.CL}
-}
-"""
-
-_DESCRIPTION = """\
-MegaWika is a multi- and crosslingual text dataset containing 30 million
-Wikipedia passages with their scraped and cleaned web citations. The
-passages span 50 Wikipedias in 50 languages, and the articles in which
-the passages were originally embedded are included for convenience. Where
-a Wikipedia passage is in a non-English language, an automated English
-translation is provided.
-"""
-
-_URL = "https://huggingface.co/datasets/hltcoe/megawika"
-
-
-class MegaWikaReportGenerationConfig(datasets.BuilderConfig):
-    """BuilderConfig for MegaWikaReportGeneration."""
-
-    def __init__(self, language: str = "en", monolingual: bool = True, iterative: bool = False, **kwargs):
-        """BuilderConfig for MegaWikaReportGeneration."""
-        super(MegaWikaReportGenerationConfig, self).__init__(**kwargs)
-        self.language = language
-        self.monolingual = monolingual
-        self.iterative = iterative
-
-
-class MegaWikaReportGeneration(datasets.GeneratorBasedBuilder):
-    """The MegaWikaReportGeneration benchmark."""
-
-    BUILDER_CONFIGS = [
-        MegaWikaReportGenerationConfig(
-            name="monolingual-section",
-            monolingual=True,
-            iterative=False,
-        ),
-        MegaWikaReportGenerationConfig(
-            name="crosslingual-section",
-            monolingual=False,
-            iterative=False,
-        ),
-        MegaWikaReportGenerationConfig(
-            name="monolingual-iterative",
-            monolingual=True,
-            iterative=True,
-        ),
-        MegaWikaReportGenerationConfig(
-            name="crosslingual-iterative",
-            monolingual=False,
-            iterative=True,
-        ),
-    ]
-
-    def _info(self):
-        features = {}
-        features["id"] = datasets.Value("string")
-        features["num_docs"] = datasets.Value("int32")
-        features["title"] = datasets.Value("string")
-        features["intro"] = datasets.Value("string")
-        features["section_name"] = datasets.Value("string")
-        features["gold_section_text"] = datasets.Value("string")
-        features["citations"] = datasets.features.Sequence(datasets.Value("string"))
-        features["previous_text"] = datasets.Value("string")
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(features),
-            homepage=_URL,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        # Bug: neither datasets.BuilderConfig nor the config class above
-        # defines a `url` attribute, so this line raises AttributeError;
-        # this commit removes the script in favor of the raw data files.
-        dl_dir = dl_manager.download_and_extract(self.config.url) or ""
-        dl_dir = os.path.join(
-            dl_dir,
-            "mono" if self.config.monolingual else "cl",
-            "iterative" if self.config.iterative else "section",
-            self.config.language,
-        )
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "data_file": os.path.join(dl_dir, "train.jsonl"),
-                    "split": datasets.Split.TRAIN,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "data_file": os.path.join(dl_dir, "dev.jsonl"),
-                    "split": datasets.Split.VALIDATION,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "data_file": os.path.join(dl_dir, "test.jsonl"),
-                    "split": datasets.Split.TEST,
-                },
-            ),
-        ]
-
-    def _generate_examples(self, data_file, split):
-        with open(data_file, encoding="utf-8") as f:
-            for idx, line in enumerate(f):
-                row = json.loads(line)
-                # Some rows lack `previous_text`; default it to empty.
-                if "previous_text" not in row:
-                    row["previous_text"] = ""
-                yield idx, row
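
For reference, the features declared in _info above imply a simple JSONL row shape. A hypothetical example row (field values are illustrative placeholders, not actual MegaWika content):

# Hypothetical row matching the schema from _info; values are placeholders.
row = {
    "id": "example-0001",
    "num_docs": 3,
    "title": "Example article title",
    "intro": "Introductory paragraph of the article ...",
    "section_name": "History",
    "gold_section_text": "Reference text of the target section ...",
    "citations": ["text of cited source 1 ...", "text of cited source 2 ..."],
    # Defaulted to "" by _generate_examples when absent from the raw line.
    "previous_text": "",
}
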
mono/iterative/en/test.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:29a7bc1e5dd4268093b3de22909f0f261056e14b37a9bb19844aebbf85e224b3
-size 1222489800
+oid sha256:43f8e469190e6ce0d226915ecc2e3ffb7d1a0e61cf52ead4f7e41f539f2d8470
+size 873230444
mono/iterative/en/train.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d569064c22c35d8da8d9515edecba544cc9be2251909cc093f90485a2b59017
-size 2160289682
+oid sha256:713cd7f93f24feec71bddc1d7966b7f0c6fe1185416dc042caad6361b749f46c
+size 126108510
mono/{section/en/dev.jsonl → iterative/en/validation.jsonl} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:70f91ea5a482c9d940912d75349f47ef2c105b0e45255920a93aeabc32847653
-size 956995299
+oid sha256:2ccb8cac72d596ff1665f1517072e9075e4d99214209dd2808a887a62c03f121
+size 824844123
mono/section/en/test.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2b3e365c65f6e4b31abf900c1438163dd496afc976b274d025a5905d1686103a
-size 1012541033
+oid sha256:4921c111088c99738fcd752a01c047f15954841e69175732a00e9ce8f250a3c5
+size 1013483345
mono/section/en/train.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:254d18dc975a8d5f7b3d52abf7229166ddad961286331504dd5862c3a53a100a
-size 3289623081
+oid sha256:93f4660e0ce7e76b50479dcda11e69301ed4bfe4298311c1ed2aaddf49a63f49
+size 237183920
mono/{iterative/en/dev.jsonl → section/en/validation.jsonl} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f6d47397b49fbe4465fd1aa553e31a73b9a7f7417f5f0c736973760313ebac2
-size 1155159661
+oid sha256:8c17f2c634d58233f96b493923f976b3c586c68e651ef441219e076d730ad86a
+size 957890697
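
The .jsonl entries above are Git LFS pointer files: the repository stores only a version/oid/size stub, and the actual data blob is fetched by its SHA-256. A minimal sketch of checking a downloaded file against the pointer recorded in this commit (the repo id and file path are assumptions based on _URL and the paths in this diff):

import hashlib

from huggingface_hub import hf_hub_download

# Assumption: repo id matches _URL in the removed script; adjust if this
# commit lives under a different repo name.
path = hf_hub_download(
    repo_id="hltcoe/megawika",
    filename="mono/iterative/en/test.jsonl",
    repo_type="dataset",
)

# Hash the resolved file and compare with the pointer's oid from this commit.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

expected = "43f8e469190e6ce0d226915ecc2e3ffb7d1a0e61cf52ead4f7e41f539f2d8470"
print(h.hexdigest() == expected)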