Datasets: GEM /
Modalities: Text
Languages: English
Libraries: Datasets
License:
ratishsp committed on
Commit 5efe2da
1 Parent(s): 315c458

data loader script

Files changed (1)
  1. mlb_data_to_text.py +146 -0
mlb_data_to_text.py ADDED
@@ -0,0 +1,146 @@
+
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Loading script for the MLB data-to-text dataset: MLB game statistics paired with human-written game summaries."""
+
+
+ import csv
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @inproceedings{puduppully-etal-2019-data,
+     title = "Data-to-text Generation with Entity Modeling",
+     author = "Puduppully, Ratish and
+       Dong, Li and
+       Lapata, Mirella",
+     booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
+     month = jul,
+     year = "2019",
+     address = "Florence, Italy",
+     publisher = "Association for Computational Linguistics",
+     url = "https://www.aclweb.org/anthology/P19-1195",
+     doi = "10.18653/v1/P19-1195",
+     pages = "2023--2035",
+ }
+ """
+
+ _DESCRIPTION = """\
+ The MLB dataset for data to text generation contains Major League Baseball games statistics and
+ their human-written summaries.
+ """
+
+ _HOMEPAGE = "https://github.com/ratishsp/mlb-data-scripts"
+
+ _LICENSE = ""
+
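+ # The three split files are expected to sit next to this script in the dataset repository;
+ # relative paths like these are resolved by the download manager against the repository root
+ # (an assumption based on standard behaviour of Hub-hosted loading scripts).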
+ _URLs = {
+     "train": "train.jsonl",
+     "validation": "validation.jsonl",
+     "test": "test.jsonl"
+ }
+
+
+ class MlbDataToText(datasets.GeneratorBasedBuilder):
+     """MLB dataset for data to text generation"""
+
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
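+         # Note: the nested records (box_score, play_by_play, vis_line, home_line) and the
+         # summary list are declared with plain Python types below rather than explicit
+         # `datasets` feature types, so their inner structure is left untyped here.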
+         features = datasets.Features(
+             {
+                 "home_name": datasets.Value("string"),
+                 "box_score": dict,
+                 "home_city": datasets.Value("string"),
+                 "vis_name": datasets.Value("string"),
+                 "play_by_play": dict,
+                 "vis_line": dict,
+                 "vis_city": datasets.Value("string"),
+                 "day": datasets.Value("string"),
+                 "home_line": dict,
+                 "summary": list,
+                 "gem_id": datasets.Value("string")
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # This method downloads/extracts the data and defines the dataset splits.
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
+         # It accepts any nested list/dict and returns the same structure with each URL replaced by the path to a local file.
+         # By default, archives are extracted and the path to the cached extraction folder is returned instead of the archive.
+         data_dir = dl_manager.download_and_extract(_URLs)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir["test"],
+                     "split": "test"
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir["validation"],
+                     "split": "validation",
+                 },
+             ),
+         ]
+
+     def _generate_examples(
+         self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     ):
+         """Yields examples as (key, example) tuples."""
+         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is here for legacy reasons (tfds) and is not important in itself.
+
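+         # Each line of the JSONL file holds one game record; the running line index serves as the example key.
+         # The `split` argument is accepted to match `gen_kwargs` but is not used in the body.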
+         with open(filepath, encoding="utf-8") as f:
+             for id_, row in enumerate(f):
+                 data = json.loads(row)
+                 yield id_, {
+                     "home_name": data["home_name"],
+                     "box_score": data["box_score"],
+                     "home_city": data["home_city"],
+                     "vis_name": data["vis_name"],
+                     "play_by_play": data["play_by_play"],
+                     "vis_line": data["vis_line"],
+                     "vis_city": data["vis_city"],
+                     "day": data["day"],
+                     "home_line": data["home_line"],
+                     "summary": data["summary"],
+                     "gem_id": data["gem_id"]
+                 }