"""
Script creates mlb data for gem benchmark
"""
import argparse
import os
import json
import logging
logging.basicConfig(level=logging.INFO)


def sort_files_key(x):
    """Sort key so train files are processed in numeric order.

    Assumes train file names carry a numeric index right after the "train"
    prefix (e.g. "train1.json", "train12.json"); valid/test files sort first.
    """
    if "train" in x:
        file_index = int(x[5:7].strip("."))  # characters right after the "train" prefix
    else:
        file_index = -1  # valid and test files come first
    return file_index


def filter_summaries(summary_entry, seen_output, test_seen_output):
    """Return True if a training summary should be dropped.

    Drops summaries shorter than 100 tokens, summaries of 100-300 tokens that
    mention rain or snow (likely rained-out games), and remaining summaries
    whose first 50 tokens were already seen in this split or in the valid/test
    splits, so that only one instance of each summary is retained.
    """
    match_words = {"rain", "rains", "rained", "snow"}
    should_filter = False
    if len(summary_entry["summary"]) < 100:
        should_filter = True
    elif 100 < len(summary_entry["summary"]) < 300:
        if len(match_words.intersection(set(summary_entry["summary"]))) > 0:
            should_filter = True
    elif "_".join(summary_entry["summary"][:50]) in seen_output:  # duplicate within this split
        should_filter = True
    elif "_".join(summary_entry["summary"][:50]) in test_seen_output:  # overlaps with valid/test
        should_filter = True
    return should_filter


def replace_Carmona(obj):
    """Rename "Roberto Hernandez" to "Fausto Carmona" in every string value.

    The same pitcher appears under both names in the raw data; when the
    summary refers to him as Fausto Carmona, the record values are renamed
    to match.
    """
    def decode_dict(a_dict):
        for key, value in a_dict.items():
            if value == "Roberto Hernandez":
                a_dict[key] = "Fausto Carmona"
        return a_dict
    # Round-trip through JSON so the hook is applied to every nested dict.
    return json.loads(json.dumps(obj), object_hook=decode_dict)


def process(input_folder, dataset_type, output_folder):
    """Convert one split ("train", "valid" or "test") of the raw MLB JSON
    files into a GEM-format JSON Lines file."""
    output_name_map = {"train": "train", "valid": "validation", "test": "test"}
    output_path = os.path.join(output_folder, output_name_map[dataset_type] + ".jsonl")
    file_list = os.listdir(input_folder)
    sorted_file_list = sorted(file_list, key=sort_files_key)
    seen_output = set()
    test_seen_output = set()
    if dataset_type == "train":
        # Collect the first 50 tokens of every valid/test summary so that
        # overlapping training instances can be filtered out.
        for filename in sorted_file_list:
            if "valid" in filename or "test" in filename:
                print("valid/test filename", filename)
                with open(os.path.join(input_folder, filename), mode="r", encoding="utf-8") as json_file:
                    data = json.load(json_file)
                for entry in data:
                    test_seen_output.add("_".join(entry["summary"][:50]))

    index = 1
    with open(output_path, mode="w", encoding="utf-8") as output_jsonl:
        for filename in sorted_file_list:
            if dataset_type not in filename:
                continue
            print("filename", filename)
            with open(os.path.join(input_folder, filename), mode="r", encoding="utf-8") as json_file:
                data = json.load(json_file)
            for entry_index, entry in enumerate(data):
                logging.debug("instance %s", entry_index)
                if dataset_type == "train" and filter_summaries(entry, seen_output, test_seen_output):
                    continue
                seen_output.add("_".join(entry["summary"][:50]))

                summary = " ".join(entry["summary"])
                if "Fausto Carmona" in summary:
                    entry = replace_Carmona(entry)
                gem_id = "GEM-mlb_data_to_text-" + output_name_map[dataset_type] + "-" + str(index)
                index += 1

                updated_entry = {}
                updated_entry["home_name"] = entry["home_name"]
                updated_entry["vis_name"] = entry["vis_name"]
                updated_entry["home_city"] = entry["home_city"]
                updated_entry["vis_city"] = entry["vis_city"]
                updated_entry["summary"] = entry["summary"]
                updated_entry["summary_eval"] = " ".join(entry["summary"]).replace("*NEWPARAGRAPH* ", "")
                updated_entry["day"] = entry["day"]
                updated_entry["gem_id"] = gem_id
                updated_entry["box_score"] = []
                box_score_keys = entry["box_score"].keys()
                construct_box_score(box_score_keys, entry["box_score"], updated_entry["box_score"])
                # Every box-score column must yield one value per player row.
                assert len(updated_entry["box_score"]) == len(
                    entry["box_score"][list(box_score_keys)[-1]])
                updated_entry["play_by_play"] = []
                construct_play_by_play(entry["play_by_play"], updated_entry["play_by_play"])
                updated_entry["vis_line"] = {}
                updated_entry["home_line"] = {}
                for attrib in ["team_runs", "result", "team_hits", "team_name", "team_errors", "team_city"]:
                    updated_entry["vis_line"][attrib] = entry["vis_line"][attrib]
                    updated_entry["home_line"][attrib] = entry["home_line"][attrib]
                updated_entry["vis_line"]["innings"] = []
                construct_inning_scores(entry["vis_line"]["innings"], updated_entry["vis_line"]["innings"])
                updated_entry["home_line"]["innings"] = []
                construct_inning_scores(entry["home_line"]["innings"], updated_entry["home_line"]["innings"])
                json.dump(updated_entry, output_jsonl, ensure_ascii=False)
                output_jsonl.write('\n')
                if entry_index % 50 == 0:
                    print("entry_index", entry_index)


def construct_box_score(box_score_keys, box_score, box_score_list):
    """Convert the column-oriented box score (stat -> {player index -> value})
    into a list with one dict of stats per player."""
    player_index = 0
    while True:
        box_score_object = {}
        for _box_score_key in box_score_keys:
            if str(player_index) not in box_score[_box_score_key]:
                return  # no more players
            box_score_object[_box_score_key] = box_score[_box_score_key][str(player_index)]
        box_score_list.append(box_score_object)
        player_index += 1
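
# Illustrative example of the conversion done by construct_box_score
# (the stat keys and values here are hypothetical):
#   box_score = {"h": {"0": "2", "1": "0"}, "r": {"0": "1", "1": "0"}}
#   -> box_score_list == [{"h": "2", "r": "1"}, {"h": "0", "r": "0"}]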


def construct_play_by_play(play_by_play, play_by_play_list):
    """Convert the inning-keyed play-by-play dict into a list of innings,
    each with "top"/"bottom" half-inning play lists and an "inning" number."""
    inning_index = 1
    while True:
        if str(inning_index) not in play_by_play:
            return  # no more innings
        play_by_play_object = {}
        for side in ["top", "bottom"]:
            if side in play_by_play[str(inning_index)]:
                play_by_play_object[side] = []
                for play in play_by_play[str(inning_index)][side]:
                    play_object = construct_play_object(play)
                    play_by_play_object[side].append(play_object)

        play_by_play_object["inning"] = inning_index
        play_by_play_list.append(play_by_play_object)
        inning_index += 1


def construct_play_object(play):
    """Copy a play record, filling any missing attribute with "N/A"
    (or ["N/A"] for the list-valued attributes)."""
    play_object = {}
    for attrib in ["runs", "pitcher", "o", "b", "s", "batter", "event", "event2", "home_team_runs", "away_team_runs",
                   "rbi", "error_runs", "fielder_error"]:
        play_object[attrib] = "N/A"
        if attrib in play:
            play_object[attrib] = play[attrib]
    for attrib in ["scorers", "b1", "b2", "b3"]:
        play_object[attrib] = ["N/A"]
        if attrib in play:
            play_object[attrib] = play[attrib]
    return play_object


def construct_inning_scores(innings, innings_list):
    """Convert the "inn1", "inn2", ... keyed line score into a list of
    {"inn": inning number, "runs": runs scored} dicts."""
    inning_index = 1
    while True:
        if "inn" + str(inning_index) not in innings:
            return  # no more innings
        innings_list.append({"inn": inning_index, "runs": innings["inn" + str(inning_index)]})
        inning_index += 1
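
# Illustrative example of the conversion done by construct_inning_scores
# (run values are made up):
#   innings = {"inn1": 0, "inn2": 2, "inn3": 1}
#   -> innings_list == [{"inn": 1, "runs": 0}, {"inn": 2, "runs": 2}, {"inn": 3, "runs": 1}]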


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Script for constructing the MLB dataset in GEM format')
    parser.add_argument('-json_root', type=str,
                        help='path to the folder with the raw MLB JSON files; download from '
                             'https://drive.google.com/drive/folders/1G4iIE-02icAU2-5skvLlTEPWDQQj1ss4', default=None)
    parser.add_argument('-output_folder', type=str,
                        help='path to the output folder', default=None)
    parser.add_argument('-dataset_type', type=str,
                        help='dataset split to build: train, valid or test', default=None)
    args = parser.parse_args()
    process(args.json_root, args.dataset_type, args.output_folder)