lara-martin committed on
Commit 4f16171 · verified · 1 Parent(s): 33cfccf

Create FIREBALL.py

Files changed (1)
  1. FIREBALL.py +236 -0
FIREBALL.py ADDED
@@ -0,0 +1,236 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """FIREBALL: A Dataset of Dungeons and Dragons Actual-Play with Structured Game State Information."""
+
+
+ import os
+
+ import jsonlines
+
+ import datasets
+
+
+ _CITATION = """\
+ @inproceedings{Zhu2023FIREBALL,
+     title={{FIREBALL: A Dataset of Dungeons and Dragons Actual-Play with Structured Game State Information}},
+     author={Zhu, Andrew and Aggarwal, Karmanya and Feng, Alexander and Martin, Lara J. and Callison-Burch, Chris},
+     year={2023},
+     booktitle={Annual Meeting of the Association for Computational Linguistics (ACL)},
+     month={7},
+     url={https://aclanthology.org/2023.acl-long.229/},
+     address={Toronto, Canada},
+     pages={4171--4193},
+     publisher={ACL},
+     doi={10.18653/v1/2023.acl-long.229}
+ }
+ """
+
+
+ _DESCRIPTION = """\
+ FIREBALL Dungeons & Dragons data with narrative and Avrae scripting commands.
+ """
+
+ _HOMEPAGE = "https://github.com/zhudotexe/FIREBALL"
+
+ _LICENSE = "cc-by-4.0"
+
+
+ # The HuggingFace Datasets library doesn't host the data itself; this URL points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see the `_split_generators` method below).
+ _URLS = {
+     "FIREBALL": "https://huggingface.co/datasets/lara-martin/FIREBALL/tree/main/filtered",
+ }
+
+
+ class FIREBALL(datasets.GeneratorBasedBuilder):
+     """FIREBALL: Dungeons & Dragons actual-play narratives paired with Avrae commands and structured game state."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     # A dataset may define several configurations; FIREBALL only needs one, which
+     # can be loaded with:
+     # data = datasets.load_dataset("lara-martin/FIREBALL", "FIREBALL")
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="FIREBALL", version=VERSION, description="FIREBALL data with narrative and Avrae scripting commands"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "FIREBALL"  # A default configuration is not mandatory, but it makes sense with a single config.
+     def _info(self):
+         # All actor entries (combatants, the current actor, the caster, and targets)
+         # share one schema; dict(actor) hands each feature its own copy.
+         actor = {
+             "name": datasets.Value("string"),
+             "hp": datasets.Value("string"),
+             "class": datasets.Value("string"),
+             "race": datasets.Value("string"),
+             "attacks": datasets.Value("string"),
+             "spells": datasets.Value("string"),
+             "actions": datasets.Value("string"),
+             "effects": datasets.Value("string"),
+             "description": datasets.Value("string"),
+             "controller_id": datasets.Value("string"),
+         }
+         features = datasets.Features(
+             {
+                 "speaker_id": datasets.Value("int64"),
+                 "before_utterances": datasets.Sequence(datasets.Value("string")),
+                 "combat_state_before": datasets.Sequence(dict(actor)),  # list of actor dictionaries
+                 "current_actor": dict(actor),  # single actor dictionary
+                 "commands_norm": datasets.Value("string"),
+                 "automation_results": datasets.Value("string"),
+                 "caster_after": dict(actor),  # single actor dictionary
+                 "targets_after": datasets.Sequence(dict(actor)),  # list of actor dictionaries
+                 "combat_state_after": datasets.Sequence(dict(actor)),  # list of actor dictionaries
+                 "after_utterances": datasets.Sequence(datasets.Value("string")),
+                 "utterance_history": datasets.Sequence(datasets.Value("string")),
+                 "before_idxs": datasets.Sequence(datasets.Value("int16")),
+                 "before_state_idx": datasets.Value("int16"),
+                 "command_idxs": datasets.Sequence(datasets.Value("int16")),
+                 "after_state_idx": datasets.Value("int16"),
+                 "after_idxs": datasets.Sequence(datasets.Value("int16")),
+                 "embed_idxs": datasets.Sequence(datasets.Value("int16")),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the columns of the dataset and their types.
+             features=features,
+             # Homepage of the dataset for documentation.
+             homepage=_HOMEPAGE,
+             # License for the dataset.
+             license=_LICENSE,
+             # Citation for the dataset.
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # dl_manager is a datasets.download.DownloadManager: it accepts any nested
+         # list/dict of URLs and returns the same structure with each URL replaced
+         # by a path to the local, extracted files.
+         # NOTE: this assumes the URL above resolves to downloadable data files; the
+         # extracted directory is handed to _generate_examples, which walks it for
+         # .jsonl files.
+         urls = _URLS[self.config.name]
+         data_dir = dl_manager.download_and_extract(urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples.
+                 gen_kwargs={
+                     "filepath": data_dir,
+                     "split": "train",
+                 },
+             ),
+         ]
+
+     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+     def _generate_examples(self, filepath, split):
+         # Yields (key, example) tuples. The `key` is kept for legacy (tfds) reasons
+         # and is not important in itself, but it must be unique for each example.
+         key = 0
+         for root, dirs, files in os.walk(filepath):
+             for file in files:
+                 with jsonlines.open(os.path.join(root, file)) as f:
+                     for data in f:
+                         yield key, {
+                             "speaker_id": data["speaker_id"],
+                             "before_utterances": data["before_utterances"],
+                             "combat_state_before": data["combat_state_before"],
+                             "current_actor": data["current_actor"],
+                             "commands_norm": data["commands_norm"],
+                             "automation_results": data["automation_results"],
+                             "caster_after": data["caster_after"],
+                             "targets_after": data["targets_after"],
+                             "combat_state_after": data["combat_state_after"],
+                             "after_utterances": data["after_utterances"],
+                             "utterance_history": data["utterance_history"],
+                             "before_idxs": data["before_idxs"],
+                             "before_state_idx": data["before_state_idx"],
+                             "command_idxs": data["command_idxs"],
+                             "after_state_idx": data["after_state_idx"],
+                             "after_idxs": data["after_idxs"],
+                             "embed_idxs": data["embed_idxs"],
+                         }
+                         key += 1
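
For reference, a minimal usage sketch once this script is in place. It assumes the script loads end to end (i.e., the filtered data files are actually downloadable); the field names follow the feature schema defined above, and recent versions of the datasets library require trust_remote_code=True for script-based datasets:

    from datasets import load_dataset

    # Load the single "FIREBALL" configuration defined by this script.
    fireball = load_dataset("lara-martin/FIREBALL", "FIREBALL", trust_remote_code=True)

    # Each example pairs player utterances with a normalized Avrae command
    # and the structured combat state before and after execution.
    example = fireball["train"][0]
    print(example["commands_norm"])
    print(example["before_utterances"])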