dreamerdeo committed
Commit 24a463d · 1 parent: 5e1d36f
Files changed (1)
finqa.py +82 -0
finqa.py ADDED
@@ -0,0 +1,82 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FinQA is a large-scale dataset for numerical reasoning over financial data: each question is paired with the pre-text and post-text of a financial report page and a semi-structured table."""

import json
import os

import datasets


class FinQA(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                # "filename": datasets.Value("string"),
                "id": datasets.Value("string"),
                "post_text": datasets.features.Sequence(datasets.Value("string")),
                "pre_text": datasets.features.Sequence(datasets.Value("string")),
                "question": datasets.Value("string"),
                "answers": datasets.Value("string"),
                "table": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
            }
        )
        return datasets.DatasetInfo(
            features=features,
        )

    def _split_generators(self, dl_manager):
        train_file = "train.json"
        dev_file = "dev.json"
        test_file = "test.json"

        root_dir = "finqa"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={"main_filepath": os.path.join(root_dir, train_file), "root_dir": root_dir},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # dev.json backs the validation split.
                gen_kwargs={"main_filepath": os.path.join(root_dir, dev_file), "root_dir": root_dir},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"main_filepath": os.path.join(root_dir, test_file), "root_dir": root_dir},
            ),
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, main_filepath, root_dir):
        # The key is for legacy reasons (tfds) and is not important in itself,
        # but must be unique for each example.
        with open(main_filepath, encoding="utf-8") as f:
            # Each split is a single JSON file holding a list of example records.
            examples = json.load(f)
        for idx, example in enumerate(examples):
            yield idx, {
                "id": example["id"],
                "post_text": example["post_text"],
                "pre_text": example["pre_text"],
                "question": example["qa"]["question"],
                "answers": example["qa"]["answer"],
                "table": example["table"],
            }
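
A minimal usage sketch for the script above, assuming it sits next to a local finqa/ directory containing train.json, dev.json, and test.json (the paths hard-coded in _split_generators); depending on your datasets version, script-based loading may also require trust_remote_code=True. The file name usage_example.py is hypothetical.

# usage_example.py -- hypothetical driver script, not part of the commit
from datasets import load_dataset

# Point load_dataset at the loading script; each split is then
# built by FinQA._split_generators / _generate_examples.
dataset = load_dataset("finqa.py")

sample = dataset["train"][0]
print(sample["question"])   # from example['qa']['question']
print(sample["answers"])    # gold answer string, from example['qa']['answer']
print(sample["table"][0])   # first row of the semi-structured table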