Tags: HAR
Commit 1a89f33 (verified), committed by lynn-miller · Parent: 2a46e53

Upload Opportunity.py

Files changed (1):
  1. Opportunity.py +124 -0
Opportunity.py ADDED
@@ -0,0 +1,124 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Monster-Monash custom downloader."""
+
+
+ import numpy as np
+ import os
+ import datasets
+
+
+ _DATASET = "Opportunity"
+ _SHAPE = (113, 100)
+ # _DESCRIPTION = ""
+ # _CITATION = ""
+ # _HOMEPAGE = ""
+ # _LICENSE = ""
+
+ _URLS = {
+     'data': f"{_DATASET}_X.npy",
+     'labels': f"{_DATASET}_y.npy",
+     'fold_0': "test_indices_fold_0.txt",
+     'fold_1': "test_indices_fold_1.txt",
+     'fold_2': "test_indices_fold_2.txt",
+     'fold_3': "test_indices_fold_3.txt",
+     'fold_4': "test_indices_fold_4.txt",
+ }
+
+
+ class Monster(datasets.GeneratorBasedBuilder):
+     """Generic Monster class for all downloaders."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="full", version=VERSION, description="All data"),
+         datasets.BuilderConfig(name="fold_0", version=VERSION, description="Cross-validation fold 0"),
+         datasets.BuilderConfig(name="fold_1", version=VERSION, description="Cross-validation fold 1"),
+         datasets.BuilderConfig(name="fold_2", version=VERSION, description="Cross-validation fold 2"),
+         datasets.BuilderConfig(name="fold_3", version=VERSION, description="Cross-validation fold 3"),
+         datasets.BuilderConfig(name="fold_4", version=VERSION, description="Cross-validation fold 4"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "full"  # By default all data is returned in a single split.
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "X": datasets.Array2D(_SHAPE, "float32"),
+                 "y": datasets.Value("int64")
+             }
+         )
+         return datasets.DatasetInfo(
+             # description=_DESCRIPTION,
+             features=features,
+             supervised_keys=("X", "y"),
+             # homepage=_HOMEPAGE,
+             # license=_LICENSE,
+             # citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data = dl_manager.download_and_extract(_URLS['data'])
+         labels = dl_manager.download_and_extract(_URLS['labels'])
+         if self.config.name == "full":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "data": data,
+                         "labels": labels,
+                         "fold": None,
+                         "split": "all",
+                     },
+                 ),
+             ]
+         else:
+             fold = dl_manager.download_and_extract(_URLS[self.config.name])
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "data": data,
+                         "labels": labels,
+                         "fold": fold,
+                         "split": "train",
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={
+                         "data": data,
+                         "labels": labels,
+                         "fold": fold,
+                         "split": "test"
+                     },
+                 ),
+             ]
+
+     def _generate_examples(self, data, labels, fold, split):
+         X = np.load(data)
+         y = np.load(labels)
+         if self.config.name == "full":
+             for row in range(y.shape[0]):
+                 yield row, {"X": X[row], "y": y[row]}
+         else:
+             test_indices = np.loadtxt(fold, dtype='int')
+             if split == "test":
+                 for row in test_indices:
+                     yield int(row), {"X": X[row], "y": y[row]}
+             elif split == "train":
+                 train_indices = np.delete(np.arange(y.shape[0]), test_indices)
+                 for row in train_indices:
+                     yield int(row), {"X": X[row], "y": y[row]}
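A rough usage sketch of how a script like this is typically consumed via datasets.load_dataset. The repo ID "monster-monash/Opportunity" below is a placeholder for wherever this script is hosted, and recent releases of the datasets library require trust_remote_code=True to run script-based builders:

import datasets

# Placeholder repo ID -- point this at the Hub repository that hosts Opportunity.py.
ds = datasets.load_dataset(
    "monster-monash/Opportunity",
    name="fold_0",            # one of "full", "fold_0", ..., "fold_4"
    trust_remote_code=True,   # needed for script-based datasets in recent `datasets` releases
)

print(ds)                     # fold configs give a DatasetDict with "train" and "test" splits
example = ds["train"][0]
print(example["y"])           # integer class label
print(len(example["X"]), len(example["X"][0]))   # 113 channels x 100 timesteps per window

For a fold config, the test split contains exactly the rows listed in the matching test_indices_fold_*.txt file and the train split contains every remaining row, so the two splits partition the dataset; the default "full" config instead returns all rows as a single train split.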