Upload build_dataset.py
build_dataset.py (CHANGED, +10 -9)
@@ -59,7 +59,7 @@ class EMBERConfig(datasets.GeneratorBasedBuilder):
                     "x": datasets.features.Sequence(
                         datasets.Value("float32")
                     ),
-                    "y": datasets.Value("
+                    "y": datasets.Value("string"),
                     "appeared": datasets.Value("string"),
                     "avclass": datasets.Value("string"),
                     "subset": datasets.Value("string"),
@@ -74,7 +74,7 @@ class EMBERConfig(datasets.GeneratorBasedBuilder):
                     "x": datasets.features.Sequence(
                         datasets.Value("float32")
                     ),
-                    "y": datasets.Value("
+                    "y": datasets.Value("string"),
                     "appeared": datasets.Value("string"),
                     "avclass": datasets.Value("string"),
                     "subset": datasets.Value("string"),
@@ -120,20 +120,21 @@ class EMBERConfig(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, filepaths, split):
         key = 0
         for id, filepath in enumerate(filepaths[split]):
+            key += 1
             with open(filepath[id], encoding="utf-8") as f:
                 data_list = json.load(f)
                 for data in data_list:
-                    key += 1
                     if self.config.name == "text_classification":
+                        data.remove
                         yield key, {
                             "input": data["input"],
                             "label": data["label"],
-                            "x": data["x"],
-                            "y": data["y"],
-                            "appeared": data["appeared"],
-                            "avclass": data["avclass"],
-                            "subset": data["subset"],
-                            "sha256": data["sha256"]
+                            # "x": data["x"],
+                            # "y": data["y"],
+                            # "appeared": data["appeared"],
+                            # "avclass": data["avclass"],
+                            # "subset": data["subset"],
+                            # "sha256": data["sha256"]
                         }
                     else:
                         yield key, {