cw1521 committed
Commit 4d75f4b · 1 Parent(s): 7b039f9

Upload 2 files
Files changed (2):
  1. build_dataset.py  +15 -9
  2. file_list.json    +5 -1
build_dataset.py CHANGED
@@ -51,7 +51,7 @@ _LICENSE = ""
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
-    "first_domain": "https://huggingface.co/datasets/cw1521/ember2018-malware/blob/main/data.zip"
+    "first_domain": "https://huggingface.co/datasets/cw1521/ember2018-malware/blob/main/data/"
 }
 
 
@@ -133,15 +133,19 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = _URLS[self.config.name]
-        data_dir = dl_manager.download_and_extract(urls)
-        print(data_dir)
         file_list = get_file_list()
+        file_urls = {
+            "train": [f"{urls[0]}/{file}" for file in file_list["train"]],
+            "test": [f"{urls[0]}/{file}" for file in file_list["test"]]
+        }
+        data_dir = dl_manager.download_and_extract(file_urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepaths": [os.path.join(data_dir, f"data/{file}") for file in file_list["train"]],
+                    "filepaths": file_list["train"],
+                    "local_datafiles": data_dir["train"],
                     "split": "train",
                 },
             ),
@@ -156,21 +160,23 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
-                # [os.path.join(data_dir, file) for file in file_list["test"]],
                 gen_kwargs={
-                    "filepaths": [os.path.join(data_dir, f"data/{file}") for file in file_list["test"]],
+                    "filepaths": file_list["train"],
+                    "local_datafiles": data_dir["train"],
                     "split": "test"
                 },
             ),
         ]
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepaths, split):
+    def _generate_examples(self, filenames, local_datafiles):
         key = 0
-        for path in filepaths:
+        for id, path in enumerate(filenames):
             # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
             # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-            with open(path, encoding="utf-8") as f:
+
+            local_path = os.path.join(local_datafiles[id], path)
+            with open(local_path, encoding="utf-8") as f:
                 data_list = json.load(f)
                 for data in data_list["data"]:
                     key += 1
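Review note: three spots in the committed code look suspect. `urls` is bound to a single URL string, so `urls[0]` in the new f-strings evaluates to its first character ("h") rather than the base URL; the TEST generator passes `file_list["train"]` and `data_dir["train"]` instead of the "test" entries; and Hub `blob/` URLs serve an HTML page, whereas `resolve/` serves the raw file. Below is a minimal sketch of what the flow appears to intend, with those spots adjusted. The `resolve/` base URL, the `get_file_list` body, and the final `yield` are assumptions, not part of this commit.

import json

import datasets

# Assumption: resolve/ (not blob/) serves raw files from the Hub.
_BASE_URL = "https://huggingface.co/datasets/cw1521/ember2018-malware/resolve/main/data"


def get_file_list():
    # Assumed helper body: read the shard manifest shipped next to the script.
    with open("file_list.json", encoding="utf-8") as f:
        return json.load(f)


class NewDataset(datasets.GeneratorBasedBuilder):
    # _info, BUILDER_CONFIGS, etc. omitted; the commit leaves them unchanged.

    def _split_generators(self, dl_manager):
        file_list = get_file_list()
        # One URL per shard; the commit's f"{urls[0]}/{file}" would produce "h/<file>".
        file_urls = {
            "train": [f"{_BASE_URL}/{name}" for name in file_list["train"]],
            "test": [f"{_BASE_URL}/{name}" for name in file_list["test"]],
        }
        # download_and_extract accepts a nested dict/list of URLs and returns the
        # same structure with each URL replaced by a local cached path.
        local_files = dl_manager.download_and_extract(file_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"local_datafiles": local_files["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # The commit reuses the "train" lists here, likely a copy/paste slip.
                gen_kwargs={"local_datafiles": local_files["test"], "split": "test"},
            ),
        ]

    def _generate_examples(self, local_datafiles, split):
        # Each entry is already the full path to a cached file, so open it directly;
        # the commit's os.path.join(local_datafiles[id], path) would append the
        # remote filename to an existing file path.
        key = 0
        for local_path in local_datafiles:
            with open(local_path, encoding="utf-8") as f:
                data_list = json.load(f)
            for data in data_list["data"]:
                key += 1
                yield key, data  # assumed payload; the hunk ends before any yield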
file_list.json CHANGED
@@ -1 +1,5 @@
-{"train": ["ember2018_train_1.jsonl", "ember2018_train_2.jsonl", "ember2018_train_3.jsonl", "ember2018_train_4.jsonl", "ember2018_train_5.jsonl", "ember2018_train_6.jsonl", "ember2018_train_7.jsonl", "ember2018_train_8.jsonl", "ember2018_train_9.jsonl", "ember2018_train_10.jsonl", "ember2018_train_11.jsonl", "ember2018_train_12.jsonl", "ember2018_train_13.jsonl", "ember2018_train_14.jsonl", "ember2018_train_15.jsonl", "ember2018_train_16.jsonl", "ember2018_train_17.jsonl", "ember2018_train_18.jsonl", "ember2018_train_19.jsonl", "ember2018_train_20.jsonl", "ember2018_train_21.jsonl", "ember2018_train_22.jsonl", "ember2018_train_23.jsonl", "ember2018_train_24.jsonl", "ember2018_train_25.jsonl", "ember2018_train_26.jsonl", "ember2018_train_27.jsonl", "ember2018_train_28.jsonl", "ember2018_train_29.jsonl", "ember2018_train_30.jsonl", "ember2018_train_31.jsonl", "ember2018_train_32.jsonl", "ember2018_train_33.jsonl", "ember2018_train_34.jsonl", "ember2018_train_35.jsonl", "ember2018_train_36.jsonl", "ember2018_train_37.jsonl", "ember2018_train_38.jsonl", "ember2018_train_39.jsonl", "ember2018_train_40.jsonl", "ember2018_train_41.jsonl", "ember2018_train_42.jsonl", "ember2018_train_43.jsonl", "ember2018_train_44.jsonl", "ember2018_train_45.jsonl", "ember2018_train_46.jsonl", "ember2018_train_47.jsonl", "ember2018_train_48.jsonl", "ember2018_train_49.jsonl", "ember2018_train_50.jsonl", "ember2018_train_51.jsonl", "ember2018_train_52.jsonl", "ember2018_train_53.jsonl", "ember2018_train_54.jsonl", "ember2018_train_55.jsonl", "ember2018_train_56.jsonl", "ember2018_train_57.jsonl", "ember2018_train_58.jsonl", "ember2018_train_59.jsonl", "ember2018_train_60.jsonl", "ember2018_train_61.jsonl", "ember2018_train_62.jsonl", "ember2018_train_63.jsonl", "ember2018_train_64.jsonl", "ember2018_train_65.jsonl", "ember2018_train_66.jsonl", "ember2018_train_67.jsonl", "ember2018_train_68.jsonl", "ember2018_train_69.jsonl", "ember2018_train_70.jsonl", "ember2018_train_71.jsonl", "ember2018_train_72.jsonl", "ember2018_train_73.jsonl", "ember2018_train_74.jsonl", "ember2018_train_75.jsonl", "ember2018_train_76.jsonl", "ember2018_train_77.jsonl", "ember2018_train_78.jsonl", "ember2018_train_79.jsonl", "ember2018_train_80.jsonl"], "test": ["ember2018_test_1.jsonl", "ember2018_test_2.jsonl", "ember2018_test_3.jsonl", "ember2018_test_4.jsonl", "ember2018_test_5.jsonl", "ember2018_test_6.jsonl", "ember2018_test_7.jsonl", "ember2018_test_8.jsonl", "ember2018_test_9.jsonl", "ember2018_test_10.jsonl", "ember2018_test_11.jsonl", "ember2018_test_12.jsonl", "ember2018_test_13.jsonl", "ember2018_test_14.jsonl", "ember2018_test_15.jsonl", "ember2018_test_16.jsonl", "ember2018_test_17.jsonl", "ember2018_test_18.jsonl", "ember2018_test_19.jsonl", "ember2018_test_20.jsonl"]}
+{
+    "train": ["ember2018_train_1.jsonl"],
+
+    "test": ["ember2018_test_1.jsonl"]
+}
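The manifest drops from 80 train shards and 20 test shards to one of each, which reads like a temporary smoke-test configuration for the new per-file download path. A hedged usage sketch for checking the loader end to end follows; "first_domain" mirrors the `_URLS` key, and since the diff does not show `BUILDER_CONFIGS`, that config name is an assumption.

from datasets import load_dataset

# Hypothetical invocation: config name "first_domain" mirrors the _URLS key.
ds = load_dataset("cw1521/ember2018-malware", "first_domain")
print(ds["train"][0])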