cw1521 committed
Commit d5a13f8 · 1 Parent(s): ec62ee2

Upload build_dataset.py

Files changed (1):
  1. build_dataset.py +14 -58
build_dataset.py CHANGED
```diff
@@ -25,63 +25,36 @@ def get_file_list():
         file_list = json.load(f)
     return file_list
 
-# Find for instance the citation on arxiv or on the dataset repo/website
+
 _CITATION = """\
 @InProceedings{huggingface:dataset,
 title = {Ember2018},
-author={huggingface, Inc.
+author=Christian Williams
 },
 year={2023}
 }
 """
 
-# TODO: Add description of the dataset here
-# You can copy an official description
 _DESCRIPTION = """\
-This new dataset is from the EMBER 2018 dataset
+This dataset is from the EMBER 2018 Malware Analysis dataset
 """
-
-# TODO: Add a link to an official homepage for the dataset here
 _HOMEPAGE = "https://github.com/elastic/ember"
-
-# TODO: Add the licence for the dataset here if you can find it
 _LICENSE = ""
-
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
-    "first_domain": "https://huggingface.co/datasets/cw1521/ember2018-malware/blob/main/data/"
+    "text_classification": "https://huggingface.co/datasets/cw1521/ember2018-malware/blob/main/data/"
 }
 
 
-# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class NewDataset(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
-
+class EMBERConfig(datasets.GeneratorBasedBuilder):
     VERSION = datasets.Version("1.1.0")
-
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
-        datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
+        datasets.BuilderConfig(name="text_classification", version=VERSION, description="This part of my dataset covers a first domain")
     ]
 
-    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+    DEFAULT_CONFIG_NAME = "text_classification"
 
     def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
+        if self.config.name == "text_classification":
             features = datasets.Features(
                 {
                     "x": datasets.features.Sequence(
@@ -95,7 +68,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                     "sha256": datasets.Value("string")
                 }
             )
-        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
+        else:
             features = datasets.Features(
                 {
                     "x": datasets.features.Sequence(
@@ -110,28 +83,14 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                 }
             )
         return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features,  # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
+            features=features,
             homepage=_HOMEPAGE,
-            # License for the dataset if available
             license=_LICENSE,
-            # Citation for the dataset
             citation=_CITATION,
         )
 
     def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = _URLS[self.config.name]
         file_list = get_file_list()
         file_urls = {
@@ -142,7 +101,6 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "filenames": file_list["train"],
                     "local_datafiles": data_dir["train"],
@@ -151,7 +109,6 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             ),
             # datasets.SplitGenerator(
             #     name=datasets.Split.VALIDATION,
-            #     # These kwargs will be passed to _generate_examples
             #     gen_kwargs={
             #         "filepath": [os.path.join(data_dir, f"data/{file}") for file in file_list["dev"]],
             #         "split": "dev",
@@ -159,16 +116,15 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             # ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filenames": file_list["train"],
-                    "local_datafiles": data_dir["train"],
+                    "filenames": file_list["test"],
+                    "local_datafiles": data_dir["test"],
                     "split": "test"
                 },
             ),
         ]
 
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+
     def _generate_examples(self, filenames, local_datafiles):
         key = 0
         for id, path in enumerate(filenames):
@@ -180,7 +136,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                 data_list = json.load(f)
             for data in data_list["data"]:
                 key += 1
-                if self.config.name == "first_domain":
+                if self.config.name == "text_classification":
                     # Yields examples as (key, example) tuples
                     yield key, {
                         "x": data["x"],
```
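For context, `get_file_list()` (whose opening lines fall outside the first hunk) parses a JSON manifest mapping split names to data file names, which `_split_generators` then indexes with `train`, `test`, and `dev`. A minimal sketch of that assumed layout follows; the `files.json` name and the shard names are illustrative guesses, not something this commit shows:

```python
import json

# Hypothetical manifest consumed by get_file_list(). Only the
# "train"/"test"/"dev" keys are attested by the script; the file
# name and shard names below are assumptions for illustration.
MANIFEST = {
    "train": ["train_shard_1.jsonl", "train_shard_2.jsonl"],
    "test": ["test_shard_1.jsonl"],
    "dev": ["dev_shard_1.jsonl"],
}

with open("files.json", "w") as f:
    json.dump(MANIFEST, f)

with open("files.json") as f:  # mirrors the json.load(f) in get_file_list()
    file_list = json.load(f)

print(file_list["train"])
```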
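One caveat in the committed version: both active `SplitGenerator`s pass a `"split"` key in `gen_kwargs`, but `_generate_examples(self, filenames, local_datafiles)` does not accept it, and `GeneratorBasedBuilder` unpacks `gen_kwargs` directly into that method, so generation would raise a `TypeError`. A minimal sketch of the signature fix (an editorial suggestion, not part of the commit):

```python
    # Sketch: accept the "split" kwarg so the gen_kwargs above unpack
    # cleanly; the generation loop itself does not need it.
    def _generate_examples(self, filenames, local_datafiles, split):
        key = 0
        for id, path in enumerate(filenames):
            ...  # unchanged from the committed loop
```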