rdpahalavan committed
Commit c14262c
1 Parent(s): b05ab6e

Update CIC-IDS2017.py

Files changed (1)
  1. CIC-IDS2017.py +29 -85
CIC-IDS2017.py CHANGED
@@ -1,116 +1,60 @@
 import os
-import datasets
 import pandas as pd
-
+import datasets
 
 _CITATION = """\
-@article{maharajan2020attack,
-title={Attack classification and intrusion detection in IoT network using machine learning techniques},
-author={Maharajan, R and Raja, KS},
-journal={Computers \& Electrical Engineering},
-volume={87},
-pages={106783},
-year={2020},
-publisher={Elsevier}
-}"""
+@InProceedings{YourReferenceHere,
+}
+"""
 
 _DESCRIPTION = """\
-The CIC-IDS2017 dataset is an intrusion detection dataset that consists of network traffic data. \
-It contains different network attacks and normal traffic. This dataset can be used for evaluating \
-intrusion detection systems in IoT networks.
+Description of your dataset.
 """
 
-_HOMEPAGE = "https://www.unb.ca/cic/datasets/ids-2017.html"
-
-_LICENSE = "Unknown"
+_HOMEPAGE = "https://dataset-homepage/"
+_LICENSE = "Dataset License"
 
-_FOLDERS = {
-    "Network-Flows": "Network-Flows",
-    "Packet-Fields": "Packet-Fields",
-    "Packet-Bytes": "Packet-Bytes",
-    "Payload-Bytes": "Payload-Bytes",
+_URLS = {
+    "Network-Flows": "Network-Flows/CICIDS_Flow.csv",
+    "Packet-Fields": "Packet-Fields/Packet_Fields_File_10.csv",
+    "Packet-Bytes": "Packet-Bytes/Packet_Bytes_File_10.csv",
+    "Payload-Bytes": "Payload-Bytes/Payload_Bytes_File_10.csv",
 }
 
 
-class CICIDS2017(datasets.GeneratorBasedBuilder):
+class YourDataset(datasets.GeneratorBasedBuilder):
+
     VERSION = datasets.Version("1.0.0")
 
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="Network-Flows", version=VERSION, description="Folder 1 of CIC-IDS2017 dataset"),
-        datasets.BuilderConfig(name="Packet-Fields", version=VERSION, description="Folder 2 of CIC-IDS2017 dataset"),
-        datasets.BuilderConfig(name="Packet-Bytes", version=VERSION, description="Folder 3 of CIC-IDS2017 dataset"),
-        datasets.BuilderConfig(name="Payload-Bytes", version=VERSION, description="Folder 4 of CIC-IDS2017 dataset"),
+        datasets.BuilderConfig(name=config_name, version=VERSION, description=f"This part of my dataset covers {config_name}")
+        for config_name in _URLS.keys()
     ]
 
-    DEFAULT_CONFIG_NAME = "Network-Flows"
-
     def _info(self):
-        if self.config.name == "Network-Flows":
-            features = datasets.Features(
-                {
-                    "flow_id": datasets.Value("int64"),
-                    "source_ip": datasets.Value("string"),
-                    # Add more features specific to folder_1 configuration
-                }
-            )
-        elif self.config.name == "Packet-Fields":
-            features = datasets.Features(
-                {
-                    "flow_id": datasets.Value("int64"),
-                    "packet_id": datasets.Value("int64"),
-                    # Add more features specific to folder_2 configuration
-                }
-            )
-        elif self.config.name == "Packet-Bytes":
-            features = datasets.Features(
-                {
-                    "flow_id": datasets.Value("int64"),
-                    "packet_id": datasets.Value("int64"),
-                    # Add more features specific to folder_3 configuration
-                }
-            )
-        else:  # folder_4
-            features = datasets.Features(
-                {
-                    "flow_id": datasets.Value("int64"),
-                    "packet_id": datasets.Value("int64"),
-                    # Add more features specific to folder_4 configuration
-                }
-            )
-
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
-            features=features,
+            features=datasets.Features({
+                # TODO: Adjust features according to your dataset
+                "flow_id": datasets.Value("int64"),
+                "attack_label": datasets.Value("string"),
+                # More columns...
+            }),
             homepage=_HOMEPAGE,
             license=_LICENSE,
             citation=_CITATION,
         )
 
     def _split_generators(self, dl_manager):
-        folder_path = _FOLDERS[self.config.name]
-        data_dir = dl_manager.download(folder_path)
-        csv_files = [
-            filename for filename in os.listdir(data_dir) if filename.endswith(".csv")
-        ]
-
+        url = os.path.join(self.config.data_dir, _URLS[self.config.name])
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"data_dir": data_dir, "csv_files": csv_files},
-            )
+                gen_kwargs={"filepath": url},
+            ),
         ]
 
-    def _generate_examples(self, data_dir, csv_files):
-        for csv_file in csv_files:
-            file_path = os.path.join(data_dir, csv_file)
-            df = pd.read_csv(file_path)
-            for idx, row in df.iterrows():
-                example = {
-                    "source_ip": row["source_ip"],
-                    "destination_ip": row["destination_ip"],
-                    "timestamp": row["timestamp"],
-                    "protocol": row["protocol"],
-                    "flow_duration": row["flow_duration"],
-                    # Add more feature values according to the dataset columns
-                }
-                yield idx, example
+    def _generate_examples(self, filepath):
+        df = pd.read_csv(filepath)
+        for id_, row in df.iterrows():
+            yield id_, row.to_dict()
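
A minimal usage sketch, not part of the commit: the rewritten _split_generators resolves one CSV per configuration against self.config.data_dir, so the script would typically be called with a configuration name from _URLS and a local data_dir. The directory path below is a placeholder assumption.

from datasets import load_dataset

# "Network-Flows" is one of the four configuration names defined in _URLS;
# data_dir must point to a local copy containing the matching sub-folders
# (Network-Flows/, Packet-Fields/, Packet-Bytes/, Payload-Bytes/).
ds = load_dataset("CIC-IDS2017.py", name="Network-Flows", data_dir="path/to/CIC-IDS2017")

# _generate_examples yields each CSV row as a plain dict, so rows can be inspected directly.
print(ds["train"][0])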