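# Hugging Face `datasets` loading script for the CIC-IDS2017 intrusion
# detection dataset, exposing four folder-based configurations.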
import os
import datasets
import pandas as pd
_CITATION = """\
@inproceedings{sharafaldin2018toward,
    title={Toward Generating a New Intrusion Detection Dataset and Intrusion Detection Traffic Characterization},
    author={Sharafaldin, Iman and Lashkari, Arash Habibi and Ghorbani, Ali A},
    booktitle={Proceedings of the 4th International Conference on Information Systems Security and Privacy (ICISSP)},
    pages={108--116},
    year={2018}
}"""
_DESCRIPTION = """\
CIC-IDS2017 is an intrusion detection dataset consisting of labelled network traffic. \
It contains benign traffic alongside a range of common attacks and can be used to \
evaluate network intrusion detection systems.
"""
_HOMEPAGE = "https://www.unb.ca/cic/datasets/ids-2017.html"
_LICENSE = "Unknown"
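# Each configuration maps to one folder of the dataset repository (assumed
# layout: one directory of CSV files per traffic representation).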
_FOLDERS = {
    "folder_1": "rdpahalavan/CIC-IDS2017/Network-Flows",
    "folder_2": "rdpahalavan/CIC-IDS2017/Payload-Bytes",
    "folder_3": "rdpahalavan/CIC-IDS2017/Packet-Bytes",
    "folder_4": "rdpahalavan/CIC-IDS2017/Packet-Fields",
}
class CICIDS2017(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="folder_1", version=VERSION, description="Folder 1 of CIC-IDS2017 dataset"),
        datasets.BuilderConfig(name="folder_2", version=VERSION, description="Folder 2 of CIC-IDS2017 dataset"),
        datasets.BuilderConfig(name="folder_3", version=VERSION, description="Folder 3 of CIC-IDS2017 dataset"),
        datasets.BuilderConfig(name="folder_4", version=VERSION, description="Folder 4 of CIC-IDS2017 dataset"),
    ]

    DEFAULT_CONFIG_NAME = "folder_1"
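    # Note: all four configurations currently declare the same placeholder schema
    # below; extend each branch of _info() with the real columns of that folder.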
    def _info(self):
        if self.config.name == "folder_1":
            features = datasets.Features(
                {
                    "source_ip": datasets.Value("string"),
                    "destination_ip": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "protocol": datasets.Value("string"),
                    "flow_duration": datasets.Value("float"),
                    # Add more features specific to folder_1 configuration
                }
            )
        elif self.config.name == "folder_2":
            features = datasets.Features(
                {
                    "source_ip": datasets.Value("string"),
                    "destination_ip": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "protocol": datasets.Value("string"),
                    "flow_duration": datasets.Value("float"),
                    # Add more features specific to folder_2 configuration
                }
            )
        elif self.config.name == "folder_3":
            features = datasets.Features(
                {
                    "source_ip": datasets.Value("string"),
                    "destination_ip": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "protocol": datasets.Value("string"),
                    "flow_duration": datasets.Value("float"),
                    # Add more features specific to folder_3 configuration
                }
            )
        else:  # folder_4
            features = datasets.Features(
                {
                    "source_ip": datasets.Value("string"),
                    "destination_ip": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "protocol": datasets.Value("string"),
                    "flow_duration": datasets.Value("float"),
                    # Add more features specific to folder_4 configuration
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
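    # _split_generators() downloads the folder configured in _FOLDERS and exposes
    # all of its CSV files as a single "train" split. This assumes that
    # dl_manager.download() resolves the folder path to a local directory of CSVs.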
    def _split_generators(self, dl_manager):
        folder_path = _FOLDERS[self.config.name]
        data_dir = dl_manager.download(folder_path)
        csv_files = [
            filename for filename in os.listdir(data_dir) if filename.endswith(".csv")
        ]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir, "csv_files": csv_files},
            )
        ]
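    # _generate_examples() reads each CSV with pandas and yields one example per
    # row; keys are prefixed with the file name so they stay unique across files.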
    def _generate_examples(self, data_dir, csv_files):
        for csv_file in csv_files:
            file_path = os.path.join(data_dir, csv_file)
            df = pd.read_csv(file_path)
            for idx, row in df.iterrows():
                example = {
                    "source_ip": row["source_ip"],
                    "destination_ip": row["destination_ip"],
                    "timestamp": row["timestamp"],
                    "protocol": row["protocol"],
                    "flow_duration": row["flow_duration"],
                    # Add more feature values according to the dataset columns
                }
                # Prefix the key with the file name so ids stay unique across CSVs.
                yield f"{csv_file}_{idx}", example
if __name__ == "__main__":
    # Quick smoke test when the script is run directly (the guard prevents this
    # call from re-triggering during normal dataset loading).
    dataset = datasets.load_dataset("rdpahalavan/CIC-IDS2017")
    print(dataset)
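# A minimal usage sketch (assumes the repository id and config names declared
# above; the column names follow the placeholder schema, so adjust them to the
# real CSV headers):
#
#   from datasets import load_dataset
#
#   flows = load_dataset("rdpahalavan/CIC-IDS2017", "folder_1", split="train")
#   df = flows.to_pandas()
#   print(df[["source_ip", "destination_ip", "protocol"]].head())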