import os
import pandas as pd
import datasets

_CITATION = """\
@InProceedings{YourReferenceHere,
}
"""

_DESCRIPTION = """\
Description of your dataset.
"""

_HOMEPAGE = "https://dataset-homepage/"
_LICENSE = "Dataset License"

_URLS = {
    "Network-Flows": "Network-Flows/CICIDS_Flow.csv",
    "Packet-Fields": "Packet-Fields/Packet_Fields_File_10.csv",
    "Packet-Bytes": "Packet-Bytes/Packet_Bytes_File_10.csv",
    "Payload-Bytes": "Payload-Bytes/Payload_Bytes_File_10.csv",
}


class CICIDS2017(datasets.GeneratorBasedBuilder):
    """CICIDS2017 intrusion-detection dataset with one builder config per data
    view: network flows, packet fields, packet bytes, and payload bytes."""

    VERSION = datasets.Version("1.0.0")

    # One builder config per data view, keyed by the entries in _URLS.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=config_name,
            version=VERSION,
            description=f"This config covers the {config_name} view of the dataset.",
        )
        for config_name in _URLS
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                # TODO: Adjust features according to your dataset; the keys
                # must match the columns yielded by _generate_examples.
                "flow_id": datasets.Value("int64"),
                "attack_label": datasets.Value("string"),
                # More columns...
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The CSV for the selected config is resolved relative to `data_dir`,
        # which the caller must supply via load_dataset(..., data_dir=...).
        # Each config exposes a single TRAIN split.
        filepath = os.path.join(self.config.data_dir, _URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": filepath},
            ),
        ]

    def _generate_examples(self, filepath):
        # Read the CSV once and yield one example per row, using the row
        # index as the example key.
        df = pd.read_csv(filepath)
        for id_, row in df.iterrows():
            yield id_, row.to_dict()
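

# A minimal usage sketch (assumptions: this script is saved as "CICIDS2017.py"
# and `data_dir` points at a directory containing the relative CSV paths listed
# in _URLS; the file name and path below are illustrative, and newer `datasets`
# releases may also require trust_remote_code=True when using loading scripts):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("CICIDS2017.py", "Network-Flows", data_dir="/path/to/CICIDS2017")
#   print(ds["train"][0])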