"""Diva: A Fraud Detection Dataset""" from typing import List import datasets import pandas VERSION = datasets.Version("1.0.0") _ORIGINAL_FEATURE_NAMES = [ "age", "workclass", "final_weight", "education", "education-num", "marital_status", "occupation", "relationship", "race", "sex", "capital_gain", "capital_loss", "hours_per_week", "native_country", "threshold" ] _BASE_FEATURE_NAMES = [ "age", "capital_gain", "capital_loss", "education", "final_weight", "hours_per_week", "marital_status", "native_country", "occupation", "race", "relationship", "sex", "workclass", "threshold", ] DESCRIPTION = "Adult dataset from the UCI ML repository." _HOMEPAGE = "https://archive.ics.uci.edu/ml/datasets/Adult" _URLS = ("https://huggingface.co/datasets/mstz/adult/raw/adult.csv") _CITATION = """ @inproceedings{DBLP:conf/kdd/Kohavi96, author = {Ron Kohavi}, editor = {Evangelos Simoudis and Jiawei Han and Usama M. Fayyad}, title = {Scaling Up the Accuracy of Naive-Bayes Classifiers: {A} Decision-Tree Hybrid}, booktitle = {Proceedings of the Second International Conference on Knowledge Discovery and Data Mining (KDD-96), Portland, Oregon, {USA}}, pages = {202--207}, publisher = {{AAAI} Press}, year = {1996}, url = {http://www.aaai.org/Library/KDD/1996/kdd96-033.php}, timestamp = {Mon, 05 Jun 2017 13:20:21 +0200}, biburl = {https://dblp.org/rec/conf/kdd/Kohavi96.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} }""" # Dataset info _urls_per_split = { "train": "https://huggingface.co/datasets/mstz/adult/raw/main/adult_tr.csv", "test": "https://huggingface.co/datasets/mstz/adult/raw/main/adult_ts.csv" } features_per_config = { "income": datasets.Features({"age": datasets.Value("int8"), "capital_gain": datasets.Value("float16"), "capital_loss": datasets.Value("float16"), "education": datasets.Value("int8"), "final_weight": datasets.Value("int16"), "hours_per_week": datasets.Value("int16"), "marital_status": datasets.Value("string"), "native_country": datasets.Value("string"), "occupation": datasets.Value("string"), "race": datasets.Value("string"), "relationship": datasets.Value("string"), "sex": datasets.Value("binary"), "workclass": datasets.Value("binary"), "threshold": datasets.ClassLabel(num_classes=2, names=("no", "yes")) }), "income-no race": datasets.Features({"age": datasets.Value("int8"), "capital_gain": datasets.Value("float16"), "capital_loss": datasets.Value("float16"), "education": datasets.Value("int8"), "final_weight": datasets.Value("int16"), "hours_per_week": datasets.Value("int16"), "marital_status": datasets.Value("string"), "native_country": datasets.Value("string"), "occupation": datasets.Value("string"), "relationship": datasets.Value("string"), "sex": datasets.Value("binary"), "workclass": datasets.Value("binary"), "threshold": datasets.ClassLabel(num_classes=2, names=("no", "yes")) }), "race": datasets.Features({"age": datasets.Value("int8"), "capital_gain": datasets.Value("float16"), "capital_loss": datasets.Value("float16"), "education": datasets.Value("int8"), "final_weight": datasets.Value("int16"), "hours_per_week": datasets.Value("int16"), "marital_status": datasets.Value("string"), "native_country": datasets.Value("string"), "occupation": datasets.Value("string"), "relationship": datasets.Value("string"), "sex": datasets.Value("binary"), "workclass": datasets.Value("binary"), "over_threshold": datasets.Value("binary"), "race": datasets.ClassLabel(num_classes=5, names=["White", "Black", "Asian-Pac-Islander", "Amer-Indian-Eskimo", "Other"]), }), } class 
class AdultConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super(AdultConfig, self).__init__(version=VERSION, **kwargs)
        self.features = features_per_config[kwargs["name"]]


class Adult(datasets.GeneratorBasedBuilder):
    # dataset configurations
    DEFAULT_CONFIG_NAME = "income"
    BUILDER_CONFIGS = [
        AdultConfig(
            name="income",
            description="Adult for income threshold binary classification.",
        ),
        AdultConfig(
            name="income-no race",
            description="Adult for income threshold binary classification, race excluded from features.",
        ),
        AdultConfig(
            name="race",
            description="Adult for race multiclass classification.",
        ),
    ]

    def _info(self):
        if self.config.name not in features_per_config:
            raise ValueError(f"Unknown configuration: {self.config.name}")

        return datasets.DatasetInfo(
            description=DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            features=features_per_config[self.config.name],
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        downloads = dl_manager.download_and_extract(_urls_per_split)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloads["test"]}),
        ]

    def _generate_examples(self, filepath: str):
        data = pandas.read_csv(filepath)
        data = self.preprocess(data, config=self.config.name)

        # iterrows() yields (index, Series) pairs; the frame index serves as a stable example id.
        for row_id, row in data.iterrows():
            yield row_id, dict(row)

    def preprocess(self, data: pandas.DataFrame, config: str = "income") -> pandas.DataFrame:
        # Drop the categorical education column; its ordinal counterpart
        # ("education-num" in _ORIGINAL_FEATURE_NAMES) is kept and renamed to
        # "education" via _BASE_FEATURE_NAMES below.
        data = data.drop(columns=["education"])
        data = data[["age", "capital_gain", "capital_loss", "education-num", "final_weight",
                     "hours_per_week", "marital_status", "native_country", "occupation",
                     "race", "relationship", "sex", "workclass", "threshold"]]
        data.columns = _BASE_FEATURE_NAMES

        # Apply the configuration-specific column selection.
        if config == "income":
            return income_preprocessing(data)
        if config == "income-no race":
            return income_norace_preprocessing(data)
        if config == "race":
            return race_preprocessing(data)
        raise ValueError(f"Unknown configuration: {config}")


def income_preprocessing(data: pandas.DataFrame) -> pandas.DataFrame:
    return data[list(features_per_config["income"].keys())]


def income_norace_preprocessing(data: pandas.DataFrame) -> pandas.DataFrame:
    return data[list(features_per_config["income-no race"].keys())]


def race_preprocessing(data: pandas.DataFrame) -> pandas.DataFrame:
    data["over_threshold"] = data["threshold"]
    return data[list(features_per_config["race"].keys())]


# TODO: add custom split?
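
# A minimal usage sketch, assuming this script is hosted as the loading script of
# the `mstz/adult` dataset repository on the Hugging Face Hub (the repository id is
# inferred from the URLs above; adjust it if the script is loaded from elsewhere):
#
#     from datasets import load_dataset
#
#     adult = load_dataset("mstz/adult", "income")  # or "income-no race", "race"
#     print(adult["train"].features)
#     print(adult["train"][0])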