"""Soybean Dataset"""
from typing import List
from functools import partial
import datasets
import pandas
VERSION = datasets.Version("1.0.0")
_ENCODING_DICS = {
    "class": {
        value: i
        for i, value in enumerate([
            "diaporthe_stem_canker", "charcoal_rot", "rhizoctonia_root_rot",
            "phytophthora_rot", "brown_stem_rot", "powdery_mildew",
            "downy_mildew", "brown_spot", "bacterial_blight",
            "bacterial_pustule", "purple_seed_stain", "anthracnose",
            "phyllosticta_leaf_spot", "alternarialeaf_spot",
            "frog_eye_leaf_spot", "diaporthe_pod_&_stem_blight",
            "cyst_nematode", "2_4_d_injury", "herbicide_injury",
        ])
    }
}
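# The enumeration above maps each class name to its positional index, for example
# _ENCODING_DICS["class"]["diaporthe_stem_canker"] == 0 and
# _ENCODING_DICS["class"]["charcoal_rot"] == 1.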
_BASE_FEATURE_NAMES = [
"date",
"plant_stand",
"precip",
"temp",
"hail",
"crop_hist",
"area_damaged",
"severity",
"seed_tmt",
"germination",
"plant_growth",
"leaves",
"leafspots_halo",
"leafspots_marg",
"leafspot_size",
"leaf_shread",
"leaf_malf",
"leaf_mild",
"stem",
"lodging",
"stem_cankers",
"canker_lesion",
"fruiting_bodies",
"external decay",
"mycelium",
"int_discolor",
"sclerotia",
"fruit_pods",
"fruit spots",
"seed",
"mold_growth",
"seed_discolor",
"seed_size",
"shriveling",
"roots",
"class",
]
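# The list above holds the 35 categorical attributes followed by the target column
# "class"; the names are assigned positionally to the header-less CSV in `preprocess`.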
DESCRIPTION = "Soybean (Large) disease diagnosis dataset from the UCI Machine Learning Repository."
_HOMEPAGE = "https://archive.ics.uci.edu/dataset/90/soybean+large"
_URLS = ("https://archive.ics.uci.edu/dataset/90/soybean+large")
_CITATION = """
@misc{misc_soybean_large,
    author = {Michalski, R. S. and Chilausky, R. L.},
    title = {{Soybean (Large)}},
    howpublished = {UCI Machine Learning Repository}
}
"""
# Dataset info
urls_per_split = {
"train": "https://huggingface.co/datasets/mstz/soybean/resolve/main/soybean.csv"
}
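# Example usage (a sketch; assumes this script is published as the `mstz/soybean`
# dataset on the Hugging Face Hub, matching the URL above):
#
#     import datasets
#
#     # multiclass configuration with 19 disease classes
#     train = datasets.load_dataset("mstz/soybean", "soybean", split="train")
#
#     # binary configuration: is this instance of class "charcoal_rot"?
#     train_binary = datasets.load_dataset("mstz/soybean", "charcoal_rot", split="train")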
features_types_per_config = {
"soybean": {
"date": datasets.Value("string"),
"plant_stand": datasets.Value("string"),
"precip": datasets.Value("string"),
"temp": datasets.Value("string"),
"hail": datasets.Value("string"),
"crop_hist": datasets.Value("string"),
"area_damaged": datasets.Value("string"),
"severity": datasets.Value("string"),
"seed_tmt": datasets.Value("string"),
"germination": datasets.Value("string"),
"plant_growth": datasets.Value("string"),
"leaves": datasets.Value("string"),
"leafspots_halo": datasets.Value("string"),
"leafspots_marg": datasets.Value("string"),
"leafspot_size": datasets.Value("string"),
"leaf_shread": datasets.Value("string"),
"leaf_malf": datasets.Value("string"),
"leaf_mild": datasets.Value("string"),
"stem": datasets.Value("string"),
"lodging": datasets.Value("string"),
"stem_cankers": datasets.Value("string"),
"canker_lesion": datasets.Value("string"),
"fruiting_bodies": datasets.Value("string"),
"external decay": datasets.Value("string"),
"mycelium": datasets.Value("string"),
"int_discolor": datasets.Value("string"),
"sclerotia": datasets.Value("string"),
"fruit_pods": datasets.Value("string"),
"fruit spots": datasets.Value("string"),
"seed": datasets.Value("string"),
"mold_growth": datasets.Value("string"),
"seed_discolor": datasets.Value("string"),
"seed_size": datasets.Value("string"),
"shriveling": datasets.Value("string"),
"roots": datasets.Value("string"),
"class": datasets.ClassLabel(num_classes=19)
}
}
features_per_config = {k: datasets.Features(features_types_per_config[k]) for k in features_types_per_config}

# Each binary configuration reuses the base feature schema but exposes a two-class label,
# so that SoybeanConfig and preprocess can look up any configuration name.
for class_name in _ENCODING_DICS["class"]:
    binary_feature_types = dict(features_types_per_config["soybean"])
    binary_feature_types["class"] = datasets.ClassLabel(num_classes=2)
    features_types_per_config[class_name] = binary_feature_types
    features_per_config[class_name] = datasets.Features(binary_feature_types)
class SoybeanConfig(datasets.BuilderConfig):
def __init__(self, **kwargs):
super(SoybeanConfig, self).__init__(version=VERSION, **kwargs)
self.features = features_per_config[kwargs["name"]]
class Soybean(datasets.GeneratorBasedBuilder):
    # dataset configurations
    DEFAULT_CONFIG_NAME = "soybean"
binary_configurations = [SoybeanConfig(name=c, description=f"Is this instance of class {c}?")
for c in _ENCODING_DICS["class"].keys()]
    BUILDER_CONFIGS = [SoybeanConfig(name="soybean", description="Soybean for multiclass classification.")]
BUILDER_CONFIGS += binary_configurations
def _info(self):
info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
features=features_per_config[self.config.name])
return info
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
downloads = dl_manager.download_and_extract(urls_per_split)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]}),
]
def _generate_examples(self, filepath: str):
data = pandas.read_csv(filepath, header=None)
data = self.preprocess(data)
for row_id, row in data.iterrows():
data_row = dict(row)
yield row_id, data_row
    def preprocess(self, data: pandas.DataFrame) -> pandas.DataFrame:
        data.columns = _BASE_FEATURE_NAMES

        if self.config.name in _ENCODING_DICS["class"]:
            # binary configuration: 1 if the instance belongs to this configuration's class, 0 otherwise
            data["class"] = data["class"].apply(lambda x: 1 if x == self.config.name else 0)
        else:
            # multiclass configuration: encode the class names (and any other listed feature) as integers
            for feature in _ENCODING_DICS:
                encoding_function = partial(self.encode, feature)
                data[feature] = data[feature].apply(encoding_function)

        return data[list(features_types_per_config[self.config.name].keys())]
def encode(self, feature, value):
if feature in _ENCODING_DICS:
return _ENCODING_DICS[feature][value]
raise ValueError(f"Unknown feature: {feature}")
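
if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the loading script's required API):
    # older versions of `datasets` can load a dataset directly from a local loading
    # script path, which is assumed to work here.
    dataset = datasets.load_dataset(__file__, "soybean", split="train")
    print(dataset[0])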