# dcase23-task2-enriched / dcase23-task2-enriched.py
import os
import datasets
import datasets.info
import pandas as pd
from pathlib import Path
from datasets import load_dataset
from typing import Iterable, Dict, Optional, Union, List
_CITATION = """\
@dataset{kota_dohi_2023_7687464,
  author    = {Kota Dohi and
               Keisuke Imoto and
               Noboru Harada and
               Daisuke Niizumi and
               Yuma Koizumi and
               Tomoya Nishida and
               Harsh Purohit and
               Takashi Endo and
               Yohei Kawaguchi},
  title     = {DCASE 2023 Challenge Task 2 Development Dataset},
  month     = mar,
  year      = 2023,
  publisher = {Zenodo},
  version   = {1.0},
  doi       = {10.5281/zenodo.7687464},
  url       = {https://doi.org/10.5281/zenodo.7687464}
}
"""
_LICENSE = "Creative Commons Attribution 4.0 International Public License"
_METADATA_REG = r"attributes_\d+\.csv"
_NUM_TARGETS = 2
_NUM_CLASSES = 7
_TARGET_NAMES = ["normal", "anomaly"]
_CLASS_NAMES = ["gearbox", "fan", "bearing", "slider", "ToyCar", "ToyTrain", "valve"]
_HOMEPAGE = {
"dev": "https://zenodo.org/record/7687464#.Y_96q9LMLmH",
}
DATA_URLS = {
"dev": {
"train": "data/dev_train.tar.gz",
"test": "data/dev_test.tar.gz",
"metadata": "data/dev_metadata.csv",
},
}
STATS = {
"name": "Enriched Dataset of 'DCASE 2023 Challenge Task 2'",
"configs": {
'dev': {
'date': "Mar 1, 2023",
'version': "1.0.0",
'homepage': "https://zenodo.org/record/7687464#.ZABmANLMLmH",
"splits": ["train", "test"],
},
}
}
DATASET = {
'dev': 'DCASE 2023 Challenge Task 2 Development Dataset',
}
_SPOTLIGHT_LAYOUT = "data/config-spotlight-layout.json"
_SPOTLIGHT_RENAME = {
"audio": "original_audio",
"path": "audio",
}
class DCASE2023Task2DatasetConfig(datasets.BuilderConfig):
"""BuilderConfig for DCASE2023Task2Dataset."""
    def __init__(self, name, version, **kwargs):
        self.dataset = kwargs.pop("dataset", None)
        self.release_date = kwargs.pop("release_date", None)
        self.homepage = kwargs.pop("homepage", None)
        self.data_urls = kwargs.pop("data_urls", None)
        self.splits = kwargs.pop("splits", None)
        self.rename = kwargs.pop("rename", None)
        self.layout = kwargs.pop("layout", None)
        description = (
            f"Dataset for the DCASE 2023 Challenge Task 2 'First-Shot Unsupervised Anomalous Sound Detection "
            f"for Machine Condition Monitoring', released on {self.release_date}. Original data available under "
            f"{self.homepage}. "
            f"CONFIG: {name}."
        )
super(DCASE2023Task2DatasetConfig, self).__init__(
name=name,
version=datasets.Version(version),
description=description,
)
    def to_spotlight(self, data: Union[pd.DataFrame, datasets.Dataset]) -> pd.DataFrame:
        """Convert a split of this dataset into a DataFrame with Spotlight-friendly columns."""
        if isinstance(data, datasets.Dataset):
            df = data.to_pandas()
            df["split"] = data.split
            df["config"] = data.config_name
            # map the integer class ids back to their human-readable names
            class_names = data.features["class"].names
            df["class_name"] = df["class"].apply(lambda x: class_names[x])
        elif isinstance(data, pd.DataFrame):
            df = data
        else:
            raise TypeError("data must be a datasets.Dataset or a pandas.DataFrame")
        # keep the original file path and apply the Spotlight-specific column renaming
        df["file_path"] = df["path"]
        df.rename(columns=self.rename, inplace=True)
        return df.copy()
def get_layout(self):
return self.layout
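# A minimal, hedged usage sketch for the two helpers above (kept as comments so nothing
# runs on import). It assumes the Renumics Spotlight viewer is installed and exposes
# `spotlight.show(df, layout=...)`; check the installed Spotlight version before relying
# on the exact call signature.
#
#   from renumics import spotlight
#   import datasets
#
#   builder = datasets.load_dataset_builder("dcase23-task2-enriched.py", "dev")
#   ds = load_dataset("dcase23-task2-enriched.py", "dev", split="train")
#   df = builder.config.to_spotlight(ds)          # DataFrame with Spotlight column names
#   spotlight.show(df, layout=builder.config.get_layout())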
class DCASE2023Task2Dataset(datasets.GeneratorBasedBuilder):
"""Dataset for the DCASE 2023 Challenge Task 2 "First-Shot Unsupervised Anomalous Sound Detection
for Machine Condition Monitoring"."""
VERSION = datasets.Version("0.0.2")
DEFAULT_CONFIG_NAME = "dev"
BUILDER_CONFIGS = [
DCASE2023Task2DatasetConfig(
name=key,
version=stats["version"],
dataset=DATASET[key],
homepage=_HOMEPAGE[key],
data_urls=DATA_URLS[key],
release_date=stats["date"],
splits=stats["splits"],
layout=_SPOTLIGHT_LAYOUT,
rename=_SPOTLIGHT_RENAME,
)
for key, stats in STATS["configs"].items()
]
def _info(self):
features = datasets.Features(
{
"audio": datasets.Audio(sampling_rate=16_000),
"path": datasets.Value("string"),
"section": datasets.Value("int64"),
"d1p": datasets.Value("string"),
"d1v": datasets.Value("string"),
"d2p": datasets.Value("string"),
"d2v": datasets.Value("string"),
"d3p": datasets.Value("string"),
"d3v": datasets.Value("string"),
"label": datasets.ClassLabel(num_classes=_NUM_TARGETS, names=_TARGET_NAMES),
"class": datasets.ClassLabel(num_classes=_NUM_CLASSES, names=_CLASS_NAMES),
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=self.config.description,
features=features,
supervised_keys=datasets.info.SupervisedKeysData("label"),
homepage=self.config.homepage,
license=_LICENSE,
citation=_CITATION,
)
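    # Note (illustrative): "label" and "class" are ClassLabel features, so generated
    # examples carry integer ids; features["label"].int2str(1) gives "anomaly" and
    # features["class"].int2str(0) gives "gearbox", following _TARGET_NAMES / _CLASS_NAMES.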
def _split_generators(
self,
dl_manager: datasets.DownloadManager
):
"""Returns SplitGenerators."""
dl_manager.download_config.ignore_url_params = True
audio_path = {}
local_extracted_archive = {}
split_type = {"train": datasets.Split.TRAIN, "test": datasets.Split.TEST}
for split in split_type:
audio_path[split] = dl_manager.download(self.config.data_urls[split])
local_extracted_archive[split] = dl_manager.extract(
audio_path[split]) if not dl_manager.is_streaming else None
return [
datasets.SplitGenerator(
name=split_type[split],
gen_kwargs={
"split": split,
"local_extracted_archive": local_extracted_archive[split],
"audio_files": dl_manager.iter_archive(audio_path[split]),
"metadata_file": dl_manager.download_and_extract(self.config.data_urls["metadata"]),
},
) for split in split_type
]
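    # Note: in streaming mode the archives are never extracted; examples are read directly
    # from the tarballs via dl_manager.iter_archive, and local_extracted_archive stays None.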
    def _generate_examples(
            self,
            split: str,
            local_extracted_archive: Optional[str],
            audio_files: Optional[Iterable],
            metadata_file: Optional[str],
    ):
        """Yields examples."""
        metadata = pd.read_csv(metadata_file)
        data_fields = list(self._info().features.keys())
        id_ = 0
        for path, f in audio_files:
            # metadata paths are stored as "<machine type>/<file name>"
            lookup = Path(path).parent.name + "/" + Path(path).name
            if lookup in metadata["path"].values:
                # resolve to the extracted location when not streaming
                path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
                audio = {"path": path, "bytes": f.read()}
                result = {field: None for field in data_fields}
                result.update(metadata[metadata["path"] == lookup].T.squeeze().to_dict())
                result["path"] = path
                yield id_, {**result, "audio": audio}
                id_ += 1
if __name__ == "__main__":
ds = load_dataset("dcase23-task2-enriched.py", "dev", split="train", streaming=True)
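# A slightly fuller, hedged usage sketch (kept as comments so importing this module has no
# side effects). It loads the local script non-streaming and inspects one example; the
# field names follow the features defined in _info above.
#
#   ds = load_dataset("dcase23-task2-enriched.py", "dev", split="test")
#   example = ds[0]
#   print(example["path"])
#   print("label:", ds.features["label"].int2str(example["label"]))
#   print("machine:", ds.features["class"].int2str(example["class"]))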