File size: 7,880 Bytes
b6746bc
 
 
 
 
 
 
 
 
 
 
 
 
063e542
b6746bc
 
 
 
79b17a2
b6746bc
 
 
 
 
 
 
3a6859a
 
b6746bc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3a6859a
 
 
b6746bc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3a6859a
b6746bc
 
 
 
4b8f83a
b6746bc
 
 
 
 
 
3a6859a
b6746bc
 
 
 
4b8f83a
 
 
 
 
 
 
3a6859a
4b8f83a
 
 
 
 
b6746bc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79b17a2
b6746bc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4b8f83a
bde62dd
b6746bc
4b8f83a
bde62dd
4b8f83a
 
bde62dd
b6746bc
 
bde62dd
b6746bc
 
4b322ed
b6746bc
 
 
 
4b8f83a
b6746bc
 
 
 
2479fb3
b6746bc
 
 
 
46ff261
12cdb3a
 
b6746bc
12cdb3a
252e475
12cdb3a
 
 
 
 
 
 
 
b6746bc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
# coding=utf-8

"""AudioSet sound event classification dataset."""


import os
import json
import gzip
import joblib
import shutil
import pathlib
import logging
import zipfile
import librosa
import textwrap
import datasets
import requests
import itertools
import torchaudio
import typing as tp
import pandas as pd
from pathlib import Path
from copy import deepcopy
from tqdm.auto import tqdm
from rich.logging import RichHandler

from ._audioset import ID2LABEL

# Module-level logger routed through a Rich console handler.
# NOTE: handler/level are configured at import time as a side effect;
# importing this module twice via different paths would attach duplicate handlers.
logger = logging.getLogger(__name__)
logger.addHandler(RichHandler())
logger.setLevel(logging.INFO)


# Expected on-disk layout of the manually downloaded `audios` folder;
# embedded verbatim in the manual-download instructions.
DATA_DIR_STRUCTURE = """
audios/
β”œβ”€β”€ balanced_train_segments [20550 entries]
β”œβ”€β”€ eval_segments [18887 entries]
└── unbalanced_train_segments
    β”œβ”€β”€ unbalanced_train_segments_part00 [46940 entries]
    ...
    └── unbalanced_train_segments_part40 [9844 entries]
"""

# Inverse of ID2LABEL (label text -> class id) and the ordered class-name list
# used to build the ClassLabel feature.
LABEL2ID = {label: class_id for class_id, label in ID2LABEL.items()}
CLASSES = list(ID2LABEL.values())


class AudioSetConfig(datasets.BuilderConfig):
    """Configuration for the AudioSet dataset builder."""

    def __init__(self, features, **kwargs):
        """Store the feature schema; forward everything else to the base class.

        Args:
            features: `datasets.Features` describing one example.
            **kwargs: passed through to `datasets.BuilderConfig`.
        """
        super().__init__(version=datasets.Version("0.0.1", ""), **kwargs)
        self.features = features


class AudioSet(datasets.GeneratorBasedBuilder):
    """Builder for the AudioSet sound event classification dataset.

    Three configurations are exposed; they share one feature schema and
    differ only in which training subset (and metadata file) is used:

    * ``20k``  -- balanced_train_segments
    * ``500k`` -- a 500k subset of unbalanced_train_segments
    * ``2m``   -- the full unbalanced_train_segments
    """

    # Training audio sub-directory for each config name.
    _TRAIN_DIRS = {
        "20k": "balanced_train_segments",
        "500k": "unbalanced_train_segments",
        "2m": "unbalanced_train_segments",
    }

    # All configs share the same features; build them in one place instead of
    # repeating the identical dict three times.
    BUILDER_CONFIGS = [
        AudioSetConfig(
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=None),
                    "sound": datasets.Sequence(datasets.Value("string")),
                    "label": datasets.Sequence(datasets.features.ClassLabel(names=CLASSES)),
                }
            ),
            name=subset_name,
            description="",
        )
        for subset_name in ("20k", "500k", "2m")
    ]

    def _info(self):
        """Return dataset metadata built from the active config's features."""
        return datasets.DatasetInfo(
            description="",
            features=self.config.features,
            supervised_keys=None,
            homepage="",
            citation="",
            task_templates=None,
        )

    @property
    def manual_download_instructions(self):
        """Instructions shown to the user when the manual data dir is missing."""
        return (
            "To use AudioSet you have to download it manually. "
            "Please download the dataset from https://huggingface.co/datasets/confit/audioset-full \n"
            "Then extract all files in one folder called `audios` and load the dataset with: "
            "`datasets.load_dataset('confit/audioset', '20k', data_dir='path/to/folder/audios')`\n"
            "The tree structure of the downloaded data looks like: \n"
            f"{DATA_DIR_STRUCTURE}"
        )

    def _split_generators(self, dl_manager):
        """Validate the manually downloaded data dir and declare train/test splits.

        Raises:
            FileNotFoundError: if ``data_dir`` does not exist.
        """
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))

        if not os.path.exists(data_dir):
            # Use the actual config name in the hint; the original message
            # referenced a nonexistent 'balanced' config.
            raise FileNotFoundError(
                f"{data_dir} does not exist. Make sure you insert a manual dir via "
                f"`datasets.load_dataset('confit/audioset', '{self.config.name}', data_dir=...)` "
                f"that includes files unzipped from all the zip files. "
                f"Manual download instructions: {self.manual_download_instructions}"
            )

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"split": "train", "data_dir": data_dir}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"split": "test", "data_dir": data_dir}),
        ]

    def _generate_examples(self, split, data_dir):
        """Yield ``(key, example)`` pairs for one split.

        Downloads a JSONL metadata file mapping wav filenames to label lists,
        scans ``data_dir`` for wav files, and yields one example per wav file
        that appears in the metadata.

        Args:
            split: ``'train'`` or ``'test'``.
            data_dir: root of the manually downloaded ``audios`` directory.

        Raises:
            ConnectionError: if the metadata file cannot be downloaded.
            ValueError: for an unknown ``split``.
        """
        extensions = ['.wav']

        if split == 'train':
            archive_path = os.path.join(data_dir, self._TRAIN_DIRS[self.config.name])
            metadata_url = (
                'https://huggingface.co/datasets/confit/audioset/resolve/main/'
                f'metadata/audioset-{self.config.name}.jsonl'
            )
        elif split == 'test':
            archive_path = os.path.join(data_dir, 'eval_segments')
            metadata_url = 'https://huggingface.co/datasets/confit/audioset/resolve/main/metadata/audioset-eval.jsonl'
        else:
            raise ValueError(f"Unknown split: {split!r}")

        response = requests.get(metadata_url, timeout=60)
        if response.status_code != 200:
            # The original code only logged the failure and then crashed with a
            # NameError on `fileid2labels`; fail loudly with a clear error instead.
            raise ConnectionError(
                f"Failed to retrieve metadata from {metadata_url}: "
                f"status code {response.status_code}"
            )

        # Each line is a JSON object like
        # {"filename": "YN6UbMsh-q1c.wav", "labels": ["Vehicle", "Car"]}
        data_list = [json.loads(line) for line in response.text.splitlines()]
        fileid2labels = {item['filename']: item['labels'] for item in data_list}

        _, wav_paths = fast_scandir(archive_path, extensions, recursive=True)
        # Keep only files that have metadata.
        wav_paths = [wav_path for wav_path in wav_paths if Path(wav_path).name in fileid2labels]

        for guid, wav_path in enumerate(wav_paths):
            fileid = Path(wav_path).name
            sound = fileid2labels[fileid]
            duration = librosa.get_duration(path=wav_path)
            if duration <= 0:  # skip empty/corrupt clips
                continue
            try:
                # NOTE(review): "id" and "duration" are not declared in the
                # config features -- confirm `datasets` tolerates the extra keys.
                yield guid, {
                    "id": str(guid),
                    "file": wav_path,
                    "audio": wav_path,
                    "sound": sound,
                    "label": sound,
                    "duration": duration,
                }
            except Exception:
                # Best-effort: skip clips that fail to encode, but never
                # swallow KeyboardInterrupt/SystemExit (the old bare `except:` did).
                continue


def fast_scandir(path: str, extensions: tp.List[str], recursive: bool = False):
    """Scan *path* for files whose (lower-cased) extension is in *extensions*.

    Faster than ``glob`` on large trees: a single ``os.scandir`` pass per
    directory. Adapted from github.com/drscotthawley/aeiou/blob/main/aeiou/core.py

    Args:
        path: directory to scan.
        extensions: lower-case extensions to keep, e.g. ``['.wav']``.
        recursive: if True, descend into sub-directories as well.

    Returns:
        ``(subfolders, files)`` lists of directory paths and matching file
        paths; with ``recursive=True`` both include nested entries.
    """
    subfolders: tp.List[str] = []
    files: tp.List[str] = []

    try:
        # Unreadable directory (e.g. permission denied): return empty results.
        entries = os.scandir(path)
    except OSError:
        return subfolders, files

    # Close the scandir iterator deterministically (it holds an OS handle);
    # the original never closed it.
    with entries:
        for entry in entries:
            try:
                # Broken symlinks / 'too many levels of symbolic links' raise
                # OSError from is_dir/is_file; skip those entries.
                if entry.is_dir():
                    subfolders.append(entry.path)
                elif entry.is_file() and os.path.splitext(entry.name)[1].lower() in extensions:
                    files.append(entry.path)
            except OSError:
                pass

    if recursive:
        # Snapshot the direct children: `subfolders` grows while we recurse.
        # (The original also shadowed the `path` parameter here.)
        for subfolder in list(subfolders):
            nested_dirs, nested_files = fast_scandir(subfolder, extensions, recursive=True)
            subfolders.extend(nested_dirs)
            files.extend(nested_files)

    return subfolders, files