Commit 2ce90a9 (1 parent: 9d86664), committed by polinaeterna

add loading script

Files changed (1):
  1. peoples_speech.py +174 -0
peoples_speech.py ADDED
@@ -0,0 +1,174 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import json
+
+ import datasets
+ from datasets.tasks import AutomaticSpeechRecognition
+ from tqdm.auto import tqdm
+
+
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @article{DBLP:journals/corr/abs-2111-09344,
+   author    = {Daniel Galvez and
+                Greg Diamos and
+                Juan Ciro and
+                Juan Felipe Ceron and
+                Keith Achorn and
+                Anjali Gopi and
+                David Kanter and
+                Maximilian Lam and
+                Mark Mazumder and
+                Vijay Janapa Reddi},
+   title     = {The People's Speech: A Large-Scale Diverse English Speech Recognition
+                Dataset for Commercial Usage},
+   journal   = {CoRR},
+   volume    = {abs/2111.09344},
+   year      = {2021},
+   url       = {https://arxiv.org/abs/2111.09344},
+   eprinttype = {arXiv},
+   eprint    = {2111.09344},
+   timestamp = {Mon, 22 Nov 2021 16:44:07 +0100},
+   biburl    = {https://dblp.org/rec/journals/corr/abs-2111-09344.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+ # You can copy an official description
+ _DESCRIPTION = """\
+ The People's Speech is a free-to-download 30,000-hour and growing supervised
+ conversational English speech recognition dataset licensed for academic and
+ commercial usage under CC-BY-SA (with a CC-BY subset).
+ """
+
+ _HOMEPAGE = "https://mlcommons.org/en/peoples-speech/"
+
+ _LICENSE = [
+     "cc-by-2.0", "cc-by-2.5", "cc-by-3.0", "cc-by-4.0", "cc-by-sa-2.5",
+     "cc-by-sa-3.0", "cc-by-sa-4.0"
+ ]
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "clean-cc-by": {
+         "audio_tar": "",
+         "manifest": "",
+     },
+     "dirty-cc-by": {
+         "audio_tar": "",
+         "manifest": "",
+     },
+     "clean-cc-by-sa": {
+         "audio_tar": "",
+         "manifest": "",
+     },
+     "dirty-cc-by-sa": {
+         "audio_tar": "",
+         "manifest": "",
+     },
+     "microset": {
+         "audio_tar": "",
+         "manifest": "",
+     },
+ }
+
+ # _BASE_URL = "https://huggingface.co/datasets/MLCommons/peoples_speech/resolve/main/"
+
+ # relative path to data inside dataset's repo
+ _DATA_URL = "{config}/{config}_00000{archive_id}.tar"
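+ # (illustrative: for config "clean" and archive_id 0 this resolves to "clean/clean_000000.tar")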
+
+ # relative path to metadata inside dataset's repo
+ _MANIFEST_URL = "{config}.json"
+
+
+ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
+     """The People's Speech dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="clean", version=VERSION, description="Clean, CC-BY licensed subset."),
+         datasets.BuilderConfig(name="dirty", version=VERSION, description="Dirty, CC-BY licensed subset."),
+         datasets.BuilderConfig(name="clean_sa", version=VERSION, description="Clean, CC-BY-SA licensed subset."),
+         datasets.BuilderConfig(name="dirty_sa", version=VERSION, description="Dirty, CC-BY-SA licensed subset."),
+     ]
+     DEFAULT_CONFIG_NAME = "clean"
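+     # each example embeds a full audio file, so write examples to the Arrow file
+     # one at a time to keep memory usage low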
+     DEFAULT_WRITER_BATCH_SIZE = 1
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "duration_ms": datasets.Value("int32"),
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             task_templates=[AutomaticSpeechRecognition()],
+             supervised_keys=("file", "text"),
+             homepage=_HOMEPAGE,
+             license="/".join(_LICENSE),  # license must be a string
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: for demo purposes I use just the first 5 archives
+         # TODO: this should be changed to the actual number of archives later
+         urls = [_DATA_URL.format(config=self.config.name, archive_id=i) for i in range(5)]
+         archives = [dl_manager.iter_archive(dl_manager.download(url)) for url in urls]
+
+         manifest_url = _MANIFEST_URL.format(config=self.config.name)
+         manifest_path = dl_manager.download_and_extract(manifest_url)  # maybe just download?
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "archives": archives,
+                     "manifest_path": manifest_path
+                 },
+             ),
+         ]
+
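+     # Each line of the manifest is expected to be a JSON object of the form
+     # (inferred from the fields read below):
+     #   {"audio_document_id": ..., "training_data": {"label": [...], "name": [...], "duration_ms": [...]}}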
+     def _generate_examples(self, archives, manifest_path):
+         meta = dict()
+         with open(manifest_path, "r", encoding="utf-8") as f:
+             for line in tqdm(f, desc="reading metadata file"):
+                 sample_meta = json.loads(line)
+                 _id = sample_meta["audio_document_id"]
+                 texts = sample_meta["training_data"]["label"]
+                 audio_filenames = sample_meta["training_data"]["name"]
+                 durations = sample_meta["training_data"]["duration_ms"]
+                 for audio_filename, text, duration in zip(audio_filenames, texts, durations):
+                     meta[audio_filename] = {
+                         "audio_document_id": _id,
+                         "text": text,
+                         "duration_ms": duration
+                     }
+
+         print("generating examples")
+         for archive in archives:
+             # note that you don't need to use the `tarfile` library to open tar archives manually,
+             # dl_manager.iter_archive() does it for you :)
+             for audio_filename, audio_file in archive:
+                 yield audio_filename, {
+                     "id": audio_filename,
+                     "audio": {"path": audio_filename, "bytes": audio_file.read()},
+                     "text": meta[audio_filename]["text"],
+                     "duration_ms": meta[audio_filename]["duration_ms"]
+                 }
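
Once the tar archives and per-config manifests referenced by _DATA_URL and _MANIFEST_URL are uploaded to the repo, the script should make each subset loadable with datasets.load_dataset. A minimal usage sketch, not part of this commit; the repo id MLCommons/peoples_speech is taken from the commented-out _BASE_URL and the config names from BUILDER_CONFIGS:

    from datasets import load_dataset

    # "clean" is the default config; "dirty", "clean_sa" and "dirty_sa" are also defined
    ds = load_dataset("MLCommons/peoples_speech", "clean", split="train")
    print(ds[0]["id"], ds[0]["duration_ms"], ds[0]["text"])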