eustlb (HF staff) committed on
Commit 5202c5c
1 Parent(s): 9edcd32

Delete peoples_speech.py

Files changed (1)
  1. peoples_speech.py +0 -225
peoples_speech.py DELETED
@@ -1,225 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import json
- import os
-
- import datasets
- from tqdm.auto import tqdm
-
-
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @article{DBLP:journals/corr/abs-2111-09344,
-   author    = {Daniel Galvez and
-                Greg Diamos and
-                Juan Ciro and
-                Juan Felipe Ceron and
-                Keith Achorn and
-                Anjali Gopi and
-                David Kanter and
-                Maximilian Lam and
-                Mark Mazumder and
-                Vijay Janapa Reddi},
-   title     = {The People's Speech: A Large-Scale Diverse English Speech Recognition
-                Dataset for Commercial Usage},
-   journal   = {CoRR},
-   volume    = {abs/2111.09344},
-   year      = {2021},
-   url       = {https://arxiv.org/abs/2111.09344},
-   eprinttype = {arXiv},
-   eprint    = {2111.09344},
-   timestamp = {Mon, 22 Nov 2021 16:44:07 +0100},
-   biburl    = {https://dblp.org/rec/journals/corr/abs-2111-09344.bib},
-   bibsource = {dblp computer science bibliography, https://dblp.org}
- }
- """
-
- # You can copy an official description
- _DESCRIPTION = """\
- The People's Speech is a free-to-download 30,000-hour and growing supervised
- conversational English speech recognition dataset licensed for academic and
- commercial usage under CC-BY-SA (with a CC-BY subset).
- """
-
- _HOMEPAGE = "https://mlcommons.org/en/peoples-speech/"
-
- _LICENSE = [
-     "cc-by-2.0", "cc-by-2.5", "cc-by-3.0", "cc-by-4.0", "cc-by-sa-2.5",
-     "cc-by-sa-3.0", "cc-by-sa-4.0"
- ]
-
- _BASE_URL = "https://huggingface.co/datasets/MLCommons/peoples_speech/resolve/main/"
-
- # relative path to data inside dataset's repo
- _DATA_URL = _BASE_URL + "{split}/{config}/{config}_{archive_id:06d}.tar"
-
- # relative path to file containing number of audio archives inside dataset's repo
- _N_SHARDS_URL = _BASE_URL + "n_shards.json"
-
- # relative path to metadata inside dataset's repo
- _MANIFEST_URL = _BASE_URL + "{split}/{config}.json"
-
-
- class PeoplesSpeechConfig(datasets.BuilderConfig):
-
-     def __init__(self, *args, **kwargs):
-         super().__init__(*args, **kwargs)
-
-
- class PeoplesSpeech(datasets.GeneratorBasedBuilder):
-     """The People's Speech dataset."""
-
-     VERSION = datasets.Version("1.1.0")
-     BUILDER_CONFIGS = [
- PeoplesSpeechConfig(name="microset", version=VERSION, description="Small subset of clean data for example pusposes."),
87
- PeoplesSpeechConfig(name="clean", version=VERSION, description="Clean, CC-BY licensed subset."),
88
- PeoplesSpeechConfig(name="dirty", version=VERSION, description="Dirty, CC-BY licensed subset."),
89
- PeoplesSpeechConfig(name="clean_sa", version=VERSION, description="Clean, CC-BY-SA licensed subset."),
90
- PeoplesSpeechConfig(name="dirty_sa", version=VERSION, description="Dirty, CC-BY-SA licensed subset."),
91
- PeoplesSpeechConfig(name="test", version=VERSION, description="Only test data."),
92
- PeoplesSpeechConfig(name="validation", version=VERSION, description="Only validation data."),
93
- ]
94
- DEFAULT_CONFIG_NAME = "clean"
95
- DEFAULT_WRITER_BATCH_SIZE = 512
96
-
97
- def _info(self):
98
- return datasets.DatasetInfo(
99
- description=_DESCRIPTION,
100
- features=datasets.Features(
101
- {
102
- "id": datasets.Value("string"),
103
- "audio": datasets.Audio(),
104
- "duration_ms": datasets.Value("int32"),
105
- "text": datasets.Value("string"),
106
- }
107
- ),
108
- homepage=_HOMEPAGE,
109
- license="/".join(_LICENSE), # license must be a string
110
- citation=_CITATION,
111
- )
112
-
113
- def _split_generators(self, dl_manager):
114
-
115
- if self.config.name == "microset":
116
- # take only first data archive for demo purposes
117
- url = [_DATA_URL.format(split="train", config="clean", archive_id=0)]
118
- archive_path = dl_manager.download(url)
119
- local_extracted_archive_path = dl_manager.extract(archive_path) if not dl_manager.is_streaming else [None]
120
- manifest_url = _MANIFEST_URL.format(split="train", config="clean_000000") # train/clean_000000.json
121
- manifest_path = dl_manager.download_and_extract(manifest_url)
122
-
123
- return [
124
- datasets.SplitGenerator(
125
- name=datasets.Split.TRAIN,
126
- gen_kwargs={
127
- "local_extracted_archive_paths": local_extracted_archive_path,
128
- # use iter_archive here to access the files in the TAR archives:
129
- "archives": [dl_manager.iter_archive(path) for path in archive_path],
130
- "manifest_path": manifest_path,
131
- },
132
- ),
133
- ]
134
-
135
- n_shards_path = dl_manager.download_and_extract(_N_SHARDS_URL)
136
- with open(n_shards_path, encoding="utf-8") as f:
137
- n_shards = json.load(f)
138
-
139
- if self.config.name in ["validation", "test"]:
140
- splits_to_configs = {self.config.name: self.config.name}
141
- else:
142
- splits_to_configs = {
143
- "train": self.config.name,
144
- "validation": "validation",
145
- "test": "test"
146
- }
147
-
148
- audio_urls = {
149
- split: [
150
- _DATA_URL.format(split=split, config=config, archive_id=i) for i in range(n_shards[split][config])
151
- ] for split, config in splits_to_configs.items()
152
- }
153
- audio_archive_paths = dl_manager.download(audio_urls)
154
-
155
- # In non-streaming mode, we extract the archives to have the data locally:
156
- local_extracted_archive_paths = dl_manager.extract(audio_archive_paths) \
157
- if not dl_manager.is_streaming else \
158
- {split: [None] * len(audio_archive_paths[split]) for split in splits_to_configs}
159
-
160
- manifest_urls = {
161
- split: _MANIFEST_URL.format(split=split, config=config) for split, config in splits_to_configs.items()
162
- }
163
- manifest_paths = dl_manager.download_and_extract(manifest_urls)
164
-
165
- # To access the audio data from the TAR archives using the download manager,
166
- # we have to use the dl_manager.iter_archive method
167
- #
168
- # This is because dl_manager.download_and_extract
169
- # doesn't work to stream TAR archives in streaming mode.
170
- # (we have to stream the files of a TAR archive one by one)
171
- #
172
- # The iter_archive method returns an iterable of (path_within_archive, file_obj) for every
173
- # file in a TAR archive.
174
- splits_to_names = {
175
- "train": datasets.Split.TRAIN,
176
- "validation": datasets.Split.VALIDATION,
177
- "test": datasets.Split.TEST,
178
- }
179
- split_generators = []
180
- for split in splits_to_configs:
181
- split_generators.append(
182
- datasets.SplitGenerator(
183
- name=splits_to_names[split],
184
- gen_kwargs={
185
- "local_extracted_archive_paths": local_extracted_archive_paths[split],
186
- # use iter_archive here to access the files in the TAR archives:
187
- "archives": [dl_manager.iter_archive(path) for path in audio_archive_paths[split]],
188
- "manifest_path": manifest_paths[split],
189
- }
190
- )
191
- )
192
-
193
- return split_generators
194
-
195
- def _generate_examples(self, local_extracted_archive_paths, archives, manifest_path):
196
- meta = dict()
197
- with open(manifest_path, "r", encoding="utf-8") as f:
198
- for line in tqdm(f, desc="reading metadata file"):
199
- sample_meta = json.loads(line)
200
- _id = sample_meta["audio_document_id"]
201
- texts = sample_meta["training_data"]["label"]
202
- audio_filenames = sample_meta["training_data"]["name"]
203
- durations = sample_meta["training_data"]["duration_ms"]
204
- for audio_filename, text, duration in zip(audio_filenames, texts, durations):
205
- audio_filename = audio_filename.lstrip("./")
206
- meta[audio_filename] = {
207
- "audio_document_id": _id,
208
- "text": text,
209
- "duration_ms": duration
210
- }
211
-
212
- for local_extracted_archive_path, archive in zip(local_extracted_archive_paths, archives):
213
- # Here we iterate over all the files within the TAR archive:
214
- for audio_filename, audio_file in archive:
215
- audio_filename = audio_filename.lstrip("./")
216
- # if an audio file exists locally (i.e. in default, non-streaming mode) set the full path to it
217
- # joining path to directory that the archive was extracted to and audio filename.
218
- path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path \
219
- else audio_filename
220
- yield audio_filename, {
221
- "id": audio_filename,
222
- "audio": {"path": path, "bytes": audio_file.read()},
223
- "text": meta[audio_filename]["text"],
224
- "duration_ms": meta[audio_filename]["duration_ms"]
225
- }
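
For context, the deleted file is a standard `datasets` loading script, so before this commit it would typically have been consumed through `datasets.load_dataset`. A minimal usage sketch follows; the "clean" config, streaming mode, and the printed fields are illustrative choices taken from the script above, not part of this commit.

from datasets import load_dataset

# Stream the "clean" configuration defined by the script above.
# Note: recent versions of `datasets` may additionally require
# trust_remote_code=True for script-based datasets.
ds = load_dataset("MLCommons/peoples_speech", "clean", split="train", streaming=True)

# Pull one example; fields match the features declared in _info().
sample = next(iter(ds))
print(sample["id"], sample["duration_ms"], sample["text"])
# sample["audio"] is decoded by the datasets.Audio() feature into
# {"path": ..., "array": ..., "sampling_rate": ...}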