sanchit-gandhi committed
Commit 0f0101b · 1 Parent(s): c355214
Files changed (1)
  1. common_voice_13_0.py +22 -2
common_voice_13_0.py CHANGED
@@ -50,6 +50,10 @@ _TRANSCRIPT_URL = _BASE_URL + "transcript/{lang}/{split}.tsv"
 
 _N_SHARDS_URL = _BASE_URL + "n_shards.json"
 
+_WHISPER_TRANSCRIPT_URL = "https://huggingface.co/datasets/distil-whisper/common_voice_13_0/resolve/main/transcription_data/greedy_search/"
+
+_WHISPER_TRANSCRIPT_URLs = _WHISPER_TRANSCRIPT_URL + "/{split}-transcription.txt"
+
 
 class CommonVoiceConfig(datasets.BuilderConfig):
     """BuilderConfig for CommonVoice."""
@@ -117,6 +121,7 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
                 "locale": datasets.Value("string"),
                 "segment": datasets.Value("string"),
                 "variant": datasets.Value("string"),
+                "whisper_transcript": datasets.Value("string"),
             }
         )
 
@@ -138,6 +143,7 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
 
         audio_urls = {}
         splits = ("train", "dev", "test", "other", "invalidated")
+        splits_alt = ("train", "validation", "test")
         for split in splits:
             audio_urls[split] = [
                 _AUDIO_URL.format(lang=lang, split=split, shard_idx=i) for i in range(n_shards[lang][split])
@@ -148,13 +154,16 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
         meta_urls = {split: _TRANSCRIPT_URL.format(lang=lang, split=split) for split in splits}
         meta_paths = dl_manager.download_and_extract(meta_urls)
 
+        transcription_urls = {split: _WHISPER_TRANSCRIPT_URLs.format(split=split) for split in splits_alt}
+        transcript_archive_path = dl_manager.download(transcription_urls)
+
         split_generators = []
         split_names = {
             "train": datasets.Split.TRAIN,
             "dev": datasets.Split.VALIDATION,
             "test": datasets.Split.TEST,
         }
-        for split in splits:
+        for split, split_alt in zip(splits, splits_alt):
             split_generators.append(
                 datasets.SplitGenerator(
                     name=split_names.get(split, split),
@@ -162,13 +171,14 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
                         "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
                         "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
                         "meta_path": meta_paths[split],
+                        "whisper_transcript": transcript_archive_path[split_alt],
                     },
                 ),
             )
 
         return split_generators
 
-    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
+    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path, whisper_transcript):
         data_fields = list(self._info().features.keys())
         metadata = {}
         with open(meta_path, encoding="utf-8") as f:
@@ -186,6 +196,14 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
                         row[field] = ""
                 metadata[row["path"]] = row
 
+        whisper_transcripts = []
+
+        with open(whisper_transcript, encoding="utf-8") as f:
+            for row in f:
+                whisper_transcripts.append(row.rstrip("\n"))
+
+        idx = 0
+
         for i, audio_archive in enumerate(archives):
             for filename, file in audio_archive:
                 _, filename = os.path.split(filename)
@@ -196,6 +214,8 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
                     result["audio"] = {"path": path, "bytes": file.read()}
                     # set path to None if the audio file doesn't exist locally (i.e. in streaming mode)
                     result["path"] = path if local_extracted_archive_paths else filename
+                    result["whisper_transcript"] = whisper_transcripts[idx]
 
                     yield path, result
 
+                    idx += 1
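For reference, a minimal usage sketch of the resulting dataset, assuming the loading script is hosted in the distil-whisper/common_voice_13_0 dataset repo and that "hi" is a valid language config; neither is confirmed by the diff itself, and newer versions of the datasets library may additionally require trust_remote_code=True for script-based datasets.

    # Minimal sketch: load one example and compare the reference sentence with the
    # Whisper pseudo-label added by this commit. Repo id and config name are assumptions.
    from datasets import load_dataset

    cv = load_dataset(
        "distil-whisper/common_voice_13_0",  # assumed repo id hosting this script
        "hi",                                # assumed language config
        split="train",
        streaming=True,                      # iterate without downloading full archives
    )

    sample = next(iter(cv))
    print(sample["sentence"])            # human reference transcription from Common Voice
    print(sample["whisper_transcript"])  # greedy-search Whisper transcription (new column)

Because the script reads each {split}-transcription.txt line by line and assigns entries by a running index, the pseudo-labels are matched to audio purely by iteration order, so the transcript files are expected to contain exactly one line per example in the same order as the audio archives.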