Commit · 7dbcdde
1 Parent(s): dfa5b2d
file ids - transcriptions

common_voice_13_0.py CHANGED (+9 -11)
@@ -50,9 +50,9 @@ _TRANSCRIPT_URL = _BASE_URL + "transcript/{lang}/{split}.tsv"
 
 _N_SHARDS_URL = _BASE_URL + "n_shards.json"
 
-_WHISPER_TRANSCRIPT_URL = "https://huggingface.co/datasets/distil-whisper/
+_WHISPER_TRANSCRIPT_URL = "https://huggingface.co/datasets/distil-whisper/whisper_transcriptions_greedy/resolve/main/common_voice_13_0"
 
-_WHISPER_TRANSCRIPT_URLs = _WHISPER_TRANSCRIPT_URL + "/{split}-transcription.
+_WHISPER_TRANSCRIPT_URLs = _WHISPER_TRANSCRIPT_URL + "/{split}-transcription.csv"
 
 
 class CommonVoiceConfig(datasets.BuilderConfig):
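This hunk points the two Whisper-transcript constants at per-split CSV files in the distil-whisper/whisper_transcriptions_greedy repository (the old right-hand sides are truncated in the page source). A minimal sketch of how the new template expands; the split names below are placeholders, the real ones come from the builder's split generators:

# Sketch only: expand the new transcription-URL template for a few split names.
# The split names here are illustrative; the script fills in {split} itself.
_WHISPER_TRANSCRIPT_URL = (
    "https://huggingface.co/datasets/distil-whisper/"
    "whisper_transcriptions_greedy/resolve/main/common_voice_13_0"
)
_WHISPER_TRANSCRIPT_URLs = _WHISPER_TRANSCRIPT_URL + "/{split}-transcription.csv"

for split in ("train", "validation", "test"):
    print(_WHISPER_TRANSCRIPT_URLs.format(split=split))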
@@ -110,6 +110,7 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
         features = datasets.Features(
             {
                 "client_id": datasets.Value("string"),
+                "path": datasets.Value("string"),
                 "id": datasets.Value("string"),
                 "audio": datasets.features.Audio(sampling_rate=48_000),
                 "text": datasets.Value("string"),
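This hunk declares the new "path" column. For readability, a consolidated sketch of the feature block as it reads after the change; only the columns visible in this hunk are shown, plus a "whisper_transcript" string column that the generator code further down assumes exists:

# Sketch of the post-change feature schema (partial). Columns not shown in the
# diff are omitted; "whisper_transcript" is assumed from the generator code.
import datasets

features = datasets.Features(
    {
        "client_id": datasets.Value("string"),
        "path": datasets.Value("string"),  # new column added in this commit
        "id": datasets.Value("string"),
        "audio": datasets.features.Audio(sampling_rate=48_000),
        "text": datasets.Value("string"),
        "whisper_transcript": datasets.Value("string"),
    }
)
print(features)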
@@ -196,13 +197,11 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
                         row[field] = ""
                 metadata[row["path"]] = row
 
-
-
+        whisper_transcriptions = dict()
         with open(whisper_transcript, encoding="utf-8") as f:
-
-
-
-            idx = 0
+            reader = csv.DictReader(f, delimiter=",", quoting=csv.QUOTE_NONE)
+            for line in tqdm(reader, desc="Reading transcriptions..."):
+                whisper_transcriptions[line["file_id"]] = line["whisper_transcript"]
 
         for i, audio_archive in enumerate(archives):
            for filename, file in audio_archive:
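This hunk replaces the old transcript-reading block (and its idx counter) with a dict keyed by file_id. A standalone sketch of that lookup-building step, run on an in-memory CSV in place of the downloaded {split}-transcription.csv; the sample rows are invented, the column names come from the diff, and csv/tqdm are presumably already imported at the top of the script (not shown here):

# Standalone sketch of the new lookup-building step, using an in-memory CSV in
# place of the downloaded {split}-transcription.csv. The two rows are invented.
import csv
import io

sample_csv = io.StringIO(
    "file_id,whisper_transcript\n"
    "common_voice_en_100001.mp3,hello world\n"
    "common_voice_en_100002.mp3,see you tomorrow\n"
)

whisper_transcriptions = dict()
reader = csv.DictReader(sample_csv, delimiter=",", quoting=csv.QUOTE_NONE)
for line in reader:  # the script wraps this in tqdm for a progress bar
    whisper_transcriptions[line["file_id"]] = line["whisper_transcript"]

print(whisper_transcriptions["common_voice_en_100001.mp3"])  # -> hello world

Keying the dict by file_id and looking it up once per audio file removes the need for the idx counter, which suggests the old (now truncated) lookup was positional and required the transcriptions to follow the archive order.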
@@ -213,9 +212,8 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
                     # set the audio feature and the path to the extracted file
                     path = os.path.join(local_extracted_archive_paths[i], filename) if local_extracted_archive_paths else filename
                     result["audio"] = {"path": path, "bytes": file.read()}
+                    result["path"] = path
                     result["id"] = filename
-                    result["whisper_transcript"] =
+                    result["whisper_transcript"] = whisper_transcriptions.get(filename, None)
 
                     yield path, result
-
-                    idx += 1
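The last hunk records the resolved audio path in the new "path" column and fills "whisper_transcript" by filename lookup, dropping the idx counter. A toy sketch of that per-file join, with a made-up in-memory archive standing in for the real (filename, file object) iterator over the audio TAR shards:

# Toy sketch of the per-file join in _generate_examples. "archive" and its two
# entries are invented; the lookup/fallback logic mirrors the diff.
whisper_transcriptions = {"common_voice_en_100001.mp3": "hello world"}

archive = [
    ("common_voice_en_100001.mp3", b"\x00fake-mp3-bytes"),
    ("common_voice_en_100002.mp3", b"\x00fake-mp3-bytes"),
]

for filename, file_bytes in archive:
    result = {}
    result["audio"] = {"path": filename, "bytes": file_bytes}
    result["path"] = filename  # new column added in this commit
    result["id"] = filename
    # None when the transcription CSV has no row for this file
    result["whisper_transcript"] = whisper_transcriptions.get(filename, None)
    print(result["id"], "->", result["whisper_transcript"])

Using .get(filename, None) rather than direct indexing means an audio file missing from the transcription CSV still yields an example, just with an empty transcript.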
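For completeness, a hypothetical end-to-end check once the updated script is saved locally; the script path and the "hi" config name are placeholders, and the column contents depend on which files appear in the split's transcription CSV:

# Hypothetical usage of the updated loading script. The local path and the
# "hi" config name are placeholders; adjust to the language you need.
from datasets import load_dataset

ds = load_dataset("path/to/common_voice_13_0.py", "hi", split="train")

sample = ds[0]
# "path" is the newly declared string column; "whisper_transcript" is None for
# audio files that have no row in the split's transcription CSV.
print(sample["path"])
print(sample["whisper_transcript"])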