"""SQUAD: The Stanford Question Answering Dataset."""
import os

import datasets
from datasets.tasks import AutomaticSpeechRecognition

_DATA_URL = "https://huggingface.co/datasets/aymanelmar/joha/resolve/main/joha.tar.gz"
_CITATION = """\
@inproceedings{commonvoice:2020,
author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
title = {Common Voice: A Massively-Multilingual Speech Corpus},
booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
pages = {4211--4215},
year = 2020
}
"""
_DESCRIPTION = """\
Common Voice is Mozilla's initiative to help teach machines how real people speak.
The dataset currently consists of 7,335 validated hours of speech in 60 languages, but we’re always adding more voices and languages.
"""

class JohaDataset(datasets.GeneratorBasedBuilder):
    """Builder for the Joha speech dataset."""

    def _info(self):
        features = datasets.Features(
            {
                "file_name": datasets.Value("string"),
                "words": datasets.Value("string"),
                "duration": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=48_000),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            citation=_CITATION,
            task_templates=[
                AutomaticSpeechRecognition(audio_file_path_column="audio", transcription_column="words")
            ],
        )
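
    # With the features above, a single yielded example looks like this
    # (values are purely illustrative, not taken from the actual archive):
    #     {
    #         "file_name": "clips/sample_0001.mp3",
    #         "words": "a transcript of the utterance",
    #         "duration": "3.2",
    #         "audio": {"path": None, "bytes": b"..."},
    #     }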

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download the TAR archive that contains the audio files:
        archive_path = dl_manager.download(_DATA_URL)
        # First we locate the data using the path within the archive:
        path_to_data = "."
        path_to_clips = path_to_data
        metadata_filepaths = {
            split: "/".join([path_to_data, f"{split}.tsv"])
            for split in ["train", "test", "validation"]
        }
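        # e.g. metadata_filepaths == {"train": "./train.tsv", "test": "./test.tsv",
        #                             "validation": "./validation.tsv"}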
        # (Optional) In non-streaming mode, we can extract the archive locally to have
        # actual local audio files; in streaming mode there is nothing to extract:
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else None
        # To access the audio data from the TAR archive using the download manager,
        # we have to use the dl_manager.iter_archive method.
        #
        # This is because dl_manager.download_and_extract
        # doesn't work to stream TAR archives in streaming mode
        # (we have to stream the files of a TAR archive one by one).
        #
        # The iter_archive method returns an iterable of (path_within_archive, file_obj)
        # for every file in the TAR archive.
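        # For example, one pass over the archive might yield (hypothetical layout):
        #     ("./train.tsv", <file object>), ("./clips/sample_0001.mp3", <file object>), ...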
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    # use iter_archive here to access the files in the TAR archive
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "archive_iterator2": dl_manager.iter_archive(archive_path),
                    "metadata_filepath": metadata_filepaths["train"],
                    "path_to_clips": path_to_clips,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    # use iter_archive here to access the files in the TAR archive
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "archive_iterator2": dl_manager.iter_archive(archive_path),
                    "metadata_filepath": metadata_filepaths["test"],
                    "path_to_clips": path_to_clips,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    # use iter_archive here to access the files in the TAR archive
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "archive_iterator2": dl_manager.iter_archive(archive_path),
                    "metadata_filepath": metadata_filepaths["validation"],
                    "path_to_clips": path_to_clips,
                },
            ),
        ]

    def _generate_examples(
        self, local_extracted_archive, archive_iterator, archive_iterator2, metadata_filepath, path_to_clips
    ):
        """Yields examples."""
        data_fields = list(self._info().features.keys())
        # "audio" is not a column of the TSV metadata files
        data_fields.remove("audio")
        path_idx = data_fields.index("file_name")
        all_field_values = {}
        metadata_found = False
        # First pass over the archive: find and parse the metadata TSV file of this split
        for path, f in archive_iterator:
            if path == metadata_filepath:
                metadata_found = True
                lines = f.readlines()
                headline = lines[0].decode("utf-8")
                column_names = headline.strip().split("\t")
                assert (
                    column_names == data_fields
                ), f"The file should have {data_fields} as column names, but has {column_names}"
                for line in lines[1:]:
                    field_values = line.decode("utf-8").strip().split("\t")
                    # set the full path (within the archive) of the mp3 audio file
                    audio_path = "/".join([path_to_clips, field_values[path_idx]])
                    all_field_values[audio_path] = field_values
                break
        if not metadata_found:
            raise FileNotFoundError(f"Could not find metadata file {metadata_filepath} in the archive")
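        # At this point all_field_values maps archive paths to metadata rows, e.g.
        # (illustrative): {"./clips/sample_0001.mp3": ["clips/sample_0001.mp3", "a transcript", "3.2"]}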
        # Second pass over the archive: stream the audio files that have metadata
        for path, f in archive_iterator2:
            if path in all_field_values:
                # retrieve the metadata corresponding to this audio file
                field_values = all_field_values[path]
                result = {key: value for key, value in zip(data_fields, field_values)}
                # set the local path to None if the audio file doesn't exist locally
                # (i.e. in streaming mode)
                local_path = os.path.join(local_extracted_archive, path) if local_extracted_archive else None
                result["audio"] = {"path": local_path, "bytes": f.read()}
                yield path, result
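

if __name__ == "__main__":
    # Minimal smoke test, not part of the loading script itself: a sketch that
    # assumes this file is saved locally (e.g. as "joha.py") and that the
    # archive at _DATA_URL is reachable. Streaming mode reads the TAR archive
    # file by file instead of downloading and extracting everything up front.
    from datasets import load_dataset

    ds = load_dataset(__file__, split="train", streaming=True)
    sample = next(iter(ds))
    print(sample["file_name"], sample["words"], sample["duration"])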