AymanElmar
commited on
Commit
·
7961dc7
1
Parent(s):
0636729
changed joha.py
Browse files
joha.py
CHANGED
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Joha: an audio dataset with per-split TSV transcriptions, loaded from a single TAR archive."""
|
2 |
+
|
3 |
+
import json
import os

import datasets
from datasets.tasks import AutomaticSpeechRecognition, QuestionAnsweringExtractive
|
8 |
+
|
9 |
+
|
10 |
+
_DATA_URL = "https://huggingface.co/datasets/aymanelmar/joha/resolve/main/joha.tar.gz"
|
11 |
+
|
12 |
+
_CITATION = """\
|
13 |
+
@inproceedings{commonvoice:2020,
|
14 |
+
author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
|
15 |
+
title = {Common Voice: A Massively-Multilingual Speech Corpus},
|
16 |
+
booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
|
17 |
+
pages = {4211--4215},
|
18 |
+
year = 2020
|
19 |
+
}
|
20 |
+
"""
|
21 |
+
|
22 |
+
_DESCRIPTION = """\
|
23 |
+
Common Voice is Mozilla's initiative to help teach machines how real people speak.
|
24 |
+
The dataset currently consists of 7,335 validated hours of speech in 60 languages, but we’re always adding more voices and languages.
|
25 |
+
"""
|
26 |
+
|
27 |
+
|
28 |
+
|
29 |
+
class johaDataset(datasets.GeneratorBasedBuilder):
    """Builder for the "joha" speech dataset.

    The dataset ships as one TAR archive containing ``train.tsv`` /
    ``test.tsv`` / ``validation.tsv`` metadata files plus the audio clips
    under ``data/<split>/``. Each TSV row carries the columns declared in
    ``_info`` (minus ``audio``, which is read from the clip file itself).
    """

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` describing features and task."""
        features = datasets.Features(
            {
                "file_name": datasets.Value("string"),
                "words": datasets.Value("string"),
                "duration": datasets.Value("string"),
                # Audio is decoded lazily by the Audio feature.
                # NOTE(review): 48 kHz assumed from this declaration — confirm it
                # matches the actual recordings in the archive.
                "audio": datasets.Audio(sampling_rate=48_000),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            citation=_CITATION,
            # FIX: AutomaticSpeechRecognition was referenced but never imported
            # (the file originally imported QuestionAnsweringExtractive instead).
            task_templates=[
                AutomaticSpeechRecognition(audio_column="audio", transcription_column="words")
            ],
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for train / test / validation.

        Downloads (without extracting) the TAR archive so it can be streamed
        with ``dl_manager.iter_archive``; in non-streaming mode the archive is
        additionally extracted so examples can point at real local files.
        """
        # Download the TAR archive that contains the audio files:
        archive_path = dl_manager.download(_DATA_URL)

        # Locate the data using the path *within* the archive.
        # FIX: with path_to_data == "" the original produced "/data" and
        # "/train.tsv"; iter_archive member paths never start with "/", so the
        # metadata file could never be matched. filter(None, ...) drops the
        # empty component while still honoring a non-empty prefix.
        path_to_data = ""
        path_to_clips = "/".join(filter(None, [path_to_data, "data"]))
        metadata_filepaths = {
            split: "/".join(filter(None, [path_to_data, f"{split}.tsv"]))
            for split in ["train", "test", "validation"]
        }
        # In non-streaming mode, extract the archive locally so examples can
        # reference actual local audio files:
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else None

        # dl_manager.iter_archive streams (path_within_archive, file_obj)
        # pairs one by one — required because download_and_extract cannot
        # stream TAR archives in streaming mode.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "metadata_filepath": metadata_filepaths["train"],
                    "path_to_clips": path_to_clips + "/train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "metadata_filepath": metadata_filepaths["test"],
                    "path_to_clips": path_to_clips + "/test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "metadata_filepath": metadata_filepaths["validation"],
                    "path_to_clips": path_to_clips + "/validation",
                },
            ),
        ]

    def _generate_examples(self, local_extracted_archive, archive_iterator, metadata_filepath, path_to_clips):
        """Yields (key, example) pairs by streaming the TAR archive.

        The metadata TSV must appear in the archive before the clips it
        describes; rows are buffered keyed by their in-archive audio path,
        then matched against each clip file as it streams past.
        """
        data_fields = list(self._info().features.keys())

        # "audio" is not a column of the TSV files — it comes from the clip bytes.
        data_fields.remove("audio")
        # FIX: the original looked up data_fields.index("path"), but no "path"
        # feature exists — the relative-path column is "file_name".
        path_idx = data_fields.index("file_name")

        all_field_values = {}
        metadata_found = False
        # Iterate over every file within the TAR archive:
        for path, f in archive_iterator:
            if path == metadata_filepath:
                # Parse the metadata TSV file.
                metadata_found = True
                lines = f.readlines()
                headline = lines[0].decode("utf-8")

                column_names = headline.strip().split("\t")
                assert (
                    column_names == data_fields
                ), f"The file should have {data_fields} as column names, but has {column_names}"
                for line in lines[1:]:
                    field_values = line.decode("utf-8").strip().split("\t")
                    # Key the row by the clip's full in-archive path.
                    audio_path = "/".join([path_to_clips, field_values[path_idx]])
                    all_field_values[audio_path] = field_values
            elif path.startswith(path_to_clips):
                # A clip file: look up its buffered metadata and yield.
                assert metadata_found, "Found audio clips before the metadata TSV file."
                if not all_field_values:
                    break
                if path in all_field_values:
                    field_values = all_field_values[path]

                    # If a row is incomplete, pad with placeholder values.
                    if len(field_values) < len(data_fields):
                        field_values += (len(data_fields) - len(field_values)) * ["''"]

                    result = dict(zip(data_fields, field_values))

                    # Set the audio feature; prefer a real local path when the
                    # archive was extracted (non-streaming mode).
                    path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
                    result["audio"] = {"path": path, "bytes": f.read()}
                    # FIX: the original wrote result["path"], a key absent from
                    # the declared features; record the resolved location in
                    # "file_name" instead (None when streaming, i.e. no local file).
                    result["file_name"] = path if local_extracted_archive else None

                    yield path, result