# This script for Hugging Face's datasets library was written by Théo Gigant
import csv
import json
import os
from pathlib import Path
import datasets
_CITATION = """\
"""
_DESCRIPTION = """\
This corpus consists of approximately 22 hours of speech recordings. Transcripts are provided for all the recordings. The corpus can be divided into 3 parts:
1. Yaounde
Collected by a team from the U.S. Military Academy's Center for Technology Enhanced Language Learning (CTELL) in 2003 in Yaoundé, Cameroon. It has recordings from 84 speakers, 48 male and 36 female.
2. CA16
This part was collected by an RDECOM Science Team that participated in the United Nations exercise Central Accord 16 (CA16) in Libreville, Gabon in June 2016. The Science Team included DARPA's Dr. Boyan Onyshkevich and Dr. Aaron Lawson (SRI International), as well as RDECOM scientists. It has recordings from 125 speakers from Cameroon, Chad, Congo and Gabon.
3. Niger
This part was collected from 23 speakers in Niamey, Niger, Oct. 26-30, 2015. These speakers were students in a course for officers and sergeants presented by Army trainers assigned to U.S. Army Africa. The data was collected by RDECOM Science & Technology Advisors Major Eddie Strimel and Mr. Bill Bergen.
"""
_HOMEPAGE = "http://www.openslr.org/57/"
_LICENSE = ""
_URLS = {
"fr": "https://www.openslr.org/resources/57/African_Accented_French.tar.gz",
}
class AfricanAccentedFrench(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="fr", version=VERSION, description=""),
]
DEFAULT_CONFIG_NAME = "fr"

    def _info(self):
        # Each example pairs a transcription ("sentence") with its 16 kHz audio file.
        features = datasets.Features(
            {
                "sentence": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=16_000),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download and extract the single tar.gz archive, then expose train and test splits
        # that both read from the same extracted directory.
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "datapath": data_dir,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "datapath": data_dir,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, datapath, split):
        key = 0
        files = {}
        # The "train" split reads only the train transcripts; the "test" split pools
        # the test, devtest and dev transcript folders.
        for split_name in ["train"] if split == "train" else ["test", "devtest", "dev"]:
            for speaker in os.listdir(os.path.join(datapath, "African_Accented_French", "transcripts", split_name)):
                for meta in os.listdir(os.path.join(datapath, "African_Accented_French", "transcripts", split_name, speaker)):
                    with open(os.path.join(datapath, "African_Accented_French", "transcripts", split_name, speaker, meta), "r") as transcript:
                        for line in transcript.readlines():
                            # Each transcript line is "<audio reference> <transcription>".
                            line = line.split(maxsplit=1)
                            if len(line) < 2:
                                # Skip empty or malformed lines.
                                continue
                            if "answers" not in line[0]:
                                # Normalize the audio reference to a bare ".wav" filename.
                                filename = line[0].split("/")[-1]
                                if ".tdf" in filename or ".wav" in filename:
                                    filename = f"{filename[:-4]}.wav"
                                else:
                                    filename = f"{filename}.wav"
                                files[filename] = line[1]
        # Walk the extracted archive and yield every wav file that has a transcription.
        for f in Path(os.path.join(datapath, "African_Accented_French")).rglob("*.wav"):
            if f.name in files:
                yield key, {
                    "sentence": files[f.name],
                    "audio": f.absolute().as_posix(),
                }
                key += 1
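

# Usage sketch (not part of the original script; the local path below is illustrative).
# Once this file is saved locally, the dataset can typically be loaded with the standard
# `datasets` API, which downloads and extracts the archive from openslr.org on first use:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("./african_accented_french.py", "fr")
#     print(ds["train"][0]["sentence"])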