adding dataset config
multispeaker-tt-sinhala.py
ADDED
@@ -0,0 +1,105 @@
import os
import re

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
This dataset contains multi-speaker, high-quality transcribed audio data for Sinhalese. It consists of wave files and a TSV file.
The file si_lk.lines.txt contains a FileID, which in turn contains the UserID, and the transcription of the audio in that file.
The dataset has been manually quality checked, but there might still be errors.

This dataset was collected by Google in Sri Lanka.
"""

_CITATION = """
@inproceedings{kjartansson-etal-tts-sltu2018,
    title = {{A Step-by-Step Process for Building TTS Voices Using Open Source Data and Framework for Bangla, Javanese, Khmer, Nepali, Sinhala, and Sundanese}},
    author = {Keshan Sodimana and Knot Pipatsrisawat and Linne Ha and Martin Jansche and Oddur Kjartansson and Pasindu De Silva and Supheakmungkol Sarin},
    booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
    year = {2018},
    address = {Gurugram, India},
    month = aug,
    pages = {66--70},
    URL = {http://dx.doi.org/10.21437/SLTU.2018-14}
}
"""

_URL = "https://www.openslr.org/30/"
_DATA_URL = "https://www.openslr.org/resources/30/si_lk.tar.gz"
_LICENSE = "https://www.openslr.org/resources/30/LICENSE.txt"
_LANGUAGES = [
    "si",
]


class SiTTSConfig(datasets.BuilderConfig):
    """BuilderConfig for SiTTS."""

    def __init__(self, *args, languages, **kwargs):
        """BuilderConfig for SiTTS.

        Args:
            languages (:obj:`List[str]`): list of languages to load
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(*args, name="+".join(languages), **kwargs)
        self.languages = languages


class SiTTS(datasets.GeneratorBasedBuilder):
    """SiTTS, a manually quality-checked, multi-speaker Sinhala TTS corpus."""

    BUILDER_CONFIGS = [SiTTSConfig(languages=[lang]) for lang in _LANGUAGES]
    BUILDER_CONFIG_CLASS = SiTTSConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "sentence": datasets.Value("string"),
                    "file_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        # Download and extract the audio archive, and download the index file
        # that holds the transcriptions.
        abs_path_to_clips = dl_manager.download_and_extract(_DATA_URL)
        abs_path_to_data = dl_manager.download(f"{_URL}si_lk.lines.txt")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    # dl_manager.download returns the path of the downloaded
                    # file itself, so it is passed on directly.
                    "filepath": abs_path_to_data,
                    "path_to_clips": abs_path_to_clips,
                },
            ),
        ]

    def _generate_examples(self, filepath, path_to_clips):
        data_fields = list(self._info().features.keys())
        path_idx = data_fields.index("file_path")

        with open(filepath, encoding="utf-8") as f:
            lines = f.readlines()

            for id_, line in enumerate(lines):
                # Each line pairs a file id (sin_...) with a quoted transcription.
                sentence = re.findall(r'"(.*?)"', line)[0].strip()
                # Backslashes are not allowed inside f-string expressions before
                # Python 3.12, so build the wav file name in two steps.
                file_id = re.findall(r"(sin_\S+)", line)[0]
                file_path = f"{file_id}.wav"
                field_values = [sentence, file_path]

                # set absolute path for wav audio file
                field_values[path_idx] = os.path.join(path_to_clips, field_values[path_idx])

                # if data is incomplete, fill with empty values
                if len(field_values) < len(data_fields):
                    field_values += (len(data_fields) - len(field_values)) * [""]

                yield id_, dict(zip(data_fields, field_values))
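
For reference, a minimal usage sketch of how a loading script like this is typically consumed. The local file name matches this script, but the exact call pattern (and whether newer `datasets` releases ask for extra confirmation before running a script) depends on your installed library version, so treat the snippet as an assumption rather than the canonical way to load this dataset.

# Minimal usage sketch (assumption: the script above is saved locally as
# multispeaker-tt-sinhala.py; behaviour may vary with the `datasets` version).
from datasets import load_dataset

# Pointing load_dataset at the loading script downloads si_lk.tar.gz, fetches
# si_lk.lines.txt, and yields one example per transcription line. The single
# builder config is named "si", so no config name needs to be passed.
ds = load_dataset("multispeaker-tt-sinhala.py", split="train")

print(ds[0]["sentence"])   # transcription text
print(ds[0]["file_path"])  # absolute path to the matching .wav clip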