holylovenia committed on
Commit
11707a8
·
1 Parent(s): 479aedc

Upload indspeech_news_tts.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. indspeech_news_tts.py +209 -0
indspeech_news_tts.py ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from pathlib import Path
18
+ from typing import Dict, List, Tuple
19
+
20
+ import datasets
21
+
22
+ from nusacrowd.utils import schemas
23
+ from nusacrowd.utils.configs import NusantaraConfig
24
+ from nusacrowd.utils.constants import Tasks
25
+
26
# BibTeX entries for the three publications describing this dataset and the
# A-STAR project it was produced under.
# NOTE(review): some entries lack a comma after `pages`, and "Tranlating" is
# spelled as shown — kept verbatim, since this string is emitted to users and
# mirrors the upstream citation text.
_CITATION = """\
@inproceedings{sakti-tts-cocosda-2008,
title = "Development of HMM-based Indonesian Speech Synthesis",
author = "Sakti, Sakriani and Maia, Ranniery and Sakai, Shinsuke and Nakamura, Satoshi",
booktitle = "Proc. Oriental COCOSDA",
year = "2008",
pages = "215--220"
address = "Kyoto, Japan"
}

@inproceedings{sakti-tts-malindo-2010,
title = "Quality and Intelligibility Assessment of Indonesian HMM-Based Speech Synthesis System",
author = "Sakti, Sakriani and Sakai, Shinsuke and Isotani, Ryosuke and Kawai, Hisashi and Nakamura, Satoshi",
booktitle = "Proc. MALINDO",
year = "2010",
pages = "51--57"
address = "Jakarta, Indonesia"
}

@article{sakti-s2st-csl-2013,
title = "{A-STAR}: Toward Tranlating Asian Spoken Languages",
author = "Sakti, Sakriani and Paul, Michael and Finch, Andrew and Sakai, Shinsuke and Thang, Tat Vu, and Kimura, Noriyuki
and Hori, Chiori and Sumita, Eiichiro and Nakamura, Satoshi and Park, Jun and Wutiwiwatchai, Chai and Xu, Bo and Riza, Hammam
and Arora, Karunesh and Luong, Chi Mai and Li, Haizhou",
journal = "Special issue on Speech-to-Speech Translation, Computer Speech and Language Journal",
volume = "27",
number ="2",
pages = "509--527",
year = "2013",
publisher = "Elsevier"
}
"""

# Dataset identifier used by the loader registry.
_DATASETNAME = "INDspeech_NEWS_TTS"
# ISO 639-3 language codes covered by the dataset (Indonesian only).
_LANGUAGES = ["ind"]

_DESCRIPTION = """\
INDspeech_NEWS_TTS is a speech dataset for developing an Indonesian text-to-speech synthesis system. The data was developed by Advanced Telecommunication Research Institute International (ATR) Japan under the the Asian speech translation advanced research (A-STAR) project [Sakti et al., 2013].
"""
_HOMEPAGE = "https://github.com/s-sakti/data_indsp_news_tts"

_LICENSE = "CC-BY-NC-SA 4.0"

# Training subsets: amount of speech in minutes (12/30/60/120) or the
# ZeroSpeech-challenge split ("ZR"), mapped to the .lst file of utterance ids.
_TRAIN_TASKS = {"120": "Orig_trainset_120min.lst", "60": "Orig_trainset_60min.lst", "30": "Orig_trainset_30min.lst", "12": "Orig_trainset_12min.lst", "ZR": "ZRChallenge_trainset.lst"}

# Test subsets; the SUS test set exists upstream but is deliberately disabled here.
_TEST_TASKS = {
    "MOS": "Orig_testset_MOS.lst",
    # "SUS": "Orig_testset_SUS.lst",
    "ZR": "ZRChallenge_testset.lst",
}

# Base URLs: utterance-id lists, zipped per-speaker-group speech folders,
# and the single tab-separated transcript file.
_URLS = {"lst_": "https://github.com/s-sakti/data_indsp_news_tts/raw/main/lst/", "speech_": "https://github.com/s-sakti/data_indsp_news_tts/raw/main/speech/", "text_": "https://github.com/s-sakti/data_indsp_news_tts/raw/main/text/orig_transcript"}

_SUPPORTED_TASKS = [Tasks.TEXT_TO_SPEECH]

_SOURCE_VERSION = "1.0.0"

_NUSANTARA_VERSION = "1.0.0"
# Data is fetched from public URLs; no local files required.
_LOCAL = False
86
+
87
+ def nusantara_config_constructor(schema, version, train_task, test_task):
88
+
89
+ if schema != "source" and schema != "nusantara_sptext":
90
+ raise ValueError(f"Invalid schema: {schema}")
91
+
92
+ return NusantaraConfig(
93
+ name="indspeech_news_tts_{tr_task}_{ts_task}_{schema}".format(schema=schema, tr_task=train_task, ts_task=test_task),
94
+ version=datasets.Version(version),
95
+ description="indspeech_news_tts {schema} schema for {tr_task} train and {ts_task} test task".format(schema=schema, tr_task=train_task, ts_task=test_task),
96
+ schema=schema,
97
+ subset_id="indspeech_news_tts_{tr_task}_{ts_task}".format(tr_task=train_task, ts_task=test_task),
98
+ )
99
+
100
+
101
+ class INDspeechNEWSTTS(datasets.GeneratorBasedBuilder):
102
+ """
103
+ Tasks:
104
+ Original = Train [120, 60, 30, 12], Test [MOS, SUS]
105
+ ZR = Train, Test
106
+ """
107
+
108
+ SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
109
+ NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
110
+
111
+ BUILDER_CONFIGS = (
112
+ [nusantara_config_constructor("source", _SOURCE_VERSION, train, test) for train in ["12", "30", "60", "120"] for test in ["MOS"]]
113
+ + [nusantara_config_constructor("nusantara_sptext", _NUSANTARA_VERSION, train, test) for train in ["12", "30", "60", "120"] for test in ["MOS"]]
114
+ + [nusantara_config_constructor("source", _SOURCE_VERSION, "ZR", "ZR")]
115
+ + [nusantara_config_constructor("nusantara_sptext", _NUSANTARA_VERSION, "ZR", "ZR")]
116
+ )
117
+
118
+ DEFAULT_CONFIG_NAME = "indspeech_news_tts_120_MOS_source"
119
+
120
+ def _info(self) -> datasets.DatasetInfo:
121
+ if self.config.schema == "source":
122
+ features = datasets.Features(
123
+ {
124
+ "id": datasets.Value("string"),
125
+ "speaker_id": datasets.Value("string"),
126
+ "path": datasets.Value("string"),
127
+ "audio": datasets.Audio(sampling_rate=16_000),
128
+ "text": datasets.Value("string"),
129
+ "gender": datasets.Value("string"),
130
+ }
131
+ )
132
+ elif self.config.schema == "nusantara_sptext":
133
+ features = schemas.speech_text_features
134
+
135
+ return datasets.DatasetInfo(
136
+ description=_DESCRIPTION,
137
+ features=features,
138
+ homepage=_HOMEPAGE,
139
+ license=_LICENSE,
140
+ citation=_CITATION,
141
+ )
142
+
143
+ def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
144
+ """Returns SplitGenerators."""
145
+ tr_task = self.config.name.split("_")[3] # [12,30,60,120,ZR]
146
+ ts_task = self.config.name.split("_")[4] # [MOS, ZR]
147
+
148
+ lst_train_dir = Path(dl_manager.download_and_extract(_URLS["lst_"] + _TRAIN_TASKS[tr_task]))
149
+ lst_test_dir = Path(dl_manager.download_and_extract(_URLS["lst_"] + _TEST_TASKS[ts_task]))
150
+ txt_dir = Path(dl_manager.download_and_extract(_URLS["text_"]))
151
+ speech_dir = {"SPK00_" + str(spk).zfill(2) + "00": Path(dl_manager.download_and_extract(_URLS["speech_"] + "SPK00_" + str(spk).zfill(2) + "00.zip") + "/SPK00_" + str(spk).zfill(2) + "00") for spk in range(0, 21)}
152
+ # print(os.listdir(speech_dir["SPK00_1500"]))
153
+
154
+ return [
155
+ datasets.SplitGenerator(
156
+ name=datasets.Split.TRAIN,
157
+ # Whatever you put in gen_kwargs will be passed to _generate_examples
158
+ gen_kwargs={
159
+ "filepath": {"samples": lst_train_dir, "text": txt_dir, "speech": speech_dir},
160
+ "split": "train",
161
+ },
162
+ ),
163
+ datasets.SplitGenerator(
164
+ name=datasets.Split.TEST,
165
+ gen_kwargs={
166
+ "filepath": {"samples": lst_test_dir, "text": txt_dir, "speech": speech_dir},
167
+ "split": "test",
168
+ },
169
+ ),
170
+ ]
171
+
172
+ def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
173
+ """Yields examples as (key, example) tuples."""
174
+
175
+ samples = open(filepath["samples"], "r").read().splitlines()
176
+
177
+ transcripts = {}
178
+ with open(filepath["text"]) as file:
179
+ for line in file:
180
+ key, text = line.replace("\n", "").split("\t")
181
+ transcripts[key] = text
182
+
183
+ for key, id in enumerate(samples):
184
+ spk_id, gender, speech_id = id.split("_")
185
+ spk_group = speech_id[:2]
186
+
187
+ if self.config.schema == "source":
188
+ example = {
189
+ "id": id,
190
+ "speaker_id": spk_id,
191
+ "path": os.path.join(filepath["speech"]["SPK00_" + spk_group + "00"], id + ".wav"),
192
+ "audio": os.path.join(filepath["speech"]["SPK00_" + spk_group + "00"], id + ".wav"),
193
+ "text": transcripts[id],
194
+ "gender": gender,
195
+ }
196
+ yield key, example
197
+ elif self.config.schema == "nusantara_sptext":
198
+ example = {
199
+ "id": str(id),
200
+ "speaker_id": spk_id,
201
+ "path": os.path.join(filepath["speech"]["SPK00_" + spk_group + "00"], id + ".wav"),
202
+ "audio": os.path.join(filepath["speech"]["SPK00_" + spk_group + "00"], id + ".wav"),
203
+ "text": transcripts[id],
204
+ "metadata": {
205
+ "speaker_age": None,
206
+ "speaker_gender": gender,
207
+ },
208
+ }
209
+ yield key, example