Create mustc.py
mustc.py
ADDED
# coding=utf-8

import os
import yaml
from itertools import groupby
from pathlib import Path

import torchaudio

import datasets


_VERSION = "3.0.0"

_CITATION = """
@article{CATTONI2021101155,
    title = {MuST-C: A multilingual corpus for end-to-end speech translation},
    author = {Roldano Cattoni and Mattia Antonino {Di Gangi} and Luisa Bentivogli and Matteo Negri and Marco Turchi},
    journal = {Computer Speech & Language},
    volume = {66},
    pages = {101155},
    year = {2021},
    issn = {0885-2308},
    doi = {10.1016/j.csl.2020.101155},
    url = {https://www.sciencedirect.com/science/article/pii/S0885230820300887},
}
"""

_DESCRIPTION = """
MuST-C is a multilingual speech translation corpus whose size and quality facilitate
the training of end-to-end systems for speech translation from English into several languages.
For each target language, MuST-C comprises several hundred hours of audio recordings
from English [TED Talks](https://www.ted.com/talks), which are automatically aligned
at the sentence level with their manual transcriptions and translations.
"""

_HOMEPAGE = "https://ict.fbk.eu/must-c/"

# Target languages available in this v3 loader.
_LANGUAGES = ["de", "ja", "zh"]

_SAMPLE_RATE = 16_000


class MUSTC(datasets.GeneratorBasedBuilder):
    """MuST-C dataset (manual download required)."""

    VERSION = datasets.Version(_VERSION)

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=f"en-{lang}", version=datasets.Version(_VERSION)) for lang in _LANGUAGES
    ]
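    # Resulting config names: "en-de", "en-ja", "en-zh".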

    @property
    def manual_download_instructions(self):
        return f"""Please download MuST-C v3 from https://ict.fbk.eu/must-c/
        and unpack it with `tar xvzf MUSTC_v3.0_{self.config.name}.tar.gz`.
        Make sure to pass the path of the directory in which you unpacked the
        downloaded archive as `data_dir`:
        `datasets.load_dataset('mustc', data_dir="path/to/dir")`
        """

    # Expected layout of the unpacked archive:
    #
    # MUSTC_ROOT                      # <- point `data_dir` here
    # └── en-de
    #     └── data
    #         ├── dev
    #         │   ├── txt
    #         │   │   ├── dev.de
    #         │   │   ├── dev.en
    #         │   │   └── dev.yaml
    #         │   └── wav
    #         │       ├── ted_767.wav
    #         │       ├── [...]
    #         │       └── ted_837.wav
    #         ├── train
    #         │   ├── txt/
    #         │   └── wav/
    #         ├── tst-COMMON
    #         │   ├── txt/
    #         │   └── wav/
    #         └── tst-HE
    #             ├── txt/
    #             └── wav/

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                client_id=datasets.Value("string"),
                file=datasets.Value("string"),
                audio=datasets.Audio(sampling_rate=_SAMPLE_RATE),
                sentence=datasets.Value("string"),
                translation=datasets.Value("string"),
                id=datasets.Value("string"),
            ),
            supervised_keys=("file", "translation"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        source_lang, target_lang = self.config.name.split("-")
        assert source_lang == "en"
        assert target_lang in _LANGUAGES

        data_root = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        root_path = Path(data_root) / self.config.name

        if not os.path.exists(root_path):
            raise FileNotFoundError(
                "Dataset not found. Manual download required. "
                f"{self.manual_download_instructions}"
            )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"root_path": root_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"root_path": root_path, "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split("tst.COMMON"),
                gen_kwargs={"root_path": root_path, "split": "tst-COMMON"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split("tst.HE"),
                gen_kwargs={"root_path": root_path, "split": "tst-HE"},
            ),
        ]

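    # Each SplitGenerator forwards its on-disk directory name ("train", "dev",
    # "tst-COMMON", "tst-HE") to _generate_examples as `split`.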
    def _generate_examples(self, root_path, split):
        source_lang, target_lang = self.config.name.split("-")

        # Load the audio segment metadata for this split.
        txt_root = Path(root_path) / "data" / split / "txt"
        with (txt_root / f"{split}.yaml").open("r") as f:
            segments = yaml.load(f, Loader=yaml.BaseLoader)
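        # yaml.BaseLoader keeps every scalar as a string, hence the float()
        # casts below. One entry looks roughly like (illustrative values):
        #   - {duration: "3.50", offset: "17.20", speaker_id: "spk.767",
        #      wav: "ted_767.wav"}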

        # Load source and target utterances (one per line, parallel to the
        # YAML segment list).
        with open(txt_root / f"{split}.{source_lang}", "r") as s_f:
            with open(txt_root / f"{split}.{target_lang}", "r") as t_f:
                s_lines = s_f.readlines()
                t_lines = t_f.readlines()
                assert len(s_lines) == len(t_lines) == len(segments)
                for i, (src, trg) in enumerate(zip(s_lines, t_lines)):
                    segments[i][source_lang] = src.rstrip()
                    segments[i][target_lang] = trg.rstrip()

        # Load waveforms. itertools.groupby only merges *consecutive* entries,
        # so this relies on the YAML listing segments grouped by wav file.
        _id = 0
        wav_root = Path(root_path) / "data" / split / "wav"
        for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]):
            wav_path = wav_root / wav_filename
            seg_group = sorted(_seg_group, key=lambda x: float(x["offset"]))
            for i, segment in enumerate(seg_group):
                # Convert second-based offset/duration to frame counts.
                offset = int(float(segment["offset"]) * _SAMPLE_RATE)
                duration = int(float(segment["duration"]) * _SAMPLE_RATE)
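                # e.g. an offset of "17.20" at 16 kHz starts at frame 275_200
                # (17.2 * 16000 = 275200; illustrative numbers).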
                # Decode only this segment's frames rather than the full file.
                waveform, sr = torchaudio.load(
                    wav_path, frame_offset=offset, num_frames=duration
                )
                assert duration == waveform.size(1), (duration, waveform.size(1))
                assert sr == _SAMPLE_RATE, (sr, _SAMPLE_RATE)

                yield _id, {
                    "file": wav_path.as_posix(),
                    "audio": {
                        "array": waveform.squeeze().numpy(),
                        "path": wav_path.as_posix(),
                        "sampling_rate": sr,
                    },
                    "sentence": segment[source_lang],
                    "translation": segment[target_lang],
                    "client_id": segment["speaker_id"],
                    "id": f"{wav_path.stem}_{i}",
                }
                _id += 1
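
A minimal usage sketch (not part of the committed file), assuming the en-de archive was unpacked under ~/data/mustc (hypothetical path) and this script is loaded locally as mustc.py:

    from datasets import load_dataset

    # data_dir must contain the unpacked en-de/ directory.
    ds = load_dataset("mustc.py", "en-de", data_dir="~/data/mustc")
    sample = ds["train"][0]
    print(sample["sentence"], "->", sample["translation"])
    print(sample["audio"]["sampling_rate"])  # 16000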