albertvillanova committed
Commit 4ca4d06
1 Parent(s): 5ef0b35

Add dataset loading script

Files changed (1)
  1. tv3_parla.py +109 -0
tv3_parla.py ADDED
@@ -0,0 +1,109 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TV3Parla."""

import re

import datasets
from datasets.tasks import AutomaticSpeechRecognition


_CITATION = """\
@inproceedings{kulebi18_iberspeech,
  author={Baybars Külebi and Alp Öktem},
  title={{Building an Open Source Automatic Speech Recognition System for Catalan}},
  year=2018,
  booktitle={Proc. IberSPEECH 2018},
  pages={25--29},
  doi={10.21437/IberSPEECH.2018-6}
}
"""

_DESCRIPTION = """\
This corpus includes 240 hours of Catalan speech from broadcast material.
The details of segmentation, data processing and model training are explained in Külebi and Öktem (2018).
The content is owned by Corporació Catalana de Mitjans Audiovisuals, SA (CCMA);
we processed their material and hereby make it available under their terms of use.

This project was supported by the Softcatalà Association.
"""

_HOMEPAGE = "https://collectivat.cat/asr#tv3parla"

_LICENSE = "Creative Commons Attribution-NonCommercial 4.0 International"

_REPO = "https://huggingface.co/datasets/albertvillanova/tv3_parla/resolve/main/"
_URLS = {
    "transcripts": _REPO + "tv3_0.3_{split}.transcription",
    "audio": _REPO + "tv3_0.3.tar.gz",
}
_SPLITS = [datasets.Split.TRAIN, datasets.Split.TEST]
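# Note (editorial): the "{split}" placeholder in the transcripts URL is resolved per split,
# so for the train split the format string above yields:
#   https://huggingface.co/datasets/albertvillanova/tv3_parla/resolve/main/tv3_0.3_train.transcription
# The audio URL has no placeholder, so the same tarball is downloaded for both splits and
# filtered by the "/{split}/" path check in _generate_examples below.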

_PATTERN = re.compile(r"^<s> (?P<text>.+) </s> \((?P<id>\S+)\)$")
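# Illustrative only (the id and the Catalan words are invented, not taken from the corpus):
# per the regex above, each transcript line wraps the text in <s> ... </s> and ends with the
# utterance id in parentheses, e.g.:
#   <s> bona nit a tothom </s> (some_utterance_id)
# The id is later matched against the basename of a .wav file inside the audio archive.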


class Tv3Parla(datasets.GeneratorBasedBuilder):
    """TV3Parla."""

    VERSION = datasets.Version("0.3.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "path": datasets.Value("string"),
                    "audio": datasets.features.Audio(),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[
                AutomaticSpeechRecognition(audio_file_path_column="path", transcription_column="text")
            ],
        )

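    # Note (editorial): dl_manager.download fetches the per-split transcript files and the
    # shared audio tarball; dl_manager.iter_archive then streams (path, file) pairs out of
    # the tar without extracting it to disk.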
    def _split_generators(self, dl_manager):
        urls = {
            split: {key: url.format(split=split) for key, url in _URLS.items()} for split in _SPLITS
        }
        dl_dir = dl_manager.download(urls)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "transcripts_path": dl_dir[split]["transcripts"],
                    "audio_files": dl_manager.iter_archive(dl_dir[split]["audio"]),
                    "split": split,
                },
            )
            for split in _SPLITS
        ]

    def _generate_examples(self, transcripts_path, audio_files, split):
        # Map utterance id -> transcription text.
        transcripts = {}
        with open(transcripts_path, encoding="utf-8") as transcripts_file:
            for line in transcripts_file:
                match = _PATTERN.match(line)
                if not match:
                    # Skip lines that do not follow the expected <s> ... </s> (id) format.
                    continue
                transcripts[match["id"]] = match["text"]
        # train: 159242; test: 2220
        for key, (path, file) in enumerate(audio_files):
            if path.endswith(".wav") and f"/{split}/" in path:
                # The utterance id is the wav basename without its ".wav" extension;
                # pop it so each transcript is consumed at most once.
                uid = path.split("/")[-1][:-4]
                text = transcripts.pop(uid)
                audio = {"path": path, "bytes": file.read()}
                yield key, {"path": path, "audio": audio, "text": text}
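
Once this script is on the Hub, the dataset can be loaded in the usual way. A minimal usage sketch (assuming the repo id albertvillanova/tv3_parla implied by _REPO above, and a datasets version that supports the Audio feature):

from datasets import load_dataset

# Download and prepare both splits defined by the script.
dataset = load_dataset("albertvillanova/tv3_parla")

print(dataset)          # DatasetDict with "train" and "test" splits
example = dataset["train"][0]
print(example["path"])  # path of the wav file inside the archive
print(example["text"])  # the matched transcription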