Sh1man committed
Commit 59a259a · 1 Parent(s): b3ad1f5

Upload folder using huggingface_hub

Files changed (4):
  1. .gitattributes +2 -0
  2. README.md +60 -3
  3. n_shards.json +6 -0
  4. silero_open_stt.py +183 -0
.gitattributes CHANGED
@@ -57,3 +57,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ metadata/tts_russian_addresses_rhvoice_4voices/train.tsv filter=lfs diff=lfs merge=lfs -text
+ metadata/tts_russian_addresses_rhvoice_4voices/validate.tsv filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,60 @@
- ---
- license: cc-by-nc-4.0
- ---
+ ---
+ language:
+ - ru
+ license: cc-by-nc-4.0
+ task_categories:
+ - automatic-speech-recognition
+ size_categories:
+ - 1M<n<10M
+ tags:
+ - audio
+ - speech
+ - Russian
+ - ASR
+ - voice
+ dataset_info:
+   features:
+   - name: id
+     dtype: string
+   - name: path
+     dtype: string
+   - name: text
+     dtype: string
+   - name: duration
+     dtype: float32
+   - name: audio
+     dtype: audio
+   config_name: tts_russian_addresses_rhvoice_4voices
+   splits:
+   - name: train
+   - name: validate
+ pretty_name: silero_open_stt
+ ---
+
+ # silero_open_stt Dataset
+
+ ## Dataset Description
+
+ open_stt is a Russian dataset for speech research.
+
+ ## Dataset Structure
+
+ The dataset is organized in the Common Voice format:
+
+ - `/data/{subset}/{split}/` - TAR archives with the audio files
+ - `/metadata/{subset}/` - TSV files with the transcriptions
+ - `/n_shards.json` - the number of shards for each subset and split
+
+ It currently contains a single subset:
+
+ ### tts_russian_addresses_rhvoice_4voices subset
+
+ | Split     | Samples     |
+ |-----------|-------------|
+ | train     | 1045102     |
+ | validate  | 696736     |
+ | **Total** | **1741838** |
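
For readers landing on the card, a minimal loading sketch may help. This is an assumption-laden example, not part of the commit: the repo id `Sh1man/silero_open_stt_opus` is taken from `_BASE_URL` in the loading script below, and `trust_remote_code=True` is assumed to be needed because this is a script-based dataset.

```python
from datasets import load_dataset

# Minimal sketch: stream the train split of the single subset so the
# 12 TAR shards are not downloaded up front. The repo id and the need
# for trust_remote_code are assumptions, not confirmed by this commit.
ds = load_dataset(
    "Sh1man/silero_open_stt_opus",
    "tts_russian_addresses_rhvoice_4voices",
    split="train",
    streaming=True,
    trust_remote_code=True,
)

sample = next(iter(ds))
print(sample["id"], sample["text"], sample["duration"])
print(sample["audio"]["sampling_rate"])  # 16000, per the Audio feature
```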
n_shards.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "tts_russian_addresses_rhvoice_4voices": {
+     "train": 12,
+     "validate": 8
+   }
+ }
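
The loading script expands these shard counts into per-split lists of TAR URLs. A small sketch of that expansion, reusing the `_AUDIO_URL` template defined in `silero_open_stt.py`:

```python
import json

# Shard counts exactly as committed in n_shards.json
n_shards = json.loads(
    '{"tts_russian_addresses_rhvoice_4voices": {"train": 12, "validate": 8}}'
)

_BASE_URL = "https://huggingface.co/datasets/Sh1man/silero_open_stt_opus/resolve/main/"
_AUDIO_URL = _BASE_URL + "data/{subset}/{split}/{subset}_{split}_{shard_idx}.tar"

subset = "tts_russian_addresses_rhvoice_4voices"
for split, count in n_shards[subset].items():
    urls = [
        _AUDIO_URL.format(subset=subset, split=split, shard_idx=i)
        for i in range(count)
    ]
    print(split, len(urls), urls[0])
    # train: 12 URLs ending in ..._train_0.tar .. _train_11.tar
    # validate: 8 URLs ending in ..._validate_0.tar .. _validate_7.tar
```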
silero_open_stt.py ADDED
@@ -0,0 +1,183 @@
+ # coding=utf-8
+ # Copyright 2023 The HuggingFace Datasets Authors
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """silero_open_stt Dataset"""
+
+ import csv
+ import json
+ import os
+
+ import datasets
+ from tqdm import tqdm
+
+
+ _DESCRIPTION = """open_stt is a Russian dataset for speech research."""
+
+ _CITATION = """None"""
+
+ _HOMEPAGE = "https://github.com/snakers4/open_stt"
+ _LICENSE = "cc-by-nc-4.0"
+
+ _BASE_URL = "https://huggingface.co/datasets/Sh1man/silero_open_stt_opus/resolve/main/"
+ _AUDIO_URL = _BASE_URL + "data/{subset}/{split}/{subset}_{split}_{shard_idx}.tar"
+ _METADATA_URL = _BASE_URL + "metadata/{subset}/{split}.tsv"
+ _N_SHARDS_URL = _BASE_URL + "n_shards.json"
+
+ # Subset information
+ SUBSETS = {
+     "tts_russian_addresses_rhvoice_4voices": {
+         "description": "The tts_russian_addresses_rhvoice_4voices subset of the silero_open_stt dataset",
+     },
+ }
+
+
+ class silero_open_sttConfig(datasets.BuilderConfig):
+     """BuilderConfig for silero_open_stt."""
+
+     def __init__(self, name, subset, description, **kwargs):
+         """BuilderConfig for silero_open_stt.
+
+         Args:
+             name: Name of the dataset.
+             subset: Data subset.
+             description: Description of the subset.
+             **kwargs: Additional arguments passed to the superclass.
+         """
+         self.subset = subset
+         super(silero_open_sttConfig, self).__init__(
+             name=name,
+             version=datasets.Version("1.0.0"),
+             description=description,
+             **kwargs,
+         )
+
+
+ class silero_open_stt(datasets.GeneratorBasedBuilder):
+     """The silero_open_stt audio dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+     DEFAULT_WRITER_BATCH_SIZE = 1000
+
+     BUILDER_CONFIGS = [
+         silero_open_sttConfig(
+             name=subset,
+             subset=subset,
+             description=f"silero_open_stt - {info['description']}",
+         )
+         for subset, info in SUBSETS.items()
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "path": datasets.Value("string"),
+                 "text": datasets.Value("string"),
+                 "duration": datasets.Value("float32"),
+                 "audio": datasets.Audio(sampling_rate=16000),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         subset = self.config.subset
+
+         # Download the shard-count information
+         n_shards_path = dl_manager.download_and_extract(_N_SHARDS_URL)
+         with open(n_shards_path, encoding="utf-8") as f:
+             n_shards = json.load(f)
+
+         # Check that data exists for the selected subset
+         if subset not in n_shards:
+             raise ValueError(f"Subset {subset} not found in n_shards.json")
+
+         # Determine the splits available for this subset
+         splits = list(n_shards[subset].keys())
+         if not splits:
+             raise ValueError(f"No splits found for subset {subset}")
+
+         # Build the URLs for the audio archives
+         audio_urls = {}
+         for split in splits:
+             if n_shards[subset][split] > 0:
+                 audio_urls[split] = [
+                     _AUDIO_URL.format(subset=subset, split=split, shard_idx=i)
+                     for i in range(n_shards[subset][split])
+                 ]
+
+         # Download the archives; extract them only in non-streaming mode
+         archive_paths = dl_manager.download(audio_urls)
+         local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
+
+         # Download the metadata
+         meta_urls = {split: _METADATA_URL.format(subset=subset, split=split) for split in splits}
+         meta_paths = dl_manager.download_and_extract(meta_urls)
+
+         # Define the split generators
+         split_generators = []
+         split_names = {
+             "train": datasets.Split.TRAIN,
+             "validate": datasets.Split.VALIDATION,
+             "test": datasets.Split.TEST,
+         }
+
+         for split in splits:
+             split_generators.append(
+                 datasets.SplitGenerator(
+                     name=split_names.get(split, split),
+                     gen_kwargs={
+                         "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
+                         "archives": [dl_manager.iter_archive(path) for path in archive_paths[split]],
+                         "meta_path": meta_paths[split],
+                     },
+                 )
+             )
+
+         return split_generators
+
+     def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
+         """Yields examples."""
+         data_fields = list(self._info().features.keys())
+         metadata = {}
+
+         # Read the TSV metadata and index the rows by audio file name
+         with open(meta_path, encoding="utf-8") as f:
+             reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+             for row in tqdm(reader, desc="Reading metadata..."):
+                 # Ensure all expected fields are present
+                 for field in data_fields:
+                     if field not in row and field != "audio":
+                         row[field] = ""
+
+                 metadata[row["path"]] = row
+
+         # Walk the archives and yield only files that have a metadata row
+         for i, audio_archive in enumerate(archives):
+             for path, file in audio_archive:
+                 _, filename = os.path.split(path)
+                 if filename in metadata:
+                     result = dict(metadata[filename])
+                     # Set the audio feature and the path to the extracted file
+                     path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path
+                     result["audio"] = {"path": path, "bytes": file.read()}
+                     result["path"] = path
+                     yield path, result
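
To sanity-check the metadata-to-archive matching that `_generate_examples` performs, one can replay it on a single downloaded shard. A hedged sketch, with hypothetical local paths:

```python
import csv
import os
import tarfile

# Hypothetical local paths, for illustration only
meta_path = "metadata/tts_russian_addresses_rhvoice_4voices/validate.tsv"
shard_path = "data/tts_russian_addresses_rhvoice_4voices/validate/tts_russian_addresses_rhvoice_4voices_validate_0.tar"

# Index TSV rows by file name, the same way the loader does
with open(meta_path, encoding="utf-8") as f:
    reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
    metadata = {row["path"]: row for row in reader}

# Count how many audio files in the shard have a matching metadata row
with tarfile.open(shard_path) as tar:
    names = [m.name for m in tar.getmembers() if m.isfile()]
matched = sum(os.path.basename(n) in metadata for n in names)
print(f"{matched}/{len(names)} audio files in the shard have metadata rows")
```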