su_id_asr_split.py CHANGED (+82 -59)
@@ -6,8 +6,11 @@ import datasets
 
 from seacrowd.utils import schemas
 from seacrowd.utils.configs import SEACrowdConfig
-from seacrowd.utils.constants import (
-    DEFAULT_SEACROWD_VIEW_NAME, DEFAULT_SOURCE_VIEW_NAME, Tasks)
+from seacrowd.utils.constants import (
+    DEFAULT_SEACROWD_VIEW_NAME,
+    DEFAULT_SOURCE_VIEW_NAME,
+    Tasks,
+)
 
 _DATASETNAME = "su_id_asr"
 _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
@@ -32,6 +35,7 @@ This dataset was collected by Google in Indonesia.
 """
 
 _HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"
+
 _LICENSE = "Attribution-ShareAlike 4.0 International."
 
 _URLs = {
@@ -41,83 +45,102 @@ _URLs = {
 }
 
 _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
+
 _SOURCE_VERSION = "1.0.0"
 _SEACROWD_VERSION = "2024.06.20"
 
 
 class SuIdASR(datasets.GeneratorBasedBuilder):
+    """su_id contains ~220K utterances for Sundanese ASR training data."""
+
+    BUILDER_CONFIGS = [
+        SEACrowdConfig(
+            name="su_id_asr_source",
+            version=datasets.Version(_SOURCE_VERSION),
+            description="SU_ID_ASR source schema",
+            schema="source",
+            subset_id="su_id_asr",
+        ),
+        SEACrowdConfig(
+            name="su_id_asr_seacrowd_sptext",
+            version=datasets.Version(_SEACROWD_VERSION),
+            description="SU_ID_ASR Nusantara schema",
+            schema="seacrowd_sptext",
+            subset_id="su_id_asr",
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = "su_id_asr_source"
+
     def _info(self):
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "speaker_id": datasets.Value("string"),
+                    "path": datasets.Value("string"),
+                    "audio": datasets.Audio(sampling_rate=16_000),
+                    "text": datasets.Value("string"),
+                }
+            )
+        elif self.config.schema == "seacrowd_sptext":
+            features = schemas.speech_text_features
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+            task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
+        )
 
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        train_file = dl_manager.download(_URLs["su_id_asr_train"])
-        dev_file = dl_manager.download(_URLs["su_id_asr_dev"])
-        test_file = dl_manager.download(_URLs["su_id_asr_test"])
+        base_path_train = dl_manager.download_and_extract(_URLs["su_id_asr_train"])
+        base_path_validation = dl_manager.download_and_extract(_URLs["su_id_asr_dev"])
+        base_path_test = dl_manager.download_and_extract(_URLs["su_id_asr_test"])
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": train_file},
+                gen_kwargs={"filepath": base_path_train, "split": "train"},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={"filepath": dev_file},
+                gen_kwargs={"filepath": base_path_validation, "split": "validation"},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={"filepath": test_file},
+                gen_kwargs={"filepath": base_path_test, "split": "test"},
             ),
         ]
 
-    def _generate_examples(self, filepath: str):
-                    print(f"Audio file does not exist: {wav_path}. Skipping this entry.")
-            print(f"Completed loading data from {filepath}.")
-        except FileNotFoundError:
-            print(f"Error: The file {filepath} was not found. Please check the path.")
-        except Exception as e:
-            print(f"An error occurred: {e}")
-
-# Load the dataset
-try:
-    dataset = datasets.load_dataset(__name__)  # Use the current module name
-    print("Dataset loaded successfully.")
-    print(dataset)
-except Exception as e:
-    print(f"An error occurred while loading the dataset: {e}")
+    def _generate_examples(self, filepath: str, split: str):
+        # split is passed via the gen_kwargs above; only filepath is used here.
+        # Construct the path for the TSV file
+        tsv_file = os.path.join(filepath, "utt_spk_text.tsv")
+
+        # Check if the TSV file exists
+        if not os.path.exists(tsv_file):
+            raise FileNotFoundError(f"TSV file not found at: {tsv_file}")
+
+        with open(tsv_file, "r") as file:
+            tsv_reader = csv.reader(file, delimiter="\t")
+
+            for line in tsv_reader:
+                audio_id, speaker_id, transcription_text = line[0], line[1], line[2]
+                wav_path = os.path.join(filepath, "{}.flac".format(audio_id))
+
+                if os.path.exists(wav_path):
+                    ex = {
+                        "id": audio_id,
+                        "speaker_id": speaker_id,
+                        "path": wav_path,
+                        "audio": wav_path,
+                        "text": transcription_text,
+                    }
+                    if self.config.schema == "seacrowd_sptext":
+                        ex["metadata"] = {
+                            "speaker_age": None,
+                            "speaker_gender": None,
+                        }
+                    yield audio_id, ex
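For review, the on-disk layout that the new `_generate_examples` assumes can be read off the code: each `download_and_extract` call must yield a directory containing the transcript index plus one FLAC file per utterance. The sketch below is inferred from the loader, not stated in the PR:

base_path/
├── utt_spk_text.tsv      # three tab-separated columns: utterance ID, speaker ID, transcript
├── <utterance_id>.flac   # one audio file per TSV row, named after the first column
└── ...

Note that rows whose FLAC file is missing are now skipped silently; the old loader printed a message for each missing file before skipping it.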
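The module-level `datasets.load_dataset(__name__)` smoke test was removed, so the script no longer loads itself on import; the same check can be run from a separate session. A minimal sketch, assuming the script is saved locally as `su_id_asr_split.py` and a `datasets` release recent enough to require `trust_remote_code` for script-based loaders:

import datasets

# Load with the source schema (the DEFAULT_CONFIG_NAME declared above).
ds = datasets.load_dataset(
    "su_id_asr_split.py",     # local path to this loader script (assumed)
    name="su_id_asr_source",
    trust_remote_code=True,
)
print(ds["train"][0]["id"], ds["train"][0]["text"])

Passing `name="su_id_asr_seacrowd_sptext"` instead returns the SEACrowd speech-text schema, including the `metadata` dict filled in by `_generate_examples`.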