evanslur committed
Commit 9f7b1dd
1 Parent(s): b767239

edit files

Files changed (1)
  1. su_id_asr_split.py +82 -59
su_id_asr_split.py CHANGED
@@ -6,8 +6,11 @@ import datasets
 
 from seacrowd.utils import schemas
 from seacrowd.utils.configs import SEACrowdConfig
-from seacrowd.utils.constants import (DEFAULT_SEACROWD_VIEW_NAME,
-                                      DEFAULT_SOURCE_VIEW_NAME, Tasks)
+from seacrowd.utils.constants import (
+    DEFAULT_SEACROWD_VIEW_NAME,
+    DEFAULT_SOURCE_VIEW_NAME,
+    Tasks,
+)
 
 _DATASETNAME = "su_id_asr"
 _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
@@ -32,6 +35,7 @@ This dataset was collected by Google in Indonesia.
 """
 
 _HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"
+
 _LICENSE = "Attribution-ShareAlike 4.0 International."
 
 _URLs = {
@@ -41,83 +45,102 @@ _URLs = {
 }
 
 _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
+
 _SOURCE_VERSION = "1.0.0"
 _SEACROWD_VERSION = "2024.06.20"
 
 
 class SuIdASR(datasets.GeneratorBasedBuilder):
+    """su_id contains ~220K utterances for Sundanese ASR training data."""
+
+    BUILDER_CONFIGS = [
+        SEACrowdConfig(
+            name="su_id_asr_source",
+            version=datasets.Version(_SOURCE_VERSION),
+            description="SU_ID_ASR source schema",
+            schema="source",
+            subset_id="su_id_asr",
+        ),
+        SEACrowdConfig(
+            name="su_id_asr_seacrowd_sptext",
+            version=datasets.Version(_SEACROWD_VERSION),
+            description="SU_ID_ASR Nusantara schema",
+            schema="seacrowd_sptext",
+            subset_id="su_id_asr",
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = "su_id_asr_source"
+
     def _info(self):
-        features = datasets.Features({
-            "id": datasets.Value("string"),
-            "speaker_id": datasets.Value("string"),
-            "audio": datasets.Audio(sampling_rate=16_000),
-            "text": datasets.Value("string"),
-        })
-        return datasets.DatasetInfo(features=features)
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "speaker_id": datasets.Value("string"),
+                    "path": datasets.Value("string"),
+                    "audio": datasets.Audio(sampling_rate=16_000),
+                    "text": datasets.Value("string"),
+                }
+            )
+        elif self.config.schema == "seacrowd_sptext":
+            features = schemas.speech_text_features
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+            task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
+        )
 
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        print("Downloading the dataset...")
-
-        # Download the files
-        train_file = dl_manager.download(_URLs["su_id_asr_train"])
-        dev_file = dl_manager.download(_URLs["su_id_asr_dev"])
-        test_file = dl_manager.download(_URLs["su_id_asr_test"])
+        base_path_train = dl_manager.download_and_extract(_URLs["su_id_asr_train"])
+        base_path_validation = dl_manager.download_and_extract(_URLs["su_id_asr_dev"])
+        base_path_test = dl_manager.download_and_extract(_URLs["su_id_asr_test"])
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": train_file},
+                gen_kwargs={"filepath": base_path_train, "split": "train"},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={"filepath": dev_file},
+                gen_kwargs={"filepath": base_path_validation, "split": "validation"},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={"filepath": test_file},
+                gen_kwargs={"filepath": base_path_test, "split": "test"},
            ),
         ]
 
-    def _generate_examples(self, filepath: str):
-        """Yields examples as (key, example) tuples."""
-        try:
-            with open(filepath, "r") as f:
-                tsv_reader = csv.reader(f, delimiter="\t")
-                header = next(tsv_reader)  # Skip header if present
-                print(f"Header: {header}")  # Print the header for debugging
-
-                for line in tsv_reader:
-                    print(f"Line: {line}")  # Print each line to check contents
-                    if len(line) < 3:  # Ensure there are enough columns
-                        print("Skipping line, not enough columns:", line)
-                        continue
-
-                    audio_id, sp_id, text = line[0], line[1], line[2]
-                    wav_path = os.path.join(os.path.dirname(filepath), f"{audio_id}.flac")
-
-                    # Check if the audio file exists
-                    if os.path.exists(wav_path):
-                        yield audio_id, {
-                            "id": audio_id,
-                            "speaker_id": sp_id,
-                            "audio": wav_path,
-                            "text": text,
-                        }
-                    else:
-                        print(f"Audio file does not exist: {wav_path}. Skipping this entry.")
-
-            print(f"Completed loading data from {filepath}.")
-
-        except FileNotFoundError:
-            print(f"Error: The file {filepath} was not found. Please check the path.")
-        except Exception as e:
-            print(f"An error occurred: {e}")
-
-# Load the dataset
-try:
-    dataset = datasets.load_dataset(__name__)  # Use the current module name
-    print("Dataset loaded successfully.")
-    print(dataset)
-
-except Exception as e:
-    print(f"An error occurred while loading the dataset: {e}")
+    def _generate_examples(self, filepath: str, split: str):  # `split` arrives via gen_kwargs
+        # Construct the path for the TSV file
+        tsv_file = os.path.join(filepath, "utt_spk_text.tsv")
+
+        # Check if the TSV file exists
+        if not os.path.exists(tsv_file):
+            raise FileNotFoundError(f"TSV file not found at: {tsv_file}")
+
+        with open(tsv_file, "r") as file:
+            tsv_reader = csv.reader(file, delimiter="\t")
+
+            for line in tsv_reader:
+                audio_id, speaker_id, transcription_text = line[0], line[1], line[2]
+                wav_path = os.path.join(filepath, "{}.flac".format(audio_id))
+
+                if os.path.exists(wav_path):
+                    ex = {
+                        "id": audio_id,
+                        "speaker_id": speaker_id,
+                        "path": wav_path,
+                        "audio": wav_path,
+                        "text": transcription_text,
+                    }
+                    if self.config.schema == "seacrowd_sptext":
+                        ex["metadata"] = {
+                            "speaker_age": None,
+                            "speaker_gender": None,
+                        }
+                    yield audio_id, ex
 
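With the two configs in place, the builder can be exercised end to end. A minimal sketch, assuming the script is saved locally as su_id_asr_split.py and the archives behind _URLs are reachable (recent versions of the datasets library require trust_remote_code for local scripts):

import datasets

# Config names come from BUILDER_CONFIGS above; "su_id_asr_source" is the
# default. Loading a local script path executes this builder directly.
ds = datasets.load_dataset(
    "su_id_asr_split.py",
    name="su_id_asr_seacrowd_sptext",
    trust_remote_code=True,
)

print(ds)                      # DatasetDict with train/validation/test
print(ds["train"][0]["text"])  # first transcription

The source config additionally exposes the raw path column, while su_id_asr_seacrowd_sptext conforms to the shared SEACrowd speech-text schema (including the metadata fields, which this dataset leaves as None).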