import csv
import os
from typing import Dict, List

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import (DEFAULT_SEACROWD_VIEW_NAME,
                                      DEFAULT_SOURCE_VIEW_NAME, Tasks)

_DATASETNAME = "su_id_asr"

_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME

_LANGUAGES = ["sun"]
_LOCAL = False

_CITATION = """\
@inproceedings{sodimana18_sltu,
  author={Keshan Sodimana and Pasindu {De Silva} and Supheakmungkol Sarin and Oddur Kjartansson and Martin Jansche and Knot Pipatsrisawat and Linne Ha},
  title={{A Step-by-Step Process for Building TTS Voices Using Open Source Data and Frameworks for Bangla, Javanese, Khmer, Nepali, Sinhala, and Sundanese}},
  year=2018,
  booktitle={Proc. 6th Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU 2018)},
  pages={66--70},
  doi={10.21437/SLTU.2018-14}
}
"""

_DESCRIPTION = """\
Sundanese ASR training data set containing ~220K utterances.
This dataset was collected by Google in Indonesia.
"""

_HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"

_LICENSE = "Attribution-ShareAlike 4.0 International."

_URLs = {
    "su_id_asr_train": "https://univindonesia-my.sharepoint.com/personal/patrick_samuel_office_ui_ac_id/_layouts/15/download.aspx?share=ESbYerhrepxPsggILmK8hZwB9ywXeZzLX7fF885Yo9F7JA",
    "su_id_asr_dev": "https://univindonesia-my.sharepoint.com/personal/patrick_samuel_office_ui_ac_id/_layouts/15/download.aspx?share=EdmZ2KYglRBJrKacGRklGD4BEcZXqY6txIrEhj2csx3I3g",
    "su_id_asr_test": "https://univindonesia-my.sharepoint.com/personal/patrick_samuel_office_ui_ac_id/_layouts/15/download.aspx?share=ET_Yu0vwbk9Mu-2vg68mSnkBJ-CnY1DOBjm8GVjGLKFZxQ",
}

_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]

_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class SuIdASR(datasets.GeneratorBasedBuilder):
    """Sundanese ASR dataset loader: ~220K utterances collected by Google in Indonesia."""

    def _info(self) -> datasets.DatasetInfo:
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16_000),
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        print("Downloading the dataset...")

        # Download the transcript file for each split.
        train_file = dl_manager.download(_URLs["su_id_asr_train"])
        dev_file = dl_manager.download(_URLs["su_id_asr_dev"])
        test_file = dl_manager.download(_URLs["su_id_asr_test"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_file},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": dev_file},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_file},
            ),
        ]

    def _generate_examples(self, filepath: str):
        """Yields examples as (key, example) tuples."""
        try:
            with open(filepath, "r") as f:
                tsv_reader = csv.reader(f, delimiter="\t")
                # Skip the header row. Note that this consumes the first row
                # unconditionally, so it assumes the TSV actually has a header.
                next(tsv_reader, None)
                for line in tsv_reader:
                    if len(line) < 3:
                        # Skip malformed rows missing the id, speaker, or text column.
                        continue
                    audio_id, sp_id, text = line[0], line[1], line[2]
                    # Audio files are expected to sit next to the transcript file.
                    wav_path = os.path.join(os.path.dirname(filepath), f"{audio_id}.flac")
                    if os.path.exists(wav_path):
                        yield audio_id, {
                            "id": audio_id,
                            "speaker_id": sp_id,
                            "audio": wav_path,
                            "text": text,
                        }
                    else:
                        print(f"Audio file does not exist: {wav_path}. Skipping this entry.")
            print(f"Completed loading data from {filepath}.")
        except FileNotFoundError:
            print(f"Error: The file {filepath} was not found. Please check the path.")
        except Exception as e:
            print(f"An error occurred: {e}")


# Load the dataset when this script is run directly. The guard prevents the
# loader from re-invoking itself when `datasets` imports this file as a module.
if __name__ == "__main__":
    try:
        dataset = datasets.load_dataset(__file__)  # Point load_dataset at this loading script
        print("Dataset loaded successfully.")
        print(dataset)
    except Exception as e:
        print(f"An error occurred while loading the dataset: {e}")
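
# Usage sketch (illustrative, not part of the loader). This assumes the file
# above is saved as a local loading script, e.g. "su_id_asr.py" (a hypothetical
# filename), and that the download URLs are reachable.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("su_id_asr.py")          # hypothetical path to this script
#   sample = ds["train"][0]
#   print(sample["id"], sample["speaker_id"])  # utterance and speaker identifiers
#   print(sample["text"])                      # transcript
#   print(sample["audio"]["sampling_rate"])    # decoded audio, declared as 16 kHz above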