# youtube-persian-asr / youtube-persian-asr.py
# (Hugging Face page-header residue preserved as a comment: author PerSets,
#  commit message "fix: loading script base_url fixed", commit 7890281)
import os
import tarfile
import datasets
import pandas as pd
from typing import Dict, List
import io
from tqdm import tqdm
import csv
import os
# Human-readable dataset card text, surfaced via datasets.DatasetInfo.
_DESCRIPTION = """
This dataset consists of over 385 hours of audio extracted from various YouTube videos in the Persian language.
Note: This dataset contains raw, unvalidated transcriptions. Users are advised to:
1. Perform their own quality assessment
2. Create their own train/validation/test splits based on their specific needs
3. Validate a subset of the data if needed for their use case
"""
# Citation text, surfaced via DatasetInfo.citation.
_CITATION = """
Use this repo info/link for citation.
"""
# CC0 1.0 public-domain dedication.
_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
_HOMEPAGE = "https://huggingface.co/datasets/PerSets/youtube-persian-asr"
# Root of the dataset repository on the Hugging Face Hub; the metadata CSV
# lives at the root and the audio tar shards live under data/.
_BASE_URL = "https://huggingface.co/datasets/PerSets/youtube-persian-asr/resolve/main/"
# Shard URL template; shards are unvalidated_1.tar .. unvalidated_21.tar.
_AUDIO_URL = _BASE_URL + "data/unvalidated_{shard_idx}.tar"
class FarsiYoutubeDataset(datasets.GeneratorBasedBuilder):
    """Loader for ~385 hours of unvalidated Persian (Farsi) YouTube ASR audio.

    Audio is distributed as 21 tar shards (``data/unvalidated_1.tar`` ..
    ``data/unvalidated_21.tar``); transcriptions come from a single
    comma-separated ``unvalidated.csv`` at the repository root.
    """

    DEFAULT_WRITER_BATCH_SIZE = 1000
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata (features, license, citation, version)."""
        return datasets.DatasetInfo(
            features=datasets.Features({
                # NOTE(review): 44_000 Hz is an unusual rate — 44_100 is the
                # common CD rate; confirm against the source audio.
                "audio": datasets.Audio(sampling_rate=44_000),
                "text": datasets.Value("string"),
                "file_name": datasets.Value("string"),
            }),
            supervised_keys=None,
            license=_LICENSE,
            citation=_CITATION,
            version=self.VERSION,
            description=_DESCRIPTION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        In non-streaming mode every shard is extracted locally up front; in
        streaming mode the shards are iterated in place via ``iter_archive``.
        """
        archive_paths = [_AUDIO_URL.format(shard_idx=i) for i in range(1, 22)]
        local_extracted_archive_paths = (
            dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        )
        return [
            datasets.SplitGenerator(
                name="unvalidated",
                gen_kwargs={
                    "local_extracted_archive_paths": local_extracted_archive_paths,
                    "archives": [dl_manager.iter_archive(path) for path in archive_paths],
                    # BUG FIX: previously the raw https:// URL was passed here,
                    # but _generate_examples reads meta_path with open(), which
                    # cannot open a URL — download the CSV to a local path first.
                    "meta_path": dl_manager.download(_BASE_URL + "unvalidated.csv"),
                },
            ),
        ]

    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
        """Yields (key, example) pairs, keyed by each audio file's path.

        Args:
            local_extracted_archive_paths: per-shard extraction dirs
                (non-streaming), or a falsy value when streaming.
            archives: one ``iter_archive`` iterator per tar shard, yielding
                ``(path_in_archive, file_object)`` pairs.
            meta_path: local path to the comma-separated metadata file.
        """
        data_fields = list(self._info().features.keys())
        # Build a file_name -> metadata-row index from the CSV.
        metadata = {}
        with open(meta_path, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter=",", quoting=csv.QUOTE_NONE)
            for row in tqdm(reader, desc="Reading metadata..."):
                # Archive member names carry the .mp3 extension; normalize the
                # CSV's file_name so lookups below match.
                if not row["file_name"].endswith(".mp3"):
                    row["file_name"] += ".mp3"
                # The CSV names the transcription column "sentence"; the
                # dataset feature is called "text".
                if "sentence" in row:
                    row["text"] = row["sentence"]
                    del row["sentence"]
                # Guarantee every declared feature key exists in the row.
                for field in data_fields:
                    if field not in row:
                        row[field] = ""
                metadata[row["file_name"]] = row
        for i, audio_archive in enumerate(archives):
            for path, file in audio_archive:
                _, filename = os.path.split(path)
                if filename not in metadata:
                    # Audio with no transcription row is silently skipped.
                    continue
                result = dict(metadata[filename])
                # Non-streaming: point at the extracted file on disk;
                # streaming: keep the in-archive path and ship the bytes.
                path = (
                    os.path.join(local_extracted_archive_paths[i], path)
                    if local_extracted_archive_paths
                    else path
                )
                result["audio"] = {"path": path, "bytes": file.read()}
                result["file_name"] = path
                yield path, result