"""TODO(squad_v1_pt): Add a description here."""


import json
import csv
import datasets
from datasets.tasks import QuestionAnsweringExtractive

# NOTE(review): the BibTeX entry below is the SQuAD citation left over from the
# dataset-script template — it does not cite this Nepali ASR corpus. Replace it
# with the proper citation (see https://www.openslr.org/54/).
_CITATION = """\
@article{2016arXiv160605250R,
       author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
                 Konstantin and {Liang}, Percy},
        title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
      journal = {arXiv e-prints},
         year = 2016,
          eid = {arXiv:1606.05250},
        pages = {arXiv:1606.05250},
archivePrefix = {arXiv},
       eprint = {1606.05250},
}
"""

# Short description shown on the Hugging Face dataset page.
_DESCRIPTION = """\
NEPALI ASR
"""

_URL = "https://huggingface.co/datasets/SumitMdhr/NEPALI-ASR/resolve/main/"
_URLS = {
    "audio": _URL + "audio.tar.gz",
    "transcription": _URL + "c_1_trans.csv",
}


class NepaliAsr(datasets.GeneratorBasedBuilder):
    """Builder for the Nepali ASR dataset.

    Audio clips come from a single tar archive and are paired, by position,
    with transcriptions read from a CSV index file (column ``trans``).
    """

    def _info(self):
        """Return the DatasetInfo describing features, homepage, and task."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # Raw audio; decoded lazily by the Audio feature from the
                    # bytes yielded in _generate_examples.
                    "audio": datasets.Audio(),
                    # Transcription text for the corresponding clip.
                    "label": datasets.Value("string"),
                }
            ),
            # No canonical (input, target) tuple is declared for
            # as_supervised=True; consumers use the named columns directly.
            supervised_keys=None,
            homepage="https://www.openslr.org/54/",
            # NOTE(review): _CITATION is the SQuAD BibTeX entry inherited from
            # the script template, not a citation for this corpus.
            citation=_CITATION,
            task_templates=[
                datasets.tasks.AutomaticSpeechRecognition(
                    audio_column="audio", transcription_column="label"
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download the archive and CSV index; declare the single TRAIN split.

        ``iter_archive`` yields ``(path_inside_archive, file_object)`` pairs
        without extracting the tar to disk, so the script also works in
        streaming mode.
        """
        archive_path = dl_manager.download(_URLS["audio"])
        audio_files = dl_manager.iter_archive(archive_path)
        index_file = dl_manager.download(_URLS["transcription"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"transcriptions": index_file, "audios": audio_files},
            ),
        ]

    def _generate_examples(self, transcriptions, audios):
        """Yield ``(key, example)`` pairs.

        The i-th archive member is paired with the i-th CSV row. This assumes
        the tar's member order matches the CSV row order — TODO confirm against
        the published corpus.

        Args:
            transcriptions: local path to the downloaded CSV index file.
            audios: iterator of ``(path_inside_archive, file_object)`` pairs
                from ``dl_manager.iter_archive``.
        """
        with open(transcriptions, encoding="utf-8") as f:
            transcript = [row["trans"] for row in csv.DictReader(f)]
        for idx, (filepath, audio_file) in enumerate(audios):
            # Stop cleanly if the archive has more members than the CSV has
            # rows (the original code would raise IndexError here).
            if idx >= len(transcript):
                break
            yield idx, {
                # The path is *inside* the tar, not on the local filesystem,
                # so the raw bytes must be provided for Audio() to decode.
                "audio": {"path": filepath, "bytes": audio_file.read()},
                "label": transcript[idx],
            }