Languages: Khmer
holylovenia committed · Commit ac91d29 · verified · 1 Parent(s): 4ed671d

Upload kheng_info.py with huggingface_hub

Files changed (1):
  1. kheng_info.py +113 -0
kheng_info.py ADDED
# coding=utf-8

from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

# no bibtex citation
_CITATION = ""

_DATASETNAME = "kheng_info"

_DESCRIPTION = """\
The Kheng.info Speech dataset was derived from recordings of Khmer words on the Khmer dictionary website kheng.info.
The recordings were made by a native Khmer speaker.
The recordings are short, generally ranging from 1 to 2 seconds.
"""

_HOMEPAGE = "https://huggingface.co/datasets/seanghay/khmer_kheng_info_speech"

_LANGUAGES = ["khm"]

_LICENSE = Licenses.UNKNOWN.value

_LOCAL = False

_URLS = {
    _DATASETNAME: "https://huggingface.co/datasets/seanghay/khmer_kheng_info_speech/resolve/main/data/train-00000-of-00001-4e7ad082a34164d1.parquet",
}

_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class KhengInfoDataset(datasets.GeneratorBasedBuilder):
    """This is the Kheng.info Speech dataset, which was derived from recordings on the Khmer dictionary website kheng.info."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_sptext",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_sptext",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            # The source schema mirrors the columns of the upstream parquet file.
            features = datasets.Features(
                {
                    "word": datasets.Value("string"),
                    "duration_ms": datasets.Value("int64"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                }
            )
        elif self.config.schema == "seacrowd_sptext":
            features = schemas.speech_text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)

        # The upstream dataset ships a single parquet file, so only a train split is produced.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                },
            )
        ]

    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
        df = pd.read_parquet(filepath, engine="pyarrow")
        if self.config.schema == "source":
            for _id, row in df.iterrows():
                yield _id, {"word": row["word"], "duration_ms": row["duration_ms"], "audio": row["audio"]}
        elif self.config.schema == "seacrowd_sptext":
            for _id, row in df.iterrows():
                yield _id, {
                    "id": _id,
                    "path": row["audio"],
                    "audio": row["audio"],
                    "text": row["word"],
                    # Speaker identity and demographics are not recorded upstream.
                    "speaker_id": None,
                    "metadata": {
                        "speaker_age": None,
                        "speaker_gender": None,
                    },
                }
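
For quick verification, the loader can be exercised directly with the datasets library. The snippet below is a minimal sketch, assuming the file above is saved locally as kheng_info.py; the config names come from BUILDER_CONFIGS, and depending on your datasets version you may also need to pass trust_remote_code=True for script-based loaders.

import datasets

# A minimal sketch: load the source schema from the local loader script.
# "kheng_info_source" and "kheng_info_seacrowd_sptext" are the config names
# defined in BUILDER_CONFIGS above; the local file path is an assumption.
source = datasets.load_dataset("kheng_info.py", name="kheng_info_source", split="train")
print(source[0]["word"], source[0]["duration_ms"])

# The same data exposed through the SEACrowd speech-text schema.
sptext = datasets.load_dataset("kheng_info.py", name="kheng_info_seacrowd_sptext", split="train")
print(sptext[0]["text"])

Both configs read the same parquet file; only the feature mapping in _generate_examples differs between them.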