Datasets:

Modalities:
Audio
Text
Libraries:
Datasets
License:
taiqihe committed on
Commit
cbccc97
·
verified ·
1 Parent(s): 217a980

initialize data

Browse files
.gitattributes CHANGED
@@ -52,3 +52,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
52
  *.jpg filter=lfs diff=lfs merge=lfs -text
53
  *.jpeg filter=lfs diff=lfs merge=lfs -text
54
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
52
  *.jpg filter=lfs diff=lfs merge=lfs -text
53
  *.jpeg filter=lfs diff=lfs merge=lfs -text
54
  *.webp filter=lfs diff=lfs merge=lfs -text
55
+ *.json filter=lfs diff=lfs merge=lfs -text
56
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
NINJAL-Ainu-Folklore.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """NINJAL Ainu folklore corpus"""
16
+
17
+ import os
18
+ import json
19
+
20
+ import datasets
21
+
22
+
23
# Dataset-card metadata; left empty pending an official description/citation.
_DESCRIPTION = ""
_CITATION = ""
_HOMEPAGE_URL = ""

# Repository-relative locations of the per-split audio archives and the
# per-split JSON transcription metadata; `{split}` is one of
# "train", "dev", "test" (filled in by str.format in _split_generators).
_BASE_PATH = "data/"
_DATA_URL = _BASE_PATH + "audio/{split}.tar.gz"
_META_URL = _BASE_PATH + "{split}.json"
30
+
31
+
32
class AinuFolkloreConfig(datasets.BuilderConfig):
    """Builder configuration for the NINJAL Ainu folklore corpus."""

    def __init__(self, name, **kwargs):
        """Create a named config pinned to placeholder version 0.0.0."""
        version = datasets.Version("0.0.0", "")
        super().__init__(name=name, version=version, **kwargs)
35
+
36
+
37
class AinuFolklore(datasets.GeneratorBasedBuilder):
    """NINJAL Ainu folklore speech corpus.

    Each example pairs a 16 kHz audio recording with its transcription,
    speaker label, and interlinear glossed-text tiers (surface, underlying,
    gloss, translation) read from per-split JSON metadata files.
    """

    BUILDER_CONFIGS = [AinuFolkloreConfig("all")]

    def _info(self):
        """Return dataset metadata: features, supervised keys, card fields."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                # Audio members inside the per-split tar.gz archives, 16 kHz.
                "audio": datasets.features.Audio(sampling_rate=16_000),
                "transcription": datasets.Value("string"),
                "speaker": datasets.Value("string"),
                # Interlinear glossed-text tiers.
                "surface": datasets.Value("string"),
                "underlying": datasets.Value("string"),
                "gloss": datasets.Value("string"),
                "translation": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("audio", "transcription"),
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        """Download per-split archives and metadata; build the SplitGenerators.

        Args:
            dl_manager: `datasets.DownloadManager` (or streaming variant).

        Returns:
            A list of `datasets.SplitGenerator` for train/validation/test.
        """
        splits = ["train", "dev", "test"]

        data_urls = {split: [_DATA_URL.format(split=split)] for split in splits}
        meta_urls = {split: [_META_URL.format(split=split)] for split in splits}

        archive_paths = dl_manager.download(data_urls)
        # In streaming mode archives are never extracted; generation then
        # reads audio bytes straight out of the tar members instead.
        local_extracted_archives = (
            dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        )
        archive_iters = {
            split: [dl_manager.iter_archive(path) for path in paths]
            for split, paths in archive_paths.items()
        }

        meta_paths = dl_manager.download(meta_urls)

        # One SplitGenerator per split; previously this construction was
        # copy-pasted three times with only the split name varying.
        split_names = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split_names[split],
                gen_kwargs={
                    "local_extracted_archives": local_extracted_archives.get(
                        split, [None] * len(meta_paths.get(split))
                    ),
                    "archive_iters": archive_iters.get(split),
                    "text_paths": meta_paths.get(split),
                },
            )
            for split in splits
        ]

    def _generate_examples(self, local_extracted_archives, archive_iters, text_paths):
        """Yield (key, example) pairs by joining archived audio with metadata.

        Args:
            local_extracted_archives: per-archive extraction dirs, or None
                entries when streaming.
            archive_iters: per-archive (member_path, file_object) iterators.
            text_paths: per-archive JSON metadata file paths; each JSON maps
                an audio file name to its example fields.
        """
        assert len(local_extracted_archives) == len(archive_iters) == len(text_paths)
        key = 0

        for archive, text_path, local_extracted_path in zip(
            archive_iters, text_paths, local_extracted_archives
        ):
            with open(text_path, encoding="utf-8") as fin:
                data = json.load(fin)

            for audio_path, audio_file in archive:
                audio_filename = audio_path.split("/")[-1]
                # Skip tar members that have no metadata entry.
                if audio_filename not in data:
                    continue

                result = data[audio_filename]
                extracted_audio_path = (
                    os.path.join(local_extracted_path, audio_filename)
                    if local_extracted_path is not None
                    else None
                )
                # Fix: extracted_audio_path was computed but never used.
                # Point "path" at the extracted on-disk file when available
                # (non-streaming), falling back to the in-archive path; the
                # raw bytes are always supplied so both modes decode.
                result["audio"] = {
                    "path": extracted_audio_path or audio_path,
                    "bytes": audio_file.read(),
                }
                yield key, result
                key += 1
README.md CHANGED
@@ -18,6 +18,8 @@ license: cc-by-sa-4.0
18
 
19
  ### Limitations
20
 
 
 
21
 
22
  ### Original Source
23
 
 
18
 
19
  ### Limitations
20
 
21
+ ### License
22
+
23
 
24
  ### Original Source
25
 
data/audio/dev.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4a7a6593e92848ceb8cb1cf929772ef7e0ebf22a6fc7777c09ba481a795714a8
3
+ size 37029441
data/audio/test.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89b438787d9c34798d63fb6f3b0f54d73cf53163f8b37b3fbcc02fe34338ba4a
3
+ size 38398388
data/audio/train.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2798fdee6cc7ea1f7c7eff5c1cbb31e0da8b8e32df4c6db2a0550650a580d820
3
+ size 666473167
data/dev.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:377ef480d07d62bb5990ae2325ff128cf7b65affb3e9f6204b3cb8208c5b7b2e
3
+ size 164361
data/test.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eaa06bb3304c8442f572a339b63bd6ab3aa2b41bcdf406b498bd28918e87e6bf
3
+ size 163427
data/train.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7521df1c0f863889f633bb09b3f04c5108381e5d091fdee9629a3f5827ee4c45
3
+ size 2938624