flexthink committed on
Commit fead939
1 Parent(s): 036dc6a

Initial import

.gitattributes CHANGED
@@ -35,3 +35,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.mp3 filter=lfs diff=lfs merge=lfs -text
  *.ogg filter=lfs diff=lfs merge=lfs -text
  *.wav filter=lfs diff=lfs merge=lfs -text
+ dataset/sentence_valid.json filter=lfs diff=lfs merge=lfs -text
+ dataset/lexicon_test.json filter=lfs diff=lfs merge=lfs -text
+ dataset/lexicon_train.json filter=lfs diff=lfs merge=lfs -text
+ dataset/lexicon_valid.json filter=lfs diff=lfs merge=lfs -text
+ dataset/sentence_test.json filter=lfs diff=lfs merge=lfs -text
+ dataset/sentence_train.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,8 @@
- ---
- license: other
- ---
+ # librig2p-nostress - Grapheme-To-Phoneme Dataset
+
+ This dataset contains samples that can be used to train a Grapheme-to-Phoneme system **without** stress information.
+
+ The dataset is derived from the following pre-existing datasets:
+
+ * [LibriSpeech ASR Corpus](https://www.openslr.org/12)
+ * [LibriSpeech Alignments](https://github.com/CorentinJ/librispeech-alignments)
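For reference, a dataset published with a loading script like the one added in this commit can typically be consumed through the `datasets` library. A minimal usage sketch, assuming the `datasets` package is installed, a local copy of `librig2p-nostress.py` from this commit sits in the working directory, and the `_BASE_URL` it downloads from is reachable:

    # Minimal usage sketch; see assumptions above.
    from datasets import load_dataset

    # Split names combine data type and split type, e.g. "lexicon_train".
    data = load_dataset("./librig2p-nostress.py", split="lexicon_train")
    print(data[0]["char"], data[0]["phn"])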
dataset/lexicon_test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:134d35619767566a61972c9c467f751553c1424471416d5ccd02be2ab7728070
+ size 361373
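For context, the JSON files in this commit are stored with Git LFS, so the repository itself only versions small pointer files in the three-line format shown above (version, oid, size). A minimal sketch of reading such a pointer in Python, assuming the standard key-value line layout of the Git LFS pointer spec:

    # Parse a Git LFS pointer file into a dict of its fields.
    def parse_lfs_pointer(path):
        fields = {}
        with open(path, encoding="utf-8") as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields

    # e.g. parse_lfs_pointer("dataset/lexicon_test.json")
    # -> {"version": "https://git-lfs.github.com/spec/v1",
    #     "oid": "sha256:134d...", "size": "361373"}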
dataset/lexicon_train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02bdbd4b798eeaeeee7d5f2a39d67167cb28a81d04b95b9f87f99fabb880ea01
+ size 35208912
dataset/lexicon_valid.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cafad66ba83634303c0e139db060745043b0e1c54b4f9c4cb9fec34d3e65261a
+ size 358546
dataset/sentence_test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0c5d98a3fc0561c796b39dfa3b93d7c007c1fb5a17531536f3706936936edaf
+ size 2765822
dataset/sentence_train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c08926309b25b69784bb6890d24e5f15f3e4e19d2bb99ea0f7b2e5f9d9a9de1
+ size 408305
dataset/sentence_valid.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7471a443af29fc90350debaa3d90d03cd0e4cefe9807a4b9312577c79613776d
+ size 2833542
librig2p-nostress.py ADDED
@@ -0,0 +1,119 @@
+ # coding=utf-8
+ # Copyright 2021 Artem Ploujnikov
+
+
+ # Lint as: python3
+ import json
+
+ import datasets
+
+ _DESCRIPTION = """\
+ Grapheme-to-Phoneme training, validation and test sets
+ """
+
+ _BASE_URL = "https://raw.githubusercontent.com/flexthink/librig2p-nostress/develop/dataset"
+
+ _HOMEPAGE_URL = "https://github.com/flexthink/librig2p-nostress/tree/develop"
+
+ # ARPABET phoneme inventory, without stress markers
+ _PHONEMES = [
+     "AA",
+     "AE",
+     "AH",
+     "AO",
+     "AW",
+     "AY",
+     "B",
+     "CH",
+     "D",
+     "DH",
+     "EH",
+     "ER",
+     "EY",
+     "F",
+     "G",
+     "HH",
+     "IH",
+     "IY",
+     "JH",
+     "K",
+     "L",
+     "M",
+     "N",
+     "NG",
+     "OW",
+     "OY",
+     "P",
+     "R",
+     "S",
+     "SH",
+     "T",
+     "TH",
+     "UH",
+     "UW",
+     "V",
+     "W",
+     "Y",
+     "Z",
+     "ZH",
+ ]
+ _ORIGINS = ["librispeech", "librispeech-lex"]
+ _NA = "N/A"
+ _SPLIT_TYPES = ["train", "valid", "test"]
+ _DATA_TYPES = ["lexicon", "sentence"]
+ # Six splits: lexicon_{train,valid,test} and sentence_{train,valid,test}
+ _SPLITS = [
+     f"{data_type}_{split_type}"
+     for data_type in _DATA_TYPES
+     for split_type in _SPLIT_TYPES]
+
+
+ class GraphemeToPhoneme(datasets.GeneratorBasedBuilder):
+     def __init__(self, base_url=None, splits=None, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.base_url = base_url or _BASE_URL
+         self.splits = splits or _SPLITS
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "speaker_id": datasets.Value("string"),
+                     "origin": datasets.ClassLabel(names=_ORIGINS),
+                     "char": datasets.Value("string"),
+                     "phn": datasets.Sequence(datasets.Value("string")),
+                 },
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE_URL,
+         )
+
+     def _get_url(self, split):
+         return f"{self.base_url}/{split}.json"
+
+     def _split_generator(self, dl_manager, split):
+         url = self._get_url(split)
+         path = dl_manager.download_and_extract(url)
+         return datasets.SplitGenerator(
+             name=split,
+             gen_kwargs={"datapath": path, "datatype": split},
+         )
+
+     def _split_generators(self, dl_manager):
+         return [
+             self._split_generator(dl_manager, split)
+             for split in self.splits
+         ]
+
+     def _generate_examples(self, datapath, datatype):
+         with open(datapath, encoding="utf-8") as f:
+             data = json.load(f)
+
+         # Each JSON file maps an item id to a record of grapheme/phoneme data.
+         for sentence_counter, (item_id, item) in enumerate(data.items()):
+             resp = {
+                 "id": item_id,
+                 "speaker_id": str(item.get("speaker_id") or _NA),
+                 "origin": item["origin"],
+                 "char": item["char"],
+                 "phn": item["phn"],
+             }
+             yield sentence_counter, resp
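For reference, `_generate_examples` expects each split's JSON file to map an item id to a record with `speaker_id`, `origin`, `char`, and `phn` keys, matching the features declared in `_info`. A hypothetical entry (the id, speaker, and text below are invented for illustration; `phn` uses ARPABET symbols from `_PHONEMES`):

    # Hypothetical input record, shaped like what _generate_examples reads.
    # All values here are invented.
    {
        "lbs-0001": {
            "speaker_id": "84",
            "origin": "librispeech",
            "char": "HELLO WORLD",
            "phn": ["HH", "AH", "L", "OW", "W", "ER", "L", "D"]
        }
    }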