codingninja committed on
Commit 063d65a
1 Parent(s): a55939a

Upload tokenizer

Files changed (4)
  1. added_tokens.json +14 -0
  2. special_tokens_map.json +30 -0
  3. tokenizer_config.json +128 -0
  4. vocab.json +69 -0
added_tokens.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "0": 69,
+ "1": 70,
+ "2": 71,
+ "3": 72,
+ "4": 73,
+ "5": 74,
+ "6": 75,
+ "7": 76,
+ "8": 77,
+ "9": 78,
+ "</s>": 68,
+ "<s>": 67
+ }
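
The digits are appended as added tokens rather than base vocabulary entries: vocab.json below covers IDs 0-66, <s> and </s> take 67-68, so "0" through "9" land at IDs 69-78. A minimal sketch of checking this with transformers, assuming the four files from this commit are saved in a hypothetical local directory tokenizer/:

from transformers import Wav2Vec2CTCTokenizer

# "tokenizer/" is a hypothetical directory holding the four files
# uploaded in this commit.
tok = Wav2Vec2CTCTokenizer.from_pretrained("tokenizer/")

# Digits resolve to the added-token IDs from added_tokens.json.
print(tok.convert_tokens_to_ids(["0", "9"]))  # expected: [69, 78]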
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "[PAD]",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "[UNK]",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false
+ }
+ }
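
These entries assign the bos/eos/pad/unk roles; the lstrip/rstrip flags on [PAD] and [UNK] mean adjacent whitespace is stripped when those tokens are matched in text. A quick sketch of inspecting them after loading (same hypothetical tokenizer/ directory as above):

from transformers import Wav2Vec2CTCTokenizer

tok = Wav2Vec2CTCTokenizer.from_pretrained("tokenizer/")  # hypothetical path

print(tok.bos_token, tok.eos_token)        # <s> </s>
print(tok.pad_token_id, tok.unk_token_id)  # 66 and 65, per vocab.json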
tokenizer_config.json ADDED
@@ -0,0 +1,128 @@
+ {
+ "added_tokens_decoder": {
+ "65": {
+ "content": "[UNK]",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": false
+ },
+ "66": {
+ "content": "[PAD]",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": false
+ },
+ "67": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "68": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "69": {
+ "content": "0",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "70": {
+ "content": "1",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "71": {
+ "content": "2",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "72": {
+ "content": "3",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "73": {
+ "content": "4",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "74": {
+ "content": "5",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "75": {
+ "content": "6",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "76": {
+ "content": "7",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "77": {
+ "content": "8",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "78": {
+ "content": "9",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": true,
+ "do_lower_case": false,
+ "eos_token": "</s>",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "[PAD]",
+ "processor_class": "Wav2Vec2BertProcessor",
+ "replace_word_delimiter_char": " ",
+ "target_lang": null,
+ "tokenizer_class": "Wav2Vec2CTCTokenizer",
+ "unk_token": "[UNK]",
+ "word_delimiter_token": "|"
+ }
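
The large model_max_length is transformers' "effectively unlimited" sentinel (int(1e30) stored as a float), and word_delimiter_token "|" is how spaces are encoded, with replace_word_delimiter_char turning it back into " " on decode. A sketch of that round trip, again assuming the hypothetical tokenizer/ directory:

from transformers import Wav2Vec2CTCTokenizer

tok = Wav2Vec2CTCTokenizer.from_pretrained("tokenizer/")  # hypothetical path

# Spaces are encoded as the word delimiter "|" (ID 0) and restored on decode.
ids = tok("ਸਤ ਸ").input_ids
print(ids)              # expected: [43, 27, 0, 43] per vocab.json
print(tok.decode(ids))  # "ਸਤ ਸ"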
vocab.json ADDED
@@ -0,0 +1,69 @@
+ {
+ "[PAD]": 66,
+ "[UNK]": 65,
+ "|": 0,
+ "ਂ": 1,
+ "ਅ": 2,
+ "ਆ": 3,
+ "ਇ": 4,
+ "ਈ": 5,
+ "ਉ": 6,
+ "ਊ": 7,
+ "ਏ": 8,
+ "ਐ": 9,
+ "ਓ": 10,
+ "ਔ": 11,
+ "ਕ": 12,
+ "ਖ": 13,
+ "ਗ": 14,
+ "ਘ": 15,
+ "ਙ": 16,
+ "ਚ": 17,
+ "ਛ": 18,
+ "ਜ": 19,
+ "ਝ": 20,
+ "ਞ": 21,
+ "ਟ": 22,
+ "ਠ": 23,
+ "ਡ": 24,
+ "ਢ": 25,
+ "ਣ": 26,
+ "ਤ": 27,
+ "ਥ": 28,
+ "ਦ": 29,
+ "ਧ": 30,
+ "ਨ": 31,
+ "ਪ": 32,
+ "ਫ": 33,
+ "ਬ": 34,
+ "ਭ": 35,
+ "ਮ": 36,
+ "ਯ": 37,
+ "ਰ": 38,
+ "ਲ": 39,
+ "ਲ਼": 40,
+ "ਵ": 41,
+ "ਸ਼": 42,
+ "ਸ": 43,
+ "ਹ": 44,
+ "਼": 45,
+ "ਾ": 46,
+ "ਿ": 47,
+ "ੀ": 48,
+ "ੁ": 49,
+ "ੂ": 50,
+ "ੇ": 51,
+ "ੈ": 52,
+ "ੋ": 53,
+ "ੌ": 54,
+ "੍": 55,
+ "ਖ਼": 56,
+ "ਗ਼": 57,
+ "ਜ਼": 58,
+ "ੜ": 59,
+ "ਫ਼": 60,
+ "ੰ": 61,
+ "ੱ": 62,
+ "ੲ": 63,
+ "ੳ": 64
+ }
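
The base vocabulary is a character set for Punjabi's Gurmukhi script, with "|" at ID 0 as the word delimiter and [PAD] (ID 66) doubling as the CTC blank. A sketch of greedy CTC decoding with this vocabulary, using random logits as a stand-in for a real acoustic model's output:

import torch
from transformers import Wav2Vec2CTCTokenizer

tok = Wav2Vec2CTCTokenizer.from_pretrained("tokenizer/")  # hypothetical path

# Random (time_steps x vocab_size) scores standing in for model logits;
# a real Wav2Vec2-style model would produce these.
logits = torch.randn(50, len(tok))
pred_ids = torch.argmax(logits, dim=-1)

# decode() collapses repeated IDs and drops [PAD] (the CTC blank),
# the standard greedy CTC post-processing.
print(tok.decode(pred_ids))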