cfpark00 committed
Commit 39c31fc · verified · 1 Parent(s): 5ab5022

Upload tokenizer

Files changed (3)
  1. special_tokens_map.json +4 -0
  2. tokenizer.json +64 -0
  3. tokenizer_config.json +26 -0
special_tokens_map.json ADDED
@@ -0,0 +1,4 @@
+{
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<pad>"
+}
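
This file only records which strings act as the special tokens. A minimal sketch of inspecting it, assuming a local copy in the current directory (the path is a placeholder):

```python
import json

# special_tokens_map.json maps special-token roles to their string forms;
# transformers reads it when reconstructing the tokenizer from disk.
with open("special_tokens_map.json") as f:
    special_tokens = json.load(f)

assert special_tokens["eos_token"] == "<|endoftext|>"
assert special_tokens["pad_token"] == "<pad>"
```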
tokenizer.json ADDED
@@ -0,0 +1,64 @@
+{
+  "version": "1.0",
+  "truncation": null,
+  "padding": null,
+  "added_tokens": [
+    {
+      "id": 24,
+      "content": "<|endoftext|>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 26,
+      "content": "<pad>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    }
+  ],
+  "normalizer": null,
+  "pre_tokenizer": {
+    "type": "Whitespace"
+  },
+  "post_processor": null,
+  "decoder": null,
+  "model": {
+    "type": "WordLevel",
+    "vocab": {
+      "r": 0,
+      "u": 1,
+      "f": 2,
+      "l": 3,
+      "d": 4,
+      "b": 5,
+      "R": 6,
+      "RR": 7,
+      "RRR": 8,
+      "U": 9,
+      "UU": 10,
+      "UUU": 11,
+      "F": 12,
+      "FF": 13,
+      "FFF": 14,
+      "L": 15,
+      "LL": 16,
+      "LLL": 17,
+      "D": 18,
+      "DD": 19,
+      "DDD": 20,
+      "B": 21,
+      "BB": 22,
+      "BBB": 23,
+      "<|endoftext|>": 24,
+      "<unk>": 25,
+      "<pad>": 26
+    },
+    "unk_token": "<unk>"
+  }
+}
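
This is a WordLevel model over a 27-entry vocabulary (24 word tokens plus `<|endoftext|>`, `<unk>`, and `<pad>`) with a Whitespace pre-tokenizer, so each whitespace-separated token maps one-to-one onto an id. A minimal sketch of loading and using it with the tokenizers library (the local file path is an assumption):

```python
from tokenizers import Tokenizer

# "tokenizer.json" is assumed to be a local copy of the file added here.
tok = Tokenizer.from_file("tokenizer.json")

# Whitespace pre-tokenization + WordLevel lookup: each whitespace-separated
# word maps to exactly one id from the vocab above.
enc = tok.encode("R U RR UUU")
print(enc.tokens)  # ['R', 'U', 'RR', 'UUU']
print(enc.ids)     # [6, 9, 7, 11]

# Out-of-vocabulary words fall back to <unk> (id 25).
print(tok.encode("X").ids)  # [25]
```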
tokenizer_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "added_tokens_decoder": {
+    "24": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "26": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "tokenizer_class": "PreTrainedTokenizer"
+}
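
The large `model_max_length` is transformers' `VERY_LARGE_INTEGER` sentinel (`int(1e30)`), meaning no maximum length was set. A sketch of loading all three files through transformers, assuming they sit together in a local directory (the path is a placeholder for a clone of this repo or its hub id):

```python
from transformers import AutoTokenizer

# "./tokenizer_dir" stands in for wherever the three files from this commit
# live; AutoTokenizer picks up tokenizer.json, tokenizer_config.json, and
# special_tokens_map.json together.
tokenizer = AutoTokenizer.from_pretrained("./tokenizer_dir")

print(tokenizer.eos_token, tokenizer.eos_token_id)  # <|endoftext|> 24
print(tokenizer.pad_token, tokenizer.pad_token_id)  # <pad> 26

# Batch padding works because pad_token is defined; with a null
# post_processor, no eos is appended automatically.
batch = tokenizer(["R U", "R U RR UUU"], padding=True)
print(batch["input_ids"])  # [[6, 9, 26, 26], [6, 9, 7, 11]]
```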