vikas117 committed
Commit d547b1c · verified · 1 parent: b8fefac

Upload tokenizer

Files changed (2):
  1. tokenizer.json (+0 -0)
  2. tokenizer_config.json (+56 -50)
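A commit like this one, writing tokenizer.json and tokenizer_config.json together under the message "Upload tokenizer", is what `push_to_hub` on a tokenizer produces. A minimal sketch, assuming a locally saved RoBERTa tokenizer; the local path and repo id below are hypothetical, since this page does not show them:

```python
from transformers import AutoTokenizer

# Hypothetical local checkpoint; the commit page does not show where
# the tokenizer was saved before upload.
tokenizer = AutoTokenizer.from_pretrained("./my-roberta-checkpoint")

# One call writes tokenizer.json and tokenizer_config.json to the Hub
# in a single commit with the given message.
tokenizer.push_to_hub("vikas117/my-model", commit_message="Upload tokenizer")
```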
tokenizer.json CHANGED
The diff for this file is too large to render; see the raw diff.
 
tokenizer_config.json CHANGED
```diff
@@ -1,59 +1,65 @@
 {
   "add_prefix_space": false,
-  "added_tokens_decoder": {
-    "0": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "1": {
-      "content": "<pad>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "3": {
-      "content": "<unk>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "4": {
-      "content": "<mask>",
-      "lstrip": true,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "25678": {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    }
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
   },
-  "bos_token": "<s>",
-  "clean_up_tokenization_spaces": false,
-  "cls_token": "<s>",
-  "eos_token": "</s>",
   "errors": "replace",
-  "extra_special_tokens": {},
-  "mask_token": "<mask>",
+  "mask_token": {
+    "__type": "AddedToken",
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
   "max_len": 296,
   "model_max_length": 512,
-  "pad_token": "<pad>",
-  "sep_token": "</s>",
+  "pad_token": {
+    "__type": "AddedToken",
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
   "tokenizer_class": "RobertaTokenizer",
   "trim_offsets": true,
-  "unk_token": "<unk>"
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
 }
```
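The two sides of the diff are different serializations of the same special tokens: the old file listed them in an `added_tokens_decoder` map keyed by vocabulary id, while the uploaded file inlines each token as an `AddedToken` object via the `"__type"` convention used by older transformers releases, and flips `clean_up_tokenization_spaces` from `false` to `true`. A minimal sketch of what those entries deserialize to, using `transformers.AddedToken`; the values are copied from the diff, nothing else is assumed:

```python
from transformers import AddedToken

# Each {"__type": "AddedToken", ...} entry in the committed config
# deserializes to an AddedToken with the same fields.
mask_token = AddedToken(
    "<mask>",
    lstrip=True,        # "<mask>" absorbs the space before it, the usual
    rstrip=False,       # RoBERTa convention for mask filling
    normalized=True,
    single_word=False,
)
bos_token = AddedToken("<s>", lstrip=False, rstrip=False,
                       normalized=True, single_word=False)
```

Note that `<mask>` is the only token serialized with `lstrip: true`; the rest keep the defaults, and `clean_up_tokenization_spaces: true` makes `decode()` strip spaces before punctuation. One detail the `AddedToken` format no longer records: the old `added_tokens_decoder` pinned `</s>` to vocabulary id 25678, whereas the new entries carry no ids, so those now come from tokenizer.json alone.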