danielhanchen committed
Commit d30af0a
1 Parent(s): a1fb25e

Upload tokenizer

added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<|PAD_TOKEN|>": 49152
+}
special_tokens_map.json CHANGED
@@ -17,12 +17,6 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "<|endoftext|>",
-  "unk_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "pad_token": "<|PAD_TOKEN|>",
+  "unk_token": "�"
 }
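The two diffs above are the shape of change produced by the add_special_tokens API in transformers. A minimal sketch, assuming the base tokenizer is loaded from a placeholder path (the commit itself does not say how the files were generated):

    from transformers import AutoTokenizer

    # Placeholder path; the source repo is not named in this commit.
    tokenizer = AutoTokenizer.from_pretrained("path/to/base-tokenizer")

    # Register a dedicated padding token. New tokens are appended after the
    # existing vocabulary, which is why <|PAD_TOKEN|> lands at id 49152 here.
    tokenizer.add_special_tokens({"pad_token": "<|PAD_TOKEN|>"})

    # save_pretrained rewrites added_tokens.json, special_tokens_map.json,
    # tokenizer.json and tokenizer_config.json, yielding diffs like these.
    tokenizer.save_pretrained("path/to/updated-tokenizer")

If the tokenizer is paired with a model, the embedding matrix would also need model.resize_token_embeddings(len(tokenizer)) so the new id is covered.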
tokenizer.json CHANGED
@@ -155,6 +155,15 @@
       "rstrip": false,
       "normalized": false,
       "special": true
+    },
+    {
+      "id": 49152,
+      "content": "<|PAD_TOKEN|>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
     }
   ],
   "normalizer": null,
tokenizer_config.json CHANGED
@@ -136,6 +136,14 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "49152": {
+      "content": "<|PAD_TOKEN|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
     }
   },
   "additional_special_tokens": [
@@ -147,9 +155,9 @@
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
   "model_max_length": 2048,
-  "pad_token": "<|endoftext|>",
+  "pad_token": "<|PAD_TOKEN|>",
   "padding_side": "left",
   "tokenizer_class": "GPT2Tokenizer",
-  "unk_token": "<|endoftext|>",
+  "unk_token": "�",
   "vocab_size": 49152
 }
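A quick end-to-end check of the new padding setup after this commit; a sketch, again with a placeholder path standing in for the actual repo and revision:

    from transformers import AutoTokenizer

    # Placeholder path; substitute the repo id and revision of this commit.
    tokenizer = AutoTokenizer.from_pretrained("path/to/updated-tokenizer")

    assert tokenizer.pad_token == "<|PAD_TOKEN|>"
    assert tokenizer.pad_token_id == 49152
    assert tokenizer.unk_token == "�"

    # padding_side is "left", so shorter sequences are padded at the front.
    batch = tokenizer(["hi", "a longer example"], padding=True)
    print(batch["input_ids"])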