YvanCarre committed (verified)
Commit fa1d57c · Parent(s): 7f54822

Upload tokenizer
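A tokenizer upload like this is typically produced with the Hub tooling; a minimal sketch, assuming the `transformers` `push_to_hub` helper. Neither the base checkpoint nor the target repo id is named in this commit, so both are placeholders:

```python
from transformers import AutoTokenizer

# Placeholder names: "gpt2" as the base checkpoint and the repo id are
# assumptions for illustration, not taken from this commit.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.push_to_hub("YvanCarre/example-repo", commit_message="Upload tokenizer")
```

For a GPT-2-style tokenizer, this writes merges.txt, vocab.json, tokenizer.json, special_tokens_map.json, and tokenizer_config.json in a single commit, which matches the file list below.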
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json CHANGED
```diff
@@ -1,5 +1,6 @@
 {
   "bos_token": "<|endoftext|>",
   "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>",
   "unk_token": "<|endoftext|>"
 }
```
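For context, a `pad_token` entry like the one added here is usually the result of reusing EOS as PAD before saving; a minimal sketch, assuming a GPT-2-style tokenizer (the checkpoint name is a placeholder):

```python
from transformers import AutoTokenizer

# Placeholder checkpoint: any GPT-2-style tokenizer behaves the same way.
tokenizer = AutoTokenizer.from_pretrained("gpt2")

# GPT-2 ships without a pad token; assigning EOS as PAD is the common fix
# and yields exactly the "pad_token": "<|endoftext|>" entry in this diff.
tokenizer.pad_token = tokenizer.eos_token

# save_pretrained regenerates special_tokens_map.json with the new entry.
tokenizer.save_pretrained("./tokenizer")
```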
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
```diff
@@ -1,10 +1,10 @@
 {
   "add_prefix_space": false,
   "added_tokens_decoder": {
-    "34999": {
+    "0": {
       "content": "<|endoftext|>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -13,7 +13,8 @@
   "bos_token": "<|endoftext|>",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|endoftext|>",
-  "model_max_length": 1000000000000000019884624838656,
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>"
 }
```
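Two notes on this diff: the old `model_max_length` is the `transformers` sentinel (`int(1e30)`, meaning no limit was recorded), now pinned to GPT-2's 1024-token context; and the `added_tokens_decoder` key is the token id, so the move from "34999" to "0" indicates `<|endoftext|>` sits at id 0 in the uploaded vocab. A quick hedged check of the resulting behavior (the local path is a placeholder):

```python
from transformers import AutoTokenizer

# Placeholder path: point at a directory (or repo id) containing the
# updated tokenizer files from this commit.
tokenizer = AutoTokenizer.from_pretrained("./tokenizer")

print(tokenizer.model_max_length)  # 1024 after this change
print(tokenizer.pad_token)         # <|endoftext|>

# With a pad token defined, batch encoding with padding now works instead
# of raising "Asking to pad but the tokenizer does not have a padding token".
batch = tokenizer(["short", "a somewhat longer example"], padding=True)
print([len(ids) for ids in batch["input_ids"]])  # equal, EOS-padded lengths
```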
vocab.json ADDED
The diff for this file is too large to render. See raw diff