VLKVLK committed · verified · Commit d8f435c · Parent: 025bb2b

Upload tokenizer

added_tokens.json CHANGED
@@ -1,14 +1,5 @@
 {
-  "<|assistant|>": 32001,
-  "<|endoftext|>": 32000,
-  "<|end|>": 32007,
-  "<|eot_id|>": 32011,
-  "<|placeholder1|>": 32002,
-  "<|placeholder2|>": 32003,
-  "<|placeholder3|>": 32004,
-  "<|placeholder4|>": 32005,
-  "<|placeholder5|>": 32008,
-  "<|placeholder6|>": 32009,
-  "<|system|>": 32006,
-  "<|user|>": 32010
+  "<|endoftext|>": 151643,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644
 }
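The added-token ids jump from the 32000 range to the 151643 range because the vocabulary switches from a Llama-style 32k tokenizer to Qwen2's ~151k byte-level BPE vocabulary, so any code that hard-codes the old ids will break. A minimal sanity check, assuming the tokenizer loads with transformers' AutoTokenizer (the repo id below is a placeholder, not given on this page):

from transformers import AutoTokenizer

# Placeholder repo id - substitute the model repo this commit belongs to.
tok = AutoTokenizer.from_pretrained("VLKVLK/model")

# Expected ids are taken from the updated added_tokens.json above.
for token, expected_id in {
    "<|endoftext|>": 151643,
    "<|im_start|>": 151644,
    "<|im_end|>": 151645,
}.items():
    assert tok.convert_tokens_to_ids(token) == expected_id, token
print("special-token ids match added_tokens.json")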
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json CHANGED
@@ -1,13 +1,10 @@
 {
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>"
+  ],
   "eos_token": {
-    "content": "<|eot_id|>",
+    "content": "<|im_end|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -19,12 +16,5 @@
     "normalized": false,
     "rstrip": false,
     "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
   }
 }
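The map drops bos_token and unk_token entirely, registers <|im_start|> and <|im_end|> as additional special tokens, and moves eos_token from <|eot_id|> to <|im_end|>, so generation now stops on the Qwen-style end marker. A short sketch of how these settings surface at runtime (same placeholder repo id as above):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("VLKVLK/model")  # placeholder repo id

print(tok.eos_token)                  # "<|im_end|>"  (was "<|eot_id|>")
print(tok.pad_token)                  # "<|endoftext|>"
print(tok.additional_special_tokens)  # ["<|im_start|>", "<|im_end|>"]
print(tok.bos_token, tok.unk_token)   # None None - both removed by this commit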
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,33 +1,7 @@
 {
-  "add_bos_token": false,
-  "add_eos_token": false,
-  "add_prefix_space": null,
+  "add_prefix_space": false,
   "added_tokens_decoder": {
-    "0": {
-      "content": "<unk>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "1": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "2": {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": false
-    },
-    "32000": {
+    "151643": {
       "content": "<|endoftext|>",
       "lstrip": false,
       "normalized": false,
@@ -35,88 +9,16 @@
       "single_word": false,
       "special": true
     },
-    "32001": {
-      "content": "<|assistant|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32002": {
-      "content": "<|placeholder1|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32003": {
-      "content": "<|placeholder2|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32004": {
-      "content": "<|placeholder3|>",
+    "151644": {
+      "content": "<|im_start|>",
       "lstrip": false,
       "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32005": {
-      "content": "<|placeholder4|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32006": {
-      "content": "<|system|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32007": {
-      "content": "<|end|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32008": {
-      "content": "<|placeholder5|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32009": {
-      "content": "<|placeholder6|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32010": {
-      "content": "<|user|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "32011": {
-      "content": "<|eot_id|>",
+    "151645": {
+      "content": "<|im_end|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -124,17 +26,19 @@
       "special": true
     }
   },
-  "bos_token": "<s>",
-  "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>"
+  ],
+  "bos_token": null,
+  "chat_template": "{% set system_message = 'Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n' %}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '### Instruction:\n' + content + '\n\n### Response:\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\n\n' }}{% endif %}{% endfor %}",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "<|eot_id|>",
-  "legacy": false,
-  "model_max_length": 4096,
+  "eos_token": "<|im_end|>",
+  "errors": "replace",
+  "model_max_length": 32768,
   "pad_token": "<|endoftext|>",
   "padding_side": "left",
-  "sp_model_kwargs": {},
   "split_special_tokens": false,
-  "tokenizer_class": "LlamaTokenizer",
-  "unk_token": "<unk>",
-  "use_default_system_prompt": false
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
 }
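Note that despite the ChatML-style <|im_start|>/<|im_end|> specials, the new chat_template is Alpaca-style: an optional system preamble, '### Instruction:' / '### Response:' markers around user turns, and <|im_end|> only as the assistant-turn terminator. It also ignores add_generation_prompt, since '### Response:\n' is emitted after every user message. A sketch of rendering it (placeholder repo id again):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("VLKVLK/model")  # placeholder repo id

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hi."},
]
print(tok.apply_chat_template(messages, tokenize=False))
# You are a helpful assistant.### Instruction:
# Say hi.
#
# ### Response: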
vocab.json ADDED
The diff for this file is too large to render. See raw diff
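merges.txt and vocab.json are ADDED rather than CHANGED because the tokenizer_class moves from LlamaTokenizer (a SentencePiece model, typically serialized as tokenizer.model) to Qwen2Tokenizer, a GPT-2-style byte-level BPE that stores its vocabulary in vocab.json and its merge rules in merges.txt. A quick check, under the same placeholder repo id:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("VLKVLK/model")  # placeholder repo id

print(type(tok).__name__)  # Qwen2TokenizerFast (Qwen2Tokenizer with use_fast=False)
print(len(tok))            # total vocab incl. specials; 151646 expected here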