Update tokenizer_config.json
tokenizer_config.json CHANGED (+1 / -1)
@@ -1744,7 +1744,7 @@
     "<end_of_turn>"
   ],
   "bos_token": "<bos>",
-  "chat_template": "{{ bos_token }}{% if
+  "chat_template": "{{ bos_token }}{% for message in messages %}{% if message['role'] not in ['user', 'assistant', 'system'] or (loop.index0 > 0 and message['role'] != 'system' and message['role'] == messages[loop.index0 - 1]['role']) %}{{ raise_exception('Invalid role or role sequence') }}{% endif %}{{ '<start_of_turn>' + message['role'] + '\n' + message['content'] | trim + '<end_of_turn>\n' if message['role'] != 'system' else message['content'] + '\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<start_of_turn>model\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<eos>",
   "model_max_length": 2048,
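The updated chat_template accepts only user, assistant, and system roles, rejects consecutive messages from the same non-system role, wraps user and assistant turns in <start_of_turn>…<end_of_turn> markers, emits system content bare followed by a newline, and appends "<start_of_turn>model\n" when a generation prompt is requested. Below is a minimal sketch of rendering this template outside of transformers; it assumes the edited tokenizer_config.json is in the working directory, uses jinja2 directly, and the small raise_exception helper only mimics the one transformers registers when it renders chat templates. The example messages are illustrative.

# Sketch: render the new chat_template with jinja2 directly.
# Assumes tokenizer_config.json (the file changed above) is available locally.
import json

from jinja2 import Environment

with open("tokenizer_config.json") as f:
    config = json.load(f)


def raise_exception(message):
    # The template calls this on an invalid role or role sequence.
    raise ValueError(message)


env = Environment()
env.globals["raise_exception"] = raise_exception
template = env.from_string(config["chat_template"])

messages = [
    {"role": "user", "content": "Hello there."},
    {"role": "assistant", "content": "Hi! How can I help?"},
    {"role": "user", "content": "Tell me a joke."},
]

prompt = template.render(
    messages=messages,
    bos_token=config["bos_token"],  # "<bos>" per this config
    add_generation_prompt=True,
)
print(prompt)

With transformers, loading the tokenizer from this repository and calling tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) should produce the same string, since that method reads the chat_template field changed in this commit.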