{
"add_bos_token":true,
"add_eos_token":false,
"model_max_length":2048,
"pad_token":null,
"sp_model_kwargs":{
},
"tokenizer_class":"LlamaTokenizer",
"clean_up_tokenization_spaces":false,
"bos_token":{
"__type":"AddedToken",
"content":"<s>",
"lstrip":false,
"normalized":true,
"rstrip":false,
"single_word":false
},
"eos_token":{
"__type":"AddedToken",
"content":"</s>",
"lstrip":false,
"normalized":true,
"rstrip":false,
"single_word":false
},
"unk_token":{
"__type":"AddedToken",
"content":"<unk>",
"lstrip":false,
"normalized":true,
"rstrip":false,
"single_word":false
},
"chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"
}