{
"add_bos_token": false,
"add_prefix_space": false,
"added_tokens_decoder": {
"0": {
"content": "<|endoftext|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"16": {
"content": "0",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"17": {
"content": "1",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"19": {
"content": "3",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"20": {
"content": "4",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"36": {
"content": "D",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"38": {
"content": "F",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"41": {
"content": "I",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"43": {
"content": "K",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"44": {
"content": "L",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"45": {
"content": "M",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"46": {
"content": "N",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"50": {
"content": "R",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"52": {
"content": "T",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"59": {
"content": "_",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"87": {
"content": "{",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"89": {
"content": "}",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"50000": {
"content": "U",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"50001": {
"content": "Y",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
},
"50002": {
"content": "X",
"lstrip": true,
"normalized": true,
"rstrip": true,
"single_word": false,
"special": false
}
},
"additional_special_tokens": [],
"bos_token": "<|endoftext|>",
"clean_up_tokenization_spaces": true,
"eos_token": "<|endoftext|>",
"errors": "replace",
"keep_accents": true,
"max_len": 50,
"model_max_length": 50,
"pad_token": "<|endoftext|>",
"tokenizer_class": "GPT2Tokenizer",
"tokenizer_file": "/home/jl/.cache/huggingface/hub/models--ClassCat--gpt2-base-french/snapshots/902ec822995ce12f979d0a5277ee9c2a1b610df1/tokenizer.json",
"unk_token": "<|endoftext|>"
}