parasora committed on
Commit 337946c · verified · 1 Parent(s): 68e5f94

Upload 5 files

special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "eos_token": "</s>",
+   "mask_token": "…",
+   "unk_token": "<unk>"
+ }
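For context, a minimal sketch of one way a special_tokens_map.json like the one above could be produced with the transformers library. It assumes the uploaded tokenizer.json is available in the working directory; the output directory name is a placeholder, not something taken from this commit.

# Sketch only: assumes tokenizer.json (uploaded in this commit) is in the
# current directory; "exported-tokenizer" is a placeholder output path.
from transformers import PreTrainedTokenizerFast

tok = PreTrainedTokenizerFast(
    tokenizer_file="tokenizer.json",
    eos_token="</s>",
    unk_token="<unk>",
    mask_token="…",
)
# save_pretrained writes special_tokens_map.json and tokenizer_config.json.
tok.save_pretrained("exported-tokenizer")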
token_fraction.csv ADDED
The diff for this file is too large to render. See raw diff
 
token_fraction_describe.txt ADDED
@@ -0,0 +1,16 @@
+               token          text    token/text
+ count  19980.000000  19980.000000  19980.000000
+ mean      87.750000    181.553804      0.494357
+ std      237.185668    374.992707      0.100927
+ min        7.000000     41.000000      0.134615
+ 10%       24.000000     46.000000      0.387381
+ 25%       34.000000     64.000000      0.432000
+ 33%       42.000000     85.000000      0.449829
+ 50%       66.000000    139.000000      0.483740
+ 67%       97.000000    205.000000      0.521950
+ 75%      117.000000    245.000000      0.545455
+ 80%      132.000000    277.000000      0.562044
+ 90%      176.100000    371.000000      0.619048
+ 95%      221.000000    461.000000      0.674419
+ 99%      325.210000    666.000000      0.804878
+ max    32162.000000  49156.000000      1.024390
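The summary above looks like pandas DataFrame.describe() output over per-sample token and character counts, plus their ratio. A minimal sketch of how such a table could be generated is below; the column names and CSV layout are assumptions, since token_fraction.csv itself is too large to render here.

# Sketch only: assumes token_fraction.csv has per-sample "token" and "text"
# columns (token count and character count); the actual layout is not shown
# in this commit.
import pandas as pd

df = pd.read_csv("token_fraction.csv")
df["token/text"] = df["token"] / df["text"]

# Percentiles matching the ones reported in token_fraction_describe.txt.
summary = df[["token", "text", "token/text"]].describe(
    percentiles=[0.10, 0.25, 0.33, 0.50, 0.67, 0.75, 0.80, 0.90, 0.95, 0.99]
)
print(summary)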
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "…",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "8255": {
+       "content": "<0x{i:02X}>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "</s>",
+   "mask_token": "…",
+   "model_max_length": 1000000000000000019884624838656,
+   "tokenizer_class": "PreTrainedTokenizerFast",
+   "unk_token": "<unk>"
+ }
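For reference, a minimal sketch of loading the five uploaded files and checking the special tokens declared in tokenizer_config.json. The path is a placeholder for a local checkout or the corresponding Hugging Face repo id, neither of which is given here.

# Sketch only: "./path-to-this-repo" is a placeholder for wherever the
# uploaded tokenizer files live (local directory or Hugging Face repo id).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./path-to-this-repo")

# Special tokens come from special_tokens_map.json / tokenizer_config.json.
print(tok.eos_token, tok.unk_token, tok.mask_token)  # expected: </s> <unk> …

ids = tok("hello world")["input_ids"]
print(ids)
print(tok.decode(ids))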