zcbenz committed
Commit 020c464 · verified · 1 Parent(s): 0b78aed

Upload model files

Files changed (4):
  1. config.json +10 -0
  2. tokenizer.json +0 -0
  3. tokenizer_config.json +19 -0
  4. weights.safetensors +3 -0
config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "model_type": "llama",
+   "hidden_size": 288,
+   "intermediate_size": 768,
+   "num_hidden_layers": 6,
+   "num_attention_heads": 6,
+   "num_key_value_heads": 6,
+   "rms_norm_eps": 1e-05,
+   "vocab_size": 50570
+ }
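
For reference, these hyperparameters fully determine the checkpoint's size. A minimal sketch (assuming the standard Llama layout with untied input and output embeddings, which is consistent with the ~140 MB fp32 file uploaded below):

import json

with open("config.json") as f:
    cfg = json.load(f)

h, inter = cfg["hidden_size"], cfg["intermediate_size"]
layers, vocab = cfg["num_hidden_layers"], cfg["vocab_size"]

embed = vocab * h                 # token embedding: 50570 * 288
attn = 4 * h * h                  # q/k/v/o projections (heads == kv heads, no bias)
mlp = 3 * h * inter               # gate/up/down projections
norms = 2 * h                     # two RMSNorms per layer
per_layer = attn + mlp + norms

total = embed + layers * per_layer + h + vocab * h   # + final norm + lm_head
print(f"~{total / 1e6:.1f}M params")                 # ~35.1M
print(f"~{total * 4 / 1e6:.1f} MB in fp32")          # ~140.4 MB, matching weights.safetensors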
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "unk_token": "<unk|LLM-jp>",
+   "bos_token": "<s|LLM-jp>",
+   "eos_token": "<EOD|LLM-jp>",
+   "pad_token": "<pad|LLM-jp>",
+   "cls_token": "<CLS|LLM-jp>",
+   "sep_token": "<SEP|LLM-jp>",
+   "eod_token": "<EOD|LLM-jp>",
+   "mask_token": "<mask|LLM-jp>",
+   "extra_ids": 0,
+   "additional_special_tokens": [
+     "</s|LLM-jp>"
+   ],
+   "sp_model_kwargs": {},
+   "model_max_length": 1000000000000000019884624838656,
+   "clean_up_tokenization_spaces": false,
+   "special_tokens_map_file": null,
+   "tokenizer_class": "PreTrainedTokenizerFast"
+ }
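
The |LLM-jp> special tokens and the PreTrainedTokenizerFast class mean the tokenizer loads directly from tokenizer.json, and the huge model_max_length is transformers' "no limit set" sentinel (int(1e30) after float rounding). A minimal sketch, assuming the files are checked out in the current directory (the repo id isn't shown here):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")

print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s|LLM-jp> <EOD|LLM-jp> <unk|LLM-jp>

# LLM-jp tokenizers target Japanese text; round-trip a short sample.
ids = tok("こんにちは、世界").input_ids
print(tok.decode(ids))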
weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef1c76338aa3e0c0548c95304ba995cf0178ab185aa127b272e535216fa4e050
+ size 140422249
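
What is committed here is only the Git LFS pointer; the oid is the SHA-256 of the real 140,422,249-byte file. A minimal sketch, assuming the actual weights have been fetched (e.g. via git lfs pull), that verifies the hash and peeks at the safetensors header using only the stdlib:

import hashlib, json, struct

data = open("weights.safetensors", "rb").read()

# The LFS pointer's oid is the SHA-256 of the resolved file.
print(hashlib.sha256(data).hexdigest() ==
      "ef1c76338aa3e0c0548c95304ba995cf0178ab185aa127b272e535216fa4e050")

# A safetensors file starts with a little-endian u64 header length,
# followed by a JSON header mapping tensor names to dtype/shape/offsets.
(header_len,) = struct.unpack("<Q", data[:8])
header = json.loads(data[8 : 8 + header_len])
for name, meta in list(header.items())[:5]:
    if name != "__metadata__":
        print(name, meta["dtype"], meta["shape"])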