googlefan committed (verified)
Commit d178374 · 1 Parent(s): 96fed8c

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "neody/nemma-100m",
+   "architectures": [
+     "Gemma2ForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "attn_logit_softcapping": 50.0,
+   "bos_token_id": 2,
+   "cache_implementation": "hybrid",
+   "eos_token_id": 1,
+   "final_logit_softcapping": 30.0,
+   "head_dim": 64,
+   "hidden_activation": "gelu_pytorch_tanh",
+   "hidden_size": 512,
+   "initializer_range": 0.02,
+   "intermediate_size": 1024,
+   "max_position_embeddings": 3096,
+   "model_type": "gemma2",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 4,
+   "pad_token_id": 0,
+   "query_pre_attn_scalar": 224,
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 10000.0,
+   "sliding_window": 4096,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.46.2",
+   "use_cache": true,
+   "vocab_size": 50259
+ }
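
The config above fully specifies the architecture. A minimal sketch (not part of this commit, assuming transformers >= 4.42 with Gemma2 support) that rebuilds the model skeleton locally and checks its parameter count against the weight file added further down:

from transformers import Gemma2Config, Gemma2ForCausalLM

# Mirror config.json; the keyword names match the JSON keys.
config = Gemma2Config(
    vocab_size=50259,
    hidden_size=512,
    intermediate_size=1024,
    num_hidden_layers=32,
    num_attention_heads=8,
    num_key_value_heads=4,
    head_dim=64,
    max_position_embeddings=3096,
    sliding_window=4096,
    query_pre_attn_scalar=224,
    attn_logit_softcapping=50.0,
    final_logit_softcapping=30.0,
    rope_theta=10000.0,
    rms_norm_eps=1e-06,
    bos_token_id=2,
    eos_token_id=1,
    pad_token_id=0,
)

model = Gemma2ForCausalLM(config)  # randomly initialized skeleton, no download needed
n_params = sum(p.numel() for p in model.parameters())
print(n_params)  # roughly 101M, consistent with the ~203 MB bfloat16 model.safetensors (2 bytes/param)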
generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 2,
+   "cache_implementation": "hybrid",
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.46.2"
+ }
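
These are the defaults that model.generate() falls back to when no overrides are passed. A sketch of the equivalent explicit construction with transformers' GenerationConfig:

from transformers import GenerationConfig

# Same defaults as generation_config.json above.
gen_config = GenerationConfig(
    bos_token_id=2,
    eos_token_id=1,
    pad_token_id=0,
    cache_implementation="hybrid",  # Gemma2-style hybrid (sliding-window + global) KV cache
)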
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0631db5154bae4832256de0dae7e2b4949dfbc296d609f36540c8e1b48c43f84
+ size 202632336
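
The weights are stored with Git LFS, so the diff only adds the pointer file above. Once the real model.safetensors has been fetched (for example via git lfs pull or huggingface_hub's hf_hub_download), a quick sketch to verify it against the pointer's oid and size:

import hashlib
import os

path = "model.safetensors"  # assumed local path after download
expected_oid = "0631db5154bae4832256de0dae7e2b4949dfbc296d609f36540c8e1b48c43f84"
expected_size = 202632336

assert os.path.getsize(path) == expected_size
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == expected_oid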
tokenizer/added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<|end_of_turn|>": 50258,
+   "<|start_of_turn|>": 50257
+ }
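
The two turn markers are appended right after GPT-2's <|endoftext|> (id 50256), which is why config.json declares vocab_size 50259. A small check, assuming the tokenizer folder has been downloaded locally:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("tokenizer")  # assumed local path to this folder
print(tok.convert_tokens_to_ids("<|start_of_turn|>"))  # 50257
print(tok.convert_tokens_to_ids("<|end_of_turn|>"))    # 50258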
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|endoftext|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50257": {
+       "content": "<|start_of_turn|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50258": {
+       "content": "<|end_of_turn|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "chat_template": "{{ bos_token }}\n{% for message in messages %}\n {{ '<|start_of_turn|>' + message['role'] + '\\n' + message['content'] | trim + '<|end_of_turn|>\\n' }}\n{% endfor %}\n{% if add_generation_prompt %}\n {{'<|start_of_turn|>assistant\\n'}}\n{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
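
The chat_template above wraps every message in the added turn markers and optionally appends an assistant turn opener. A minimal usage sketch (assuming the tokenizer folder is available locally; the exact whitespace in the rendered string follows the Jinja template literally):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("tokenizer")  # assumed local path to this folder

messages = [{"role": "user", "content": "Hello!"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Roughly: bos, then "<|start_of_turn|>user\nHello!<|end_of_turn|>" for the message,
# then "<|start_of_turn|>assistant\n" because add_generation_prompt=True.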
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff