Update tiny models for Meta-Llama-3-8B

#9
by vprov - opened
Files changed (2)
  1. config.json +1 -1
  2. model.safetensors +2 -2
config.json CHANGED
@@ -15,7 +15,7 @@
  "max_position_embeddings": 256,
  "mlp_bias": false,
  "model_type": "llama",
- "num_attention_heads": 4,
+ "num_attention_heads": 16,
  "num_hidden_layers": 2,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:72127eb2449185249ceb4408125a1601c8e359d7c15226f8f20e7ca2e84a3678
- size 14108992
+ oid sha256:df921e5f26cd540bcd023ca67da18286acefa397586e2c9fe1e48d64136a0bca
+ size 14502208
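
The updated LFS pointer records the re-exported safetensors file (new SHA-256, 14,502,208 bytes). A quick sanity check of the shipped attention projection shapes against the new head counts, assuming a placeholder repo id for the tiny model this PR targets:

from huggingface_hub import hf_hub_download
from safetensors import safe_open

# Placeholder repo id; substitute the actual tiny Meta-Llama-3-8B test repo.
path = hf_hub_download("your-org/tiny-random-Meta-Llama-3-8B", "model.safetensors")

with safe_open(path, framework="pt") as f:
    for name in f.keys():
        if "q_proj" in name or "k_proj" in name:
            # q_proj out_features = num_attention_heads * head_dim (16 heads),
            # k_proj/v_proj out_features = num_key_value_heads * head_dim (8 heads)
            print(name, tuple(f.get_tensor(name).shape))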