Jackmin108 committed
Commit 2ed58a0 · Parent: 0103257

fix: 8 kv heads

Files changed (1):
config.json (+2, -2)
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/data/llama3-real/Meta-Llama-3.1-405B-Instruct",
+  "_name_or_path": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -20,7 +20,7 @@
   "model_type": "llama",
   "num_attention_heads": 128,
   "num_hidden_layers": 126,
-  "num_key_value_heads": 16,
+  "num_key_value_heads": 8,
   "pretraining_tp": 1,
   "quantization_config": {
     "activation_scale_ub": 1200.0,