Pontonkid committed
Commit
4b644f6
1 Parent(s): 44a5a90

Update config.json

Files changed (1):
  config.json  +25 -5
config.json CHANGED
@@ -1,7 +1,27 @@
  { "_name_or_path": "ybelkada/falcon-7b-sharded-bf16",
- "alibi": false, "apply_residual_connection_post_layernorm": false, "architectures": [ "FalconForCausalLM" ],
- "attention_dropout": 0.0, "bias": false, "bos_token_id": 11, "eos_token_id": 11, "hidden_dropout": 0.0, "hidden_size": 4544, "initializer_range": 0.02,
- "layer_norm_epsilon": 1e-05, "max_position_embeddings": 2048, "model_type": "falcon", "multi_query": true, "n_head": 71, "n_layer": 32, "new_decoder_architecture": false, "num_attention_heads": 71, "num_hidden_layers": 32, "num_kv_heads": 71, "parallel_attn": true,
- "quantization_config": { "bnb_4bit_compute_dtype": "bfloat16", "bnb_4bit_quant_type": "nf4", "bnb_4bit_use_double_quant": true, "llm_int8_enable_fp32_cpu_offload": false, "llm_int8_has_fp16_weight": false, "llm_int8_skip_modules": null, "llm_int8_threshold": 6.0,
- "load_in_4bit": true, "load_in_8bit": false, "quant_method": "bitsandbytes" }, "rope_scaling": null, "rope_theta": 10000.0, "torch_dtype": "bfloat16", "transformers_version": "4.35.2",
+
+ "alibi": false, "apply_residual_connection_post_layernorm": false,
+
+ "architectures": [ "FalconForCausalLM" ],
+
+ "attention_dropout": 0.0, "bias": false, "bos_token_id": 11,
+
+ "eos_token_id": 11, "hidden_dropout": 0.0, "hidden_size": 4544, "initializer_range": 0.02,
+
+ "layer_norm_epsilon": 1e-05, "max_position_embeddings": 2048, "model_type": "falcon",
+
+ "multi_query": true, "n_head": 71, "n_layer": 32, "new_decoder_architecture": false, "num_attention_heads": 71,
+
+ "num_hidden_layers": 32, "num_kv_heads": 71, "parallel_attn": true,
+
+ "quantization_config": { "bnb_4bit_compute_dtype": "bfloat16", "bnb_4bit_quant_type": "nf4",
+
+ "bnb_4bit_use_double_quant": true, "llm_int8_enable_fp32_cpu_offload": false, "llm_int8_has_fp16_weight": false,
+
+ "llm_int8_skip_modules": null, "llm_int8_threshold": 6.0,
+
+ "load_in_4bit": true, "load_in_8bit": false, "quant_method": "bitsandbytes" },
+
+ "rope_scaling": null, "rope_theta": 10000.0, "torch_dtype": "bfloat16", "transformers_version": "4.35.2",
+
  "use_cache": false, "vocab_size": 65024 }