Or4cl3-1 committed
Commit adbd668
1 Parent(s): dcc7dc1

Update config.json

Files changed (1)
  1. config.json +23 -23
config.json CHANGED
@@ -1,26 +1,26 @@
 {
-  "model_name": "cognitiv-agent_1",
-  "model_type": "gemma-agent-hybrid", // A more descriptive type
-  "base_model": "Or4cl3-1/Cognitive-Agent-Gemma_7b",
-  "merged_models": [
-    "Or4cl3-1/Cognitive-Agent-Gemma_7b",
-    "Or4cl3-1/agent_gemma_7b"
-  ],
-  "slices": [
-    {
-      "sources": [
-        {"model": "Or4cl3-1/Cognitive-Agent-Gemma_7b", "layer_range": [0, 62]},
-        {"model": "Or4cl3-1/agent_gemma_7b", "layer_range": [0, 62]}
-      ],
-      "merge_method": "slerp"
-    }
+  "_name_or_path": "mistralai/Mistral-7B-v0.1",
+  "architectures": [
+    "MistralForCausalLM"
   ],
-  "parameters": {
-    "t": [
-      {"filter": "self_attn", "value": [0, 0.5, 0.3, 0.7, 1]},
-      {"filter": "mlp", "value": [1, 0.5, 0.7, 0.3, 0]},
-      {"value": 0.5}
-    ],
-    "dtype": "bfloat16"
-  }
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 32768,
+  "model_type": "mistral",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 10000.0,
+  "sliding_window": 4096,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.36.2",
+  "use_cache": true,
+  "vocab_size": 32000
 }
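
Note: the removed keys (model_name, merged_models, slices, merge_method, parameters.t) read like a merge recipe for a SLERP layer merge rather than a transformers model config, while the replacement is a standard config for a MistralForCausalLM checkpoint derived from mistralai/Mistral-7B-v0.1. As a minimal sketch, not part of this commit, of how a repository carrying the updated config.json would typically be consumed with the transformers library (the repo id below is a placeholder, not taken from the commit):

# Sketch only: assumes the repository hosts the updated config.json plus model weights.
import torch
from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "Or4cl3-1/cognitiv-agent_1"  # placeholder repo id, substitute the actual repository

# AutoConfig reads config.json; with the updated file, model_type "mistral"
# resolves to a MistralConfig (32 layers, hidden_size 4096, vocab_size 32000, ...).
config = AutoConfig.from_pretrained(repo_id)
print(config.model_type, config.num_hidden_layers, config.hidden_size)

# torch_dtype here mirrors the "torch_dtype": "float16" entry in the new config.json.
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.float16)

With the previous file, the same call would fail or misbehave, since keys such as "slices" and "merge_method" are not part of a transformers config and the file lacked the architecture fields (model_type, hidden_size, num_hidden_layers) that AutoConfig needs.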