Macropodus committed on
Commit 0f28481
1 Parent(s): 5821e73
lora_baichuan_model_sft_gpt4forall/adapter_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "base_model_name_or_path": "baichuan-inc/baichuan-7B",
+   "bias": "none",
+   "enable_lora": null,
+   "fan_in_fan_out": false,
+   "inference_mode": false,
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "merge_weights": false,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "target_modules": [
+     "W_pack",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
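As a sketch of how a PEFT LoRA adapter with this config is typically loaded (the dtype choice and local adapter path usage below are assumptions, not part of the commit):

```python
# Sketch: loading this LoRA adapter onto its base model with peft.
# Assumes peft and transformers are installed; dtype/device are illustrative.
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "baichuan-inc/baichuan-7B",      # matches base_model_name_or_path above
    torch_dtype=torch.float16,       # assumption: half precision for inference
    trust_remote_code=True,          # baichuan ships custom modeling code
)
model = PeftModel.from_pretrained(base, "lora_baichuan_model_sft_gpt4forall")
model.eval()
```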
lora_baichuan_model_sft_gpt4forall/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5614e4cd1c0752a98ac38bd78e7b06fb3a122f3cd5f6a826b8db9dac524b2713
+ size 25209869
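The 25,209,869-byte size is consistent with the config above; a back-of-the-envelope check, assuming baichuan-7B's usual shapes (hidden size 4096, 32 layers, W_pack projecting 4096 to 12288 for packed QKV):

```python
# Rough size check for the adapter (the shapes are assumptions about baichuan-7B).
r, hidden, layers = 8, 4096, 32
w_pack = r * hidden + (3 * hidden) * r   # lora_A + lora_B for the packed QKV proj
o_proj = r * hidden + hidden * r         # lora_A + lora_B for o_proj
params = layers * (w_pack + o_proj)      # 6,291,456 trainable parameters
print(params * 4)                        # 25,165,824 bytes at fp32; the remaining
                                         # ~44 KB is serialization metadata
```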
lora_baichuan_model_sft_gpt4forall/events.out.tfevents.1687509439 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d934e81867019f1866d3f18d82820c1eead01aac71f7add8e170340f2a3b3b40
+ size 1532072
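The tfevents file is the TensorBoard training log for this run; one way to inspect it programmatically (which scalar tags were logged is not visible from the commit, so the code lists them rather than guessing):

```python
# Sketch: reading the training log with TensorBoard's event reader.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("lora_baichuan_model_sft_gpt4forall")
ea.Reload()
tags = ea.Tags()["scalars"]              # scalar tags actually present in the log
print(tags)
for ev in ea.Scalars(tags[0]):           # first tag; its name is unknown here
    print(ev.step, ev.value)
```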
lora_bloomz_model_sft_gpt4forall/adapter_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "base_model_name_or_path": "bigscience/bloomz-7b1-mt",
+   "bias": "none",
+   "enable_lora": null,
+   "fan_in_fan_out": true,
+   "inference_mode": false,
+   "lora_alpha": 32,
+   "lora_dropout": 0.1,
+   "merge_weights": false,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "target_modules": [
+     "query_key_value"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
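The same settings can be expressed with peft's LoraConfig; a sketch (note the JSON above was likely written by an older peft release, which is why it carries fields like enable_lora and merge_weights that current LoraConfig no longer exposes):

```python
# Sketch: the bloomz config above expressed as a peft LoraConfig.
from peft import LoraConfig, TaskType

config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=8,
    lora_alpha=32,                       # effective scaling = lora_alpha / r = 4
    lora_dropout=0.1,
    target_modules=["query_key_value"],  # BLOOM's fused attention projection
    fan_in_fan_out=True,
    bias="none",
)
```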
lora_bloomz_model_sft_gpt4forall/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8c2d1ba8b3ae8c417f329389c90c3bdc073c164eddcabaa4ca34e940a4312b5
+ size 15750245
lora_chatglm_model_sft_gpt4forall/adapter_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "base_model_name_or_path": "THUDM/chatglm-6b",
+   "bias": "none",
+   "enable_lora": null,
+   "fan_in_fan_out": true,
+   "inference_mode": false,
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "merge_weights": false,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "target_modules": [
+     "query_key_value"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
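Since merge_weights is false, the LoRA deltas stay separate at load time; a sketch of folding them into the base weights afterwards for adapter-free serving (merge_and_unload is peft's standard call for this; the half-precision cast and output path are assumptions):

```python
# Sketch: merging this adapter into THUDM/chatglm-6b.
from transformers import AutoModel
from peft import PeftModel

base = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half()
model = PeftModel.from_pretrained(base, "lora_chatglm_model_sft_gpt4forall")
merged = model.merge_and_unload()        # folds the B @ A deltas into base weights
merged.save_pretrained("chatglm-6b-gpt4forall-merged")  # hypothetical output path
```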
lora_chatglm_model_sft_gpt4forall/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ced81c2c5d7f707aa2c242341eaf8dbac932358f26ef1a2fd89f276073aecb7f
+ size 14700185
lora_chatglm_model_sft_mwp/adapter_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "base_model_name_or_path": "THUDM/chatglm-6b",
+   "bias": "none",
+   "enable_lora": null,
+   "fan_in_fan_out": true,
+   "inference_mode": false,
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "merge_weights": false,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "target_modules": [
+     "query_key_value"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
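With two ChatGLM adapters in this commit (gpt4forall above, mwp here) sharing an identical config, one base model can host both and switch between them; a sketch using peft's multi-adapter API (the adapter names are arbitrary labels chosen for illustration):

```python
# Sketch: hot-swapping the two ChatGLM adapters on a single base model.
from transformers import AutoModel
from peft import PeftModel

base = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half()
model = PeftModel.from_pretrained(
    base, "lora_chatglm_model_sft_gpt4forall", adapter_name="gpt4forall"
)
model.load_adapter("lora_chatglm_model_sft_mwp", adapter_name="mwp")
model.set_adapter("mwp")                 # route forward passes through mwp
```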
lora_chatglm_model_sft_mwp/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed7d8477ac54a25a537ce37b83eaf51ac9a854284b6c04869ac27a0850dc7e75
+ size 14700185
lora_llama_model_sft_gpt4forall/adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "base_model_name_or_path": "eachadea/vicuna-7b-1.1",
+   "bias": "none",
+   "enable_lora": null,
+   "fan_in_fan_out": false,
+   "inference_mode": false,
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "merge_weights": false,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "target_modules": [
+     "q_proj",
+     "v_proj",
+     "k_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
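A sketch of how a config like this is attached for training (get_peft_model and print_trainable_parameters are standard peft calls; the ~6.3 M figure follows from r=8 over three 4096x4096 projections in 32 layers, which is an assumption about the base model's shapes):

```python
# Sketch: recreating this adapter's training setup on the vicuna base model.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, TaskType, get_peft_model

base = AutoModelForCausalLM.from_pretrained("eachadea/vicuna-7b-1.1")
config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=8, lora_alpha=32, lora_dropout=0.05,
    target_modules=["q_proj", "v_proj", "k_proj"],
)
model = get_peft_model(base, config)
model.print_trainable_parameters()   # expect roughly 6.29M trainable parameters
```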
lora_llama_model_sft_gpt4forall/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:afc86c322782f159d9cae4ac62028d5e4ce12a7bbe215f0815d6864d1abb0aa5
+ size 25232013