gerou161 committed
Commit e13a22f · verified · 1 Parent(s): 47260fe

Add files using upload-large-folder tool

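The files below were pushed with the upload-large-folder tool. As a minimal sketch of an equivalent push via the huggingface_hub Python API (the repository id and local folder name here are placeholders, not taken from this commit):

```python
from huggingface_hub import HfApi

api = HfApi()  # assumes you are already authenticated, e.g. via `huggingface-cli login`
api.upload_large_folder(
    repo_id="your-username/your-model-repo",  # placeholder, not the actual repo
    folder_path="checkpoints/",               # local folder holding final_model/, model_15000/, ...
    repo_type="model",
)
```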
final_model/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "hidden_act": "silu",
+   "hidden_size": 1280,
+   "initializer_range": 0.02,
+   "intermediate_size": 4480,
+   "max_position_embeddings": 1024,
+   "model_type": "llama",
+   "num_attention_heads": 20,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 20,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.31.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
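For reference, this configuration implies roughly 0.7B parameters. A quick sanity check (a sketch assuming the standard Llama layout: untied input/output embeddings, no attention or MLP biases) reproduces the ~2.8 GB float32 checkpoint size listed below:

```python
# Rough parameter count implied by final_model/config.json.
hidden, inter, layers, vocab = 1280, 4480, 24, 50257

embeddings = 2 * vocab * hidden        # input embeddings + untied lm_head
attention  = 4 * hidden * hidden       # q/k/v/o projections per layer
mlp        = 3 * hidden * inter        # gate/up/down projections per layer
norms      = 2 * hidden                # two RMSNorms per layer
per_layer  = attention + mlp + norms

total = embeddings + layers * per_layer + hidden  # + final RMSNorm
print(f"{total:,} params, ~{total * 4 / 1e9:.2f} GB in float32")
# -> 698,883,840 params, ~2.80 GB, consistent with the 2,795,621,622-byte .bin files below
```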
final_model/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cc10e6c530d3c1d8a9ae3da506c02ad962dbece3d4eda44404985c48151a767
+ size 2795621622
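Since each folder contains a config.json and a pytorch_model.bin, any of these checkpoints should load directly with transformers. A minimal sketch, assuming the folder has been downloaded locally (note that no tokenizer files are included in this commit, so a matching tokenizer must come from elsewhere):

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM

# Load the final checkpoint from the local folder added in this commit.
config = AutoConfig.from_pretrained("final_model")
model = AutoModelForCausalLM.from_pretrained("final_model", torch_dtype=torch.float32)
model.eval()
print(model.config.model_type, sum(p.numel() for p in model.parameters()))
```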
model_15000/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "hidden_act": "silu",
+   "hidden_size": 1280,
+   "initializer_range": 0.02,
+   "intermediate_size": 4480,
+   "max_position_embeddings": 1024,
+   "model_type": "llama",
+   "num_attention_heads": 20,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 20,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.31.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
model_15000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e56b4bcb41129376ba65c1d6003c1a00ea1ec47efee20327936de8ecf2c2048
+ size 2795621622
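Each pytorch_model.bin entry here is a Git LFS pointer: the repository stores only the spec version, the SHA-256 of the real file, and its size in bytes. After downloading the actual weights, the digest can be checked against the pointer's oid, e.g. for model_15000:

```python
import hashlib

def file_sha256(path, chunk_size=1 << 20):
    """Stream the file in 1 MiB chunks so multi-GB checkpoints fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "7e56b4bcb41129376ba65c1d6003c1a00ea1ec47efee20327936de8ecf2c2048"  # oid from the pointer above
assert file_sha256("model_15000/pytorch_model.bin") == expected
```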
model_16000/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "hidden_act": "silu",
+   "hidden_size": 1280,
+   "initializer_range": 0.02,
+   "intermediate_size": 4480,
+   "max_position_embeddings": 1024,
+   "model_type": "llama",
+   "num_attention_heads": 20,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 20,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.31.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
model_16000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aecbf6fd7b4f64bf15c2d7e0e093749ef09edeb5d95d13441429891cdb3ffcc7
+ size 2795621622
model_17000/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "hidden_act": "silu",
+   "hidden_size": 1280,
+   "initializer_range": 0.02,
+   "intermediate_size": 4480,
+   "max_position_embeddings": 1024,
+   "model_type": "llama",
+   "num_attention_heads": 20,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 20,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.31.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
model_17000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5585358d75ec9c23ec6f6396715355ee0ef9f1e53263c462350292ab691bd4a9
+ size 2795621622
model_18000/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "hidden_act": "silu",
+   "hidden_size": 1280,
+   "initializer_range": 0.02,
+   "intermediate_size": 4480,
+   "max_position_embeddings": 1024,
+   "model_type": "llama",
+   "num_attention_heads": 20,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 20,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.31.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
model_18000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:572d810846b5b8c3cddab92e194c9bfedd3fa9d505bd445c19bcbefe8d1313cc
+ size 2795621622
model_19000/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "hidden_act": "silu",
+   "hidden_size": 1280,
+   "initializer_range": 0.02,
+   "intermediate_size": 4480,
+   "max_position_embeddings": 1024,
+   "model_type": "llama",
+   "num_attention_heads": 20,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 20,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.31.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
model_19000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3854012ef548607de39d6c81bf3b8351cb2dec526e2c7b7f80c25c99ba3b306
+ size 2795621622