ajaymin28 committed on
Commit 1f25d28
1 Parent(s): 61c01cb

Delete llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500

Files changed (30)
  1. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/added_tokens.json +0 -6
  2. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/config.json +0 -221
  3. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/generation_config.json +0 -14
  4. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +0 -3
  5. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +0 -3
  6. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +0 -3
  7. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +0 -3
  8. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/zero_pp_rank_0_mp_rank_00_model_states.pt +0 -3
  9. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/zero_pp_rank_1_mp_rank_00_model_states.pt +0 -3
  10. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/zero_pp_rank_2_mp_rank_00_model_states.pt +0 -3
  11. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/zero_pp_rank_3_mp_rank_00_model_states.pt +0 -3
  12. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/latest +0 -1
  13. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/merges.txt +0 -0
  14. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/model-00001-of-00004.safetensors +0 -3
  15. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/model-00002-of-00004.safetensors +0 -3
  16. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/model-00003-of-00004.safetensors +0 -3
  17. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/model-00004-of-00004.safetensors +0 -3
  18. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/model.safetensors.index.json +0 -772
  19. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/rng_state_0.pth +0 -3
  20. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/rng_state_1.pth +0 -3
  21. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/rng_state_2.pth +0 -3
  22. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/rng_state_3.pth +0 -3
  23. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/scheduler.pt +0 -3
  24. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/special_tokens_map.json +0 -20
  25. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/tokenizer.json +0 -0
  26. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/tokenizer_config.json +0 -53
  27. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/trainer_state.json +0 -0
  28. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/training_args.bin +0 -3
  29. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/vocab.json +0 -0
  30. llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/zero_to_fp32.py +0 -604
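The 30 deletions above remove the checkpoint-2500 folder in a single commit. As a rough illustration only (the repo id and token below are placeholders, not taken from this repository), such a folder deletion can be performed with the huggingface_hub client:

    # Hypothetical sketch: delete a checkpoint folder from a Hub model repo in one commit.
    from huggingface_hub import HfApi

    api = HfApi(token="hf_xxx")  # placeholder write-enabled token
    api.delete_folder(
        path_in_repo=(
            "llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-"
            "ov_AG_v5_3_split023_fulltune/checkpoint-2500"
        ),
        repo_id="username/repo-name",  # placeholder repo id
        repo_type="model",
        commit_message="Delete checkpoint-2500",
    )

The Hub records this as one commit that drops every file under the folder, which is what the file list above shows.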
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/added_tokens.json DELETED
@@ -1,6 +0,0 @@
- {
-   "<image>": 151646,
-   "<|endoftext|>": 151643,
-   "<|im_end|>": 151645,
-   "<|im_start|>": 151644
- }
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/config.json DELETED
@@ -1,221 +0,0 @@
- {
-   "_name_or_path": "lmms-lab/llava-onevision-qwen2-7b-si",
-   "add_faster_video": false,
-   "add_time_instruction": true,
-   "architectures": [
-     "LlavaQwenForCausalLM"
-   ],
-   "attention_dropout": 0.0,
-   "bos_token_id": 151643,
-   "eos_token_id": 151645,
-   "faster_token_stride": 10,
-   "force_sample": true,
-   "hidden_act": "silu",
-   "hidden_size": 3584,
-   "ignore_index": -100,
-   "image_aspect_ratio": "anyres_max_9",
-   "image_crop_resolution": null,
-   "image_grid_pinpoints": [
-     [384, 384], [384, 768], [384, 1152], [384, 1536], [384, 1920], [384, 2304],
-     [768, 384], [768, 768], [768, 1152], [768, 1536], [768, 1920], [768, 2304],
-     [1152, 384], [1152, 768], [1152, 1152], [1152, 1536], [1152, 1920], [1152, 2304],
-     [1536, 384], [1536, 768], [1536, 1152], [1536, 1536], [1536, 1920], [1536, 2304],
-     [1920, 384], [1920, 768], [1920, 1152], [1920, 1536], [1920, 1920], [1920, 2304],
-     [2304, 384], [2304, 768], [2304, 1152], [2304, 1536], [2304, 1920], [2304, 2304]
-   ],
-   "image_split_resolution": null,
-   "image_token_index": 151646,
-   "initializer_range": 0.02,
-   "intermediate_size": 18944,
-   "max_position_embeddings": 32768,
-   "max_window_layers": 28,
-   "mm_hidden_size": 1152,
-   "mm_newline_position": "grid",
-   "mm_patch_merge_type": "spatial_unpad",
-   "mm_projector_lr": null,
-   "mm_projector_type": "mlp2x_gelu",
-   "mm_resampler_type": null,
-   "mm_spatial_pool_mode": "bilinear",
-   "mm_spatial_pool_stride": 2,
-   "mm_tunable_parts": "mm_vision_tower,mm_mlp_adapter,mm_language_model",
-   "mm_use_im_patch_token": false,
-   "mm_use_im_start_end": false,
-   "mm_vision_select_feature": "patch",
-   "mm_vision_select_layer": -2,
-   "mm_vision_tower": "google/siglip-so400m-patch14-384",
-   "mm_vision_tower_lr": 2e-06,
-   "model_type": "llava",
-   "num_attention_heads": 28,
-   "num_hidden_layers": 28,
-   "num_key_value_heads": 4,
-   "pos_skipping_range": 4096,
-   "projector_hidden_act": "gelu",
-   "rms_norm_eps": 1e-06,
-   "rope_scaling": null,
-   "rope_theta": 1000000.0,
-   "sliding_window": 131072,
-   "text_config": {
-     "model_type": "llama"
-   },
-   "tie_word_embeddings": false,
-   "tokenizer_model_max_length": 32768,
-   "tokenizer_padding_side": "right",
-   "torch_dtype": "bfloat16",
-   "transformers_version": "4.40.0.dev0",
-   "use_cache": false,
-   "use_mm_proj": true,
-   "use_pos_skipping": false,
-   "use_sliding_window": false,
-   "vision_config": {
-     "hidden_size": 1024,
-     "image_size": 336,
-     "intermediate_size": 4096,
-     "model_type": "clip_vision_model",
-     "num_attention_heads": 16,
-     "num_hidden_layers": 24,
-     "patch_size": 14,
-     "projection_dim": 768,
-     "vocab_size": 32000
-   },
-   "vision_feature_layer": -2,
-   "vision_feature_select_strategy": "default",
-   "vision_tower_pretrained": null
- }
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/generation_config.json DELETED
@@ -1,14 +0,0 @@
- {
-   "bos_token_id": 151643,
-   "do_sample": true,
-   "eos_token_id": [
-     151645,
-     151643
-   ],
-   "pad_token_id": 151643,
-   "repetition_penalty": 1.05,
-   "temperature": 0.7,
-   "top_k": 20,
-   "top_p": 0.8,
-   "transformers_version": "4.40.0.dev0"
- }
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b907087c7236211452b84f8f9ca6df03f45a1c369c24429251cd2a9a6301b614
- size 24091055846
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:78abb3c04264b99dbfc7b9f7819204fe8800d794700fbd2a23fa555c9490f4d4
- size 24091055846
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9a94007ad2a45f7522e98edf4cd7bba88fa40fb6ccc06534b57979adf4c2b439
- size 24091055846
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:f2d10a9a24e07b23935a919c3be6f835de7b2f4509e644c902b12e55d27f395f
- size 24091055846
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/zero_pp_rank_0_mp_rank_00_model_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6cb78c87ca325d6c30238b1c10790ac9525efd0975ddab01716cf4b96527a33b
- size 419351
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/zero_pp_rank_1_mp_rank_00_model_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b604565d7ef735dcd1fe53e6cd337f405c0bbab98a65139ec42577be32317a87
- size 419351
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/zero_pp_rank_2_mp_rank_00_model_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c0228ec2cdc819fb46fa1207ae123b82ecf9917633c82511bc0e9407bc5f5e01
- size 419351
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/global_step2500/zero_pp_rank_3_mp_rank_00_model_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:62f7afd84fdcc79738e191fc4676ed0ec24fb5ac2c3e1bfd49e08010ebedc1a2
- size 419351
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/latest DELETED
@@ -1 +0,0 @@
- global_step2500
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/merges.txt DELETED
The diff for this file is too large to render. See raw diff
 
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/model-00001-of-00004.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d0227c664d147589418ecc33161e885422be38a6314bb2a2bcb2b4cf05f0b23c
- size 4877668032
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/model-00002-of-00004.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b190799bc2500e485b11fb2edf2834498adb50c400a9f4345e8eea6b4a047ae8
- size 4932751008
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/model-00003-of-00004.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9d87e0bd652ce2b77650161a556282c02fc6fe9d6fe4306e238b398d9c84bc14
- size 4994571904
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/model-00004-of-00004.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:280869c7cec87417a1312d300b9c8e37dd2973cb0ac1a86f105d3cf3955fce27
- size 1255812224
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/model.safetensors.index.json DELETED
@@ -1,772 +0,0 @@
- {
-   "metadata": {
-     "total_size": 16060697664
-   },
-   "weight_map": {
-     "lm_head.weight": "model-00004-of-00004.safetensors",
-     "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
-     "model.image_newline": "model-00001-of-00004.safetensors",
The remaining weight_map entries (as far as the diff is rendered before it is cut off) assign every tensor of model.layers.0-7, plus the layer-8 self_attn q/k/v/o projections, to model-00001-of-00004.safetensors; the rest of layer 8 (layernorms and MLP), all of layers 9-17, and the layer-18 self_attn, mlp.gate_proj, and mlp.up_proj tensors to model-00002-of-00004.safetensors; the rest of layer 18 (layernorms and mlp.down_proj), all of layers 19-27, model.norm.weight, and the model.vision_tower.* tensors (embeddings and the encoder layers shown up to layer 16) to model-00003-of-00004.safetensors; and the model.mm_projector.0/.2 weights and biases to model-00004-of-00004.safetensors.
485
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00003-of-00004.safetensors",
486
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00003-of-00004.safetensors",
487
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00003-of-00004.safetensors",
488
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00003-of-00004.safetensors",
489
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
490
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
491
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
492
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
493
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
494
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
495
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
496
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
497
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00003-of-00004.safetensors",
498
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00003-of-00004.safetensors",
499
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00003-of-00004.safetensors",
500
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00003-of-00004.safetensors",
501
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00003-of-00004.safetensors",
502
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00003-of-00004.safetensors",
503
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00003-of-00004.safetensors",
504
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00003-of-00004.safetensors",
505
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
506
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
507
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
508
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
509
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
510
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
511
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
512
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
513
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00003-of-00004.safetensors",
514
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00003-of-00004.safetensors",
515
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00003-of-00004.safetensors",
516
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00003-of-00004.safetensors",
517
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00003-of-00004.safetensors",
518
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00003-of-00004.safetensors",
519
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00003-of-00004.safetensors",
520
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00003-of-00004.safetensors",
521
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
522
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
523
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
524
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
525
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
526
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
527
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
528
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
529
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00003-of-00004.safetensors",
530
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00003-of-00004.safetensors",
531
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00003-of-00004.safetensors",
532
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00003-of-00004.safetensors",
533
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00003-of-00004.safetensors",
534
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00003-of-00004.safetensors",
535
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00003-of-00004.safetensors",
536
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00003-of-00004.safetensors",
537
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
538
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
539
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
540
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
541
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
542
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
543
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
544
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
545
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00003-of-00004.safetensors",
546
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00003-of-00004.safetensors",
547
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00003-of-00004.safetensors",
548
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00003-of-00004.safetensors",
549
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00003-of-00004.safetensors",
550
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00003-of-00004.safetensors",
551
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00003-of-00004.safetensors",
552
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00003-of-00004.safetensors",
553
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
554
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
555
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
556
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
557
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
558
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
559
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
560
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
561
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00003-of-00004.safetensors",
562
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00003-of-00004.safetensors",
563
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00003-of-00004.safetensors",
564
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00003-of-00004.safetensors",
565
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00003-of-00004.safetensors",
566
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00003-of-00004.safetensors",
567
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00003-of-00004.safetensors",
568
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00003-of-00004.safetensors",
569
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
570
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
571
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
572
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
573
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
574
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
575
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
576
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
577
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00003-of-00004.safetensors",
578
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00003-of-00004.safetensors",
579
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00004-of-00004.safetensors",
580
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00004-of-00004.safetensors",
581
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00003-of-00004.safetensors",
582
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00003-of-00004.safetensors",
583
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00004-of-00004.safetensors",
584
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00004-of-00004.safetensors",
585
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
586
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
587
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
588
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
589
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
590
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
591
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
592
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
593
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00004-of-00004.safetensors",
594
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00004-of-00004.safetensors",
595
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00004-of-00004.safetensors",
596
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00004-of-00004.safetensors",
597
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00004-of-00004.safetensors",
598
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00004-of-00004.safetensors",
599
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00004-of-00004.safetensors",
600
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00004-of-00004.safetensors",
601
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
602
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
603
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
604
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
605
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
606
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
607
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
608
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
609
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00004-of-00004.safetensors",
610
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00004-of-00004.safetensors",
611
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00004-of-00004.safetensors",
612
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00004-of-00004.safetensors",
613
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00004-of-00004.safetensors",
614
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00004-of-00004.safetensors",
615
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00004-of-00004.safetensors",
616
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00004-of-00004.safetensors",
617
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
618
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
619
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
620
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
621
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
622
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
623
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
624
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
625
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.layer_norm1.bias": "model-00004-of-00004.safetensors",
626
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.layer_norm1.weight": "model-00004-of-00004.safetensors",
627
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.layer_norm2.bias": "model-00004-of-00004.safetensors",
628
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.layer_norm2.weight": "model-00004-of-00004.safetensors",
629
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.mlp.fc1.bias": "model-00004-of-00004.safetensors",
630
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.mlp.fc1.weight": "model-00004-of-00004.safetensors",
631
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.mlp.fc2.bias": "model-00004-of-00004.safetensors",
632
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.mlp.fc2.weight": "model-00004-of-00004.safetensors",
633
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
634
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
635
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
636
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
637
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
638
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
639
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
640
- "model.vision_tower.vision_tower.vision_model.encoder.layers.24.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
641
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.layer_norm1.bias": "model-00004-of-00004.safetensors",
642
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.layer_norm1.weight": "model-00004-of-00004.safetensors",
643
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.layer_norm2.bias": "model-00004-of-00004.safetensors",
644
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.layer_norm2.weight": "model-00004-of-00004.safetensors",
645
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.mlp.fc1.bias": "model-00004-of-00004.safetensors",
646
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.mlp.fc1.weight": "model-00004-of-00004.safetensors",
647
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.mlp.fc2.bias": "model-00004-of-00004.safetensors",
648
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.mlp.fc2.weight": "model-00004-of-00004.safetensors",
649
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
650
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
651
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
652
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
653
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
654
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
655
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
656
- "model.vision_tower.vision_tower.vision_model.encoder.layers.25.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
657
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00003-of-00004.safetensors",
658
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00003-of-00004.safetensors",
659
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00003-of-00004.safetensors",
660
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00003-of-00004.safetensors",
661
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00003-of-00004.safetensors",
662
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00003-of-00004.safetensors",
663
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00003-of-00004.safetensors",
664
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00003-of-00004.safetensors",
665
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
666
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
667
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
668
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
669
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
670
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
671
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
672
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
673
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00003-of-00004.safetensors",
674
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00003-of-00004.safetensors",
675
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00003-of-00004.safetensors",
676
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00003-of-00004.safetensors",
677
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00003-of-00004.safetensors",
678
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00003-of-00004.safetensors",
679
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00003-of-00004.safetensors",
680
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00003-of-00004.safetensors",
681
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
682
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
683
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
684
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
685
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
686
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
687
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
688
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
689
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00003-of-00004.safetensors",
690
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00003-of-00004.safetensors",
691
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00003-of-00004.safetensors",
692
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00003-of-00004.safetensors",
693
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00003-of-00004.safetensors",
694
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00003-of-00004.safetensors",
695
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00003-of-00004.safetensors",
696
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00003-of-00004.safetensors",
697
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
698
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
699
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
700
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
701
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
702
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
703
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
704
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
705
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00003-of-00004.safetensors",
706
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00003-of-00004.safetensors",
707
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00003-of-00004.safetensors",
708
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00003-of-00004.safetensors",
709
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00003-of-00004.safetensors",
710
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00003-of-00004.safetensors",
711
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00003-of-00004.safetensors",
712
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00003-of-00004.safetensors",
713
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
714
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
715
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
716
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
717
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
718
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
719
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
720
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
721
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00003-of-00004.safetensors",
722
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00003-of-00004.safetensors",
723
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00003-of-00004.safetensors",
724
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00003-of-00004.safetensors",
725
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00003-of-00004.safetensors",
726
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00003-of-00004.safetensors",
727
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00003-of-00004.safetensors",
728
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00003-of-00004.safetensors",
729
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
730
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
731
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
732
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
733
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
734
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
735
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
736
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
737
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00003-of-00004.safetensors",
738
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00003-of-00004.safetensors",
739
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00003-of-00004.safetensors",
740
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00003-of-00004.safetensors",
741
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00003-of-00004.safetensors",
742
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00003-of-00004.safetensors",
743
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00003-of-00004.safetensors",
744
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00003-of-00004.safetensors",
745
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
746
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
747
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
748
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
749
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
750
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
751
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
752
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
753
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00003-of-00004.safetensors",
754
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00003-of-00004.safetensors",
755
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00003-of-00004.safetensors",
756
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00003-of-00004.safetensors",
757
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00003-of-00004.safetensors",
758
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00003-of-00004.safetensors",
759
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00003-of-00004.safetensors",
760
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00003-of-00004.safetensors",
761
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
762
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
763
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
764
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
765
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
766
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
767
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
768
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
769
- "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "model-00004-of-00004.safetensors",
770
- "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "model-00004-of-00004.safetensors"
771
- }
772
- }
 
 
 
 
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/rng_state_0.pth DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:92cc13315f24c28015d695b6cde08bb1cd6fea4cbc435998485ed6fbe4c91285
3
- size 15024
 
 
 
 
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/rng_state_1.pth DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:f4c154b6a63e0b1f98f7d2847944398f99f1657d35e8eddf7fdf0ae2c24b0552
3
- size 15024
 
 
 
 
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/rng_state_2.pth DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:f784c6a9507b51189f2caffbd178ea9882103b75852e31c15f47fdae6a43af1d
3
- size 15024
 
 
 
 
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/rng_state_3.pth DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:34b023e05bc2d12b91dc436d4922b990d50ec8dc56d40dc3e36b3bb34fc81341
3
- size 15024
 
 
 
 
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/scheduler.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:44318e4e5c5027b884cfececcc487f96a157c77de4807e85c07e98cb01ec9bfe
3
- size 1064
 
 
 
 
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/special_tokens_map.json DELETED
@@ -1,20 +0,0 @@
1
- {
2
- "additional_special_tokens": [
3
- "<|im_start|>",
4
- "<|im_end|>"
5
- ],
6
- "eos_token": {
7
- "content": "<|im_end|>",
8
- "lstrip": false,
9
- "normalized": false,
10
- "rstrip": false,
11
- "single_word": false
12
- },
13
- "pad_token": {
14
- "content": "<|endoftext|>",
15
- "lstrip": false,
16
- "normalized": false,
17
- "rstrip": false,
18
- "single_word": false
19
- }
20
- }
 
 
 
 
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/tokenizer_config.json DELETED
@@ -1,53 +0,0 @@
1
- {
2
- "add_prefix_space": false,
3
- "added_tokens_decoder": {
4
- "151643": {
5
- "content": "<|endoftext|>",
6
- "lstrip": false,
7
- "normalized": false,
8
- "rstrip": false,
9
- "single_word": false,
10
- "special": true
11
- },
12
- "151644": {
13
- "content": "<|im_start|>",
14
- "lstrip": false,
15
- "normalized": false,
16
- "rstrip": false,
17
- "single_word": false,
18
- "special": true
19
- },
20
- "151645": {
21
- "content": "<|im_end|>",
22
- "lstrip": false,
23
- "normalized": false,
24
- "rstrip": false,
25
- "single_word": false,
26
- "special": true
27
- },
28
- "151646": {
29
- "content": "<image>",
30
- "lstrip": false,
31
- "normalized": false,
32
- "rstrip": false,
33
- "single_word": false,
34
- "special": true
35
- }
36
- },
37
- "additional_special_tokens": [
38
- "<|im_start|>",
39
- "<|im_end|>"
40
- ],
41
- "bos_token": null,
42
- "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
43
- "clean_up_tokenization_spaces": false,
44
- "eos_token": "<|im_end|>",
45
- "errors": "replace",
46
- "model_max_length": 32768,
47
- "pad_token": "<|endoftext|>",
48
- "padding_side": "right",
49
- "processor_class": "LlavaProcessor",
50
- "split_special_tokens": false,
51
- "tokenizer_class": "Qwen2Tokenizer",
52
- "unk_token": null
53
- }
 
 
 
 
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/trainer_state.json DELETED
The diff for this file is too large to render. See raw diff
 
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/training_args.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:a6f14f1aea2b39bb85650931642a5e57366be81a0b5f9bee5672af7ea86cee97
3
- size 7864
 
 
 
 
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/vocab.json DELETED
The diff for this file is too large to render. See raw diff
 
llavanext-google_siglip-so400m-patch14-384-Qwen_Qwen2-7B-Instruct-ov_AG_v5_3_split023_fulltune/checkpoint-2500/zero_to_fp32.py DELETED
@@ -1,604 +0,0 @@
1
- #!/usr/bin/env python
2
-
3
- # Copyright (c) Microsoft Corporation.
4
- # SPDX-License-Identifier: Apache-2.0
5
-
6
- # DeepSpeed Team
7
-
8
- # This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
9
- # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
- # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
- # application.
12
- #
13
- # example: python zero_to_fp32.py . pytorch_model.bin
14
-
15
- import argparse
16
- import torch
17
- import glob
18
- import math
19
- import os
20
- import re
21
- from collections import OrderedDict
22
- from dataclasses import dataclass
23
-
24
- # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
25
- # DeepSpeed data structures it has to be available in the current python environment.
26
- from deepspeed.utils import logger
27
- from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
28
- FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
29
- FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
30
-
31
-
32
- @dataclass
33
- class zero_model_state:
34
- buffers: dict()
35
- param_shapes: dict()
36
- shared_params: list
37
- ds_version: int
38
- frozen_param_shapes: dict()
39
- frozen_param_fragments: dict()
40
-
41
-
42
- debug = 0
43
-
44
- # load to cpu
45
- device = torch.device('cpu')
46
-
47
-
48
- def atoi(text):
49
- return int(text) if text.isdigit() else text
50
-
51
-
52
- def natural_keys(text):
53
- '''
54
- alist.sort(key=natural_keys) sorts in human order
55
- http://nedbatchelder.com/blog/200712/human_sorting.html
56
- (See Toothy's implementation in the comments)
57
- '''
58
- return [atoi(c) for c in re.split(r'(\d+)', text)]
59
-
60
-
61
- def get_model_state_file(checkpoint_dir, zero_stage):
62
- if not os.path.isdir(checkpoint_dir):
63
- raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
64
-
65
- # there should be only one file
66
- if zero_stage <= 2:
67
- file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
68
- elif zero_stage == 3:
69
- file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
70
-
71
- if not os.path.exists(file):
72
- raise FileNotFoundError(f"can't find model states file at '{file}'")
73
-
74
- return file
75
-
76
-
77
- def get_checkpoint_files(checkpoint_dir, glob_pattern):
78
- # XXX: need to test that this simple glob rule works for multi-node setup too
79
- ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
80
-
81
- if len(ckpt_files) == 0:
82
- raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
83
-
84
- return ckpt_files
85
-
86
-
87
- def get_optim_files(checkpoint_dir):
88
- return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
89
-
90
-
91
- def get_model_state_files(checkpoint_dir):
92
- return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
93
-
94
-
95
- def parse_model_states(files):
96
- zero_model_states = []
97
- for file in files:
98
- state_dict = torch.load(file, map_location=device)
99
-
100
- if BUFFER_NAMES not in state_dict:
101
- raise ValueError(f"{file} is not a model state checkpoint")
102
- buffer_names = state_dict[BUFFER_NAMES]
103
- if debug:
104
- print("Found buffers:", buffer_names)
105
-
106
- # recover just the buffers while restoring them to fp32 if they were saved in fp16
107
- buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
108
- param_shapes = state_dict[PARAM_SHAPES]
109
-
110
- # collect parameters that are included in param_shapes
111
- param_names = []
112
- for s in param_shapes:
113
- for name in s.keys():
114
- param_names.append(name)
115
-
116
- # update with frozen parameters
117
- frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
118
- if frozen_param_shapes is not None:
119
- if debug:
120
- print(f"Found frozen_param_shapes: {frozen_param_shapes}")
121
- param_names += list(frozen_param_shapes.keys())
122
-
123
- # handle shared params
124
- shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
125
-
126
- ds_version = state_dict.get(DS_VERSION, None)
127
-
128
- frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
129
-
130
- z_model_state = zero_model_state(buffers=buffers,
131
- param_shapes=param_shapes,
132
- shared_params=shared_params,
133
- ds_version=ds_version,
134
- frozen_param_shapes=frozen_param_shapes,
135
- frozen_param_fragments=frozen_param_fragments)
136
- zero_model_states.append(z_model_state)
137
-
138
- return zero_model_states
139
-
140
-
141
- def parse_optim_states(files, ds_checkpoint_dir):
142
-
143
- total_files = len(files)
144
- state_dicts = []
145
- for f in files:
146
- state_dict = torch.load(f, map_location=device)
147
- # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
148
- # and also handle the case where it was already removed by another helper script
149
- state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
150
- state_dicts.append(state_dict)
151
-
152
- if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
153
- raise ValueError(f"{files[0]} is not a zero checkpoint")
154
- zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
155
- world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
156
-
157
- # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
158
- # parameters can be different from data parallelism for non-expert parameters. So we can just
159
- # use the max of the partition_count to get the dp world_size.
160
-
161
- if type(world_size) is list:
162
- world_size = max(world_size)
163
-
164
- if world_size != total_files:
165
- raise ValueError(
166
- f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
167
- "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
168
- )
169
-
170
- # the groups are named differently in each stage
171
- if zero_stage <= 2:
172
- fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
173
- elif zero_stage == 3:
174
- fp32_groups_key = FP32_FLAT_GROUPS
175
- else:
176
- raise ValueError(f"unknown zero stage {zero_stage}")
177
-
178
- if zero_stage <= 2:
179
- fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
180
- elif zero_stage == 3:
181
- # if there is more than one param group, there will be multiple flattened tensors - one
182
- # flattened tensor per group - for simplicity merge them into a single tensor
183
- #
184
- # XXX: could make the script more memory efficient for when there are multiple groups - it
185
- # will require matching the sub-lists of param_shapes for each param group flattened tensor
186
-
187
- fp32_flat_groups = [
188
- torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
189
- ]
190
-
191
- return zero_stage, world_size, fp32_flat_groups
192
-
193
-
194
- def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
195
- """
196
- Returns fp32 state_dict reconstructed from ds checkpoint
197
-
198
- Args:
199
- - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
200
-
201
- """
202
- print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
203
-
204
- optim_files = get_optim_files(ds_checkpoint_dir)
205
- zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
206
- print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
207
-
208
- model_files = get_model_state_files(ds_checkpoint_dir)
209
-
210
- zero_model_states = parse_model_states(model_files)
211
- print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
212
-
213
- if zero_stage <= 2:
214
- return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
215
- exclude_frozen_parameters)
216
- elif zero_stage == 3:
217
- return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
218
- exclude_frozen_parameters)
219
-
220
-
221
- def _zero2_merge_frozen_params(state_dict, zero_model_states):
222
- if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
223
- return
224
-
225
- frozen_param_shapes = zero_model_states[0].frozen_param_shapes
226
- frozen_param_fragments = zero_model_states[0].frozen_param_fragments
227
-
228
- if debug:
229
- num_elem = sum(s.numel() for s in frozen_param_shapes.values())
230
- print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
231
-
232
- wanted_params = len(frozen_param_shapes)
233
- wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
234
- avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
235
- print(f'Frozen params: Have {avail_numel} numels to process.')
236
- print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
237
-
238
- total_params = 0
239
- total_numel = 0
240
- for name, shape in frozen_param_shapes.items():
241
- total_params += 1
242
- unpartitioned_numel = shape.numel()
243
- total_numel += unpartitioned_numel
244
-
245
- state_dict[name] = frozen_param_fragments[name]
246
-
247
- if debug:
248
- print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
249
-
250
- print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
251
-
252
-
253
- def _has_callable(obj, fn):
254
- attr = getattr(obj, fn, None)
255
- return callable(attr)
256
-
257
-
258
-def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
-    param_shapes = zero_model_states[0].param_shapes
-
-    # Reconstruction protocol:
-    #
-    # XXX: document this
-
-    if debug:
-        for i in range(world_size):
-            for j in range(len(fp32_flat_groups[0])):
-                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
-
-    # XXX: memory usage doubles here (zero2)
-    num_param_groups = len(fp32_flat_groups[0])
-    merged_single_partition_of_fp32_groups = []
-    for i in range(num_param_groups):
-        merged_partitions = [sd[i] for sd in fp32_flat_groups]
-        full_single_fp32_vector = torch.cat(merged_partitions, 0)
-        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
-    avail_numel = sum(
-        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
-
-    if debug:
-        wanted_params = sum([len(shapes) for shapes in param_shapes])
-        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
-        # not asserting if there is a mismatch due to possible padding
-        print(f"Have {avail_numel} numels to process.")
-        print(f"Need {wanted_numel} numels in {wanted_params} params.")
-
-    # params
-    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
-    # out-of-core computing solution
-    total_numel = 0
-    total_params = 0
-    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
-        offset = 0
-        avail_numel = full_single_fp32_vector.numel()
-        for name, shape in shapes.items():
-
-            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
-            total_numel += unpartitioned_numel
-            total_params += 1
-
-            if debug:
-                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
-            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
-            offset += unpartitioned_numel
-
-        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
-        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
-        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
-        # live optimizer object, so we are checking that the numbers are within the right range
-        align_to = 2 * world_size
-
-        def zero2_align(x):
-            return align_to * math.ceil(x / align_to)
-
-        if debug:
-            print(f"original offset={offset}, avail_numel={avail_numel}")
-
-        offset = zero2_align(offset)
-        avail_numel = zero2_align(avail_numel)
-
-        if debug:
-            print(f"aligned offset={offset}, avail_numel={avail_numel}")
-
-        # Sanity check
-        if offset != avail_numel:
-            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
-
-    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
-
-
-def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
-                                               exclude_frozen_parameters):
-    state_dict = OrderedDict()
-
-    # buffers
-    buffers = zero_model_states[0].buffers
-    state_dict.update(buffers)
-    if debug:
-        print(f"added {len(buffers)} buffers")
-
-    if not exclude_frozen_parameters:
-        _zero2_merge_frozen_params(state_dict, zero_model_states)
-
-    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
-
-    # recover shared parameters
-    for pair in zero_model_states[0].shared_params:
-        if pair[1] in state_dict:
-            state_dict[pair[0]] = state_dict[pair[1]]
-
-    return state_dict
-
-
-def zero3_partitioned_param_info(unpartitioned_numel, world_size):
-    remainder = unpartitioned_numel % world_size
-    padding_numel = (world_size - remainder) if remainder else 0
-    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
-    return partitioned_numel, padding_numel
-
-
-def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
-    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
-        return
-
-    if debug:
-        for i in range(world_size):
-            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
-            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
-
-    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
-    wanted_params = len(frozen_param_shapes)
-    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
-    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
-    print(f'Frozen params: Have {avail_numel} numels to process.')
-    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
-
-    total_params = 0
-    total_numel = 0
-    for name, shape in zero_model_states[0].frozen_param_shapes.items():
-        total_params += 1
-        unpartitioned_numel = shape.numel()
-        total_numel += unpartitioned_numel
-
-        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
-        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
-
-        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
-
-        if debug:
-            print(
-                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
-            )
-
-    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
-
-
-def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
-    param_shapes = zero_model_states[0].param_shapes
-    avail_numel = fp32_flat_groups[0].numel() * world_size
-    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
-    # param, re-consolidating each param, while dealing with padding if any
-
-    # merge list of dicts, preserving order
-    param_shapes = {k: v for d in param_shapes for k, v in d.items()}
-
-    if debug:
-        for i in range(world_size):
-            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
-
-        wanted_params = len(param_shapes)
-        wanted_numel = sum(shape.numel() for shape in param_shapes.values())
-        # not asserting if there is a mismatch due to possible padding
-        avail_numel = fp32_flat_groups[0].numel() * world_size
-        print(f"Trainable params: Have {avail_numel} numels to process.")
-        print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
-
-    # params
-    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
-    # out-of-core computing solution
-    offset = 0
-    total_numel = 0
-    total_params = 0
-    for name, shape in param_shapes.items():
-
-        unpartitioned_numel = shape.numel()
-        total_numel += unpartitioned_numel
-        total_params += 1
-
-        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
-
-        if debug:
-            print(
-                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
-            )
-
-        # XXX: memory usage doubles here
-        state_dict[name] = torch.cat(
-            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
-            0).narrow(0, 0, unpartitioned_numel).view(shape)
-        offset += partitioned_numel
-
-    offset *= world_size
-
-    # Sanity check
-    if offset != avail_numel:
-        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
-
-    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
-
-
-def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
-                                               exclude_frozen_parameters):
-    state_dict = OrderedDict()
-
-    # buffers
-    buffers = zero_model_states[0].buffers
-    state_dict.update(buffers)
-    if debug:
-        print(f"added {len(buffers)} buffers")
-
-    if not exclude_frozen_parameters:
-        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
-
-    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
-
-    # recover shared parameters
-    for pair in zero_model_states[0].shared_params:
-        if pair[1] in state_dict:
-            state_dict[pair[0]] = state_dict[pair[1]]
-
-    return state_dict
-
-
-def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
-    """
-    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
-    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
-    via a model hub.
-
-    Args:
-        - ``checkpoint_dir``: path to the desired checkpoint folder
-        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
-        - ``exclude_frozen_parameters``: exclude frozen parameters
-
-    Returns:
-        - pytorch ``state_dict``
-
-    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
-    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
-    the checkpoint.
-
-    A typical usage might be ::
-
-        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
-        # do the training and checkpoint saving
-        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
-        model = model.cpu() # move to cpu
-        model.load_state_dict(state_dict)
-        # submit to model hub or save the model to share with others
-
-    In this example the ``model`` will no longer be usable in the deepspeed context of the same
-    application. i.e. you will need to re-initialize the deepspeed engine, since
-    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
-
-    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
-
-    """
-    if tag is None:
-        latest_path = os.path.join(checkpoint_dir, 'latest')
-        if os.path.isfile(latest_path):
-            with open(latest_path, 'r') as fd:
-                tag = fd.read().strip()
-        else:
-            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
-
-    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
-
-    if not os.path.isdir(ds_checkpoint_dir):
-        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
-
-    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
-
-
-def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
-    """
-    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
-    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
-
-    Args:
-        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
-        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
-        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
-        - ``exclude_frozen_parameters``: exclude frozen parameters
-    """
-
-    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
-    print(f"Saving fp32 state dict to {output_file}")
-    torch.save(state_dict, output_file)
-
-
-def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
-    """
-    1. Put the provided model to cpu
-    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
-    3. Load it into the provided model
-
-    Args:
-        - ``model``: the model object to update
-        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
-        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
-
-    Returns:
-        - ``model``: modified model
-
-    Make sure you have plenty of CPU memory available before you call this function. If you don't
-    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
-    conveniently placed for you in the checkpoint folder.
-
-    A typical usage might be ::
-
-        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
-        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
-        # submit to model hub or save the model to share with others
-
-    Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
-    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
-    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
-
-    """
-    logger.info(f"Extracting fp32 weights")
-    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
-
-    logger.info(f"Overwriting model with fp32 weights")
-    model = model.cpu()
-    model.load_state_dict(state_dict, strict=False)
-
-    return model
-
-
-if __name__ == "__main__":
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument("checkpoint_dir",
-                        type=str,
-                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
-    parser.add_argument(
-        "output_file",
-        type=str,
-        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
-    parser.add_argument("-t",
-                        "--tag",
-                        type=str,
-                        default=None,
-                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
-    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
-    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
-    args = parser.parse_args()
-
-    debug = args.debug
-
-    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
-                                               args.output_file,
-                                               tag=args.tag,
-                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
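For reference, the deleted ``zero_to_fp32.py`` helper above is meant to be run offline against the checkpoint folder it is saved in, which is exactly what its ``__main__`` block does via ``convert_zero_checkpoint_to_fp32_state_dict``. A minimal sketch of the equivalent Python call (the paths and tag below are illustrative placeholders, not taken from this repository) ::

    # Consolidate the sharded ZeRO optimizer/model states into a single fp32 state_dict file.
    from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

    convert_zero_checkpoint_to_fp32_state_dict(
        "path/to/checkpoint-folder",    # folder containing the 'latest' file and the global_step* tag folder
        "path/to/pytorch_model.bin",    # consolidated fp32 output file
        tag=None,                       # None -> the tag is read from the 'latest' file
    )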