ryandono committed (verified)
Commit 1317800 · 1 Parent(s): 62d87a5

Upload 11 files

Paligemma 3B Added Tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+ "<image>": 257152
+ }
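
These are the usual Hugging Face sidecar files for a sharded checkpoint: the added-tokens map above registers the <image> token at id 257152, and the index below maps each tensor name to the safetensors shard that stores it. As a minimal sketch (assuming a local checkout of this repo keeps the standard file names added_tokens.json and model.safetensors.index.json; the directory path is a placeholder, not part of this commit), both files can be sanity-checked like this:

# Minimal sketch, not the uploader's code. Paths and file names are assumptions.
import json

from safetensors import safe_open          # pip install safetensors
from transformers import AutoTokenizer     # pip install transformers

repo_dir = "./paligemma-3b"                # placeholder local path to this repo

# The added token should resolve to the id recorded in added_tokens.json.
tokenizer = AutoTokenizer.from_pretrained(repo_dir)
print(tokenizer.convert_tokens_to_ids("<image>"))   # expected: 257152

# The index file records which shard holds each tensor.
with open(f"{repo_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "language_model.model.embed_tokens.weight"
shard = index["weight_map"][name]          # e.g. "model-00001-of-00003.safetensors"
with safe_open(f"{repo_dir}/{shard}", framework="pt") as shard_file:
    print(name, tuple(shard_file.get_tensor(name).shape))
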
Paligemma 3B Index.json ADDED
@@ -0,0 +1,610 @@
+ {
+ "metadata": {
+ "total_size": 11693865920
+ },
+ "weight_map": {
+ "language_model.model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.14.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.15.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.15.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.15.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.15.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.15.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.16.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.16.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.16.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.16.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.16.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.16.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.17.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.17.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.17.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.17.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.17.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.17.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.2.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.2.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.3.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.3.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.3.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.3.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.3.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.3.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.3.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.3.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.3.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.4.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.4.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.4.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.4.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.4.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.4.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.4.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.4.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.4.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.5.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.5.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.5.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.5.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.5.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.5.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.5.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.5.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.5.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.6.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.6.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.6.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.6.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.6.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.6.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.6.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.6.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.6.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.7.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.7.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.7.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.7.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.7.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.7.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.9.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.9.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.9.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.9.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.norm.weight": "model-00003-of-00003.safetensors",
+ "multi_modal_projector.linear.bias": "model-00001-of-00003.safetensors",
+ "multi_modal_projector.linear.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.embeddings.patch_embedding.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.embeddings.position_embedding.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.24.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.25.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.26.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+ "vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00003.safetensors",
567
+ "vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
568
+ "vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
569
+ "vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
570
+ "vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
571
+ "vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
572
+ "vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
573
+ "vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
574
+ "vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
575
+ "vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00001-of-00003.safetensors",
576
+ "vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00001-of-00003.safetensors",
577
+ "vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00001-of-00003.safetensors",
578
+ "vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00001-of-00003.safetensors",
579
+ "vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00003.safetensors",
580
+ "vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00003.safetensors",
581
+ "vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00003.safetensors",
582
+ "vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00003.safetensors",
583
+ "vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
584
+ "vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
585
+ "vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
586
+ "vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
587
+ "vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
588
+ "vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
589
+ "vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
590
+ "vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
591
+ "vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00001-of-00003.safetensors",
592
+ "vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00001-of-00003.safetensors",
593
+ "vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00001-of-00003.safetensors",
594
+ "vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00001-of-00003.safetensors",
595
+ "vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00003.safetensors",
596
+ "vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00003.safetensors",
597
+ "vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00003.safetensors",
598
+ "vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00003.safetensors",
599
+ "vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
600
+ "vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
601
+ "vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
602
+ "vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
603
+ "vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
604
+ "vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
605
+ "vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
606
+ "vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
607
+ "vision_tower.vision_model.post_layernorm.bias": "model-00001-of-00003.safetensors",
608
+ "vision_tower.vision_model.post_layernorm.weight": "model-00001-of-00003.safetensors"
609
+ }
610
+ }
Paligemma 3B PT 224 README.md ADDED
@@ -0,0 +1,845 @@
1
+ ---
2
+ license: gemma
3
+ library_name: transformers
4
+ extra_gated_heading: Access PaliGemma on Hugging Face
5
+ extra_gated_prompt: To access PaliGemma on Hugging Face, you’re required to review
6
+ and agree to Google’s usage license. To do this, please ensure you’re logged-in
7
+ to Hugging Face and click below. Requests are processed immediately.
8
+ extra_gated_button_content: Acknowledge license
9
+ pipeline_tag: image-text-to-text
10
+ ---
11
+ # PaliGemma model card
12
+
13
+ **Model page:** [PaliGemma](https://ai.google.dev/gemma/docs/paligemma)
14
+
15
+ Transformers PaliGemma 3B weights, pre-trained with 224×224 input images and 128-token input/output text sequences. The models are available in float32, bfloat16 and float16 formats for fine-tuning.
16
+
17
+ **Resources and technical documentation:**
18
+
19
+ * [Responsible Generative AI Toolkit](https://ai.google.dev/responsible)
20
+ * [PaliGemma on Kaggle](https://www.kaggle.com/models/google/paligemma)
21
+ * [PaliGemma on Vertex Model Garden](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/363)
22
+
23
+ **Terms of Use:** [Terms](https://ai.google.dev/gemma/terms)
24
+
25
+ **Authors:** Google
26
+
27
+ ## Model information
28
+
29
+ ### Model summary
30
+
31
+ #### Description
32
+
33
+ PaliGemma is a versatile and lightweight vision-language model (VLM) inspired by
34
+ [PaLI-3](https://arxiv.org/abs/2310.09199) and based on open components such as
35
+ the [SigLIP vision model](https://arxiv.org/abs/2303.15343) and the [Gemma
36
+ language model](https://arxiv.org/abs/2403.08295). It takes both image and text
37
+ as input and generates text as output, supporting multiple languages. It is designed for class-leading fine-tuning performance on a wide range of vision-language tasks such as image and short-video captioning, visual question answering, text reading, object detection, and object segmentation.
38
+
39
+ #### Model architecture
40
+
41
+ PaliGemma is the composition of a [Transformer
42
+ decoder](https://arxiv.org/abs/1706.03762) and a [Vision Transformer image
43
+ encoder](https://arxiv.org/abs/2010.11929), with a total of 3 billion
44
+ parameters. The text decoder is initialized from
45
+ [Gemma-2B](https://www.kaggle.com/models/google/gemma). The image encoder is
46
+ initialized from
47
+ [SigLIP-So400m/14](https://colab.research.google.com/github/google-research/big_vision/blob/main/big_vision/configs/proj/image_text/SigLIP_demo.ipynb).
48
+ PaliGemma is trained following the PaLI-3 recipes.
49
+
50
+ #### Inputs and outputs
51
+
52
+ * **Input:** Image and text string, such as a prompt to caption the image, or
53
+ a question.
54
+ * **Output:** Generated text in response to the input, such as a caption of
55
+ the image, an answer to a question, a list of object bounding box
56
+ coordinates, or segmentation codewords.
57
+
58
+ ### Model data
59
+
60
+ #### Pre-train datasets
61
+
62
+ PaliGemma is pre-trained on the following mixture of datasets:
63
+
64
+ * **WebLI:** [WebLI (Web Language Image)](https://arxiv.org/abs/2209.06794) is
65
+ a web-scale multilingual image-text dataset built from the public web. A
66
+ wide range of WebLI splits are used to acquire versatile model capabilities,
67
+ such as visual semantic understanding, object localization,
68
+ visually-situated text understanding, multilinguality, etc.
69
+ * **CC3M-35L:** Curated English image-alt_text pairs from webpages ([Sharma et
70
+ al., 2018](https://aclanthology.org/P18-1238/)). We used the [Google Cloud
71
+ Translation API](https://cloud.google.com/translate) to translate into 34
72
+ additional languages.
73
+ * **VQ²A-CC3M-35L/VQG-CC3M-35L:** A subset of VQ2A-CC3M ([Changpinyo et al.,
74
+ 2022a](https://aclanthology.org/2022.naacl-main.142/)), translated into the
75
+ same additional 34 languages as CC3M-35L, using the [Google Cloud
76
+ Translation API](https://cloud.google.com/translate).
77
+ * **OpenImages:** Detection and object-aware questions and answers
78
+ ([Piergiovanni et al. 2022](https://arxiv.org/abs/2209.04372)) generated by
79
+ handcrafted rules on the [OpenImages dataset].
80
+ * **WIT:** Images and texts collected from Wikipedia ([Srinivasan et al.,
81
+ 2021](https://arxiv.org/abs/2103.01913)).
82
+
83
+ [OpenImages dataset]: https://storage.googleapis.com/openimages/web/factsfigures_v7.html
84
+
85
+ #### Data responsibility filtering
86
+
87
+ The following filters are applied to WebLI, with the goal of training PaliGemma
88
+ on clean data:
89
+
90
+ * **Pornographic image filtering:** This filter removes images deemed to be of
91
+ pornographic nature.
92
+ * **Text safety filtering:** We identify and filter out images that are paired
93
+ with unsafe text. Unsafe text is any text deemed to contain or be about
94
+ CSAI, pornography, vulgarities, or to be otherwise offensive.
95
+ * **Text toxicity filtering:** We further use the [Perspective
96
+ API](https://perspectiveapi.com/) to identify and filter out images that are
97
+ paired with text deemed insulting, obscene, hateful or otherwise toxic.
98
+ * **Text personal information filtering:** We filtered certain personal information and other sensitive data using [Cloud Data Loss Prevention (DLP)
99
+ API](https://cloud.google.com/security/products/dlp) to protect the privacy
100
+ of individuals. Identifiers such as social security numbers and [other sensitive information types] were removed.
101
+ * **Additional methods:** Filtering based on content quality and safety in
102
+ line with our policies and practices.
103
+
104
+ [other sensitive information types]: https://cloud.google.com/sensitive-data-protection/docs/high-sensitivity-infotypes-reference?_gl=1*jg604m*_ga*ODk5MzA3ODQyLjE3MTAzMzQ3NTk.*_ga_WH2QY8WWF5*MTcxMDUxNTkxMS4yLjEuMTcxMDUxNjA2NC4wLjAuMA..&_ga=2.172110058.-899307842.1710334759
105
+
106
+
107
+
108
+ ## How to Use
109
+
110
+ PaliGemma is a single-turn vision language model not meant for conversational use,
111
+ and it works best when fine-tuned to a specific use case.
112
+
113
+ You can configure which task the model will solve by conditioning it with task prefixes,
114
+ such as “detect” or “segment”. The pretrained models were trained in this fashion to imbue
115
+ them with a rich set of capabilities (question answering, captioning, segmentation, etc.).
116
+ However, they are not designed to be used directly, but to be transferred (by fine-tuning)
117
+ to specific tasks using a similar prompt structure. For interactive testing, you can use
118
+ the "mix" family of models, which have been fine-tuned on a mixture of tasks. To see model
119
+ [google/paligemma-3b-mix-448](https://huggingface.co/google/paligemma-3b-mix-448) in action,
120
+ check [this Space that uses the Transformers codebase](https://huggingface.co/spaces/big-vision/paligemma-hf).
121
+
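+ As a quick illustration of the prompt structure described above, the snippet below lists a few task-prefixed prompts of the kind the pretrained checkpoints were conditioned on. The exact set of prefixes and target languages a given checkpoint responds to depends on its training mixture, so treat these strings as illustrative examples rather than an exhaustive reference.
+ 
+ ```python
+ # Illustrative task-prefixed prompts (examples only, not an exhaustive list).
+ # Each prompt is passed as the `text` argument of the processor together with an image,
+ # as in the code snippets further below.
+ caption_prompt = "caption en"                    # short caption in English
+ spanish_caption = "caption es"                   # caption in Spanish
+ vqa_prompt = "answer en what color is the car?"  # visual question answering
+ detect_prompt = "detect car"                     # bounding boxes for the class "car"
+ segment_prompt = "segment car"                   # segmentation mask for the class "car"
+ ocr_prompt = "ocr"                               # read the text visible in the image
+ ```
+ 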
122
+ Please refer to the [usage and limitations section](#usage-and-limitations) for intended
123
+ use cases, or visit the [blog post](https://huggingface.co/blog/paligemma-google-vlm) for
124
+ additional details and examples.
125
+
126
+ ## Use in Transformers
127
+
128
+ The following snippets use model `google/paligemma-3b-mix-224` for reference purposes.
129
+ The model in the repo you are now browsing may have been trained for other tasks; please
130
+ make sure you use appropriate inputs for the task at hand.
131
+
132
+ ### Running the default precision (`float32`) on CPU
133
+
134
+ ```python
135
+ from transformers import AutoProcessor, PaliGemmaForConditionalGeneration
136
+ from PIL import Image
137
+ import requests
138
+ import torch
139
+
140
+ model_id = "google/paligemma-3b-mix-224"
141
+
142
+ url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
143
+ image = Image.open(requests.get(url, stream=True).raw)
144
+
145
+ model = PaliGemmaForConditionalGeneration.from_pretrained(model_id).eval()
146
+ processor = AutoProcessor.from_pretrained(model_id)
147
+
148
+ # Instruct the model to create a caption in Spanish
149
+ prompt = "caption es"
150
+ model_inputs = processor(text=prompt, images=image, return_tensors="pt")
151
+ input_len = model_inputs["input_ids"].shape[-1]
152
+
153
+ with torch.inference_mode():
154
+ generation = model.generate(**model_inputs, max_new_tokens=100, do_sample=False)
155
+ generation = generation[0][input_len:]
156
+ decoded = processor.decode(generation, skip_special_tokens=True)
157
+ print(decoded)
158
+ ```
159
+
160
+ Output: `Un auto azul estacionado frente a un edificio.`
161
+
162
+ ### Running other precisions on CUDA
163
+
164
+ For convenience, the repos contain revisions of the weights already converted to `bfloat16` and `float16`,
165
+ so you can use them to reduce the download size and avoid casting on your local computer.
166
+
167
+ This is how you'd run `bfloat16` on an NVIDIA CUDA card.
168
+
169
+ ```python
170
+ from transformers import AutoProcessor, PaliGemmaForConditionalGeneration
171
+ from PIL import Image
172
+ import requests
173
+ import torch
174
+
175
+ model_id = "google/paligemma-3b-mix-224"
176
+ device = "cuda:0"
177
+ dtype = torch.bfloat16
178
+
179
+ url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
180
+ image = Image.open(requests.get(url, stream=True).raw)
181
+
182
+ model = PaliGemmaForConditionalGeneration.from_pretrained(
183
+ model_id,
184
+ torch_dtype=dtype,
185
+ device_map=device,
186
+ revision="bfloat16",
187
+ ).eval()
188
+ processor = AutoProcessor.from_pretrained(model_id)
189
+
190
+ # Instruct the model to create a caption in Spanish
191
+ prompt = "caption es"
192
+ model_inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)
193
+ input_len = model_inputs["input_ids"].shape[-1]
194
+
195
+ with torch.inference_mode():
196
+ generation = model.generate(**model_inputs, max_new_tokens=100, do_sample=False)
197
+ generation = generation[0][input_len:]
198
+ decoded = processor.decode(generation, skip_special_tokens=True)
199
+ print(decoded)
200
+ ```
201
+
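+ Running in `float16` follows the same pattern. The sketch below only changes the dtype and the revision name; it assumes the repo exposes a `float16` revision alongside the `bfloat16` one used above, so verify the revision string against the files in the repository you are loading from.
+ 
+ ```python
+ # Minimal sketch of the bfloat16 example adapted to float16 (assumes a `float16` revision exists).
+ import torch
+ from transformers import PaliGemmaForConditionalGeneration
+ 
+ model_id = "google/paligemma-3b-mix-224"
+ 
+ model = PaliGemmaForConditionalGeneration.from_pretrained(
+     model_id,
+     torch_dtype=torch.float16,   # load and run the weights in half precision
+     device_map="cuda:0",
+     revision="float16",          # assumed revision name, mirroring revision="bfloat16" above
+ ).eval()
+ ```
+ 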
202
+ ### Loading in 4-bit / 8-bit
203
+
204
+ You need to install `bitsandbytes` and `accelerate` to run inference using 8-bit or 4-bit precision:
205
+
206
+ ```
207
+ pip install bitsandbytes accelerate
208
+ ```
209
+
210
+ ```python
211
+ from transformers import AutoProcessor, BitsAndBytesConfig, PaliGemmaForConditionalGeneration
212
+ from PIL import Image
213
+ import requests
214
+ import torch
215
+
216
+ model_id = "google/paligemma-3b-mix-224"
217
+ device = "cuda:0"
218
+ dtype = torch.bfloat16
219
+
220
+ url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
221
+ image = Image.open(requests.get(url, stream=True).raw)
222
+
223
+ quantization_config = BitsAndBytesConfig(load_in_8bit=True)
224
+
225
+ model = PaliGemmaForConditionalGeneration.from_pretrained(
226
+ model_id, quantization_config=quantization_config
227
+ ).eval()
228
+ processor = AutoProcessor.from_pretrained(model_id)
229
+
230
+ # Instruct the model to create a caption in Spanish
231
+ prompt = "caption es"
232
+ model_inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)
233
+ input_len = model_inputs["input_ids"].shape[-1]
234
+
235
+ with torch.inference_mode():
236
+ generation = model.generate(**model_inputs, max_new_tokens=100, do_sample=False)
237
+ generation = generation[0][input_len:]
238
+ decoded = processor.decode(generation, skip_special_tokens=True)
239
+ print(decoded)
240
+ ```
241
+
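+ The heading above also covers 4-bit loading. A minimal sketch, assuming the standard `bitsandbytes` options in Transformers, is to swap the quantization config in the 8-bit example for an NF4 configuration; everything else (processor, prompt, generation) stays the same. Memory savings and output quality will vary with your hardware and task.
+ 
+ ```python
+ # 4-bit variant: only the quantization config changes relative to the 8-bit snippet above.
+ import torch
+ from transformers import BitsAndBytesConfig, PaliGemmaForConditionalGeneration
+ 
+ model_id = "google/paligemma-3b-mix-224"
+ 
+ quantization_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",              # NormalFloat4 quantization
+     bnb_4bit_compute_dtype=torch.bfloat16,  # compute in bfloat16 for stability
+ )
+ 
+ model = PaliGemmaForConditionalGeneration.from_pretrained(
+     model_id, quantization_config=quantization_config
+ ).eval()
+ ```
+ 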
242
+ ## Implementation information
243
+
244
+ ### Hardware
245
+
246
+ PaliGemma was trained using the latest generation of Tensor Processing Unit
247
+ (TPU) hardware (TPUv5e).
248
+
249
+ ### Software
250
+
251
+ Training was done using [JAX](https://github.com/google/jax),
252
+ [Flax](https://github.com/google/flax),
253
+ [TFDS](https://github.com/tensorflow/datasets) and
254
+ [`big_vision`](https://github.com/google-research/big_vision).
255
+
256
+ JAX allows researchers to take advantage of the latest generation of hardware,
257
+ including TPUs, for faster and more efficient training of large models.
258
+
259
+ TFDS is used to access datasets and Flax is used for model architecture. The
260
+ PaliGemma fine-tune code and inference code are released in the `big_vision`
261
+ GitHub repository.
262
+
263
+ ## Evaluation information
264
+
265
+ ### Benchmark results
266
+
267
+ In order to verify the transferability of PaliGemma to a wide variety of
268
+ academic tasks, we fine-tune the pretrained models on each task. Additionally we
269
+ train the mix model with a mixture of the transfer tasks. We report results on
270
+ different resolutions to provide an impression of which tasks benefit from
271
+ increased resolution. Importantly, none of these tasks or datasets are part of
272
+ the pretraining data mixture, and their images are explicitly removed from the
273
+ web-scale pre-training data.
274
+
275
+ #### Single task (fine-tune on single task)
276
+
277
+ <table>
278
+ <tbody><tr>
279
+ <th>Benchmark<br>(train split)</th>
280
+ <th>Metric<br>(split)</th>
281
+ <th>pt-224</th>
282
+ <th>pt-448</th>
283
+ <th>pt-896</th>
284
+ </tr>
285
+ <tr>
286
+ <th>Captioning</th>
287
+ </tr>
288
+ <tr>
289
+ <td>
290
+ <a href="https://cocodataset.org/#home">COCO captions</a><br>(train+restval)
291
+ </td>
292
+ <td>CIDEr (val)</td>
293
+ <td>141.92</td>
294
+ <td>144.60</td>
295
+ </tr>
296
+ <tr>
297
+ <td>
298
+ <a href="https://nocaps.org/">NoCaps</a><br>(Eval of COCO<br>captions transfer)
299
+ </td>
300
+ <td>CIDEr (val)</td>
301
+ <td>121.72</td>
302
+ <td>123.58</td>
303
+ </tr>
304
+ <tr>
305
+ <td>
306
+ <a href="https://arxiv.org/pdf/2205.12522">COCO-35L</a><br>(train)
307
+ </td>
308
+ <td>CIDEr dev<br>(en/avg-34/avg)</td>
309
+ <td>
310
+ 139.2<br>
311
+ 115.8<br>
312
+ 116.4
313
+ </td>
314
+ <td>
315
+ 141.2<br>
316
+ 118.0<br>
317
+ 118.6
318
+ </td>
319
+ </tr>
320
+ <tr>
321
+ <td>
322
+ <a href="https://arxiv.org/pdf/2205.12522">XM3600</a><br>(Eval of COCO-35L transfer)
323
+ </td>
324
+ <td>CIDEr dev<br>(en/avg-34/avg)</td>
325
+ <td>
326
+ 78.1<br>
327
+ 41.3<br>
328
+ 42.4
329
+ </td>
330
+ <td>
331
+ 80.0<br>
332
+ 41.9<br>
333
+ 42.9
334
+ </td>
335
+ </tr>
336
+ <tr>
337
+ <td>
338
+ <a href="https://textvqa.org/textcaps/">TextCaps</a><br>(train)
339
+ </td>
340
+ <td>CIDEr (val)</td>
341
+ <td>127.48</td>
342
+ <td>153.94</td>
343
+ </tr>
344
+ <tr>
345
+ <td>
346
+ <a href="https://arxiv.org/abs/2110.11624">SciCap</a><br>(first sentence, no subfigure)<br>(train+val)
347
+ </td>
348
+ <td>CIDEr/BLEU-4<br>(test)</td>
349
+ <td>
350
+ 162.25<br>
351
+ 0.192<br>
352
+ </td>
353
+ <td>
354
+ 181.49<br>
355
+ 0.211<br>
356
+ </td>
357
+ </tr>
358
+ <tr>
359
+ <td>
360
+ <a href="https://arxiv.org/abs/2108.03353">Screen2words</a><br>(train+dev)
361
+ </td>
362
+ <td>CIDEr (test)</td>
363
+ <td>117.57</td>
364
+ <td>119.59</td>
365
+ </tr>
366
+ <tr>
367
+ <td>
368
+ <a href="https://arxiv.org/abs/2010.04295">Widget Captioning</a><br>(train+dev)
369
+ </td>
370
+ <td>CIDEr (test)</td>
371
+ <td>136.07</td>
372
+ <td>148.36</td>
373
+ </tr>
374
+ <tr>
375
+ <th>Question answering</th>
376
+ </tr>
377
+ <tr>
378
+ <td>
379
+ <a href="https://visualqa.org/index.html">VQAv2</a><br>(train+validation)
380
+ </td>
381
+ <td>Accuracy<br>(Test server - std)</td>
382
+ <td>83.19</td>
383
+ <td>85.64</td>
384
+ </tr>
385
+ <tr>
386
+ <td>
387
+ <a href="https://arxiv.org/abs/2401.06209">MMVP</a><br>(Eval of VQAv2 transfer)
388
+ </td>
389
+ <td>Paired Accuracy</td>
390
+ <td>47.33</td>
391
+ <td>45.33</td>
392
+ </tr>
393
+ <tr>
394
+ <td>
395
+ <a href="https://arxiv.org/abs/2305.10355">POPE</a><br>(Eval of VQAv2 transfer)
396
+ </td>
397
+ <td>Accuracy<br>(random/popular/<br>adversarial)</td>
398
+ <td>
399
+ 87.80<br>
400
+ 85.87<br>
401
+ 84.27
402
+ </td>
403
+ <td>
404
+ 88.23<br>
405
+ 86.77<br>
406
+ 85.90
407
+ </td>
408
+ </tr>
409
+ <tr>
410
+ <td>
411
+ <a href="https://okvqa.allenai.org/">OKVQA</a><br>(train)
412
+ </td>
413
+ <td>Accuracy (val)</td>
414
+ <td>63.54</td>
415
+ <td>63.15</td>
416
+ </tr>
417
+ <tr>
418
+ <td>
419
+ <a href="https://allenai.org/project/a-okvqa/home">A-OKVQA</a> (MC)<br>(train+val)
420
+ </td>
421
+ <td>Accuracy<br>(Test server)</td>
422
+ <td>76.37</td>
423
+ <td>76.90</td>
424
+ </tr>
425
+ <tr>
426
+ <td>
427
+ <a href="https://allenai.org/project/a-okvqa/home">A-OKVQA</a> (DA)<br>(train+val)
428
+ </td>
429
+ <td>Accuracy<br>(Test server)</td>
430
+ <td>61.85</td>
431
+ <td>63.22</td>
432
+ </tr>
433
+ <tr>
434
+ <td>
435
+ <a href="https://cs.stanford.edu/people/dorarad/gqa/about.html">GQA</a><br>(train_balanced+<br>val_balanced)
436
+ </td>
437
+ <td>Accuracy<br>(testdev balanced)</td>
438
+ <td>65.61</td>
439
+ <td>67.03</td>
440
+ </tr>
441
+ <tr>
442
+ <td>
443
+ <a href="https://aclanthology.org/2022.findings-acl.196/">xGQA</a><br>(Eval of GQA transfer)
444
+ </td>
445
+ <td>Mean Accuracy<br>(bn, de, en, id,<br>ko, pt, ru, zh)</td>
446
+ <td>58.37</td>
447
+ <td>59.07</td>
448
+ </tr>
449
+ <tr>
450
+ <td>
451
+ <a href="https://lil.nlp.cornell.edu/nlvr/">NLVR2</a><br>(train+dev)
452
+ </td>
453
+ <td>Accuracy (test)</td>
454
+ <td>90.02</td>
455
+ <td>88.93</td>
456
+ </tr>
457
+ <tr>
458
+ <td>
459
+ <a href="https://marvl-challenge.github.io/">MaRVL</a><br>(Eval of NLVR2 transfer)
460
+ </td>
461
+ <td>Mean Accuracy<br>(test)<br>(id, sw, ta, tr, zh)</td>
462
+ <td>80.57</td>
463
+ <td>76.78</td>
464
+ </tr>
465
+ <tr>
466
+ <td>
467
+ <a href="https://allenai.org/data/diagrams">AI2D</a><br>(train)
468
+ </td>
469
+ <td>Accuracy (test)</td>
470
+ <td>72.12</td>
471
+ <td>73.28</td>
472
+ </tr>
473
+ <tr>
474
+ <td>
475
+ <a href="https://scienceqa.github.io/">ScienceQA</a><br>(Img subset, no CoT)<br>(train+val)
476
+ </td>
477
+ <td>Accuracy (test)</td>
478
+ <td>95.39</td>
479
+ <td>95.93</td>
480
+ </tr>
481
+ <tr>
482
+ <td>
483
+ <a href="https://zenodo.org/records/6344334">RSVQA-LR</a> (Non numeric)<br>(train+val)
484
+ </td>
485
+ <td>Mean Accuracy<br>(test)</td>
486
+ <td>92.65</td>
487
+ <td>93.11</td>
488
+ </tr>
489
+ <tr>
490
+ <td>
491
+ <a href="https://zenodo.org/records/6344367">RSVQA-HR</a> (Non numeric)<br>(train+val)
492
+ </td>
493
+ <td>Mean Accuracy<br>(test/test2)</td>
494
+ <td>
495
+ 92.61<br>
496
+ 90.58
497
+ </td>
498
+ <td>
499
+ 92.79<br>
500
+ 90.54
501
+ </td>
502
+ </tr>
503
+ <tr>
504
+ <td>
505
+ <a href="https://arxiv.org/abs/2203.10244">ChartQA</a><br>(human+aug)x(train+val)
506
+ </td>
507
+ <td>Mean Relaxed<br>Accuracy<br>(test_human,<br>test_aug)</td>
508
+ <td>57.08</td>
509
+ <td>71.36</td>
510
+ </tr>
511
+ <tr>
512
+ <td>
513
+ <a href="https://vizwiz.org/tasks-and-datasets/vqa/">VizWiz VQA</a><br>(train+val)
514
+ </td>
515
+ <td>Accuracy<br>(Test server - std)</td>
516
+ <td>
517
+ 73.7
518
+ </td>
519
+ <td>
520
+ 75.52
521
+ </td>
522
+ </tr>
523
+ <tr>
524
+ <td>
525
+ <a href="https://arxiv.org/abs/1810.12440">TallyQA</a><br>(train)
526
+ </td>
527
+ <td>Accuracy<br>(test_simple/<br>test_complex)</td>
528
+ <td>
529
+ 81.72<br>
530
+ 69.56
531
+ </td>
532
+ <td>
533
+ 84.86<br>
534
+ 72.27
535
+ </td>
536
+ </tr>
537
+ <tr>
538
+ <td>
539
+ <a href="https://ocr-vqa.github.io/">OCR-VQA</a><br>(train+val)
540
+ </td>
541
+ <td>Accuracy (test)</td>
542
+ <td>72.32</td>
543
+ <td>74.61</td>
544
+ <td>74.93</td>
545
+ </tr>
546
+ <tr>
547
+ <td>
548
+ <a href="https://textvqa.org/">TextVQA</a><br>(train+val)
549
+ </td>
550
+ <td>Accuracy<br>(Test server - std)</td>
551
+ <td>55.47</td>
552
+ <td>73.15</td>
553
+ <td>76.48</td>
554
+ </tr>
555
+ <tr>
556
+ <td>
557
+ <a href="https://www.docvqa.org/">DocVQA</a><br>(train+val)
558
+ </td>
559
+ <td>ANLS (Test server)</td>
560
+ <td>43.74</td>
561
+ <td>78.02</td>
562
+ <td>84.77</td>
563
+ </tr>
564
+ <tr>
565
+ <td>
566
+ <a href="https://openaccess.thecvf.com/content/WACV2022/papers/Mathew_InfographicVQA_WACV_2022_paper.pdf">Infographic VQA</a><br>(train+val)
567
+ </td>
568
+ <td>ANLS (Test server)</td>
569
+ <td>28.46</td>
570
+ <td>40.47</td>
571
+ <td>47.75</td>
572
+ </tr>
573
+ <tr>
574
+ <td>
575
+ <a href="https://arxiv.org/abs/1905.13648">SceneText VQA</a><br>(train+val)
576
+ </td>
577
+ <td>ANLS (Test server)</td>
578
+ <td>63.29</td>
579
+ <td>81.82</td>
580
+ <td>84.40</td>
581
+ </tr>
582
+ <tr>
583
+ <th>Segmentation</th>
584
+ </tr>
585
+ <tr>
586
+ <td>
587
+ <a href="https://arxiv.org/abs/1608.00272">RefCOCO</a><br>(combined refcoco, refcoco+,<br>refcocog excluding val<br>and test images)
588
+ </td>
589
+ <td>MIoU<br>(validation)<br>refcoco/refcoco+/<br>refcocog</td>
590
+ <td>
591
+ 73.40<br>
592
+ 68.32<br>
593
+ 67.65
594
+ </td>
595
+ <td>
596
+ 75.57<br>
597
+ 69.76<br>
598
+ 70.17
599
+ </td>
600
+ <td>
601
+ 76.94<br>
602
+ 72.18<br>
603
+ 72.22
604
+ </td>
605
+ </tr>
606
+ <tr>
607
+ <th>Video tasks (Caption/QA)</th>
608
+ </tr>
609
+ <tr>
610
+ <td>MSR-VTT (Captioning)</td>
611
+ <td>CIDEr (test)</td>
612
+ <td>70.54</td>
613
+ </tr>
614
+ <tr>
615
+ <td>MSR-VTT (QA)</td>
616
+ <td>Accuracy (test)</td>
617
+ <td>50.09</td>
618
+ </tr>
619
+ <tr>
620
+ <td>ActivityNet (Captioning)</td>
621
+ <td>CIDEr (test)</td>
622
+ <td>34.62</td>
623
+ </tr>
624
+ <tr>
625
+ <td>ActivityNet (QA)</td>
626
+ <td>Accuracy (test)</td>
627
+ <td>50.78</td>
628
+ </tr>
629
+ <tr>
630
+ <td>VATEX (Captioning)</td>
631
+ <td>CIDEr (test)</td>
632
+ <td>79.73</td>
633
+ </tr>
634
+ <tr>
635
+ <td>MSVD (QA)</td>
636
+ <td>Accuracy (test)</td>
637
+ <td>60.22</td>
638
+ </tr>
639
+ </tbody></table>
640
+
641
+ #### Mix model (fine-tune on mixture of transfer tasks)
642
+
643
+ <table>
644
+ <tbody><tr>
645
+ <th>Benchmark</th>
646
+ <th>Metric (split)</th>
647
+ <th>mix-224</th>
648
+ <th>mix-448</th>
649
+ </tr>
650
+ <tr>
651
+ <td><a href="https://arxiv.org/abs/2401.06209">MMVP</a></td>
652
+ <td>Paired Accuracy</td>
653
+ <td>46.00</td>
654
+ <td>45.33</td>
655
+ </tr>
656
+ <tr>
657
+ <td><a href="https://arxiv.org/abs/2305.10355">POPE</a></td>
658
+ <td>Accuracy<br>(random/popular/adversarial)</td>
659
+ <td>
660
+ 88.00<br>
661
+ 86.63<br>
662
+ 85.67
663
+ </td>
664
+ <td>
665
+ 89.37<br>
666
+ 88.40<br>
667
+ 87.47
668
+ </td>
669
+ </tr>
670
+ </tbody></table>
671
+
672
+ ## Ethics and safety
673
+
674
+ ### Evaluation approach
675
+
676
+ Our evaluation methods include structured evaluations and internal red-teaming
677
+ testing of relevant content policies. Red-teaming was conducted by a number of
678
+ different teams, each with different goals and human evaluation metrics. These
679
+ models were evaluated against a number of different categories relevant to
680
+ ethics and safety, including:
681
+
682
+ * Human evaluation on prompts covering child safety, content safety and
683
+ representational harms. See the [Gemma model
684
+ card](https://ai.google.dev/gemma/docs/model_card#evaluation_approach) for
685
+ more details on evaluation approach, but with image captioning and visual
686
+ question answering setups.
687
+ * Image-to-Text benchmark evaluation: Benchmark against relevant academic
688
+ datasets such as FairFace Dataset ([Karkkainen et al.,
689
+ 2021](https://arxiv.org/abs/1908.04913)).
690
+
691
+ ### Evaluation results
692
+
693
+ * The human evaluation results of ethics and safety evaluations are within
694
+ acceptable thresholds for meeting [internal
695
+ policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11)
696
+ for categories such as child safety, content safety and representational
697
+ harms.
698
+ * On top of robust internal evaluations, we also use the Perspective API
699
+ (threshold of 0.8) to measure toxicity, profanity, and other potential
700
+ issues in the generated captions for images sourced from the FairFace
701
+ dataset. We report the maximum and median values observed across subgroups
702
+ for each of the perceived gender, ethnicity, and age attributes.
703
+
704
+
705
+ <table>
706
+ <tbody><tr>
707
+ </tr></tbody><tbody><tr><th>Metric</th>
708
+ <th>Perceived<br>gender</th>
709
+ <th></th>
710
+ <th>Ethnicity</th>
711
+ <th></th>
712
+ <th>Age group</th>
713
+ <th></th>
714
+ </tr>
715
+ <tr>
716
+ <th></th>
717
+ <th>Maximum</th>
718
+ <th>Median</th>
719
+ <th>Maximum</th>
720
+ <th>Median</th>
721
+ <th>Maximum</th>
722
+ <th>Median</th>
723
+ </tr>
724
+ <tr>
725
+ <td>Toxicity</td>
726
+ <td>0.04%</td>
727
+ <td>0.03%</td>
728
+ <td>0.08%</td>
729
+ <td>0.00%</td>
730
+ <td>0.09%</td>
731
+ <td>0.00%</td>
732
+ </tr>
733
+ <tr>
734
+ <td>Identity Attack</td>
735
+ <td>0.00%</td>
736
+ <td>0.00%</td>
737
+ <td>0.00%</td>
738
+ <td>0.00%</td>
739
+ <td>0.00%</td>
740
+ <td>0.00%</td>
741
+ </tr>
742
+ <tr>
743
+ <td>Insult</td>
744
+ <td>0.06%</td>
745
+ <td>0.04%</td>
746
+ <td>0.09%</td>
747
+ <td>0.07%</td>
748
+ <td>0.16%</td>
749
+ <td>0.00%</td>
750
+ </tr>
751
+ <tr>
752
+ <td>Threat</td>
753
+ <td>0.06%</td>
754
+ <td>0.05%</td>
755
+ <td>0.14%</td>
756
+ <td>0.05%</td>
757
+ <td>0.17%</td>
758
+ <td>0.00%</td>
759
+ </tr>
760
+ <tr>
761
+ <td>Profanity</td>
762
+ <td>0.00%</td>
763
+ <td>0.00%</td>
764
+ <td>0.00%</td>
765
+ <td>0.00%</td>
766
+ <td>0.00%</td>
767
+ <td>0.00%</td>
768
+ </tr>
769
+ </tbody></table>
770
+
771
+ ## Usage and limitations
772
+
773
+ ### Intended usage
774
+
775
+ Open Vision Language Models (VLMs) have a wide range of applications across
776
+ various industries and domains. The following list of potential uses is not
777
+ comprehensive. The purpose of this list is to provide contextual information
778
+ about the possible use-cases that the model creators considered as part of model
779
+ training and development.
780
+
781
+ Fine-tune on specific vision-language task:
782
+
783
+ * The pre-trained models can be fine-tuned on a wide range of vision-language
784
+ tasks such as: image captioning, short video caption, visual question
785
+ answering, text reading, object detection and object segmentation.
786
+ * The pre-trained models can be fine-tuned for specific domains such as remote
787
+ sensing question answering, visual questions from people who are blind,
788
+ science question answering, and describing UI element functionalities.
789
+ * The pre-trained models can be fine-tuned for tasks with non-textual outputs
790
+ such as bounding boxes or segmentation masks.
791
+
792
+ Vision-language research:
793
+
794
+ * The pre-trained models and fine-tuned models can serve as a foundation for researchers to experiment with VLM
795
+ techniques, develop algorithms, and contribute to the advancement of the
796
+ field.
797
+
798
+ ### Ethical considerations and risks
799
+
800
+ The development of vision-language models (VLMs) raises several ethical concerns. In creating an open model, we have carefully considered the following:
801
+
802
+ * Bias and Fairness
803
+ * VLMs trained on large-scale, real-world image-text data can reflect socio-cultural biases embedded in the training material. These models underwent careful scrutiny; their input data pre-processing is described and posterior evaluations are reported in this card.
804
+ * Misinformation and Misuse
805
+ * VLMs can be misused to generate text that is false, misleading, or harmful.
806
+ * Guidelines are provided for responsible use with the model, see the [Responsible Generative AI Toolkit](https://ai.google.dev/responsible).
807
+ * Transparency and Accountability
808
+ * This model card summarizes details on the models' architecture, capabilities, limitations, and evaluation processes.
809
+ * A responsibly developed open model offers the opportunity to share innovation by making VLM technology accessible to developers and researchers across the AI ecosystem.
810
+
811
+
812
+ Risks identified and mitigations:
813
+
814
+ * **Perpetuation of biases:** We encourage continuous monitoring
815
+ (using evaluation metrics, human review) and the exploration of de-biasing
816
+ techniques during model training, fine-tuning, and other use cases.
817
+ * **Generation of harmful content:** Mechanisms and guidelines for content
818
+ safety are essential. Developers are encouraged to exercise caution and
819
+ implement appropriate content safety safeguards based on their specific
820
+ product policies and application use cases.
821
+ * **Misuse for malicious purposes:** Technical limitations and developer and
822
+ end-user education can help mitigate malicious applications of VLMs.
823
+ Educational resources and reporting mechanisms for users to flag misuse are
824
+ provided. Prohibited uses of Gemma models are outlined in the [Gemma
825
+ Prohibited Use Policy](https://ai.google.dev/gemma/prohibited_use_policy).
826
+ * **Privacy violations:** Models were trained on data filtered to remove certain personal information and sensitive data. Developers are encouraged to adhere to privacy regulations with privacy-preserving techniques.
827
+
828
+ ### Limitations
829
+
830
+ * Most limitations inherited from the underlying Gemma model still apply:
831
+ * VLMs are better at tasks that can be framed with clear prompts and
832
+ instructions. Open-ended or highly complex tasks might be challenging.
833
+ * Natural language is inherently complex. VLMs might struggle to grasp
834
+ subtle nuances, sarcasm, or figurative language.
835
+ * VLMs generate responses based on information they learned from their
836
+ training datasets, but they are not knowledge bases. They may generate
837
+ incorrect or outdated factual statements.
838
+ * VLMs rely on statistical patterns in language and images. They might
839
+ lack the ability to apply common sense reasoning in certain situations.
840
+ * PaliGemma was designed first and foremost to serve as a general pre-trained
841
+ model for transfer to specialized tasks. Hence, its "out of the box" or
842
+ "zero-shot" performance might lag behind models designed specifically for
843
+ that purpose.
844
+ * PaliGemma is not a multi-turn chatbot. It is designed for a single round of
845
+ image and text input.
Paligemma 3B PT 224 gitattributes ADDED
@@ -0,0 +1,36 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
Paligemma Preprocessor Config.json ADDED
@@ -0,0 +1,40 @@
1
+ {
2
+ "_valid_processor_keys": [
3
+ "images",
4
+ "do_resize",
5
+ "size",
6
+ "resample",
7
+ "do_rescale",
8
+ "rescale_factor",
9
+ "do_normalize",
10
+ "image_mean",
11
+ "image_std",
12
+ "return_tensors",
13
+ "data_format",
14
+ "input_data_format",
15
+ "do_convert_rgb"
16
+ ],
17
+ "do_convert_rgb": null,
18
+ "do_normalize": true,
19
+ "do_rescale": true,
20
+ "do_resize": true,
21
+ "image_mean": [
22
+ 0.5,
23
+ 0.5,
24
+ 0.5
25
+ ],
26
+ "image_processor_type": "SiglipImageProcessor",
27
+ "image_seq_length": 256,
28
+ "image_std": [
29
+ 0.5,
30
+ 0.5,
31
+ 0.5
32
+ ],
33
+ "processor_class": "PaliGemmaProcessor",
34
+ "resample": 3,
35
+ "rescale_factor": 0.00392156862745098,
36
+ "size": {
37
+ "height": 224,
38
+ "width": 224
39
+ }
40
+ }
Special Tokens Map.json ADDED
@@ -0,0 +1,33 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<image>"
4
+ ],
5
+ "bos_token": {
6
+ "content": "<bos>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "eos_token": {
13
+ "content": "<eos>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false
18
+ },
19
+ "pad_token": {
20
+ "content": "<pad>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false
25
+ },
26
+ "unk_token": {
27
+ "content": "<unk>",
28
+ "lstrip": false,
29
+ "normalized": false,
30
+ "rstrip": false,
31
+ "single_word": false
32
+ }
33
+ }
paligemma-3b-pt-224 config (1).json ADDED
@@ -0,0 +1,40 @@
1
+ {
2
+ "_name_or_path": "final-hf/paligemma-3b-pt-224-main",
3
+ "architectures": [
4
+ "PaliGemmaForConditionalGeneration"
5
+ ],
6
+ "bos_token_id": 2,
7
+ "eos_token_id": 1,
8
+ "hidden_size": 2048,
9
+ "ignore_index": -100,
10
+ "image_token_index": 257152,
11
+ "model_type": "paligemma",
12
+ "pad_token_id": 0,
13
+ "projection_dim": 2048,
14
+ "text_config": {
15
+ "hidden_size": 2048,
16
+ "intermediate_size": 16384,
17
+ "model_type": "gemma",
18
+ "num_attention_heads": 8,
19
+ "num_hidden_layers": 18,
20
+ "num_image_tokens": 256,
21
+ "num_key_value_heads": 1,
22
+ "torch_dtype": "float32",
23
+ "vocab_size": 257216
24
+ },
25
+ "torch_dtype": "float32",
26
+ "transformers_version": "4.41.0.dev0",
27
+ "vision_config": {
28
+ "hidden_size": 1152,
29
+ "intermediate_size": 4304,
30
+ "model_type": "siglip_vision_model",
31
+ "num_attention_heads": 16,
32
+ "num_hidden_layers": 27,
33
+ "num_image_tokens": 256,
34
+ "patch_size": 14,
35
+ "projection_dim": 2048,
36
+ "projector_hidden_act": "gelu_fast",
37
+ "vision_use_head": false
38
+ },
39
+ "vocab_size": 257216
40
+ }
paligemma-3b-pt-224 config.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 2,
4
+ "eos_token_id": 1,
5
+ "pad_token_id": 0,
6
+ "transformers_version": "4.41.0.dev0"
7
+ }