Commit 09067ad
Parent(s):
Duplicate from HuggingFaceM4/VLM_WebSight_finetuned
Co-authored-by: Victor Sanh <[email protected]>
- .gitattributes +35 -0
- README.md +106 -0
- added_tokens.json +4 -0
- config.json +61 -0
- configuration_vmistral.py +308 -0
- generation_config.json +7 -0
- model-00001-of-00004.safetensors +3 -0
- model-00002-of-00004.safetensors +3 -0
- model-00003-of-00004.safetensors +3 -0
- model-00004-of-00004.safetensors +3 -0
- model.safetensors.index.json +803 -0
- modeling_vmistral.py +1764 -0
- preprocessor_config.json +20 -0
- special_tokens_map.json +30 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +56 -0
- vision.py +652 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,106 @@
+---
+license: apache-2.0
+datasets:
+- HuggingFaceM4/WebSight
+language:
+- en
+tags:
+- code
+---
+
+
+**Try out the [demo](https://huggingface.co/spaces/HuggingFaceM4/screenshot2html)!**
+
+# Model Description
+
+This model converts screenshots of website components into HTML/CSS code.
+
+It is based on a very early checkpoint of our forthcoming vision-language foundation model, which has been fine-tuned using the [Websight](https://huggingface.co/datasets/HuggingFaceM4/Websight) dataset.
+
+This is very much an alpha version. The goal is to kick off an effort to develop improved models capable of converting a website screenshot into actual code.
+
+# Code snippet
+
+```python
+import torch
+
+from PIL import Image
+from transformers import AutoModelForCausalLM, AutoProcessor
+
+from transformers.image_utils import to_numpy_array, PILImageResampling, ChannelDimension
+from transformers.image_transforms import resize, to_channel_dimension_format
+
+DEVICE = torch.device("cuda")
+PROCESSOR = AutoProcessor.from_pretrained(
+    "HuggingFaceM4/VLM_WebSight_finetuned",
+    token=API_TOKEN,
+)
+MODEL = AutoModelForCausalLM.from_pretrained(
+    "HuggingFaceM4/VLM_WebSight_finetuned",
+    token=API_TOKEN,
+    trust_remote_code=True,
+    torch_dtype=torch.bfloat16,
+).to(DEVICE)
+image_seq_len = MODEL.config.perceiver_config.resampler_n_latents
+BOS_TOKEN = PROCESSOR.tokenizer.bos_token
+BAD_WORDS_IDS = PROCESSOR.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
+
+
+def convert_to_rgb(image):
+    # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
+    # for transparent images. The call to `alpha_composite` handles this case
+    if image.mode == "RGB":
+        return image
+
+    image_rgba = image.convert("RGBA")
+    background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
+    alpha_composite = Image.alpha_composite(background, image_rgba)
+    alpha_composite = alpha_composite.convert("RGB")
+    return alpha_composite
+
+# The processor is the same as the Idefics processor except for the BILINEAR interpolation,
+# so this is a hack in order to redefine ONLY the transform method
+def custom_transform(x):
+    x = convert_to_rgb(x)
+    x = to_numpy_array(x)
+    x = resize(x, (960, 960), resample=PILImageResampling.BILINEAR)
+    x = PROCESSOR.image_processor.rescale(x, scale=1 / 255)
+    x = PROCESSOR.image_processor.normalize(
+        x,
+        mean=PROCESSOR.image_processor.image_mean,
+        std=PROCESSOR.image_processor.image_std
+    )
+    x = to_channel_dimension_format(x, ChannelDimension.FIRST)
+    x = torch.tensor(x)
+    return x
+
+inputs = PROCESSOR.tokenizer(
+    f"{BOS_TOKEN}<fake_token_around_image>{'<image>' * image_seq_len}<fake_token_around_image>",
+    return_tensors="pt",
+    add_special_tokens=False,
+)
+inputs["pixel_values"] = PROCESSOR.image_processor([image], transform=custom_transform)
+inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
+generated_ids = MODEL.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, max_length=4096)
+generated_text = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+print(generated_text)
+```
+
+# Model Details
+
+- **Developed by:** Hugging Face
+- **Model type:** Multi-modal model (screenshot of website component to HTML/CSS code)
+- **Language(s) (NLP):** en
+- **License:** see [License section](#license)
+- **Parent Models:** [SigLIP](https://github.com/huggingface/transformers/pull/26522) and [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)
+- **Resources for more information:**
+    <!-- - [GitHub Repo](https://github.com/huggingface/m4/) -->
+    - Websight dataset: [Dataset card](https://huggingface.co/datasets/HuggingFaceM4/Websight)
+    - Websight technical report: [Report](https://arxiv.org/abs/2403.09029)
+
+# License
+
+The model is built on top of two pre-trained models: [SigLIP](https://github.com/huggingface/transformers/pull/26522) and [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1), which are both released under an Apache-2.0 license. As such, users should comply with the licenses of these models.
+
+The two pre-trained models are connected to each other with newly initialized parameters that we train. These are not based on either of the two base frozen models forming the composite model. We release the additional weights we trained under an Apache-2.0 license.
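In the snippet above, `API_TOKEN` and `image` are used but never defined: the former is a Hugging Face access token, the latter the screenshot to convert. A minimal way to supply them, assuming a hypothetical local file `screenshot.png`:

```python
from PIL import Image

API_TOKEN = "hf_..."  # placeholder: substitute your own Hugging Face access token
image = Image.open("screenshot.png")  # hypothetical input: a screenshot of a website component
```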
added_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+  "<fake_token_around_image>": 32000,
+  "<image>": 32001
+}
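These two ids match `image_token_id` in `config.json` below and the placeholder tokens used in the README snippet. A quick sanity check using the standard tokenizer API:

```python
from transformers import AutoTokenizer

# Download the tokenizer from the Hub and confirm the added-token ids
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceM4/VLM_WebSight_finetuned")
print(tokenizer.convert_tokens_to_ids("<fake_token_around_image>"))  # 32000
print(tokenizer.convert_tokens_to_ids("<image>"))                    # 32001
```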
config.json
ADDED
@@ -0,0 +1,61 @@
+{
+  "_commit_hash": null,
+  "_name_or_path": "None",
+  "additional_vocab_size": 2,
+  "alpha_initializer": "zeros",
+  "alpha_type": "float",
+  "alphas_initializer_range": 0.0,
+  "architectures": [
+    "VMistralForVisionText2Text"
+  ],
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_vmistral.VMistralConfig",
+    "AutoModelForCausalLM": "modeling_vmistral.VMistralForVisionText2Text"
+  },
+  "bos_token_id": 1,
+  "cross_layer_interval": 1,
+  "eos_token_id": 2,
+  "freeze_lm_head": false,
+  "freeze_text_layers": false,
+  "freeze_text_module_exceptions": [],
+  "freeze_vision_layers": false,
+  "freeze_vision_module_exceptions": [],
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "image_token_id": 32001,
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 32768,
+  "model_type": "vmistral",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "pad_token_id": 0,
+  "perceiver_config": {
+    "resampler_depth": 3,
+    "resampler_head_dim": 96,
+    "resampler_n_heads": 16,
+    "resampler_n_latents": 64,
+    "qk_layer_norms_perceiver": true
+  },
+  "qk_layer_norms": true,
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 10000.0,
+  "sliding_window": 4096,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.34.0.dev0",
+  "use_cache": true,
+  "use_resampler": true,
+  "vision_config": {
+    "hidden_size": 1152,
+    "image_size": 960,
+    "intermediate_size": 4304,
+    "model_type": "vmistral",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vocab_size": 32000
+}
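The `auto_map` block is what lets `trust_remote_code=True` resolve `AutoConfig` and `AutoModelForCausalLM` to the custom classes shipped in this repo. A minimal sketch of loading and inspecting the config:

```python
from transformers import AutoConfig

# trust_remote_code=True is required so auto_map can resolve configuration_vmistral.VMistralConfig
config = AutoConfig.from_pretrained("HuggingFaceM4/VLM_WebSight_finetuned", trust_remote_code=True)
print(config.model_type)                            # vmistral
print(config.image_token_id)                        # 32001
print(config.perceiver_config.resampler_n_latents)  # 64
```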
configuration_vmistral.py
ADDED
@@ -0,0 +1,308 @@
+# coding=utf-8
+# Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" VMistral model configuration"""
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+    "HuggingFaceM4/VLM_WebSight_finetuned": "https://huggingface.co/HuggingFaceM4/VLM_WebSight_finetuned/resolve/main/config.json",
+}
+
+
+class VMistralVisionConfig(PretrainedConfig):
+    r"""
+    """
+    model_type = "vmistral"
+
+    def __init__(
+        self,
+        hidden_size=768,
+        intermediate_size=3072,
+        num_hidden_layers=12,
+        num_attention_heads=12,
+        num_channels=3,
+        image_size=224,
+        patch_size=32,
+        hidden_act="gelu_pytorch_tanh",
+        layer_norm_eps=1e-6,
+        attention_dropout=0.0,
+        initializer_range=0.02,
+        initializer_factor=1.0,
+        _flash_attn_2_enabled=True,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.num_channels = num_channels
+        self.patch_size = patch_size
+        self.image_size = image_size
+        self.initializer_range = initializer_range
+        self.initializer_factor = initializer_factor
+        self.attention_dropout = attention_dropout
+        self.layer_norm_eps = layer_norm_eps
+        self.hidden_act = hidden_act
+        self._flash_attn_2_enabled = _flash_attn_2_enabled
+
+
+class VMistralPerceiverConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`MistralModel`]. It is used to instantiate a
+    Mistral model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of the Mistral-7B-v0.1 or Mistral-7B-Instruct-v0.1.
+
+    [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)
+    [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        use_resampler (`bool`, *optional*, defaults to `False`):
+            Whether or not to use the resampler
+        resampler_n_latents (`int`, *optional*, defaults to 64):
+            Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
+        resampler_depth (`int`, *optional*, defaults to 6):
+            Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
+        resampler_n_heads (`int`, *optional*, defaults to 16):
+            Number of heads in each Transformer block (for multi-headed self-attention).
+        resampler_head_dim (`int`, *optional*, defaults to 96):
+            Dimensionality of each head projection in the Transformer block.
+        qk_layer_norms_perceiver (`bool`, *optional*, defaults to `False`):
+            Whether or not to use qk layer norms in perceiver
+    """
+    model_type = "vmistral"
+
+    def __init__(
+        self,
+        resampler_n_latents=64,
+        resampler_depth=6,
+        resampler_n_heads=16,
+        resampler_head_dim=96,
+        qk_layer_norms_perceiver=False,
+        **kwargs,
+    ):
+        self.resampler_n_latents = resampler_n_latents
+        self.resampler_depth = resampler_depth
+        self.resampler_n_heads = resampler_n_heads
+        self.resampler_head_dim = resampler_head_dim
+        self.qk_layer_norms_perceiver = qk_layer_norms_perceiver
+
+        super().__init__(**kwargs)
+
+
+class VMistralConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`MistralModel`]. It is used to instantiate a
+    Mistral model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of the Mistral-7B-v0.1 or Mistral-7B-Instruct-v0.1.
+
+    [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)
+    [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        additional_vocab_size (`int`, *optional*, defaults to 0):
+            Additional vocabulary size of the model, typically for the special "<img>" token. Additional vocab tokens
+            are always trainable whereas regular vocab tokens can be frozen or not.
+        vocab_size (`int`, *optional*, defaults to 32000):
+            Vocabulary size of the Mistral model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`MistralModel`]
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 14336):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        num_key_value_heads (`int`, *optional*, defaults to 8):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details check out [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
+            The maximum sequence length that this model might ever be used with. Mistral's sliding window attention
+            allows sequences of up to 4096*32 tokens.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        alpha_initializer (`str`, *optional*, defaults to `"zeros"`):
+            Initialization type for the alphas.
+        alphas_initializer_range (`float`, *optional*, defaults to 0.0):
+            The standard deviation of the truncated_normal_initializer for initializing the alphas in the Gated Cross
+            Attention.
+        alpha_type (`str`, *optional*, defaults to `"float"`):
+            Whether the gating alphas should be vectors or single floats.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        pad_token_id (`int`, *optional*):
+            The id of the padding token.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            The id of the "beginning-of-sequence" token.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            The id of the "end-of-sequence" token.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether the model's input and output word embeddings should be tied.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        sliding_window (`int`, *optional*, defaults to 4096):
+            Sliding window attention window size. If not specified, will default to `4096`.
+        cross_layer_interval (`int`, *optional*, defaults to 1):
+            Interval for cross attention (from text to image) layers.
+        qk_layer_norms (`bool`, *optional*, defaults to `False`): Whether to add layer norm after q and k
+        freeze_text_layers (`bool`, *optional*, defaults to `True`): Whether to freeze text layers
+        freeze_text_module_exceptions (`bool`, *optional*, defaults to `[]`):
+            Exceptions to freezing text layers when `freeze_text_layers` is `True`
+        freeze_lm_head (`bool`, *optional*, defaults to `False`): Whether to freeze lm head
+        freeze_vision_layers (`bool`, *optional*, defaults to `True`): Whether to freeze vision layers
+        freeze_vision_module_exceptions (`bool`, *optional*, defaults to `[]`):
+            Exceptions to freezing vision layers when `freeze_vision_layers` is `True`
+        use_resampler (`bool`, *optional*, defaults to `False`): Whether to use the Resampler
+        vision_config (`IdeficsVisionConfig`, *optional*): Custom vision config or dict
+        perceiver_config (`IdeficsPerceiverConfig`, *optional*): Custom perceiver config or dict
+
+    Example:
+    ```python
+    >>> from transformers import MistralModel, MistralConfig
+
+    >>> # Initializing a Mistral 7B style configuration
+    >>> configuration = MistralConfig()
+
+    >>> # Initializing a model from the Mistral 7B style configuration
+    >>> model = MistralModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+    model_type = "vmistral"
+    is_composition = False
+
+    def __init__(
+        self,
+        additional_vocab_size=0,
+        vocab_size=32000,
+        hidden_size=4096,
+        intermediate_size=14336,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=8,
+        hidden_act="silu",
+        max_position_embeddings=4096 * 32,
+        initializer_range=0.02,
+        alpha_initializer="zeros",
+        alphas_initializer_range=0.0,
+        alpha_type="float",
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=0,  # None in the original configuration_mistral, we set it to the unk_token_id
+        bos_token_id=1,
+        eos_token_id=2,
+        image_token_id=32_001,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        sliding_window=4096,
+        cross_layer_interval=1,
+        qk_layer_norms=False,
+        freeze_text_layers=True,
+        freeze_text_module_exceptions=[],
+        freeze_lm_head=False,
+        freeze_vision_layers=True,
+        freeze_vision_module_exceptions=[],
+        attention_dropout=0.0,
+        _flash_attn_2_enabled=True,
+        use_resampler=False,
+        vision_config=None,
+        perceiver_config=None,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.additional_vocab_size = additional_vocab_size
+        self.image_token_id = image_token_id
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.sliding_window = sliding_window
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.alpha_initializer = alpha_initializer
+        self.alphas_initializer_range = alphas_initializer_range
+        self.alpha_type = alpha_type
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+
+        self.cross_layer_interval = cross_layer_interval
+        self.qk_layer_norms = qk_layer_norms
+        self.freeze_vision_layers = freeze_vision_layers
+
+        self.freeze_text_layers = freeze_text_layers
+        self.freeze_text_module_exceptions = freeze_text_module_exceptions
+        self.freeze_vision_module_exceptions = freeze_vision_module_exceptions
+        self.freeze_lm_head = freeze_lm_head
+
+        self.use_resampler = use_resampler
+        self._flash_attn_2_enabled = _flash_attn_2_enabled
+        self.attention_dropout = attention_dropout
+
+        if perceiver_config is None:
+            self.perceiver_config = VMistralPerceiverConfig()
+        elif isinstance(perceiver_config, dict):
+            self.perceiver_config = VMistralPerceiverConfig(**perceiver_config)
+        elif isinstance(perceiver_config, VMistralPerceiverConfig):
+            self.perceiver_config = perceiver_config
+
+        if vision_config is None:
+            self.vision_config = VMistralVisionConfig()
+        elif isinstance(vision_config, dict):
+            self.vision_config = VMistralVisionConfig(**vision_config)
+        elif isinstance(vision_config, VMistralVisionConfig):
+            self.vision_config = vision_config
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
+        # IMPORTANT: Do not do any __init__ args-based checks in the constructor, since
+        # PretrainedConfig.from_dict first instantiates the class with the config dict and only then
+        # updates the config object with `kwargs` from from_pretrained, so during the instantiation
+        # of this object many attributes have default values and haven't yet been overridden.
+        # Do any required checks inside `from_pretrained` once the superclass' `from_pretrained` was run.
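As the constructor above shows, `vision_config` and `perceiver_config` accept either config objects or plain dicts, which are promoted to `VMistralVisionConfig` / `VMistralPerceiverConfig`. A minimal sketch of that behavior, assuming `configuration_vmistral.py` is importable from the working directory:

```python
from configuration_vmistral import VMistralConfig, VMistralVisionConfig

# Dict sub-configs are promoted to config objects inside __init__
config = VMistralConfig(
    use_resampler=True,
    vision_config={"hidden_size": 1152, "image_size": 960, "patch_size": 14},
    perceiver_config={"resampler_n_latents": 64, "resampler_depth": 3},
)
assert isinstance(config.vision_config, VMistralVisionConfig)
print(config.perceiver_config.resampler_n_latents)  # 64
```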
generation_config.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 0,
+  "transformers_version": "4.35.2"
+}
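These defaults are picked up by `generate()` automatically; they can also be read on their own with the standard `transformers` API:

```python
from transformers import GenerationConfig

# Fetch just the generation defaults from the Hub
gen_config = GenerationConfig.from_pretrained("HuggingFaceM4/VLM_WebSight_finetuned")
print(gen_config.bos_token_id, gen_config.eos_token_id, gen_config.pad_token_id)  # 1 2 0
```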
model-00001-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5d11a2b45c427d05c692e05b7af387e6a88cd8f7e1ccb5a82f27470cfe81351
+size 4895986336
model-00002-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d12edfca04ef9401da56c2383dff1971c17fd6d9a17227f9aacb160170ea8fb3
+size 4915916144
model-00003-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9576d63b4c2169d804b677defd1d1f19b6f644f797ca6156abb731199f1c880f
+size 4915916176
model-00004-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:024a11e6b5272e8f6693aa5c4ff7e1801a463babb7464a52e54729619cf86f89
+size 1688301256
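The four files above are Git LFS pointers, not the weights themselves: each records the SHA-256 (`oid`) and byte `size` of a shard stored in LFS. A minimal sketch for verifying a downloaded shard against its pointer, using only the standard library:

```python
import hashlib
import os

def verify_shard(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded shard against the oid/size from its LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks to keep memory use flat on multi-GB files
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

print(verify_shard(
    "model-00004-of-00004.safetensors",  # assumes the shard was downloaded locally
    "024a11e6b5272e8f6693aa5c4ff7e1801a463babb7464a52e54729619cf86f89",
    1688301256,
))
```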
model.safetensors.index.json
ADDED
@@ -0,0 +1,803 @@
+{
+  "metadata": {
+    "total_size": 16416014464
+  },
+  "weight_map": {
+    "lm_head.additional_fc.weight": "model-00004-of-00004.safetensors",
+    "lm_head.weight": "model-00004-of-00004.safetensors",
+    "model.embed_tokens.additional_embedding.weight": "model-00001-of-00004.safetensors",
+    "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.28.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.28.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.29.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.29.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.29.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.29.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.29.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.29.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.29.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.30.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.30.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.30.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.30.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.30.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.30.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.30.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.30.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.modality_projection.act.fc1.weight": "model-00001-of-00004.safetensors",
+    "model.modality_projection.act.fc2.weight": "model-00001-of-00004.safetensors",
+    "model.modality_projection.fc1.weight": "model-00001-of-00004.safetensors",
+    "model.modality_projection.fc2.weight": "model-00001-of-00004.safetensors",
+    "model.norm.weight": "model-00004-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.0.context_layer_norm.bias": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.0.context_layer_norm.weight": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.0.k_layer_norm.bias": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.0.k_layer_norm.weight": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.0.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.0.latents_layer_norm.bias": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.0.latents_layer_norm.weight": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.0.output_proj.weight": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.0.q_layer_norm.bias": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.0.q_layer_norm.weight": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.0.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.0.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.1.c_proj.weight": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.1.fc.weight": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.1.ln.bias": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.0.1.ln.weight": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.1.0.context_layer_norm.bias": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.1.0.context_layer_norm.weight": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.1.0.k_layer_norm.bias": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.1.0.k_layer_norm.weight": "model-00001-of-00004.safetensors",
+    "model.perceiver_resampler.blocks.1.0.k_proj.weight": "model-00001-of-00004.safetensors",
|
324 |
+
"model.perceiver_resampler.blocks.1.0.latents_layer_norm.bias": "model-00001-of-00004.safetensors",
|
325 |
+
"model.perceiver_resampler.blocks.1.0.latents_layer_norm.weight": "model-00001-of-00004.safetensors",
|
326 |
+
"model.perceiver_resampler.blocks.1.0.output_proj.weight": "model-00001-of-00004.safetensors",
|
327 |
+
"model.perceiver_resampler.blocks.1.0.q_layer_norm.bias": "model-00001-of-00004.safetensors",
|
328 |
+
"model.perceiver_resampler.blocks.1.0.q_layer_norm.weight": "model-00001-of-00004.safetensors",
|
329 |
+
"model.perceiver_resampler.blocks.1.0.q_proj.weight": "model-00001-of-00004.safetensors",
|
330 |
+
"model.perceiver_resampler.blocks.1.0.v_proj.weight": "model-00001-of-00004.safetensors",
|
331 |
+
"model.perceiver_resampler.blocks.1.1.c_proj.weight": "model-00001-of-00004.safetensors",
|
332 |
+
"model.perceiver_resampler.blocks.1.1.fc.weight": "model-00001-of-00004.safetensors",
|
333 |
+
"model.perceiver_resampler.blocks.1.1.ln.bias": "model-00001-of-00004.safetensors",
|
334 |
+
"model.perceiver_resampler.blocks.1.1.ln.weight": "model-00001-of-00004.safetensors",
|
335 |
+
"model.perceiver_resampler.blocks.2.0.context_layer_norm.bias": "model-00001-of-00004.safetensors",
|
336 |
+
"model.perceiver_resampler.blocks.2.0.context_layer_norm.weight": "model-00001-of-00004.safetensors",
|
337 |
+
"model.perceiver_resampler.blocks.2.0.k_layer_norm.bias": "model-00001-of-00004.safetensors",
|
338 |
+
"model.perceiver_resampler.blocks.2.0.k_layer_norm.weight": "model-00001-of-00004.safetensors",
|
339 |
+
"model.perceiver_resampler.blocks.2.0.k_proj.weight": "model-00001-of-00004.safetensors",
|
340 |
+
"model.perceiver_resampler.blocks.2.0.latents_layer_norm.bias": "model-00001-of-00004.safetensors",
|
341 |
+
"model.perceiver_resampler.blocks.2.0.latents_layer_norm.weight": "model-00001-of-00004.safetensors",
|
342 |
+
"model.perceiver_resampler.blocks.2.0.output_proj.weight": "model-00001-of-00004.safetensors",
|
343 |
+
"model.perceiver_resampler.blocks.2.0.q_layer_norm.bias": "model-00001-of-00004.safetensors",
|
344 |
+
"model.perceiver_resampler.blocks.2.0.q_layer_norm.weight": "model-00001-of-00004.safetensors",
|
345 |
+
"model.perceiver_resampler.blocks.2.0.q_proj.weight": "model-00001-of-00004.safetensors",
|
346 |
+
"model.perceiver_resampler.blocks.2.0.v_proj.weight": "model-00001-of-00004.safetensors",
|
347 |
+
"model.perceiver_resampler.blocks.2.1.c_proj.weight": "model-00001-of-00004.safetensors",
|
348 |
+
"model.perceiver_resampler.blocks.2.1.fc.weight": "model-00001-of-00004.safetensors",
|
349 |
+
"model.perceiver_resampler.blocks.2.1.ln.bias": "model-00001-of-00004.safetensors",
|
350 |
+
"model.perceiver_resampler.blocks.2.1.ln.weight": "model-00001-of-00004.safetensors",
|
351 |
+
"model.perceiver_resampler.latents": "model-00001-of-00004.safetensors",
|
352 |
+
"model.perceiver_resampler.layer_norm.bias": "model-00001-of-00004.safetensors",
|
353 |
+
"model.perceiver_resampler.layer_norm.weight": "model-00001-of-00004.safetensors",
|
354 |
+
"model.vision_model.vision_model.embeddings.patch_embedding.bias": "model-00001-of-00004.safetensors",
|
355 |
+
"model.vision_model.vision_model.embeddings.patch_embedding.weight": "model-00001-of-00004.safetensors",
|
356 |
+
"model.vision_model.vision_model.embeddings.position_embedding.weight": "model-00001-of-00004.safetensors",
|
357 |
+
"model.vision_model.vision_model.encoder.layers.0.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
358 |
+
"model.vision_model.vision_model.encoder.layers.0.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
359 |
+
"model.vision_model.vision_model.encoder.layers.0.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
360 |
+
"model.vision_model.vision_model.encoder.layers.0.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
361 |
+
"model.vision_model.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
362 |
+
"model.vision_model.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
363 |
+
"model.vision_model.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
364 |
+
"model.vision_model.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
365 |
+
"model.vision_model.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
366 |
+
"model.vision_model.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
367 |
+
"model.vision_model.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
368 |
+
"model.vision_model.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
369 |
+
"model.vision_model.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
370 |
+
"model.vision_model.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
371 |
+
"model.vision_model.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
372 |
+
"model.vision_model.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
373 |
+
"model.vision_model.vision_model.encoder.layers.1.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
374 |
+
"model.vision_model.vision_model.encoder.layers.1.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
375 |
+
"model.vision_model.vision_model.encoder.layers.1.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
376 |
+
"model.vision_model.vision_model.encoder.layers.1.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
377 |
+
"model.vision_model.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
378 |
+
"model.vision_model.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
379 |
+
"model.vision_model.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
380 |
+
"model.vision_model.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
381 |
+
"model.vision_model.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
382 |
+
"model.vision_model.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
383 |
+
"model.vision_model.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
384 |
+
"model.vision_model.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
385 |
+
"model.vision_model.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
386 |
+
"model.vision_model.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
387 |
+
"model.vision_model.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
388 |
+
"model.vision_model.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
389 |
+
"model.vision_model.vision_model.encoder.layers.10.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
390 |
+
"model.vision_model.vision_model.encoder.layers.10.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
391 |
+
"model.vision_model.vision_model.encoder.layers.10.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
392 |
+
"model.vision_model.vision_model.encoder.layers.10.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
393 |
+
"model.vision_model.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
394 |
+
"model.vision_model.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
395 |
+
"model.vision_model.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
396 |
+
"model.vision_model.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
397 |
+
"model.vision_model.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
398 |
+
"model.vision_model.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
399 |
+
"model.vision_model.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
400 |
+
"model.vision_model.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
401 |
+
"model.vision_model.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
402 |
+
"model.vision_model.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
403 |
+
"model.vision_model.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
404 |
+
"model.vision_model.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
405 |
+
"model.vision_model.vision_model.encoder.layers.11.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
406 |
+
"model.vision_model.vision_model.encoder.layers.11.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
407 |
+
"model.vision_model.vision_model.encoder.layers.11.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
408 |
+
"model.vision_model.vision_model.encoder.layers.11.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
409 |
+
"model.vision_model.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
410 |
+
"model.vision_model.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
411 |
+
"model.vision_model.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
412 |
+
"model.vision_model.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
413 |
+
"model.vision_model.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
414 |
+
"model.vision_model.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
415 |
+
"model.vision_model.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
416 |
+
"model.vision_model.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
417 |
+
"model.vision_model.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
418 |
+
"model.vision_model.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
419 |
+
"model.vision_model.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
420 |
+
"model.vision_model.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
421 |
+
"model.vision_model.vision_model.encoder.layers.12.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
422 |
+
"model.vision_model.vision_model.encoder.layers.12.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
423 |
+
"model.vision_model.vision_model.encoder.layers.12.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
424 |
+
"model.vision_model.vision_model.encoder.layers.12.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
425 |
+
"model.vision_model.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
426 |
+
"model.vision_model.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
427 |
+
"model.vision_model.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
428 |
+
"model.vision_model.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
429 |
+
"model.vision_model.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
430 |
+
"model.vision_model.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
431 |
+
"model.vision_model.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
432 |
+
"model.vision_model.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
433 |
+
"model.vision_model.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
434 |
+
"model.vision_model.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
435 |
+
"model.vision_model.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
436 |
+
"model.vision_model.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
437 |
+
"model.vision_model.vision_model.encoder.layers.13.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
438 |
+
"model.vision_model.vision_model.encoder.layers.13.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
439 |
+
"model.vision_model.vision_model.encoder.layers.13.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
440 |
+
"model.vision_model.vision_model.encoder.layers.13.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
441 |
+
"model.vision_model.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
442 |
+
"model.vision_model.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
443 |
+
"model.vision_model.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
444 |
+
"model.vision_model.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
445 |
+
"model.vision_model.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
446 |
+
"model.vision_model.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
447 |
+
"model.vision_model.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
448 |
+
"model.vision_model.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
449 |
+
"model.vision_model.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
450 |
+
"model.vision_model.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
451 |
+
"model.vision_model.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
452 |
+
"model.vision_model.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
453 |
+
"model.vision_model.vision_model.encoder.layers.14.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
454 |
+
"model.vision_model.vision_model.encoder.layers.14.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
455 |
+
"model.vision_model.vision_model.encoder.layers.14.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
456 |
+
"model.vision_model.vision_model.encoder.layers.14.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
457 |
+
"model.vision_model.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
458 |
+
"model.vision_model.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
459 |
+
"model.vision_model.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
460 |
+
"model.vision_model.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
461 |
+
"model.vision_model.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
462 |
+
"model.vision_model.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
463 |
+
"model.vision_model.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
464 |
+
"model.vision_model.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
465 |
+
"model.vision_model.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
466 |
+
"model.vision_model.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
467 |
+
"model.vision_model.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
468 |
+
"model.vision_model.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
469 |
+
"model.vision_model.vision_model.encoder.layers.15.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
470 |
+
"model.vision_model.vision_model.encoder.layers.15.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
471 |
+
"model.vision_model.vision_model.encoder.layers.15.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
472 |
+
"model.vision_model.vision_model.encoder.layers.15.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
473 |
+
"model.vision_model.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
474 |
+
"model.vision_model.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
475 |
+
"model.vision_model.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
476 |
+
"model.vision_model.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
477 |
+
"model.vision_model.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
478 |
+
"model.vision_model.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
479 |
+
"model.vision_model.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
480 |
+
"model.vision_model.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
481 |
+
"model.vision_model.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
482 |
+
"model.vision_model.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
483 |
+
"model.vision_model.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
484 |
+
"model.vision_model.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
485 |
+
"model.vision_model.vision_model.encoder.layers.16.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
486 |
+
"model.vision_model.vision_model.encoder.layers.16.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
487 |
+
"model.vision_model.vision_model.encoder.layers.16.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
488 |
+
"model.vision_model.vision_model.encoder.layers.16.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
489 |
+
"model.vision_model.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
490 |
+
"model.vision_model.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
491 |
+
"model.vision_model.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
492 |
+
"model.vision_model.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
493 |
+
"model.vision_model.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
494 |
+
"model.vision_model.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
495 |
+
"model.vision_model.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
496 |
+
"model.vision_model.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
497 |
+
"model.vision_model.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
498 |
+
"model.vision_model.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
499 |
+
"model.vision_model.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
500 |
+
"model.vision_model.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
501 |
+
"model.vision_model.vision_model.encoder.layers.17.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
502 |
+
"model.vision_model.vision_model.encoder.layers.17.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
503 |
+
"model.vision_model.vision_model.encoder.layers.17.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
504 |
+
"model.vision_model.vision_model.encoder.layers.17.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
505 |
+
"model.vision_model.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
506 |
+
"model.vision_model.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
507 |
+
"model.vision_model.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
508 |
+
"model.vision_model.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
509 |
+
"model.vision_model.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
510 |
+
"model.vision_model.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
511 |
+
"model.vision_model.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
512 |
+
"model.vision_model.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
513 |
+
"model.vision_model.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
514 |
+
"model.vision_model.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
515 |
+
"model.vision_model.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
516 |
+
"model.vision_model.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
517 |
+
"model.vision_model.vision_model.encoder.layers.18.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
518 |
+
"model.vision_model.vision_model.encoder.layers.18.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
519 |
+
"model.vision_model.vision_model.encoder.layers.18.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
520 |
+
"model.vision_model.vision_model.encoder.layers.18.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
521 |
+
"model.vision_model.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
522 |
+
"model.vision_model.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
523 |
+
"model.vision_model.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
524 |
+
"model.vision_model.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
525 |
+
"model.vision_model.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
526 |
+
"model.vision_model.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
527 |
+
"model.vision_model.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
528 |
+
"model.vision_model.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
529 |
+
"model.vision_model.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
530 |
+
"model.vision_model.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
531 |
+
"model.vision_model.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
532 |
+
"model.vision_model.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
533 |
+
"model.vision_model.vision_model.encoder.layers.19.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
534 |
+
"model.vision_model.vision_model.encoder.layers.19.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
535 |
+
"model.vision_model.vision_model.encoder.layers.19.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
536 |
+
"model.vision_model.vision_model.encoder.layers.19.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
537 |
+
"model.vision_model.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
538 |
+
"model.vision_model.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
539 |
+
"model.vision_model.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
540 |
+
"model.vision_model.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
541 |
+
"model.vision_model.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
542 |
+
"model.vision_model.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
543 |
+
"model.vision_model.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
544 |
+
"model.vision_model.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
545 |
+
"model.vision_model.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
546 |
+
"model.vision_model.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
547 |
+
"model.vision_model.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
548 |
+
"model.vision_model.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
549 |
+
"model.vision_model.vision_model.encoder.layers.2.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
550 |
+
"model.vision_model.vision_model.encoder.layers.2.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
551 |
+
"model.vision_model.vision_model.encoder.layers.2.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
552 |
+
"model.vision_model.vision_model.encoder.layers.2.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
553 |
+
"model.vision_model.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
554 |
+
"model.vision_model.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
555 |
+
"model.vision_model.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
556 |
+
"model.vision_model.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
557 |
+
"model.vision_model.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
558 |
+
"model.vision_model.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
559 |
+
"model.vision_model.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
560 |
+
"model.vision_model.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
561 |
+
"model.vision_model.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
562 |
+
"model.vision_model.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
563 |
+
"model.vision_model.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
564 |
+
"model.vision_model.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
565 |
+
"model.vision_model.vision_model.encoder.layers.20.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
566 |
+
"model.vision_model.vision_model.encoder.layers.20.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
567 |
+
"model.vision_model.vision_model.encoder.layers.20.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
568 |
+
"model.vision_model.vision_model.encoder.layers.20.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
569 |
+
"model.vision_model.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
570 |
+
"model.vision_model.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
571 |
+
"model.vision_model.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
572 |
+
"model.vision_model.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
573 |
+
"model.vision_model.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
574 |
+
"model.vision_model.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
575 |
+
"model.vision_model.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
576 |
+
"model.vision_model.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
577 |
+
"model.vision_model.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
578 |
+
"model.vision_model.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
579 |
+
"model.vision_model.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
580 |
+
"model.vision_model.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
581 |
+
"model.vision_model.vision_model.encoder.layers.21.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
582 |
+
"model.vision_model.vision_model.encoder.layers.21.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
583 |
+
"model.vision_model.vision_model.encoder.layers.21.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
584 |
+
"model.vision_model.vision_model.encoder.layers.21.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
585 |
+
"model.vision_model.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
586 |
+
"model.vision_model.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
587 |
+
"model.vision_model.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
588 |
+
"model.vision_model.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
589 |
+
"model.vision_model.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
590 |
+
"model.vision_model.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
591 |
+
"model.vision_model.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
592 |
+
"model.vision_model.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
593 |
+
"model.vision_model.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
594 |
+
"model.vision_model.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
595 |
+
"model.vision_model.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
596 |
+
"model.vision_model.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
597 |
+
"model.vision_model.vision_model.encoder.layers.22.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
598 |
+
"model.vision_model.vision_model.encoder.layers.22.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
599 |
+
"model.vision_model.vision_model.encoder.layers.22.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
600 |
+
"model.vision_model.vision_model.encoder.layers.22.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
601 |
+
"model.vision_model.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
602 |
+
"model.vision_model.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
603 |
+
"model.vision_model.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
604 |
+
"model.vision_model.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
605 |
+
"model.vision_model.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
606 |
+
"model.vision_model.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
607 |
+
"model.vision_model.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
608 |
+
"model.vision_model.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
609 |
+
"model.vision_model.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
610 |
+
"model.vision_model.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
611 |
+
"model.vision_model.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
612 |
+
"model.vision_model.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
613 |
+
"model.vision_model.vision_model.encoder.layers.23.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
614 |
+
"model.vision_model.vision_model.encoder.layers.23.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
615 |
+
"model.vision_model.vision_model.encoder.layers.23.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
616 |
+
"model.vision_model.vision_model.encoder.layers.23.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
617 |
+
"model.vision_model.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
618 |
+
"model.vision_model.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
619 |
+
"model.vision_model.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
620 |
+
"model.vision_model.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
621 |
+
"model.vision_model.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
622 |
+
"model.vision_model.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
623 |
+
"model.vision_model.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
624 |
+
"model.vision_model.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
625 |
+
"model.vision_model.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
626 |
+
"model.vision_model.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
627 |
+
"model.vision_model.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
628 |
+
"model.vision_model.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
629 |
+
"model.vision_model.vision_model.encoder.layers.24.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
630 |
+
"model.vision_model.vision_model.encoder.layers.24.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
631 |
+
"model.vision_model.vision_model.encoder.layers.24.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
632 |
+
"model.vision_model.vision_model.encoder.layers.24.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
633 |
+
"model.vision_model.vision_model.encoder.layers.24.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
634 |
+
"model.vision_model.vision_model.encoder.layers.24.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
635 |
+
"model.vision_model.vision_model.encoder.layers.24.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
636 |
+
"model.vision_model.vision_model.encoder.layers.24.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
637 |
+
"model.vision_model.vision_model.encoder.layers.24.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
638 |
+
"model.vision_model.vision_model.encoder.layers.24.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
639 |
+
"model.vision_model.vision_model.encoder.layers.24.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
640 |
+
"model.vision_model.vision_model.encoder.layers.24.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
641 |
+
"model.vision_model.vision_model.encoder.layers.24.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
642 |
+
"model.vision_model.vision_model.encoder.layers.24.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
643 |
+
"model.vision_model.vision_model.encoder.layers.24.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
644 |
+
"model.vision_model.vision_model.encoder.layers.24.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
645 |
+
"model.vision_model.vision_model.encoder.layers.25.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
646 |
+
"model.vision_model.vision_model.encoder.layers.25.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
647 |
+
"model.vision_model.vision_model.encoder.layers.25.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
648 |
+
"model.vision_model.vision_model.encoder.layers.25.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
649 |
+
"model.vision_model.vision_model.encoder.layers.25.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
650 |
+
"model.vision_model.vision_model.encoder.layers.25.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
651 |
+
"model.vision_model.vision_model.encoder.layers.25.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
652 |
+
"model.vision_model.vision_model.encoder.layers.25.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
653 |
+
"model.vision_model.vision_model.encoder.layers.25.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
654 |
+
"model.vision_model.vision_model.encoder.layers.25.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
655 |
+
"model.vision_model.vision_model.encoder.layers.25.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
656 |
+
"model.vision_model.vision_model.encoder.layers.25.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
657 |
+
"model.vision_model.vision_model.encoder.layers.25.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
658 |
+
"model.vision_model.vision_model.encoder.layers.25.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
659 |
+
"model.vision_model.vision_model.encoder.layers.25.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
660 |
+
"model.vision_model.vision_model.encoder.layers.25.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
661 |
+
"model.vision_model.vision_model.encoder.layers.26.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
662 |
+
"model.vision_model.vision_model.encoder.layers.26.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
663 |
+
"model.vision_model.vision_model.encoder.layers.26.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
664 |
+
"model.vision_model.vision_model.encoder.layers.26.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
665 |
+
"model.vision_model.vision_model.encoder.layers.26.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
666 |
+
"model.vision_model.vision_model.encoder.layers.26.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
667 |
+
"model.vision_model.vision_model.encoder.layers.26.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
668 |
+
"model.vision_model.vision_model.encoder.layers.26.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
669 |
+
"model.vision_model.vision_model.encoder.layers.26.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
670 |
+
"model.vision_model.vision_model.encoder.layers.26.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
671 |
+
"model.vision_model.vision_model.encoder.layers.26.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
672 |
+
"model.vision_model.vision_model.encoder.layers.26.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
673 |
+
"model.vision_model.vision_model.encoder.layers.26.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
674 |
+
"model.vision_model.vision_model.encoder.layers.26.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
675 |
+
"model.vision_model.vision_model.encoder.layers.26.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
676 |
+
"model.vision_model.vision_model.encoder.layers.26.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
677 |
+
"model.vision_model.vision_model.encoder.layers.3.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
678 |
+
"model.vision_model.vision_model.encoder.layers.3.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
679 |
+
"model.vision_model.vision_model.encoder.layers.3.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
680 |
+
"model.vision_model.vision_model.encoder.layers.3.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
681 |
+
"model.vision_model.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
682 |
+
"model.vision_model.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
683 |
+
"model.vision_model.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
684 |
+
"model.vision_model.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
685 |
+
"model.vision_model.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
686 |
+
"model.vision_model.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
687 |
+
"model.vision_model.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
688 |
+
"model.vision_model.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
689 |
+
"model.vision_model.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
690 |
+
"model.vision_model.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
691 |
+
"model.vision_model.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
692 |
+
"model.vision_model.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
693 |
+
"model.vision_model.vision_model.encoder.layers.4.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
694 |
+
"model.vision_model.vision_model.encoder.layers.4.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
695 |
+
"model.vision_model.vision_model.encoder.layers.4.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
696 |
+
"model.vision_model.vision_model.encoder.layers.4.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
697 |
+
"model.vision_model.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
698 |
+
"model.vision_model.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
699 |
+
"model.vision_model.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
700 |
+
"model.vision_model.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
701 |
+
"model.vision_model.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
702 |
+
"model.vision_model.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
703 |
+
"model.vision_model.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
704 |
+
"model.vision_model.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
705 |
+
"model.vision_model.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
706 |
+
"model.vision_model.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
707 |
+
"model.vision_model.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
708 |
+
"model.vision_model.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
709 |
+
"model.vision_model.vision_model.encoder.layers.5.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
710 |
+
"model.vision_model.vision_model.encoder.layers.5.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
711 |
+
"model.vision_model.vision_model.encoder.layers.5.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
712 |
+
"model.vision_model.vision_model.encoder.layers.5.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
713 |
+
"model.vision_model.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
714 |
+
"model.vision_model.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
715 |
+
"model.vision_model.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
716 |
+
"model.vision_model.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
717 |
+
"model.vision_model.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
718 |
+
"model.vision_model.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
719 |
+
"model.vision_model.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
720 |
+
"model.vision_model.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
721 |
+
"model.vision_model.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
722 |
+
"model.vision_model.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
723 |
+
"model.vision_model.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
724 |
+
"model.vision_model.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
725 |
+
"model.vision_model.vision_model.encoder.layers.6.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
726 |
+
"model.vision_model.vision_model.encoder.layers.6.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
727 |
+
"model.vision_model.vision_model.encoder.layers.6.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
728 |
+
"model.vision_model.vision_model.encoder.layers.6.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
729 |
+
"model.vision_model.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
730 |
+
"model.vision_model.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
731 |
+
"model.vision_model.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
732 |
+
"model.vision_model.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
733 |
+
"model.vision_model.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
734 |
+
"model.vision_model.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
735 |
+
"model.vision_model.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
736 |
+
"model.vision_model.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
737 |
+
"model.vision_model.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
738 |
+
"model.vision_model.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
739 |
+
"model.vision_model.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
740 |
+
"model.vision_model.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
741 |
+
"model.vision_model.vision_model.encoder.layers.7.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
742 |
+
"model.vision_model.vision_model.encoder.layers.7.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
743 |
+
"model.vision_model.vision_model.encoder.layers.7.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
744 |
+
"model.vision_model.vision_model.encoder.layers.7.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
745 |
+
"model.vision_model.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
746 |
+
"model.vision_model.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
747 |
+
"model.vision_model.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
748 |
+
"model.vision_model.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
749 |
+
"model.vision_model.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
750 |
+
"model.vision_model.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
751 |
+
"model.vision_model.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
752 |
+
"model.vision_model.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
753 |
+
"model.vision_model.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
754 |
+
"model.vision_model.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
755 |
+
"model.vision_model.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
756 |
+
"model.vision_model.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
757 |
+
"model.vision_model.vision_model.encoder.layers.8.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
758 |
+
"model.vision_model.vision_model.encoder.layers.8.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
759 |
+
"model.vision_model.vision_model.encoder.layers.8.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
760 |
+
"model.vision_model.vision_model.encoder.layers.8.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
761 |
+
"model.vision_model.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
762 |
+
"model.vision_model.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
763 |
+
"model.vision_model.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
764 |
+
"model.vision_model.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
765 |
+
"model.vision_model.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
766 |
+
"model.vision_model.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
767 |
+
"model.vision_model.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
768 |
+
"model.vision_model.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
769 |
+
"model.vision_model.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
770 |
+
"model.vision_model.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
771 |
+
"model.vision_model.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
772 |
+
"model.vision_model.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
773 |
+
"model.vision_model.vision_model.encoder.layers.9.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
774 |
+
"model.vision_model.vision_model.encoder.layers.9.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
775 |
+
"model.vision_model.vision_model.encoder.layers.9.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
776 |
+
"model.vision_model.vision_model.encoder.layers.9.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
777 |
+
"model.vision_model.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
778 |
+
"model.vision_model.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
779 |
+
"model.vision_model.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
780 |
+
"model.vision_model.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
781 |
+
"model.vision_model.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
782 |
+
"model.vision_model.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
783 |
+
"model.vision_model.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
784 |
+
"model.vision_model.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
785 |
+
"model.vision_model.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
786 |
+
"model.vision_model.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
787 |
+
"model.vision_model.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
788 |
+
"model.vision_model.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
789 |
+
"model.vision_model.vision_model.head.attention.in_proj_bias": "model-00001-of-00004.safetensors",
|
790 |
+
"model.vision_model.vision_model.head.attention.in_proj_weight": "model-00001-of-00004.safetensors",
|
791 |
+
"model.vision_model.vision_model.head.attention.out_proj.bias": "model-00001-of-00004.safetensors",
|
792 |
+
"model.vision_model.vision_model.head.attention.out_proj.weight": "model-00001-of-00004.safetensors",
|
793 |
+
"model.vision_model.vision_model.head.layernorm.bias": "model-00001-of-00004.safetensors",
|
794 |
+
"model.vision_model.vision_model.head.layernorm.weight": "model-00001-of-00004.safetensors",
|
795 |
+
"model.vision_model.vision_model.head.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
796 |
+
"model.vision_model.vision_model.head.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
797 |
+
"model.vision_model.vision_model.head.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
798 |
+
"model.vision_model.vision_model.head.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
799 |
+
"model.vision_model.vision_model.head.probe": "model-00001-of-00004.safetensors",
|
800 |
+
"model.vision_model.vision_model.post_layernorm.bias": "model-00001-of-00004.safetensors",
|
801 |
+
"model.vision_model.vision_model.post_layernorm.weight": "model-00001-of-00004.safetensors"
|
802 |
+
}
|
803 |
+
}
|
modeling_vmistral.py
ADDED
@@ -0,0 +1,1764 @@
# coding=utf-8
# Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch VMistral model."""
from dataclasses import dataclass
import inspect
import math
import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.activations import ACT2FN
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    replace_return_docstrings,
)

from einops import rearrange, repeat
from transformers import PreTrainedModel
from transformers.utils import logging
from transformers.modeling_outputs import ModelOutput

from .configuration_vmistral import VMistralConfig
from .vision import SiglipVisionModel


if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa

    _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "VMistralConfig"

VMistral_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "HuggingFaceM4/VLM_WebSight_finetuned"
]


@dataclass
class VMistralBaseModelOutputWithPast(ModelOutput):
    """
    Base class for VMistral model's outputs that may also contain a past key/values (to speed up sequential decoding).

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.

            If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
            hidden_size)` is output.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`, and optionally, if
            `config.is_encoder_decoder=True`, 2 additional tensors of shape `(batch_size, num_heads,
            encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally, if
            `config.is_encoder_decoder=True`, in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
            Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size,
            num_images, sequence_length, hidden_size)`.

            image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
    """

    last_hidden_state: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class VMistralCausalLMOutputWithPast(ModelOutput):
    """
    Base class for VMistral causal language model (or autoregressive) outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
            Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size,
            num_images, sequence_length, hidden_size)`.

            image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    past_key_values: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None


def expand_inputs_for_generation(
    input_ids,
    expand_size=1,
    is_encoder_decoder=False,
    attention_mask=None,
    encoder_outputs=None,
    **model_kwargs,
):
    expanded_return_idx = (
        torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device)
    )
    input_ids = input_ids.index_select(0, expanded_return_idx)
    model_kwargs["pixel_values"] = model_kwargs.get("pixel_values", None)
    model_kwargs["image_hidden_states"] = model_kwargs.get("image_hidden_states", None)

    if "token_type_ids" in model_kwargs:
        token_type_ids = model_kwargs["token_type_ids"]
        model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx)

    if attention_mask is not None:
        model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx)

    if model_kwargs["pixel_values"] is not None:
        model_kwargs["pixel_values"] = model_kwargs["pixel_values"].index_select(0, expanded_return_idx)

    elif model_kwargs["image_hidden_states"] is not None:
        model_kwargs["image_hidden_states"] = model_kwargs["image_hidden_states"].index_select(0, expanded_return_idx)

    return input_ids, model_kwargs


def update_model_kwargs_for_generation(outputs, model_kwargs):
    # must have this key set to at least None
    if "past_key_values" in outputs:
        model_kwargs["past_key_values"] = outputs.past_key_values
    else:
        model_kwargs["past_key_values"] = None

    # update token_type_ids with last value
    if "token_type_ids" in model_kwargs:
        token_type_ids = model_kwargs["token_type_ids"]
        model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1)

    # update attention masks
    if "attention_mask" in model_kwargs:
        attention_mask = model_kwargs["attention_mask"]
        model_kwargs["attention_mask"] = torch.cat(
            [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
        )

    # Get the precomputed image_hidden_states
    model_kwargs["image_hidden_states"] = outputs.image_hidden_states

    return model_kwargs


def prepare_inputs_for_generation(input_ids, past_key_values=None, **kwargs):
    token_type_ids = kwargs.get("token_type_ids", None)
    # only last token for input_ids if past is defined in kwargs
    if past_key_values:
        input_ids = input_ids[:, -1].unsqueeze(-1)
        if token_type_ids is not None:
            token_type_ids = token_type_ids[:, -1].unsqueeze(-1)

    attention_mask = kwargs.get("attention_mask", None)
    position_ids = kwargs.get("position_ids", None)

    if attention_mask is not None and position_ids is None:
        # create position_ids on the fly for batch generation
        position_ids = attention_mask.long().cumsum(-1) - 1
        position_ids.masked_fill_(attention_mask == 0, 1)
        if past_key_values:
            position_ids = position_ids[:, -1].unsqueeze(-1)

    pixel_values = kwargs.get("pixel_values", None)
    image_hidden_states = kwargs.get("image_hidden_states", None)

    return {
        "input_ids": input_ids,
        "past_key_values": past_key_values,
        "use_cache": kwargs.get("use_cache"),
        "position_ids": position_ids,
        "attention_mask": attention_mask,
        "token_type_ids": token_type_ids,
        "pixel_values": pixel_values,
        "image_hidden_states": image_hidden_states,
    }
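
# Minimal usage sketch (illustrative only; the tensors below are hypothetical, not
# part of the checkpoint): the three helpers above cooperate during generation.
# `expand_inputs_for_generation` duplicates each batch row `expand_size` times
# (e.g. for multiple return sequences), and `prepare_inputs_for_generation` trims
# `input_ids` to the last token once a KV cache exists and derives `position_ids`
# from the attention mask.
def _sketch_generation_helpers():
    input_ids = torch.tensor([[1, 5, 9]])
    attention_mask = torch.ones_like(input_ids)
    ids, kwargs = expand_inputs_for_generation(input_ids, expand_size=2, attention_mask=attention_mask)
    assert ids.shape == (2, 3)  # each sequence duplicated twice
    step_inputs = prepare_inputs_for_generation(ids, past_key_values=None, **kwargs)
    assert step_inputs["position_ids"].tolist() == [[0, 1, 2], [0, 1, 2]]
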
def freeze_model(model, module_exceptions=[]):
    mapping = {
        "LayerNorm": nn.LayerNorm,
        "Linear": nn.Linear,
        "Embedding": nn.Embedding,
    }
    module_exceptions_mapped = [mapping[m] for m in module_exceptions]
    for module in model.modules():
        if module_exceptions and any(isinstance(module, t) for t in module_exceptions_mapped):
            module.requires_grad_(True)  # Explicitly setting it to true to avoid any mistakes
        else:
            module.requires_grad_(False)
    return model
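
# Minimal usage sketch (illustrative; the toy module is hypothetical): freeze
# every parameter of a module except the layer types listed by name in
# `module_exceptions`, which must match the keys of the mapping above.
def _sketch_freeze_model():
    toy = nn.Sequential(nn.Linear(4, 4), nn.LayerNorm(4))
    toy = freeze_model(toy, module_exceptions=["LayerNorm"])
    assert not toy[0].weight.requires_grad  # Linear frozen
    assert toy[1].weight.requires_grad  # LayerNorm kept trainable
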
class DecoupledEmbedding(nn.Embedding):
    # Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/sparse.html#Embedding
    """
    Implements a decoupling of parameters to allow freezing (or not) a subset of the embeddings.
    In practice, the regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if
    `num_additional_embeddings` > 0, then it will create `num_additional_embeddings` additional parameters that are
    always trained.
    If `num_additional_embeddings=0`, then the module defaults back to the regular behavior of `nn.Embedding`.
    """

    def __init__(
        self,
        num_embeddings,
        num_additional_embeddings,
        embedding_dim,
        partially_freeze=False,
        device=None,
        dtype=None,
        padding_idx=None,
        **kwargs,
    ) -> None:
        """
        num_additional_embeddings: int. Number of additional embeddings. Only useful when you `partially_freeze=True`.
        partially_freeze: bool. If True, the regular `weight` will be frozen. `additional_weight` is never frozen.

        Note: there are a lot of other parameters to initialize a standard `nn.Embedding` such as `padding_idx`,
        `max_norm` or `norm_type`. We are not supporting these.
        """
        if padding_idx is not None and padding_idx > num_embeddings:
            raise ValueError(f"padding_idx must be within num_embeddings. Got {padding_idx} and {num_embeddings}")
        super().__init__(
            num_embeddings=num_embeddings,
            embedding_dim=embedding_dim,
            device=device,
            dtype=dtype,
            padding_idx=padding_idx,
            **kwargs,
        )
        self.num_embeddings = num_embeddings
        self.padding_idx = padding_idx
        self.num_additional_embeddings = num_additional_embeddings
        self.partially_freeze = partially_freeze

        if partially_freeze:
            self.weight.requires_grad_(False)

        if self.num_additional_embeddings > 0:
            self.additional_embedding = nn.Embedding(
                num_embeddings=self.num_additional_embeddings,
                embedding_dim=embedding_dim,
                device=device,
                dtype=dtype,
            )

    def forward(self, input_ids):
        """
        We have 2 embeddings, with different indices - one pretrained self.weight and another
        self.additional_embedding.weight that is being trained.

        In order to make a lookup of the input ids, we:
        1. find out the indices of the entries belonging to the 2nd embedding
        2. extract those values while subtracting the size of the first embedding (num_embeddings),
           since the 2nd embedding starts from 0 and not num_embeddings
        3. perform the 2nd embedding lookup
        4. now we handle the 1st embedding, we overwrite indices belonging to the 2nd embedding with a padding index
        5. perform the 1st embedding lookup
        6. now we overwrite the values in the 1st embedding lookup with the values of the 2nd embedding lookup

        Note: for the 1st embedding lookup we could have looked up only the low indices and not do the padding, but
        then we have to create a new tensor and populate it with 2 tensors that are spread out across various indices -
        i.e. not a simple concat - I haven't benchmarked the complex case if it's any faster, given that seqlens are
        usually relatively short it's probably not faster or if faster not by much - but might be a good idea to
        measure.
        """
        if self.num_additional_embeddings == 0:
            # No additional table exists in this case, so fall back to a plain
            # lookup into the (possibly frozen) pretrained weight.
            return F.embedding(input_ids, self.weight)

        # Clone so that we don't modify the original input_ids later on
        input_ids = input_ids.clone()
        additional_vocab_indices = torch.where(input_ids >= self.num_embeddings)
        input_ids_additional_vocab = input_ids[additional_vocab_indices]
        additional_embeddings = self.additional_embedding(input_ids_additional_vocab - self.num_embeddings)

        # for successful lookup replace input_ids with 0, the results of these will be discarded anyway
        input_ids[additional_vocab_indices] = 0
        full_vector = F.embedding(input_ids, self.weight)

        # overwrite the records with high indices
        full_vector[additional_vocab_indices] = additional_embeddings

        return full_vector

    def extra_repr(self) -> str:
        return "num_embeddings={}, num_additional_embeddings={}, embedding_dim={}, partially_freeze={}".format(
            self.num_embeddings,
            self.num_additional_embeddings,
            self.embedding_dim,
            self.partially_freeze,
        )
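
# Minimal usage sketch (illustrative; sizes are hypothetical): 10 frozen
# pretrained embeddings plus 2 always-trainable extra rows. Ids >= 10 are routed
# to the additional table, ids < 10 to the frozen pretrained table.
def _sketch_decoupled_embedding():
    emb = DecoupledEmbedding(num_embeddings=10, num_additional_embeddings=2, embedding_dim=8, partially_freeze=True)
    out = emb(torch.tensor([[1, 9, 10, 11]]))
    assert out.shape == (1, 4, 8)
    assert not emb.weight.requires_grad
    assert emb.additional_embedding.weight.requires_grad
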
class DecoupledLinear(nn.Linear):
    # Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear
    """
    Implements a decoupling of parameters to allow freezing (or not) a subset of the parameters.
    In practice, the regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if
    `out_additional_features` > 0, then it will create `out_additional_features * in_features` additional parameters
    that are always trained.
    If `out_additional_features=0`, then the module defaults back to the regular behavior of `nn.Linear`.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        out_additional_features: int = 0,
        bias: bool = True,
        partially_freeze: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        """
        out_additional_features: int. Number of additional trainable dimensions. Only makes sense when
            `partially_freeze=True`.
        partially_freeze: bool. If True, the regular `weight` will be frozen and extra parameters (if any) will be
            trainable. If False, default to the regular behavior of nn.Linear.
        """
        super().__init__(in_features, out_features, bias, device, dtype)
        self.out_additional_features = out_additional_features
        self.partially_freeze = partially_freeze

        self.in_features = in_features
        self.out_features = out_features

        if partially_freeze:
            self.weight.requires_grad_(False)
            if bias:
                self.bias.requires_grad_(False)

        if out_additional_features > 0:
            self.additional_fc = nn.Linear(
                in_features=in_features,
                out_features=out_additional_features,
                bias=bias,
                device=device,
                dtype=dtype,
            )

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = F.linear(input, self.weight, self.bias)

        if self.out_additional_features > 0:
            additional_features = self.additional_fc(input)
            output = torch.cat((output, additional_features), -1)

        return output

    def extra_repr(self) -> str:
        """Overwriting `nn.Linear.extra_repr` to include new parameters."""
        return "in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}".format(
            self.in_features,
            self.out_features,
            self.out_additional_features,
            self.bias is not None,
            self.partially_freeze,
        )
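
# Minimal usage sketch (illustrative; sizes are hypothetical): a frozen 16->32
# projection with 4 extra trainable output dimensions; the forward pass returns
# the concatenation of both heads, i.e. 36 output features.
def _sketch_decoupled_linear():
    lin = DecoupledLinear(16, 32, out_additional_features=4, partially_freeze=True)
    assert lin(torch.randn(2, 16)).shape == (2, 36)
    assert not lin.weight.requires_grad
    assert lin.additional_fc.weight.requires_grad
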
class SwiGLU(nn.Module):
    def __init__(self, embed_dim) -> None:
        super().__init__()
        self.fc1 = nn.Linear(embed_dim, embed_dim, bias=False)
        self.fc2 = nn.Linear(embed_dim, embed_dim, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x_1 = self.fc1(x)
        x_1 = torch.mul(x_1, torch.sigmoid(x_1))  # silu(fc1(x))
        x_2 = self.fc2(x)
        x = torch.mul(x_1, x_2)
        return x


class ModalityProjection(nn.Module):
    def __init__(self, embed_dim_in, embed_dim_out) -> None:
        super().__init__()
        self.fc1 = nn.Linear(embed_dim_in, embed_dim_out, bias=False)
        self.act = SwiGLU(embed_dim_out)
        self.fc2 = nn.Linear(embed_dim_out, embed_dim_out, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.fc1(x)
        x = self.act(x)
        x = self.fc2(x)
        return x
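
# Minimal usage sketch (illustrative; 1152 is only an example input width, e.g.
# a SigLIP-sized vision feature, and 4096 an example language-model width): the
# modality projection maps vision features into the language-model space through
# the bias-free SwiGLU gate above, silu(fc1(x)) * fc2(x).
def _sketch_modality_projection():
    proj = ModalityProjection(embed_dim_in=1152, embed_dim_out=4096)
    assert proj(torch.randn(1, 64, 1152)).shape == (1, 64, 4096)
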
class PerceiverResampler(nn.Module):
    def __init__(
        self, embed_dim: int, depth: int, n_heads: int, head_dim: int, n_latents: int, qk_layer_norms: bool
    ) -> None:
        """
        Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet or ViT or
        MAE) of a given dimension, performs `depth` blocks of cross-attention with a fixed `n_latents` inputs, then
        returns a Tensor of shape [bsz, n_latents, embed_dim].
        :param embed_dim: Dimensionality of embeddings being fed to the Perceiver Resampler (also dimensionality of
                          latent embeddings *returned* by the Perceiver Resampler). Could be e.g. ViT embed_dim, ResNet
                          pool dim, and so on.
        :param depth: Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
        :param n_heads: Number of heads in each Transformer block (for multi-headed self-attention).
        :param head_dim: Dimensionality of each head projection in the Transformer block.
        :param n_latents: Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
        """
        super().__init__()
        self.embed_dim, self.n_heads, self.head_dim, self.n_latents = embed_dim, n_heads, head_dim, n_latents
        self.qk_layer_norms = qk_layer_norms

        # Create Latents for Perceiver
        self.latents = nn.Parameter(torch.ones(self.n_latents, self.embed_dim))

        self.intermediate_dim = self.embed_dim * 4
        # Create Transformer Blocks
        self.blocks = nn.ModuleList(
            [
                nn.ModuleList(
                    [
                        PerceiverAttention(self.embed_dim, self.n_heads, self.head_dim, self.qk_layer_norms),
                        MLP(self.embed_dim, self.intermediate_dim),
                    ]
                )
                for _ in range(depth)
            ]
        )
        self.layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(self, context: torch.Tensor) -> torch.Tensor:
        """Resample arbitrary length context & *compress* down to self.n_latents latent embeddings"""
        latents = repeat(self.latents, "seq embed -> bsz seq embed", bsz=context.shape[0])

        # Feed through Perceiver Attention blocks...
        for attn, ff in self.blocks:
            latents = attn(context, latents) + latents
            latents = ff(latents) + latents

        return self.layer_norm(latents)


class PerceiverAttention(nn.Module):
    def __init__(self, embed_dim: int, n_heads: int, head_dim: int, qk_layer_norms: bool) -> None:
        """Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`"""
        super().__init__()
        self.embed_dim, self.n_heads, self.head_dim = embed_dim, n_heads, head_dim
        self.qk_layer_norms = qk_layer_norms
        # Normalization & Scaling
        self.context_layer_norm = nn.LayerNorm(self.embed_dim)
        self.latents_layer_norm = nn.LayerNorm(self.embed_dim)
        if self.qk_layer_norms:
            self.q_layer_norm = nn.LayerNorm(self.head_dim)
            self.k_layer_norm = nn.LayerNorm(self.head_dim)

        self.qk_scale = self.head_dim**-0.5

        # Q, K, V Projection (no bias -- detail from Perceiver/Flamingo Papers).
        self.q_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False)

        self.output_proj = nn.Linear(self.n_heads * self.head_dim, self.embed_dim, bias=False)

    def forward(self, context: torch.Tensor, latents: torch.Tensor) -> torch.Tensor:
        """
        Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension!
        :param context: Tensor of shape [bsz, seq, embed_dim] representing long-form context to resample.
        :param latents: Tensor of shape [bsz, n_latents, embed_dim] representing fixed length latents to compress to.
        :return: Tensor of shape [bsz, n_latents, embed_dim] representing attention over latents w/ cross from context.
        """
        context = self.context_layer_norm(context)
        latents = self.latents_layer_norm(latents)

        # Query, Key, Value Projections --> Note that in Flamingo, latents are *concatenated* with context prior to attn!
        # Note: This results in queries w/ `seq = n_latents`, and keys, values with `seq = len(context) + n_latents`
        q = self.q_proj(latents)
        k = self.k_proj(torch.cat([context, latents], dim=-2))
        v = self.v_proj(torch.cat([context, latents], dim=-2))

        # Multiheaded Self-Attention w/ stable softmax (subtract per-row max -- `amax` -- before softmax call)
        # =>> `attn` should be a 2D matrix of shape [n_latents x (context + n_latents)]
        q, k, v = [rearrange(x, "bsz seq (heads embed) -> bsz heads seq embed", heads=self.n_heads) for x in (q, k, v)]
        if self.qk_layer_norms:
            q = self.q_layer_norm(q)
            k = self.k_layer_norm(k)

        scores = torch.einsum("... i d, ... j d -> ... i j", q * self.qk_scale, k)
        stabilized_scores = scores - (scores.amax(dim=-1, keepdim=True).detach())
        attn = stabilized_scores.softmax(dim=-1)

        # Attend & project back to output...
        resampled = torch.einsum("... i j, ... j d -> ... i d", attn, v)
        return self.output_proj(
            rearrange(resampled, "bsz heads seq embed -> bsz seq (heads embed)", heads=self.n_heads)
        )


class MLP(nn.Module):
    def __init__(self, embed_dim, intermediate_size):
        """Simple MLP block with intermediate_size and embedding size"""
        super().__init__()
        self.embed_dim = embed_dim
        self.ln = nn.LayerNorm(self.embed_dim)
        self.fc = nn.Linear(self.embed_dim, intermediate_size, bias=False)
        self.act = nn.ReLU()
        self.c_proj = nn.Linear(intermediate_size, self.embed_dim, bias=False)

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        hidden_states = self.ln(hidden_states)
        hidden_states = self.fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.c_proj(hidden_states)

        return hidden_states
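
# Minimal usage sketch (illustrative; all sizes are hypothetical): compress a
# 576-token image feature sequence down to 64 learned latents with the
# PerceiverResampler defined above.
def _sketch_perceiver_resampler():
    resampler = PerceiverResampler(embed_dim=32, depth=2, n_heads=4, head_dim=8, n_latents=64, qk_layer_norms=True)
    assert resampler(torch.randn(1, 576, 32)).shape == (1, 64, 32)
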
# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )
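
# Minimal usage sketch (illustrative; the mask is hypothetical): _get_unpad_data
# turns a padding mask into the flat token indices and cumulative sequence
# lengths that flash-attn's variable-length kernels expect.
def _sketch_get_unpad_data():
    mask = torch.tensor([[1, 1, 0], [1, 1, 1]])
    indices, cu_seqlens, max_len = _get_unpad_data(mask)
    assert indices.tolist() == [0, 1, 3, 4, 5]
    assert cu_seqlens.tolist() == [0, 2, 5]
    assert max_len == 3
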
# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Mistral
class MistralRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        MistralRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)
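
# Minimal numeric sketch (illustrative): with its default all-ones weight,
# MistralRMSNorm computes x / sqrt(mean(x**2) + eps), i.e. RMS normalization
# without mean subtraction, in float32 before casting back.
def _sketch_rms_norm():
    norm = MistralRMSNorm(hidden_size=8, eps=1e-6)
    x = torch.randn(2, 3, 8)
    ref = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
    assert torch.allclose(norm(x), ref, atol=1e-6)
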
# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Mistral
class MistralRotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)

        return (
            self.cos_cached[:seq_len].to(dtype=x.dtype),
            self.sin_cached[:seq_len].to(dtype=x.dtype),
        )


# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    cos = cos[position_ids].unsqueeze(1)  # [seq_len, dim] -> [batch_size, 1, seq_len, head_dim]
    sin = sin[position_ids].unsqueeze(1)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
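
# Minimal usage sketch (illustrative; shapes are hypothetical): rotary position
# embeddings rotate query/key pairs by a position-dependent angle, so vector
# norms are preserved while relative positions become encoded in the dot products.
def _sketch_rotary_embedding():
    rope = MistralRotaryEmbedding(dim=8, max_position_embeddings=32)
    q = torch.randn(1, 1, 4, 8)  # [bsz, num_heads, seq_len, head_dim]
    k = torch.randn(1, 1, 4, 8)
    cos, sin = rope(q, seq_len=4)
    position_ids = torch.arange(4).unsqueeze(0)
    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
    assert torch.allclose(q_rot.norm(dim=-1), q.norm(dim=-1), atol=1e-5)
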
class MistralMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))


# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
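
# Minimal usage sketch (illustrative; shapes are hypothetical): grouped-query
# attention with 8 query heads and 2 KV heads repeats each KV head 4 times so
# key/value shapes line up with the queries.
def _sketch_repeat_kv():
    kv = torch.randn(1, 2, 5, 16)  # [bsz, num_key_value_heads, seq_len, head_dim]
    assert repeat_kv(kv, n_rep=4).shape == (1, 8, 5, 16)
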
class MistralAttention(nn.Module):
    """
    Multi-headed attention from the 'Attention Is All You Need' paper. Modified to use sliding window attention:
    Longformer and "Generating Long Sequences with Sparse Transformers".
    """

    def __init__(self, config: VMistralConfig, qk_layer_norms: bool = False):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )

        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)

        self.qk_layer_norms = qk_layer_norms
        if self.qk_layer_norms:
            self.q_layer_norm = MistralRMSNorm(self.head_dim, eps=config.rms_norm_eps)
            self.k_layer_norm = MistralRMSNorm(self.head_dim, eps=config.rms_norm_eps)

        self.rotary_emb = MistralRotaryEmbedding(
            self.head_dim,
            max_position_embeddings=self.max_position_embeddings,
            base=self.rope_theta,
        )
        self.attention_dropout = config.attention_dropout

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use"
                " `attention_mask` instead."
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = (
            self.k_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        )
        value_states = (
            self.v_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        )

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        if self.qk_layer_norms:
            query_states = self.q_layer_norm(query_states)
            key_states = self.k_layer_norm(key_states)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )

            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

810 |
+
class MistralFlashAttention2(MistralAttention):
|
811 |
+
"""
|
812 |
+
Mistral flash attention module. This module inherits from `MistralAttention` as the weights of the module stays
|
813 |
+
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
|
814 |
+
flash attention and deal with padding tokens in case the input contains any of them.
|
815 |
+
"""
|
816 |
+
|
817 |
+
def forward(
|
818 |
+
self,
|
819 |
+
hidden_states: torch.Tensor,
|
820 |
+
attention_mask: Optional[torch.Tensor] = None,
|
821 |
+
position_ids: Optional[torch.LongTensor] = None,
|
822 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
823 |
+
output_attentions: bool = False,
|
824 |
+
use_cache: bool = False,
|
825 |
+
**kwargs,
|
826 |
+
):
|
827 |
+
if "padding_mask" in kwargs:
|
828 |
+
warnings.warn(
|
829 |
+
"Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use"
|
830 |
+
" `attention_mask` instead.`"
|
831 |
+
)
|
832 |
+
|
833 |
+
# overwrite attention_mask with padding_mask
|
834 |
+
attention_mask = kwargs.pop("padding_mask")
|
835 |
+
bsz, q_len, _ = hidden_states.size()
|
836 |
+
|
837 |
+
query_states = self.q_proj(hidden_states)
|
838 |
+
key_states = self.k_proj(hidden_states)
|
839 |
+
value_states = self.v_proj(hidden_states)
|
840 |
+
|
841 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
842 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
843 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
844 |
+
|
845 |
+
kv_seq_len = key_states.shape[-2]
|
846 |
+
if past_key_value is not None:
|
847 |
+
kv_seq_len += past_key_value[0].shape[-2]
|
848 |
+
|
849 |
+
# Because the input can be padded, the absolute sequence length depends on the max position id.
|
850 |
+
rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
|
851 |
+
cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
|
852 |
+
|
853 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
854 |
+
|
855 |
+
use_sliding_windows = (
|
856 |
+
_flash_supports_window_size
|
857 |
+
and hasattr(self.config, "sliding_window") is not None
|
858 |
+
and kv_seq_len > self.config.sliding_window
|
859 |
+
)
|
860 |
+
|
861 |
+
if not _flash_supports_window_size:
|
862 |
+
logger.warning_once(
|
863 |
+
"The current flash attention version does not support sliding window attention, for a more memory"
|
864 |
+
" efficient implementation make sure to upgrade flash-attn library."
|
865 |
+
)
|
866 |
+
|
867 |
+
if past_key_value is not None:
|
868 |
+
# Activate slicing cache only if the config has a value `sliding_windows` attribute
|
869 |
+
if hasattr(self.config, "sliding_window") and kv_seq_len > self.config.sliding_window:
|
870 |
+
slicing_tokens = kv_seq_len - self.config.sliding_window
|
871 |
+
|
872 |
+
past_key = past_key_value[0]
|
873 |
+
past_value = past_key_value[1]
|
874 |
+
|
875 |
+
past_key = past_key[:, :, slicing_tokens:, :].contiguous()
|
876 |
+
past_value = past_value[:, :, slicing_tokens:, :].contiguous()
|
877 |
+
|
878 |
+
if past_key.shape[-2] != self.config.sliding_window - 1:
|
879 |
+
raise ValueError(
|
880 |
+
"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1,"
|
881 |
+
f" head_dim`), got {past_key.shape}"
|
882 |
+
)
|
883 |
+
|
884 |
+
past_key_value = (past_key, past_value)
|
885 |
+
|
886 |
+
if attention_mask is not None:
|
887 |
+
attention_mask = attention_mask[:, slicing_tokens:]
|
888 |
+
attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
|
889 |
+
|
890 |
+
key_states = torch.cat([past_key_value[0], key_states], dim=2)
|
891 |
+
value_states = torch.cat([past_key_value[1], value_states], dim=2)
|
892 |
+
|
893 |
+
past_key_value = (key_states, value_states) if use_cache else None
|
894 |
+
|
895 |
+
# repeat k/v heads if n_kv_heads < n_heads
|
896 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
897 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
898 |
+
dropout_rate = 0.0 if not self.training else self.attention_dropout
|
899 |
+
|
900 |
+
# In PEFT, usually we cast the layer norms in float32 for training stability reasons
|
901 |
+
# therefore the input hidden states gets silently casted in float32. Hence, we need
|
902 |
+
# cast them back in float16 just to be sure everything works as expected.
|
903 |
+
input_dtype = query_states.dtype
|
904 |
+
if input_dtype == torch.float32:
|
905 |
+
# Handle the case where the model is quantized
|
906 |
+
if hasattr(self.config, "_pre_quantization_dtype"):
|
907 |
+
target_dtype = self.config._pre_quantization_dtype
|
908 |
+
else:
|
909 |
+
target_dtype = self.q_proj.weight.dtype
|
910 |
+
|
911 |
+
logger.warning_once(
|
912 |
+
"The input hidden states seems to be silently casted in float32, this might be related to the fact"
|
913 |
+
" you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
|
914 |
+
f" {target_dtype}."
|
915 |
+
)
|
916 |
+
|
917 |
+
query_states = query_states.to(target_dtype)
|
918 |
+
key_states = key_states.to(target_dtype)
|
919 |
+
value_states = value_states.to(target_dtype)
|
920 |
+
|
921 |
+
# Reashape to the expected shape for Flash Attention
|
922 |
+
query_states = query_states.transpose(1, 2)
|
923 |
+
key_states = key_states.transpose(1, 2)
|
924 |
+
value_states = value_states.transpose(1, 2)
|
925 |
+
|
926 |
+
attn_output = self._flash_attention_forward(
|
927 |
+
query_states,
|
928 |
+
key_states,
|
929 |
+
value_states,
|
930 |
+
attention_mask,
|
931 |
+
q_len,
|
932 |
+
dropout=dropout_rate,
|
933 |
+
use_sliding_windows=use_sliding_windows,
|
934 |
+
)
|
935 |
+
|
936 |
+
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
|
937 |
+
attn_output = self.o_proj(attn_output)
|
938 |
+
|
939 |
+
if not output_attentions:
|
940 |
+
attn_weights = None
|
941 |
+
|
942 |
+
return attn_output, attn_weights, past_key_value
|
943 |
+
|
    def _flash_attention_forward(
        self,
        query_states,
        key_states,
        value_states,
        attention_mask,
        query_length,
        dropout=0.0,
        softmax_scale=None,
        use_sliding_windows=False,
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
        first unpad the input, then compute the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
            use_sliding_windows (`bool`, *optional*):
                Whether to activate sliding window attention.
        """
        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            if not use_sliding_windows:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=self.is_causal,
                )
            else:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=self.is_causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            if not use_sliding_windows:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=self.is_causal,
                )
            else:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=self.is_causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

        return attn_output

    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape

        # On the first iteration we need to properly re-create the padding mask
        # by slicing it at the proper place
        if kv_seq_len != attention_mask.shape[-1]:
            attention_mask_num_tokens = attention_mask.shape[-1]
            attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]

        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)

        key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
        value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)

        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )

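# Illustrative note (added for clarity; not part of the original checkpoint code):
# during incremental decoding, query_length == 1, so `_upad_input` builds
# cu_seqlens_q = [0, 1, 2, ..., batch_size], i.e. one single-token query segment per
# example, while the keys/values keep their full unpadded lengths from the cache.
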
class MistralDecoderLayer(nn.Module):
    def __init__(self, config: VMistralConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = (
            MistralAttention(config=config)
            if not getattr(config, "_flash_attn_2_enabled", False)
            else MistralFlashAttention2(config)
        )
        self.mlp = MistralMLP(config)
        self.input_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use"
                " `attention_mask` instead."
            )
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs

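# Illustrative note (added for clarity; not part of the original checkpoint code):
# each decoder layer is the standard pre-norm residual block,
#   h = x + SelfAttn(RMSNorm(x))
#   y = h + MLP(RMSNorm(h))
# with normalization applied before each sub-layer rather than after it.
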
MISTRAL_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`VMistralConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Mistral Model outputting raw hidden-states without any specific head on top.",
    MISTRAL_START_DOCSTRING,
)
class VMistralPreTrainedModel(PreTrainedModel):
    config_class = VMistralConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MistralDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_sdpa = False

    def _init_weights(self, module):
        # important: this ported version of the model isn't meant for training from scratch - only
        # inference and fine-tuning - so the proper init weights code has been removed - the m4 code
        # base should be used for training from scratch and it contains the correct code.
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    # @classmethod
    # def override_vision_model_wrapper(cls, model, config, vision_model_name, vision_model_params, torch_dtype):
    #     # this can be called via from_pretrained from a class w/ head or w/o head so we extract the beheaded model version
    #     beheaded_model = model.model if hasattr(model, "model") else model
    #     cls.override_vision_model(beheaded_model, vision_model_name, vision_model_params, torch_dtype)
    #     beheaded_model.freeze_relevant_params(config)

MISTRAL_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

@add_start_docstrings(
    "The bare Mistral Model outputting raw hidden-states without any specific head on top.",
    MISTRAL_START_DOCSTRING,
)
class VMistralModel(VMistralPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MistralDecoderLayer`]

    Args:
        config: VMistralConfig
    """

    def __init__(self, config: VMistralConfig, vision_model=None):
        super().__init__(config)
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.sliding_window = config.sliding_window

        self.embed_tokens = DecoupledEmbedding(
            num_embeddings=config.vocab_size,
            num_additional_embeddings=config.additional_vocab_size,
            embedding_dim=config.hidden_size,
            partially_freeze=config.freeze_text_layers,
            padding_idx=self.padding_idx,
        )

        # Instantiate an uninitialized vision model here; `from_pretrained` will load the
        # pre-trained weights later - this avoids losing those weights in `from_pretrained`
        # on the main model
        self.vision_model = SiglipVisionModel(config.vision_config)

        # Dim projection - projecting from the vision dim to the text dim
        self.modality_projection = ModalityProjection(
            embed_dim_in=self.config.vision_config.hidden_size, embed_dim_out=self.config.hidden_size
        )

        # Perceiver Resampler
        if config.use_resampler:
            self.perceiver_resampler = PerceiverResampler(
                config.hidden_size,
                config.perceiver_config.resampler_depth,
                config.perceiver_config.resampler_n_heads,
                config.perceiver_config.resampler_head_dim,
                config.perceiver_config.resampler_n_latents,
                config.perceiver_config.qk_layer_norms_perceiver,
            )

        if config.use_resampler:
            self.image_seq_len = config.perceiver_config.resampler_n_latents
        else:
            self.image_seq_len = (
                config.vision_config.image_size // config.vision_config.patch_size
            ) ** 2  # TODO: pretty sure that does not work for CLIP models since there is the CLS token
        self.image_token_id = self.config.image_token_id

        self.layers = nn.ModuleList([MistralDecoderLayer(config) for _ in range(config.num_hidden_layers)])

        self.gradient_checkpointing = False

        self.norm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

        self.freeze_relevant_params(config)

    def freeze_relevant_params(self, config=None):
        if config is None:
            config = self.config

        if config.freeze_text_layers:
            self.freeze_text_layers(config.freeze_text_module_exceptions)

        if config.freeze_vision_layers:
            freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions)

    def freeze_text_layers(self, module_exceptions):
        for module in [self.layers, self.norm]:
            freeze_model(module, module_exceptions=module_exceptions)

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    def inputs_merger(
        self,
        input_ids: torch.LongTensor = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        image_hidden_states: Optional[torch.Tensor] = None,
    ):
        """
        This method aims at merging the token embeddings with the image hidden states into one single sequence of
        vectors that are fed to the transformer LM.
        The merging happens as follows:
        - The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_token_around_image> tok_4`.
        - We get the image hidden states for the image through the vision encoder (and potentially the perceiver),
          and that hidden state is then projected into the text embedding space.
          We thus have a sequence of image hidden states of size (1, image_seq_len, hidden_dim), where 1 is for a
          batch_size of 1 image and hidden_dim is the hidden_dim of the LM transformer.
        - The merging happens so that we obtain the following sequence: `vector_tok_1 vector_tok_2 vector_tok_3
          vector_fake_tok_around_image {sequence of image_seq_len image hidden states} vector_fake_tok_around_image
          vector_tok_4`. That sequence is fed to the LM.
        - To fit the format of that sequence, `input_ids`, `inputs_embeds`, and `attention_mask` are all adapted to
          insert the image hidden states.
        """
        batch_size = input_ids.size(0)

        if inputs_embeds is not None:
            new_inputs_embeds = inputs_embeds.clone()

        if image_hidden_states is not None:
            vision_pipeline_output_seq_len = image_hidden_states.shape[1]
            vision_hidden_size = image_hidden_states.shape[2]
            # Get the number of images for each example
            num_images = (input_ids == self.image_token_id).sum(dim=-1) // self.image_seq_len
            cum_num_images = num_images.cumsum(dim=-1)
            for batch_idx in range(batch_size):
                # Get the number of images for this particular example
                example_num_images = num_images[batch_idx]
                # Get the image_hidden_states corresponding to true images for the example, i.e. drop the padding images.
                start = 0 if batch_idx == 0 else cum_num_images[batch_idx - 1]
                end = cum_num_images[batch_idx]
                example_true_image_hidden_states = image_hidden_states[start:end]
                if (
                    new_inputs_embeds[batch_idx][input_ids[batch_idx] == self.image_token_id].shape[0]
                    != example_num_images * vision_pipeline_output_seq_len
                ):
                    raise ValueError(
                        "new_inputs_embeds to replace has shape[0]:"
                        f" {new_inputs_embeds[batch_idx][input_ids[batch_idx] == self.image_token_id].shape[0]} but"
                        " should have shape[0]:"
                        f" {example_num_images}*{vision_pipeline_output_seq_len}={example_num_images * vision_pipeline_output_seq_len} "
                    )
                # Insert the image_hidden_states
                new_inputs_embeds[batch_idx][input_ids[batch_idx] == self.image_token_id] = (
                    example_true_image_hidden_states.view(
                        example_num_images * vision_pipeline_output_seq_len,
                        vision_hidden_size,
                    )
                )

        return_dict = {}
        if inputs_embeds is not None:
            return_dict["inputs_embeds"] = new_inputs_embeds

        return return_dict

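    # Illustrative note (added for clarity; not part of the original checkpoint code):
    # with image_seq_len == 4, an input like
    #   tok_1 <fake_token_around_image> <image> <image> <image> <image> <fake_token_around_image> tok_2
    # keeps its token embeddings everywhere except at the four <image> positions, whose
    # embeddings are overwritten in place by the four projected vision hidden states.
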
    @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        image_hidden_states: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, VMistralBaseModelOutputWithPast]:
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        seq_length_with_past = seq_length
        past_key_values_length = 0

        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        # START VISUAL INPUTS INTEGRATION
        if pixel_values is not None and image_hidden_states is not None:
            raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time")
        elif pixel_values is not None:
            pixel_values = pixel_values.to(dtype=self.dtype, device=input_ids.device)  # fp16 compatibility
            batch_size, num_images = pixel_values.size(0), pixel_values.size(1)
            pixel_values = pixel_values.contiguous().view(batch_size * num_images, *pixel_values.shape[2:])
            # Remove padding images - padding images are full 0.
            real_images_inds = pixel_values.sum(dim=(-1, -2, -3)) != 0.0
            pixel_values = pixel_values[real_images_inds]
            # Get sequence from the vision encoder
            image_hidden_states = self.vision_model(pixel_values=pixel_values).last_hidden_state

            # Modality projection
            image_hidden_states = self.modality_projection(image_hidden_states)

            if self.config.use_resampler:
                image_hidden_states = self.perceiver_resampler(image_hidden_states)
        elif image_hidden_states is not None:
            image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device)

        if past_key_values is None:
            # When we generate, we don't want to replace the potential image_token_id that we generated by images
            # that simply don't exist
            new_inp = self.inputs_merger(
                input_ids=input_ids,
                inputs_embeds=inputs_embeds,
                image_hidden_states=image_hidden_states,
            )
            inputs_embeds = new_inp["inputs_embeds"]

        # Could add some token type embeddings here (image token vs text token),
        # something like inputs_embeds += self.token_types(token_types)

        # embed positions
        if (
            attention_mask is not None
            and hasattr(self.config, "_flash_attn_2_enabled")
            and self.config._flash_attn_2_enabled
            and past_key_values is not None
        ):
            is_padding_right = attention_mask[:, -1].sum().item() != batch_size
            if is_padding_right:
                raise ValueError(
                    "You are attempting to perform batched generation with padding_side='right'"
                    " this may lead to unexpected behaviour for Flash Attention version of Mistral. Make sure to "
                    " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
                )

        if getattr(self.config, "_flash_attn_2_enabled", False):
            # 2d mask is passed through the layers
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        else:
            # 4d mask is passed through the layers
            attention_mask = _prepare_4d_causal_attention_mask(
                attention_mask,
                (batch_size, seq_length),
                inputs_embeds,
                past_key_values_length,
                sliding_window=self.config.sliding_window,
            )
            attention_mask[attention_mask == -float("inf")] = torch.finfo(self.dtype).min

        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_value,
                    output_attentions,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, image_hidden_states]
                if v is not None
            )
        return VMistralBaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            image_hidden_states=image_hidden_states,
        )


class VMistralForVisionText2Text(VMistralPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config, vision_model=None):
        super().__init__(config)
        self.model = VMistralModel(config, vision_model=vision_model)
        self.image_token_id = self.config.image_token_id
        self.lm_head = DecoupledLinear(
            in_features=config.hidden_size,
            out_features=config.vocab_size,
            out_additional_features=config.additional_vocab_size,
            bias=False,
            partially_freeze=config.freeze_lm_head,
        )

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def tie_weights(self):
        """
        Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of DecoupledLinear and DecoupledEmbedding.
        """
        output_embeddings = self.get_output_embeddings()
        input_embeddings = self.get_input_embeddings()

        if getattr(self.config, "tie_word_embeddings", True):
            output_embeddings.weight = input_embeddings.weight
            if input_embeddings.num_additional_embeddings > 0:
                assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings
                output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight

        if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
            output_embeddings.out_features = input_embeddings.num_embeddings
            if hasattr(output_embeddings, "out_additional_features") and hasattr(
                input_embeddings, "num_additional_embeddings"
            ):
                output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings

    @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=VMistralCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        image_hidden_states: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, VMistralCausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        """

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            pixel_values=pixel_values,
            image_hidden_states=image_hidden_states,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        logits = logits.float()

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            # Shift so that tokens < n predict n
            if attention_mask is not None:
                shift_attention_mask = attention_mask[..., 1:].to(logits.device)
                shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous()
                shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous()
            else:
                shift_logits = logits[..., :-1, :].contiguous()
                shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=self.image_token_id)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return VMistralCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )

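    # Illustrative note (added for clarity; not part of the original checkpoint code):
    # the loss above uses the standard next-token shift: for tokens [t0, t1, t2, t3],
    # logits at positions 0..2 are scored against labels [t1, t2, t3], and positions
    # holding the <image> token are excluded via ignore_index=self.image_token_id.
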
1738 |
+
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
|
1739 |
+
image_hidden_states = kwargs.pop("image_hidden_states", None)
|
1740 |
+
if image_hidden_states is not None:
|
1741 |
+
kwargs["pixel_values"] = None
|
1742 |
+
inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs)
|
1743 |
+
unwanted_kwargs = ["token_type_ids"]
|
1744 |
+
for kwarg in unwanted_kwargs:
|
1745 |
+
inputs.pop(kwarg, None)
|
1746 |
+
return inputs
|
1747 |
+
|
1748 |
+
@staticmethod
|
1749 |
+
def _expand_inputs_for_generation(
|
1750 |
+
*args,
|
1751 |
+
**model_kwargs,
|
1752 |
+
):
|
1753 |
+
return expand_inputs_for_generation(*args, **model_kwargs)
|
1754 |
+
|
1755 |
+
@staticmethod
|
1756 |
+
def _update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder):
|
1757 |
+
return update_model_kwargs_for_generation(outputs, model_kwargs)
|
1758 |
+
|
1759 |
+
@staticmethod
|
1760 |
+
def _reorder_cache(past, beam_idx):
|
1761 |
+
reordered_past = ()
|
1762 |
+
for layer_past in past:
|
1763 |
+
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
|
1764 |
+
return reordered_past
|
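With modeling_vmistral.py complete, the checkpoint is loadable through the Hub's remote-code path. A minimal usage sketch follows; the repo id, the `<image>` prompt format, and the assumption that the `auto_map` in config.json resolves AutoModelForCausalLM to VMistralForVisionText2Text are all illustrative guesses, not guarantees made by this commit:

    import torch
    from PIL import Image
    from transformers import AutoModelForCausalLM, AutoProcessor

    checkpoint = "HuggingFaceM4/VLM_WebSight_finetuned"  # assumed repo id

    processor = AutoProcessor.from_pretrained(checkpoint, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint, torch_dtype=torch.bfloat16, trust_remote_code=True
    ).to("cuda")

    # A screenshot of the website component to convert into HTML/CSS.
    image = Image.open("screenshot.png")
    # The processor is assumed to expand the image placeholder into image_seq_len
    # <image> tokens framed by <fake_token_around_image>, matching inputs_merger above.
    inputs = processor(text="<image>", images=[image], return_tensors="pt").to("cuda")
    generated_ids = model.generate(**inputs, max_new_tokens=1024)
    print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])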
preprocessor_config.json
ADDED
@@ -0,0 +1,20 @@
{
  "auto_map": {
    "AutoProcessor": "IdeficsProcessor",
    "AutoImageProcessor": "IdeficsImageProcessor"
  },
  "image_num_channels": 3,
  "image_mean": [
    0.5,
    0.5,
    0.5
  ],
  "image_processor_type": "IdeficsImageProcessor",
  "image_size": 960,
  "image_std": [
    0.5,
    0.5,
    0.5
  ],
  "processor_class": "IdeficsProcessor"
}
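With image_mean and image_std both at 0.5 per channel, normalization maps a pixel value x in [0, 1] to (x - 0.5) / 0.5 = 2x - 1, i.e. into [-1, 1]. A small sketch of loading this image processor (the repo id is an assumption; `trust_remote_code` covers the case where the Idefics processing classes are resolved from the repo rather than from the installed transformers):

    from transformers import AutoImageProcessor

    image_processor = AutoImageProcessor.from_pretrained(
        "HuggingFaceM4/VLM_WebSight_finetuned",  # assumed repo id
        trust_remote_code=True,
    )
    # Per this config: 3-channel input, 960x960 images, then (x - 0.5) / 0.5 per channel.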
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
size 493443
tokenizer_config.json
ADDED
@@ -0,0 +1,56 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "32000": {
      "content": "<fake_token_around_image>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "32001": {
      "content": "<image>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": false,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<unk>",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": true
}
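The base SentencePiece vocabulary covers ids 0-31999; the two vision markers declared above, `<fake_token_around_image>` (32000) and `<image>` (32001), fall in the additional-vocabulary range served by the DecoupledEmbedding/DecoupledLinear pair in modeling_vmistral.py. A quick check, with the repo id again assumed:

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("HuggingFaceM4/VLM_WebSight_finetuned")
    print(tokenizer.convert_tokens_to_ids("<fake_token_around_image>"))  # expected: 32000
    print(tokenizer.convert_tokens_to_ids("<image>"))                    # expected: 32001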
vision.py
ADDED
@@ -0,0 +1,652 @@
# coding=utf-8
# Copyright 2023 Google AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" A simplified copy of https://huggingface.co/HuggingFaceM4/siglip-so400m-14-384-flash-attn2 """


from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.utils import (
    ModelOutput,
    is_flash_attn_2_available,
    logging,
)

from .configuration_vmistral import VMistralVisionConfig


logger = logging.get_logger(__name__)


if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa


# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )

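# Illustrative note (added for clarity; not part of the original checkpoint code):
# for attention_mask = [[1, 1, 0], [1, 1, 1]], _get_unpad_data returns
#   indices = [0, 1, 3, 4, 5]  (positions of real tokens in the flattened batch*seq axis),
#   cu_seqlens = [0, 2, 5]     (cumulative sequence lengths, the flash-attn varlen layout),
#   max_seqlen_in_batch = 3.
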
@dataclass
# Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Siglip
class SiglipVisionModelOutput(ModelOutput):
    """
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None

class SiglipVisionEmbeddings(nn.Module):
    def __init__(self, config: VMistralVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            padding="valid",
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, width, grid, grid]
        embeddings = patch_embeds.flatten(2).transpose(1, 2)

        embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings

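# Illustrative shape walkthrough (added for clarity; not part of the original checkpoint code):
# pixel_values (B, 3, S, S) --Conv2d, stride=P--> (B, D, S/P, S/P)
# --flatten(2).transpose(1, 2)--> (B, (S/P)**2, D),
# one D-dim embedding per patch, plus a learned position embedding per patch index.
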
# Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->Siglip
class SiglipAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        bsz, tgt_len, embed_dim = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        # apply the causal_attention_mask first
        if causal_attention_mask is not None:
            if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {causal_attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped

220 |
+
class SiglipFlashAttention2(SiglipAttention):
    """
    Siglip flash attention module. This module inherits from `SiglipAttention` as the weights of the module stay
    untouched. The only required change is in the forward pass, which needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_causal = False  # Hack to make sure we don't use a causal mask

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x num_heads x head_dim
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        # cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        # query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        # if past_key_value is not None:
        #     cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
        #     key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # TODO: These transposes are quite inefficient, but Flash Attention requires the layout
        # [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
        # to be able to avoid many of these transpose/reshape/view.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        dropout_rate = self.dropout if self.training else 0.0

        # In PEFT, the layer norms are usually cast to float32 for training stability,
        # so the input hidden states get silently cast to float32. We therefore need to
        # cast them back to the correct dtype to make sure everything works as expected.
        # This might slow down training & inference, so it is recommended not to cast the
        # LayerNorms to fp32. (LlamaRMSNorm handles it correctly.)

        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                "The input hidden states seem to have been silently cast to float32; this might be because"
                " you have upcast embedding or layer norm layers to float32. We will cast the input back to"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = self._flash_attention_forward(
            query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
        )

        attn_output = attn_output.reshape(bsz, q_len, self.embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights
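
    # A minimal sketch (not part of the model) of the dtype handling above: flash attention
    # kernels only support fp16/bf16, so fp32 inputs are downcast before the call, e.g.:
    #
    #     q = torch.randn(2, 64, 12, 64, dtype=torch.float32)  # hypothetical (bsz, q_len, heads, head_dim)
    #     target_dtype = torch.bfloat16                         # or the dtype of the projection weights
    #     q = q.to(target_dtype)                                # same for k and v
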
    def _flash_attention_forward(
        self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
        it first unpads the input, then computes the attention scores, and finally pads the output back.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
        """
        # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
        causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            attn_output = flash_attn_func(
                query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
            )

        return attn_output
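
    # A minimal sketch (not part of the model) of the variable-length metadata consumed by
    # flash_attn_varlen_func above. For a hypothetical padding mask
    #
    #     attention_mask = torch.tensor([[1, 1, 1, 0],
    #                                    [1, 1, 1, 1]])
    #
    # the two sequences have lengths 3 and 4, so the cumulative sequence lengths are
    # cu_seqlens = [0, 3, 7] (int32) and max_seqlen_in_batch = 4; the unpadded q/k/v
    # stack the 7 real tokens along a single first dimension.
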
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(
            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        value_layer = index_first_axis(
            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )
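
# A minimal sketch (not part of the model) of the unpad/pad round trip used by the flash
# attention path, assuming the `unpad_input`/`pad_input` helpers imported earlier in this file:
#
#     hidden = torch.randn(2, 4, 8)                        # hypothetical (batch, seq, dim)
#     mask = torch.tensor([[1, 1, 1, 0], [1, 1, 1, 1]])
#     unpadded, indices, cu_seqlens, max_len = unpad_input(hidden, mask)  # (7, 8)
#     restored = pad_input(unpadded, indices, 2, 4)        # back to (2, 4, 8), pads zeroed
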
# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Siglip
class SiglipMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states
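
# A minimal sketch (not part of the model) of the SiglipMLP defined above, with
# hypothetical sizes hidden_size=8 and intermediate_size=32:
#
#     x = torch.randn(2, 16, 8)   # (batch, seq, hidden_size)
#     x = fc1(x)                  # (2, 16, 32) -- expand to intermediate_size
#     x = activation_fn(x)        # e.g. gelu, per config.hidden_act
#     x = fc2(x)                  # (2, 16, 8)  -- project back to hidden_size
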
# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->Siglip
class SiglipEncoderLayer(nn.Module):
    def __init__(self, config: VMistralVisionConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = (
            SiglipAttention(config)
            if not getattr(config, "_flash_attn_2_enabled", False)
            else SiglipFlashAttention2(config)
        )
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = SiglipMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs
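
# A minimal sketch (not part of the model) of the pre-LayerNorm residual structure of
# SiglipEncoderLayer above, written as two update equations:
#
#     x = x + SelfAttention(LayerNorm1(x))
#     x = x + MLP(LayerNorm2(x))
#
# i.e. each sublayer reads a normalized copy of its input while the residual stream
# itself is never normalized in place.
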
# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->Siglip
class SiglipEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
    [`SiglipEncoderLayer`].

    Args:
        config: SiglipConfig
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(encoder_layer),
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
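
# A minimal sketch (not part of the model) of what the gradient-checkpointing branch in
# SiglipEncoder.forward does: layer activations are discarded in the forward pass and
# recomputed during backward, trading compute for memory:
#
#     out = torch.utils.checkpoint.checkpoint(layer_fn, hidden_states, mask, causal_mask)
#
# where layer_fn closes over `output_attentions` because `checkpoint` only forwards
# positional tensor arguments.
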
class SiglipVisionTransformer(nn.Module):
    def __init__(self, config: VMistralVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = SiglipVisionEmbeddings(config)
        self.encoder = SiglipEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.head = SiglipMultiheadAttentionPoolingHead(config)

    def forward(
        self,
        pixel_values,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:
            [`BaseModelOutputWithPooling`] if `return_dict=True`, otherwise a tuple of
            `(last_hidden_state, pooled_output)` followed by the remaining encoder outputs.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        hidden_states = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.post_layernorm(last_hidden_state)

        pooled_output = self.head(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
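
# A minimal sketch (not part of the model) of the SiglipVisionTransformer pipeline above,
# with hypothetical sizes image_size=224, patch_size=16, hidden_size=1152:
#
#     pixel_values : (bsz, 3, 224, 224)
#     embeddings   : (bsz, (224 // 16) ** 2, 1152)   # one token per patch
#     encoder      : same shape, num_hidden_layers pre-LN blocks
#     post_layernorm + head: pooled output of shape (bsz, 1152)
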
class SiglipMultiheadAttentionPoolingHead(nn.Module):
    """Multihead Attention Pooling."""

    def __init__(self, config: VMistralVisionConfig):
        super().__init__()

        self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size))
        self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp = SiglipMLP(config)

    def forward(self, hidden_state):
        batch_size = hidden_state.shape[0]
        probe = self.probe.repeat(batch_size, 1, 1)

        hidden_state = self.attention(probe, hidden_state, hidden_state)[0]

        residual = hidden_state
        hidden_state = self.layernorm(hidden_state)
        hidden_state = residual + self.mlp(hidden_state)

        return hidden_state[:, 0]
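
# A minimal sketch (not part of the model) of the attention pooling above, with a
# hypothetical hidden_size=8:
#
#     probe = torch.randn(1, 1, 8).repeat(bsz, 1, 1)  # one learned query per sample
#     pooled, _ = attention(probe, tokens, tokens)    # (bsz, 1, 8): probe attends over all patch tokens
#     pooled = pooled + mlp(layernorm(pooled))        # residual MLP refinement
#     summary = pooled[:, 0]                          # (bsz, 8) summary vector
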
class SiglipVisionModel(nn.Module):
    def __init__(self, config: VMistralVisionConfig):
        super().__init__()

        self.config = config
        self.vision_model = SiglipVisionTransformer(config)

    def forward(
        self,
        pixel_values,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
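
# A minimal usage sketch (not part of the model) for the vision tower defined above,
# assuming a `VMistralVisionConfig` with its default `image_size`/`hidden_size` fields:
#
#     config = VMistralVisionConfig()
#     model = SiglipVisionModel(config).eval()
#     pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
#     with torch.no_grad():
#         outputs = model(pixel_values)
#     outputs.last_hidden_state  # (1, num_patches, config.hidden_size)
#     outputs.pooler_output      # (1, config.hidden_size)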