gent committed on
Commit
5b5b080
1 Parent(s): 79ea85b

Add model files

Files changed (3)
  1. README.md +46 -0
  2. configuration_gmml.py +130 -0
  3. modeling_gmml.py +890 -0
README.md CHANGED
@@ -1,3 +1,49 @@
1
  ---
2
  license: apache-2.0
3
+ tags:
4
+ - self-supervised learning
5
+ - vision
6
+ - GMML
7
+ inference: false
8
+
9
  ---
10
+
11
+ # Model description
12
+
13
+ GMML (Group Masked Model Learning) is a self-supervised learning model that learns to reconstruct groups of masked patches in an image. The model is pretrained on ImageNet-1K.
14
+
15
+
16
+ # Model Sources
17
+
18
+ - https://github.com/Sara-Ahmed/GMML
19
+ - https://arxiv.org/abs/2205.14986
20
+
21
+
22
+ # Model Card Authors
23
+ Sara Atito, Muhammad Awais, Josef Kittler
24
+
25
+ # How to use
26
+
27
+ ```python
+ from configuration_gmml import ViTGMMLConfig
+ from modeling_gmml import ViTGMMLModel
+
+ # instantiate a randomly initialized model from the default configuration
+ config = ViTGMMLConfig()
+ model = ViTGMMLModel(config)
+
+ # push the model to the Hub under your own namespace (placeholder repository name)
+ model.push_to_hub("your-username/vit-gmml-base")
+
+ # reload
+ model = ViTGMMLModel.from_pretrained("your-username/vit-gmml-base")
+ ```
38
+
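+ The checkpoint name referenced in the modeling code is `erow/vit-gmml-base`. The following is a minimal sketch of a pre-training style forward pass; it assumes a pixel-level reconstruction mask with the same spatial size as the input, where 1 marks masked pixels:
+
+ ```python
+ import torch
+ from configuration_gmml import ViTGMMLConfig
+ from modeling_gmml import ViTGMMLForPreTraining
+
+ model = ViTGMMLForPreTraining(ViTGMMLConfig())
+
+ pixel_values = torch.randn(1, 3, 224, 224)  # dummy RGB image
+ mask = torch.zeros(1, 1, 224, 224)          # reconstruction mask (assumption: 1 = masked pixel)
+ mask[:, :, :112, :] = 1.0                   # e.g. mask the top half of the image
+
+ outputs = model(pixel_values, noise=mask)
+ print(outputs.loss, outputs.logits.shape)   # scalar loss, (1, 3, 224, 224) reconstruction
+ ```
+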
39
+ # BibTeX entry and citation info
40
+ ```
41
+ @inproceedings{atito2023gmml,
42
+ title={GMML is all you need},
43
+ author={Atito, Sara and Awais, Muhammed and Nandam, Srinivasa and Kittler, Josef},
44
+ booktitle={2023 IEEE International Conference on Image Processing (ICIP)},
45
+ pages={2125--2129},
46
+ year={2023},
47
+ organization={IEEE}
48
+ }
49
+ ```
configuration_gmml.py ADDED
@@ -0,0 +1,130 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ ViT GMML model configuration"""
16
+
17
+ from transformers import PretrainedConfig
18
+ from transformers import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ VIT_GMML_PRETRAINED_CONFIG_ARCHIVE_MAP = {
24
+ "erow/vit-GMML-base": "https://huggingface.co/erow/GMML/resolve/main/config.json",
25
+ }
26
+
27
+
28
+ class ViTGMMLConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`ViTGMMLModel`]. It is used to instantiate a ViT
+ GMML model according to the specified arguments, defining the model architecture. Instantiating a configuration with
+ the defaults will yield a similar configuration to that of the ViT GMML base architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ hidden_size (`int`, *optional*, defaults to 768):
40
+ Dimensionality of the encoder layers and the pooler layer.
41
+ num_hidden_layers (`int`, *optional*, defaults to 12):
42
+ Number of hidden layers in the Transformer encoder.
43
+ num_attention_heads (`int`, *optional*, defaults to 12):
44
+ Number of attention heads for each attention layer in the Transformer encoder.
45
+ intermediate_size (`int`, *optional*, defaults to 3072):
46
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
47
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
48
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
49
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
50
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
51
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
52
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
53
+ The dropout ratio for the attention probabilities.
54
+ initializer_range (`float`, *optional*, defaults to 0.02):
55
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
56
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
57
+ The epsilon used by the layer normalization layers.
58
+ image_size (`int`, *optional*, defaults to 224):
59
+ The size (resolution) of each image.
60
+ patch_size (`int`, *optional*, defaults to 16):
61
+ The size (resolution) of each patch.
62
+ num_channels (`int`, *optional*, defaults to 3):
63
+ The number of input channels.
64
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries, keys and values.
+ mask_ratio (`float`, *optional*, defaults to 0.75):
+ The ratio of the number of masked tokens in the input sequence.
79
+
80
+ Example:
81
+
82
+ ```python
83
+ >>> from transformers import ViTGMMLConfig, ViTGMMLModel
84
+
85
+ >>> # Initializing a ViT GMML vit-GMML-base style configuration
86
+ >>> configuration = ViTGMMLConfig()
87
+
88
+ >>> # Initializing a model (with random weights) from the vit-GMML-base style configuration
89
+ >>> model = ViTGMMLModel(configuration)
90
+
91
+ >>> # Accessing the model configuration
92
+ >>> configuration = model.config
93
+ ```"""
94
+
95
+ model_type = "vit_gmml"
96
+
97
+ def __init__(
98
+ self,
99
+ hidden_size=768,
100
+ num_hidden_layers=12,
101
+ num_attention_heads=12,
102
+ intermediate_size=3072,
103
+ hidden_act="gelu",
104
+ hidden_dropout_prob=0.0,
105
+ attention_probs_dropout_prob=0.0,
106
+ initializer_range=0.02,
107
+ layer_norm_eps=1e-12,
108
+ image_size=224,
109
+ patch_size=16,
110
+ num_channels=3,
111
+ qkv_bias=True,
112
+ mask_ratio=0.75,
113
+ **kwargs,
114
+ ):
115
+ super().__init__(**kwargs)
116
+
117
+ self.hidden_size = hidden_size
118
+ self.num_hidden_layers = num_hidden_layers
119
+ self.num_attention_heads = num_attention_heads
120
+ self.intermediate_size = intermediate_size
121
+ self.hidden_act = hidden_act
122
+ self.hidden_dropout_prob = hidden_dropout_prob
123
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
124
+ self.initializer_range = initializer_range
125
+ self.layer_norm_eps = layer_norm_eps
126
+ self.image_size = image_size
127
+ self.patch_size = patch_size
128
+ self.num_channels = num_channels
129
+ self.qkv_bias = qkv_bias
130
+ self.mask_ratio = mask_ratio
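+
+ # Note (descriptive comment): with the defaults above, the encoder processes
+ # (image_size / patch_size) ** 2 = (224 / 16) ** 2 = 196 patch tokens plus one CLS token,
+ # each of dimensionality hidden_size = 768.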
modeling_gmml.py ADDED
@@ -0,0 +1,890 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ViT GMML (masked autoencoder) model."""
16
+
17
+
18
+ import collections.abc
19
+ import math
20
+ from copy import deepcopy
21
+ from dataclasses import dataclass
22
+ from typing import Optional, Set, Tuple, Union
23
+
24
+ import numpy as np
25
+ import torch
26
+ import torch.utils.checkpoint
27
+ from torch import nn
28
+
29
+ from transformers.activations import ACT2FN
30
+ from transformers.modeling_outputs import BaseModelOutput
31
+ from transformers.modeling_utils import PreTrainedModel
32
+ from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
33
+ from transformers.utils import (
34
+ ModelOutput,
35
+ add_start_docstrings,
36
+ add_start_docstrings_to_model_forward,
37
+ logging,
38
+ replace_return_docstrings,
39
+ )
40
+ from configuration_gmml import ViTGMMLConfig
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+ _CONFIG_FOR_DOC = "ViTGMMLConfig"
46
+ _CHECKPOINT_FOR_DOC = "erow/vit-gmml-base"
47
+
48
+ VIT_GMML_PRETRAINED_MODEL_ARCHIVE_LIST = [
49
+ "erow/vit-gmml-base",
50
+ # See all ViTGMML models at https://huggingface.co/models?filter=vit_gmml
51
+ ]
52
+
53
+
54
+ @dataclass
55
+ class ViTGMMLModelOutput(ModelOutput):
56
+ """
57
+ Class for ViTGMMLModel's outputs, with potential hidden states and attentions.
58
+
59
+ Args:
60
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
61
+ Sequence of hidden-states at the output of the last layer of the model.
62
+ noise (`torch.FloatTensor`, *optional*):
+ The noise/mask tensor associated with the input, if one was provided (otherwise `None`).
66
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
67
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
68
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
69
+ plus the initial embedding outputs.
70
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
71
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
72
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
73
+ the self-attention heads.
74
+ """
75
+
76
+ last_hidden_state: torch.FloatTensor = None
77
+ noise: torch.LongTensor = None
78
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
79
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
80
+
81
+
82
+
83
+ @dataclass
84
+ class ViTGMMLForPreTrainingOutput(ModelOutput):
85
+ """
86
+ Class for ViTGMMLForPreTraining's outputs, with potential hidden states and attentions.
87
+
88
+ Args:
89
+ loss (`torch.FloatTensor` of shape `(1,)`):
90
+ Pixel reconstruction loss.
91
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, patch_size ** 2 * num_channels)`):
92
+ Pixel reconstruction logits.
93
+ mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
94
+ Tensor indicating which patches are masked (1) and which are not (0).
95
+ ids_restore (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
96
+ Tensor containing the original index of the (shuffled) masked patches.
97
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
98
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
99
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
100
+ plus the initial embedding outputs.
101
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
102
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
103
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
104
+ the self-attention heads.
105
+ """
106
+
107
+ loss: Optional[torch.FloatTensor] = None
108
+ logits: torch.FloatTensor = None
109
+ noise: torch.LongTensor = None
110
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
111
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
112
+
113
+
114
+ def get_2d_sincos_pos_embed(embed_dim, grid_size, add_cls_token=False):
115
+ """
116
+ Create 2D sin/cos positional embeddings.
117
+
118
+ Args:
119
+ embed_dim (`int`):
120
+ Embedding dimension.
121
+ grid_size (`int`):
122
+ The grid height and width.
123
+ add_cls_token (`bool`, *optional*, defaults to `False`):
124
+ Whether or not to add a classification (CLS) token.
125
+
126
+ Returns:
127
+ (`torch.FloatTensor` of shape (grid_size*grid_size, embed_dim) or (1+grid_size*grid_size, embed_dim): the
128
+ position embeddings (with or without classification token)
129
+ """
130
+ grid_h = np.arange(grid_size, dtype=np.float32)
131
+ grid_w = np.arange(grid_size, dtype=np.float32)
132
+ grid = np.meshgrid(grid_w, grid_h) # here w goes first
133
+ grid = np.stack(grid, axis=0)
134
+
135
+ grid = grid.reshape([2, 1, grid_size, grid_size])
136
+ pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
137
+ if add_cls_token:
138
+ pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
139
+ return pos_embed
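+
+ # For example (descriptive note): with embed_dim=768 and grid_size=14 (a 224x224 image
+ # with 16x16 patches), get_2d_sincos_pos_embed(768, 14, add_cls_token=True) returns an
+ # array of shape (1 + 14 * 14, 768) = (197, 768).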
140
+
141
+
142
+ def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
143
+ if embed_dim % 2 != 0:
144
+ raise ValueError("embed_dim must be even")
145
+
146
+ # use half of dimensions to encode grid_h
147
+ emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
148
+ emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
149
+
150
+ emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
151
+ return emb
152
+
153
+
154
+ def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
155
+ """
156
+ embed_dim: output dimension for each position pos: a list of positions to be encoded: size (M,) out: (M, D)
157
+ """
158
+ if embed_dim % 2 != 0:
159
+ raise ValueError("embed_dim must be even")
160
+
161
+ omega = np.arange(embed_dim // 2, dtype=float)
162
+ omega /= embed_dim / 2.0
163
+ omega = 1.0 / 10000**omega # (D/2,)
164
+
165
+ pos = pos.reshape(-1) # (M,)
166
+ out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product
167
+
168
+ emb_sin = np.sin(out) # (M, D/2)
169
+ emb_cos = np.cos(out) # (M, D/2)
170
+
171
+ emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
172
+ return emb
173
+
174
+
175
+ class ViTGMMLEmbeddings(nn.Module):
176
+ """
177
+ Construct the CLS token, position and patch embeddings.
178
+
179
+ """
180
+
181
+ def __init__(self, config):
182
+ super().__init__()
183
+
184
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
185
+ self.patch_embeddings = ViTGMMLPatchEmbeddings(config)
186
+ self.num_patches = self.patch_embeddings.num_patches
187
+ # fixed sin-cos embedding
188
+ self.position_embeddings = nn.Parameter(
189
+ torch.zeros(1, self.num_patches + 1, config.hidden_size), requires_grad=False
190
+ )
191
+ self.config = config
192
+ self.initialize_weights()
193
+
194
+ def initialize_weights(self):
195
+ # initialize (and freeze) position embeddings by sin-cos embedding
196
+ pos_embed = get_2d_sincos_pos_embed(
197
+ self.position_embeddings.shape[-1], int(self.patch_embeddings.num_patches**0.5), add_cls_token=True
198
+ )
199
+ self.position_embeddings.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))
200
+
201
+ # initialize patch_embeddings like nn.Linear (instead of nn.Conv2d)
202
+ w = self.patch_embeddings.projection.weight.data
203
+ torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
204
+
205
+ # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
206
+ torch.nn.init.normal_(self.cls_token, std=self.config.initializer_range)
207
+
208
+
209
+ def forward(self, pixel_values, noise=None):
210
+ batch_size, num_channels, height, width = pixel_values.shape
211
+ embeddings = self.patch_embeddings(pixel_values)
212
+
213
+ # add position embeddings w/o cls token
214
+ embeddings = embeddings + self.position_embeddings[:, 1:, :]
215
+
216
+ # append cls token
217
+ cls_token = self.cls_token + self.position_embeddings[:, :1, :]
218
+ cls_tokens = cls_token.expand(embeddings.shape[0], -1, -1)
219
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
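+ # embeddings: (batch_size, 1 + num_patches, hidden_size), i.e. the CLS token followed by the patch tokens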
220
+
221
+ return embeddings
222
+
223
+ class ViTGMMLPatchEmbeddings(nn.Module):
224
+ """
225
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
226
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
227
+ Transformer.
228
+ """
229
+
230
+ def __init__(self, config):
231
+ super().__init__()
232
+ image_size, patch_size = config.image_size, config.patch_size
233
+ num_channels, hidden_size = config.num_channels, config.hidden_size
234
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
235
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
236
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
237
+ self.image_size = image_size
238
+ self.patch_size = patch_size
239
+ self.num_channels = num_channels
240
+ self.num_patches = num_patches
241
+
242
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
243
+
244
+ def forward(self, pixel_values):
245
+ batch_size, num_channels, height, width = pixel_values.shape
246
+ if num_channels != self.num_channels:
247
+ raise ValueError(
248
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
249
+ )
250
+ if height != self.image_size[0] or width != self.image_size[1]:
251
+ raise ValueError(
252
+ f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
253
+ )
254
+ x = self.projection(pixel_values).flatten(2).transpose(1, 2)
255
+ return x
256
+
257
+
258
+ # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention ViT->ViTGMML
259
+ class ViTGMMLSelfAttention(nn.Module):
260
+ def __init__(self, config: ViTGMMLConfig) -> None:
261
+ super().__init__()
262
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
263
+ raise ValueError(
264
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
265
+ f"heads {config.num_attention_heads}."
266
+ )
267
+
268
+ self.num_attention_heads = config.num_attention_heads
269
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
270
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
271
+
272
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
273
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
274
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
275
+
276
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
277
+
278
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
279
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
280
+ x = x.view(new_x_shape)
281
+ return x.permute(0, 2, 1, 3)
282
+
283
+ def forward(
284
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
285
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
286
+ mixed_query_layer = self.query(hidden_states)
287
+
288
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
289
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
290
+ query_layer = self.transpose_for_scores(mixed_query_layer)
291
+
292
+ # Take the dot product between "query" and "key" to get the raw attention scores.
293
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
294
+
295
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
296
+
297
+ # Normalize the attention scores to probabilities.
298
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
299
+
300
+ # This is actually dropping out entire tokens to attend to, which might
301
+ # seem a bit unusual, but is taken from the original Transformer paper.
302
+ attention_probs = self.dropout(attention_probs)
303
+
304
+ # Mask heads if we want to
305
+ if head_mask is not None:
306
+ attention_probs = attention_probs * head_mask
307
+
308
+ context_layer = torch.matmul(attention_probs, value_layer)
309
+
310
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
311
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
312
+ context_layer = context_layer.view(new_context_layer_shape)
313
+
314
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
315
+
316
+ return outputs
317
+
318
+
319
+ # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->ViTGMML
320
+ class ViTGMMLSelfOutput(nn.Module):
321
+ """
322
+ The residual connection is defined in ViTGMMLLayer instead of here (as is the case with other models), due to the
323
+ layernorm applied before each block.
324
+ """
325
+
326
+ def __init__(self, config: ViTGMMLConfig) -> None:
327
+ super().__init__()
328
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
329
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
330
+
331
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
332
+ hidden_states = self.dense(hidden_states)
333
+ hidden_states = self.dropout(hidden_states)
334
+
335
+ return hidden_states
336
+
337
+
338
+ # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->ViTGMML
339
+ class ViTGMMLAttention(nn.Module):
340
+ def __init__(self, config: ViTGMMLConfig) -> None:
341
+ super().__init__()
342
+ self.attention = ViTGMMLSelfAttention(config)
343
+ self.output = ViTGMMLSelfOutput(config)
344
+ self.pruned_heads = set()
345
+
346
+ def prune_heads(self, heads: Set[int]) -> None:
347
+ if len(heads) == 0:
348
+ return
349
+ heads, index = find_pruneable_heads_and_indices(
350
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
351
+ )
352
+
353
+ # Prune linear layers
354
+ self.attention.query = prune_linear_layer(self.attention.query, index)
355
+ self.attention.key = prune_linear_layer(self.attention.key, index)
356
+ self.attention.value = prune_linear_layer(self.attention.value, index)
357
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
358
+
359
+ # Update hyper params and store pruned heads
360
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
361
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
362
+ self.pruned_heads = self.pruned_heads.union(heads)
363
+
364
+ def forward(
365
+ self,
366
+ hidden_states: torch.Tensor,
367
+ head_mask: Optional[torch.Tensor] = None,
368
+ output_attentions: bool = False,
369
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
370
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions)
371
+
372
+ attention_output = self.output(self_outputs[0], hidden_states)
373
+
374
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
375
+ return outputs
376
+
377
+
378
+ # Copied from transformers.models.vit.modeling_vit.ViTIntermediate ViT->ViTGMML
379
+ class ViTGMMLIntermediate(nn.Module):
380
+ def __init__(self, config: ViTGMMLConfig) -> None:
381
+ super().__init__()
382
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
383
+ if isinstance(config.hidden_act, str):
384
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
385
+ else:
386
+ self.intermediate_act_fn = config.hidden_act
387
+
388
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
389
+ hidden_states = self.dense(hidden_states)
390
+ hidden_states = self.intermediate_act_fn(hidden_states)
391
+
392
+ return hidden_states
393
+
394
+
395
+ # Copied from transformers.models.vit.modeling_vit.ViTOutput ViT->ViTGMML
396
+ class ViTGMMLOutput(nn.Module):
397
+ def __init__(self, config: ViTGMMLConfig) -> None:
398
+ super().__init__()
399
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
400
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
401
+
402
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
403
+ hidden_states = self.dense(hidden_states)
404
+ hidden_states = self.dropout(hidden_states)
405
+
406
+ hidden_states = hidden_states + input_tensor
407
+
408
+ return hidden_states
409
+
410
+
411
+ # Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->ViTGMML
412
+ class ViTGMMLLayer(nn.Module):
413
+ """This corresponds to the Block class in the timm implementation."""
414
+
415
+ def __init__(self, config: ViTGMMLConfig) -> None:
416
+ super().__init__()
417
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
418
+ self.seq_len_dim = 1
419
+ self.attention = ViTGMMLAttention(config)
420
+ self.intermediate = ViTGMMLIntermediate(config)
421
+ self.output = ViTGMMLOutput(config)
422
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
423
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
424
+
425
+ def forward(
426
+ self,
427
+ hidden_states: torch.Tensor,
428
+ head_mask: Optional[torch.Tensor] = None,
429
+ output_attentions: bool = False,
430
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
431
+ self_attention_outputs = self.attention(
432
+ self.layernorm_before(hidden_states), # in ViTGMML, layernorm is applied before self-attention
433
+ head_mask,
434
+ output_attentions=output_attentions,
435
+ )
436
+ attention_output = self_attention_outputs[0]
437
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
438
+
439
+ # first residual connection
440
+ hidden_states = attention_output + hidden_states
441
+
442
+ # in ViTGMML, layernorm is also applied after self-attention
443
+ layer_output = self.layernorm_after(hidden_states)
444
+ layer_output = self.intermediate(layer_output)
445
+
446
+ # second residual connection is done here
447
+ layer_output = self.output(layer_output, hidden_states)
448
+
449
+ outputs = (layer_output,) + outputs
450
+
451
+ return outputs
452
+
453
+
454
+ # Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->ViTGMML
455
+ class ViTGMMLEncoder(nn.Module):
456
+ def __init__(self, config: ViTGMMLConfig) -> None:
457
+ super().__init__()
458
+ self.config = config
459
+ self.layer = nn.ModuleList([ViTGMMLLayer(config) for _ in range(config.num_hidden_layers)])
460
+ self.gradient_checkpointing = False
461
+
462
+ def forward(
463
+ self,
464
+ hidden_states: torch.Tensor,
465
+ head_mask: Optional[torch.Tensor] = None,
466
+ output_attentions: bool = False,
467
+ output_hidden_states: bool = False,
468
+ return_dict: bool = True,
469
+ ) -> Union[tuple, BaseModelOutput]:
470
+ all_hidden_states = () if output_hidden_states else None
471
+ all_self_attentions = () if output_attentions else None
472
+
473
+ for i, layer_module in enumerate(self.layer):
474
+ if output_hidden_states:
475
+ all_hidden_states = all_hidden_states + (hidden_states,)
476
+
477
+ layer_head_mask = head_mask[i] if head_mask is not None else None
478
+
479
+ if self.gradient_checkpointing and self.training:
480
+ layer_outputs = self._gradient_checkpointing_func(
481
+ layer_module.__call__,
482
+ hidden_states,
483
+ layer_head_mask,
484
+ output_attentions,
485
+ )
486
+ else:
487
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
488
+
489
+ hidden_states = layer_outputs[0]
490
+
491
+ if output_attentions:
492
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
493
+
494
+ if output_hidden_states:
495
+ all_hidden_states = all_hidden_states + (hidden_states,)
496
+
497
+ if not return_dict:
498
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
499
+ return BaseModelOutput(
500
+ last_hidden_state=hidden_states,
501
+ hidden_states=all_hidden_states,
502
+ attentions=all_self_attentions,
503
+ )
504
+
505
+
506
+ class ViTGMMLPreTrainedModel(PreTrainedModel):
507
+ """
508
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
509
+ models.
510
+ """
511
+
512
+ config_class = ViTGMMLConfig
513
+ base_model_prefix = "vit"
514
+ main_input_name = "pixel_values"
515
+ supports_gradient_checkpointing = True
516
+
517
+ def _init_weights(self, module):
518
+ """Initialize the weights"""
519
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
520
+ # Slightly different from the TF version which uses truncated_normal for initialization
521
+ # cf https://github.com/pytorch/pytorch/pull/5617
522
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
523
+ if module.bias is not None:
524
+ module.bias.data.zero_()
525
+ elif isinstance(module, nn.LayerNorm):
526
+ module.bias.data.zero_()
527
+ module.weight.data.fill_(1.0)
528
+
529
+
530
+ VIT_GMML_START_DOCSTRING = r"""
531
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
532
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
533
+ behavior.
534
+
535
+ Parameters:
536
+ config ([`ViTGMMLConfig`]): Model configuration class with all the parameters of the model.
537
+ Initializing with a config file does not load the weights associated with the model, only the
538
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
539
+ """
540
+
541
+ VIT_GMML_INPUTS_DOCSTRING = r"""
542
+ Args:
543
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
544
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
545
+ for details.
546
+
547
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
548
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
549
+
550
+ - 1 indicates the head is **not masked**,
551
+ - 0 indicates the head is **masked**.
552
+
553
+ output_attentions (`bool`, *optional*):
554
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
555
+ tensors for more detail.
556
+ output_hidden_states (`bool`, *optional*):
557
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
558
+ more detail.
559
+ return_dict (`bool`, *optional*):
560
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
561
+ """
562
+
563
+
564
+ @add_start_docstrings(
565
+ "The bare ViTGMML Model transformer outputting raw hidden-states without any specific head on top.",
566
+ VIT_GMML_START_DOCSTRING,
567
+ )
568
+ class ViTGMMLModel(ViTGMMLPreTrainedModel):
569
+ def __init__(self, config):
570
+ super().__init__(config)
571
+ self.config = config
572
+
573
+ self.embeddings = ViTGMMLEmbeddings(config)
574
+ self.encoder = ViTGMMLEncoder(config)
575
+
576
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
577
+
578
+ # Initialize weights and apply final processing
579
+ self.post_init()
580
+
581
+ def get_input_embeddings(self):
582
+ return self.embeddings.patch_embeddings
583
+
584
+ def _prune_heads(self, heads_to_prune):
585
+ """
586
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
587
+ class PreTrainedModel
588
+ """
589
+ for layer, heads in heads_to_prune.items():
590
+ self.encoder.layer[layer].attention.prune_heads(heads)
591
+
592
+ @add_start_docstrings_to_model_forward(VIT_GMML_INPUTS_DOCSTRING)
593
+ @replace_return_docstrings(output_type=ViTGMMLModelOutput, config_class=_CONFIG_FOR_DOC)
594
+ def forward(
595
+ self,
596
+ pixel_values: Optional[torch.FloatTensor] = None,
597
+ noise: Optional[torch.FloatTensor] = None,
598
+ head_mask: Optional[torch.FloatTensor] = None,
599
+ output_attentions: Optional[bool] = None,
600
+ output_hidden_states: Optional[bool] = None,
601
+ return_dict: Optional[bool] = None,
602
+ ) -> Union[Tuple, ViTGMMLModelOutput]:
603
+ r"""
604
+ Returns:
605
+
606
+ Examples:
607
+
608
+ ```python
609
+ >>> from transformers import AutoImageProcessor, ViTGMMLModel
610
+ >>> from PIL import Image
611
+ >>> import requests
612
+
613
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
614
+ >>> image = Image.open(requests.get(url, stream=True).raw)
615
+
616
+ >>> image_processor = AutoImageProcessor.from_pretrained("erow/vit-gmml-base")
617
+ >>> model = ViTGMMLModel.from_pretrained("erow/vit-gmml-base")
618
+
619
+ >>> inputs = image_processor(images=image, return_tensors="pt")
620
+ >>> outputs = model(**inputs)
621
+ >>> last_hidden_states = outputs.last_hidden_state
622
+ ```"""
623
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
624
+ output_hidden_states = (
625
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
626
+ )
627
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
628
+
629
+ if pixel_values is None:
630
+ raise ValueError("You have to specify pixel_values")
631
+
632
+ # Prepare head mask if needed
633
+ # 1.0 in head_mask indicate we keep the head
634
+ # attention_probs has shape bsz x n_heads x N x N
635
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
636
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
637
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
638
+
639
+ embedding_output = self.embeddings(pixel_values, noise=noise)
640
+
641
+ encoder_outputs = self.encoder(
642
+ embedding_output,
643
+ head_mask=head_mask,
644
+ output_attentions=output_attentions,
645
+ output_hidden_states=output_hidden_states,
646
+ return_dict=return_dict,
647
+ )
648
+ sequence_output = encoder_outputs[0]
649
+ sequence_output = self.layernorm(sequence_output)
650
+
651
+ if not return_dict:
652
+ return (sequence_output, ) + encoder_outputs[1:]
653
+
654
+ return ViTGMMLModelOutput(
655
+ last_hidden_state=sequence_output,
656
+ hidden_states=encoder_outputs.hidden_states,
657
+ attentions=encoder_outputs.attentions,
658
+ )
659
+
660
+
661
+ class GMMLDecoder(nn.Module):
662
+ def __init__(self, in_dim, in_chans=3, patch_size=16):
663
+ super().__init__()
664
+
665
+ layers = [nn.Linear(in_dim, in_dim)]
666
+ layers.append(nn.GELU())
667
+ layers.append(nn.Linear(in_dim, in_dim))
668
+ layers.append(nn.GELU())
669
+ layers.append(nn.Linear(in_dim, in_dim))
670
+ layers.append(nn.GELU())
671
+
672
+ self.mlp = nn.Sequential(*layers)
673
+ self.apply(self._init_weights)
674
+
675
+ self.convTrans = nn.ConvTranspose2d(in_dim, in_chans, kernel_size=(patch_size, patch_size),
676
+ stride=(patch_size, patch_size))
677
+
678
+
679
+ def _init_weights(self, m):
680
+ if isinstance(m, nn.Linear):
681
+ torch.nn.init.normal_(m.weight, std=.02)
682
+ if isinstance(m, nn.Linear) and m.bias is not None:
683
+ nn.init.constant_(m.bias, 0)
684
+
685
+ def forward(self, x):
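+ # Shape walkthrough (descriptive comments, assuming 224x224 inputs with 16x16 patches):
+ # x: (batch_size, num_patches, in_dim) patch tokens from the encoder (CLS token removed by the caller),
+ # after the MLP, transpose and unflatten: (batch_size, in_dim, 14, 14),
+ # after the transposed convolution: (batch_size, in_chans, 224, 224), i.e. a reconstructed image.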
686
+ x = self.mlp(x)
687
+
688
+ x_rec = x.transpose(1, 2)
689
+ out_sz = tuple( ( int(math.sqrt(x_rec.size()[2])) , int(math.sqrt(x_rec.size()[2])) ) )
690
+ x_rec = self.convTrans(x_rec.unflatten(2, out_sz))
691
+ return x_rec
692
+
693
+
694
+ @add_start_docstrings(
695
+ """The ViTGMML Model transformer with the decoder on top for self-supervised pre-training.
696
+
697
+ <Tip>
698
+
699
+ Note that pre-training code for this model is available in the original GMML repository:
+ [Sara-Ahmed/GMML](https://github.com/Sara-Ahmed/GMML).
701
+
702
+ </Tip>
703
+
704
+ """,
705
+ VIT_GMML_START_DOCSTRING,
706
+ )
707
+ class ViTGMMLForPreTraining(ViTGMMLPreTrainedModel):
708
+ def __init__(self, config):
709
+ super().__init__(config)
710
+ self.config = config
711
+
712
+ self.vit = ViTGMMLModel(config)
713
+ self.decoder = GMMLDecoder(config.hidden_size, config.num_channels, config.patch_size)
714
+
715
+ # Initialize weights and apply final processing
716
+ self.post_init()
717
+
718
+ def get_input_embeddings(self):
719
+ return self.vit.embeddings.patch_embeddings
720
+
721
+ def _prune_heads(self, heads_to_prune):
722
+ """
723
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
724
+ class PreTrainedModel
725
+ """
726
+ for layer, heads in heads_to_prune.items():
727
+ self.vit.encoder.layer[layer].attention.prune_heads(heads)
728
+
729
+ def patchify(self, pixel_values):
730
+ """
731
+ Args:
732
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
733
+ Pixel values.
734
+
735
+ Returns:
736
+ `torch.FloatTensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:
737
+ Patchified pixel values.
738
+ """
739
+ patch_size, num_channels = self.config.patch_size, self.config.num_channels
740
+ # sanity checks
741
+ if (pixel_values.shape[2] != pixel_values.shape[3]) or (pixel_values.shape[2] % patch_size != 0):
742
+ raise ValueError("Make sure the pixel values have a squared size that is divisible by the patch size")
743
+ if pixel_values.shape[1] != num_channels:
744
+ raise ValueError(
745
+ "Make sure the number of channels of the pixel values is equal to the one set in the configuration"
746
+ )
747
+
748
+ # patchify
749
+ batch_size = pixel_values.shape[0]
750
+ num_patches_one_direction = pixel_values.shape[2] // patch_size
751
+ patchified_pixel_values = pixel_values.reshape(
752
+ batch_size, num_channels, num_patches_one_direction, patch_size, num_patches_one_direction, patch_size
753
+ )
754
+ patchified_pixel_values = torch.einsum("nchpwq->nhwpqc", patchified_pixel_values)
755
+ patchified_pixel_values = patchified_pixel_values.reshape(
756
+ batch_size, num_patches_one_direction * num_patches_one_direction, patch_size**2 * num_channels
757
+ )
758
+ return patchified_pixel_values
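+
+ # For example (descriptive note): with the default configuration (224x224 images, 16x16 patches,
+ # 3 channels), `patchify` maps (batch_size, 3, 224, 224) to (batch_size, 196, 768), where
+ # 196 = (224 / 16) ** 2 and 768 = 16 * 16 * 3.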
759
+
760
+ def unpatchify(self, patchified_pixel_values):
761
+ """
762
+ Args:
763
+ patchified_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:
764
+ Patchified pixel values.
765
+
766
+ Returns:
767
+ `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`:
768
+ Pixel values.
769
+ """
770
+ patch_size, num_channels = self.config.patch_size, self.config.num_channels
771
+ num_patches_one_direction = int(patchified_pixel_values.shape[1] ** 0.5)
772
+ # sanity check
773
+ if num_patches_one_direction**2 != patchified_pixel_values.shape[1]:
774
+ raise ValueError("Make sure that the number of patches can be squared")
775
+
776
+ # unpatchify
777
+ batch_size = patchified_pixel_values.shape[0]
778
+ patchified_pixel_values = patchified_pixel_values.reshape(
779
+ batch_size,
780
+ num_patches_one_direction,
781
+ num_patches_one_direction,
782
+ patch_size,
783
+ patch_size,
784
+ num_channels,
785
+ )
786
+ patchified_pixel_values = torch.einsum("nhwpqc->nchpwq", patchified_pixel_values)
787
+ pixel_values = patchified_pixel_values.reshape(
788
+ batch_size,
789
+ num_channels,
790
+ num_patches_one_direction * patch_size,
791
+ num_patches_one_direction * patch_size,
792
+ )
793
+ return pixel_values
794
+
795
+ def forward_loss(self, pixel_values, pred, mask):
796
+ """
797
+ Args:
798
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
799
+ Pixel values.
800
+ pred (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Reconstructed pixel values produced by the decoder.
+ mask (`torch.FloatTensor` broadcastable to `(batch_size, num_channels, height, width)`):
+ Tensor indicating which pixels are masked (1) and which are not (0). If `None`, the loss is averaged over all pixels.
804
+
805
+ Returns:
806
+ `torch.FloatTensor`: Pixel reconstruction loss.
807
+ """
808
+ target = pixel_values
809
+ # `pred` is already a full reconstructed image of shape (batch_size, num_channels, height, width)
+ loss = (pred - target) ** 2
+
+ if mask is None:
+ return loss.mean() # no reconstruction mask provided: average over all pixels
+ loss = (loss * mask).sum() / mask.sum() # mean loss on masked pixels
814
+ return loss
815
+
816
+ @add_start_docstrings_to_model_forward(VIT_GMML_INPUTS_DOCSTRING)
817
+ @replace_return_docstrings(output_type=ViTGMMLForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
818
+ def forward(
819
+ self,
820
+ pixel_values: Optional[torch.FloatTensor] = None,
821
+ noise: Optional[torch.FloatTensor] = None,
822
+ head_mask: Optional[torch.FloatTensor] = None,
823
+ output_attentions: Optional[bool] = None,
824
+ output_hidden_states: Optional[bool] = None,
825
+ return_dict: Optional[bool] = None,
826
+ ) -> Union[Tuple, ViTGMMLForPreTrainingOutput]:
827
+ r"""
828
+ Returns:
829
+
830
+ Examples:
831
+
832
+ ```python
833
+ >>> from transformers import AutoImageProcessor, ViTGMMLForPreTraining
834
+ >>> from PIL import Image
835
+ >>> import requests
836
+
837
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
838
+ >>> image = Image.open(requests.get(url, stream=True).raw)
839
+
840
+ >>> image_processor = AutoImageProcessor.from_pretrained("erow/vit-gmml-base")
841
+ >>> model = ViTGMMLForPreTraining.from_pretrained("erow/vit-gmml-base")
842
+
843
+ >>> inputs = image_processor(images=image, return_tensors="pt")
844
+ >>> outputs = model(**inputs)
845
+ >>> loss = outputs.loss
846
+ >>> mask = outputs.mask
847
+ >>> ids_restore = outputs.ids_restore
848
+ ```"""
849
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
850
+
851
+ outputs = self.vit(
852
+ pixel_values,
853
+ head_mask=head_mask,
854
+ output_attentions=output_attentions,
855
+ output_hidden_states=output_hidden_states,
856
+ return_dict=return_dict,
857
+ )
858
+
859
+ latent = outputs[0][:, 1:, :] # drop the CLS token so the sequence maps back onto the patch grid
+
+ logits = self.decoder(latent) # reconstructed image of shape (batch_size, num_channels, height, width)
862
+
863
+ loss = self.forward_loss(pixel_values, logits, noise)
864
+
865
+ if not return_dict:
866
+ output = (logits,) + outputs[1:]
867
+ return ((loss,) + output) if loss is not None else output
868
+
869
+ return ViTGMMLForPreTrainingOutput(
870
+ loss=loss,
871
+ logits=logits,
872
+ noise=noise,
873
+ hidden_states=outputs.hidden_states,
874
+ attentions=outputs.attentions,
875
+ )
876
+
877
+
878
+ if __name__ == "__main__":
+
+ # Initializing a ViT GMML base style configuration
+ configuration = ViTGMMLConfig()
+
+ # Initializing a model (with random weights) from that configuration
884
+ model = ViTGMMLModel(configuration)
885
+
886
+ # Accessing the model configuration
887
+ configuration = model.config
888
+
889
+ x = torch.randn(1, 3, 224, 224)
890
+ output = model(x)
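+
+ # Additional smoke test for the pre-training head (a minimal sketch; the pixel-level
+ # reconstruction mask below is illustrative, with 1 marking masked pixels).
+ pretraining_model = ViTGMMLForPreTraining(configuration)
+ mask = torch.zeros(1, 1, 224, 224)
+ mask[:, :, :112, :] = 1.0
+ pretraining_output = pretraining_model(x, noise=mask)
+ print(output.last_hidden_state.shape, pretraining_output.loss, pretraining_output.logits.shape)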