anicolson committed on
Commit
e0e245b
·
verified ·
1 Parent(s): b15910e

Upload model

Browse files
config.json CHANGED
@@ -1,5 +1,4 @@
1
  {
2
- "_commit_hash": null,
3
  "architectures": [
4
  "SingleCXREncoderDecoderModel"
5
  ],
@@ -78,7 +77,6 @@
78
  "top_p": 1.0,
79
  "torch_dtype": null,
80
  "torchscript": false,
81
- "transformers_version": "4.31.0",
82
  "type_vocab_size": 2,
83
  "typical_p": 1.0,
84
  "use_bfloat16": false,
@@ -2243,7 +2241,6 @@
2243
  "top_p": 1.0,
2244
  "torch_dtype": "float32",
2245
  "torchscript": false,
2246
- "transformers_version": "4.31.0",
2247
  "typical_p": 1.0,
2248
  "use_bfloat16": false
2249
  },
@@ -2251,5 +2248,5 @@
2251
  "model_type": "vision-encoder-decoder",
2252
  "tie_word_embeddings": false,
2253
  "torch_dtype": "float32",
2254
- "transformers_version": null
2255
  }
 
1
  {
 
2
  "architectures": [
3
  "SingleCXREncoderDecoderModel"
4
  ],
 
77
  "top_p": 1.0,
78
  "torch_dtype": null,
79
  "torchscript": false,
 
80
  "type_vocab_size": 2,
81
  "typical_p": 1.0,
82
  "use_bfloat16": false,
 
2241
  "top_p": 1.0,
2242
  "torch_dtype": "float32",
2243
  "torchscript": false,
 
2244
  "typical_p": 1.0,
2245
  "use_bfloat16": false
2246
  },
 
2248
  "model_type": "vision-encoder-decoder",
2249
  "tie_word_embeddings": false,
2250
  "torch_dtype": "float32",
2251
+ "transformers_version": "4.36.2"
2252
  }
generation_config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
  "_from_model_config": true,
3
  "pad_token_id": 0,
4
- "transformers_version": "4.31.0"
5
  }
 
1
  {
2
  "_from_model_config": true,
3
  "pad_token_id": 0,
4
+ "transformers_version": "4.36.2"
5
  }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:403d5035a0416014f4eb226ee57b5ec3ed79911e144b22ac7ceba34c64059370
3
+ size 449521072
modelling_single.py CHANGED
@@ -6,7 +6,7 @@ import transformers
6
  from torch.nn import CrossEntropyLoss
7
  from transformers import PreTrainedTokenizerFast, VisionEncoderDecoderModel
8
  from transformers.configuration_utils import PretrainedConfig
9
- from transformers.modeling_outputs import BaseModelOutput, Seq2SeqLMOutput
10
  from transformers.modeling_utils import PreTrainedModel
11
  from transformers.models.vision_encoder_decoder.configuration_vision_encoder_decoder import \
12
  VisionEncoderDecoderConfig
@@ -21,10 +21,6 @@ class CvtWithProjectionHeadConfig(transformers.CvtConfig):
21
  self.projection_size = projection_size
22
 
23
 
24
- class ModelOutputWithProjectionEmbedding(transformers.modeling_outputs.ModelOutput):
25
- last_hidden_state: torch.FloatTensor
26
-
27
-
28
  class CvtProjectionHead(torch.nn.Module):
29
 
30
  def __init__(self, config) -> None:
@@ -58,7 +54,7 @@ class CvtWithProjectionHead(transformers.CvtPreTrainedModel):
58
  pixel_values: Optional[torch.Tensor] = None,
59
  output_hidden_states: Optional[bool] = None,
60
  return_dict: Optional[bool] = None,
61
- ) -> Union[Tuple, ModelOutputWithProjectionEmbedding]:
62
 
63
  return_dict = return_dict if return_dict is not None else self.config.use_return_dict
64
 
@@ -75,7 +71,7 @@ class CvtWithProjectionHead(transformers.CvtPreTrainedModel):
75
  if not return_dict:
76
  return projection
77
 
78
- return ModelOutputWithProjectionEmbedding(
79
  last_hidden_state=projection,
80
  )
81
 
 
6
  from torch.nn import CrossEntropyLoss
7
  from transformers import PreTrainedTokenizerFast, VisionEncoderDecoderModel
8
  from transformers.configuration_utils import PretrainedConfig
9
+ from transformers.modeling_outputs import BaseModelOutput, ModelOutput, Seq2SeqLMOutput
10
  from transformers.modeling_utils import PreTrainedModel
11
  from transformers.models.vision_encoder_decoder.configuration_vision_encoder_decoder import \
12
  VisionEncoderDecoderConfig
 
21
  self.projection_size = projection_size
22
 
23
 
 
 
 
 
24
  class CvtProjectionHead(torch.nn.Module):
25
 
26
  def __init__(self, config) -> None:
 
54
  pixel_values: Optional[torch.Tensor] = None,
55
  output_hidden_states: Optional[bool] = None,
56
  return_dict: Optional[bool] = None,
57
+ ) -> Union[Tuple, ModelOutput]:
58
 
59
  return_dict = return_dict if return_dict is not None else self.config.use_return_dict
60
 
 
71
  if not return_dict:
72
  return projection
73
 
74
+ return ModelOutput(
75
  last_hidden_state=projection,
76
  )
77