justinpinkney and valhalla committed
Commit a430b6b
1 Parent(s): 2ddbd90

use-clip-vision-head (#4)

- use only vision model (e1f3a0855d0e0cf9eca99de331597226797ad902)


Co-authored-by: Suraj Patil <[email protected]>
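Summary of the change: the repo's image encoder is stripped down from a full CLIPModel (text tower plus vision tower) to just the vision tower and its projection head, served as a CLIPVisionModelWithProjection. Alongside that, the feature extractor config is migrated to the newer CLIPImageProcessor schema, and the pipeline class in model_index.json moves from the custom StableDiffusionImageEmbedPipeline to the StableDiffusionImageVariationPipeline that ships with diffusers 0.8. The remaining edits are version bumps and newly serialized defaults from transformers 4.25.0.dev0 and diffusers 0.8.0.dev0.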

feature_extractor/preprocessor_config.json CHANGED
@@ -1,8 +1,12 @@
 {
-  "crop_size": 224,
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
   "do_center_crop": true,
   "do_convert_rgb": true,
   "do_normalize": true,
+  "do_rescale": true,
   "do_resize": true,
   "feature_extractor_type": "CLIPFeatureExtractor",
   "image_mean": [
@@ -10,11 +14,15 @@
     0.4578275,
     0.40821073
   ],
+  "image_processor_type": "CLIPImageProcessor",
   "image_std": [
     0.26862954,
     0.26130258,
     0.27577711
   ],
   "resample": 3,
-  "size": 224
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 224
+  }
 }
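The rewritten config follows the CLIPImageProcessor schema from transformers 4.25: "size" and "crop_size" become dicts, and the previously implicit 1/255 rescaling is now explicit ("rescale_factor": 0.00392156862745098 is exactly 1/255). A minimal sketch of loading the migrated processor, assuming transformers >= 4.25; the hub repo id is inferred from the paths in this diff and "input.jpg" is a placeholder:

    # Sketch: load the migrated processor and preprocess one image.
    # Repo id and image path are illustrative, not part of this commit.
    from PIL import Image
    from transformers import CLIPImageProcessor

    processor = CLIPImageProcessor.from_pretrained(
        "lambdalabs/sd-image-variations-diffusers", subfolder="feature_extractor"
    )
    batch = processor(Image.open("input.jpg"), return_tensors="pt")
    print(batch.pixel_values.shape)  # torch.Size([1, 3, 224, 224])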
image_encoder/config.json CHANGED
@@ -1,174 +1,23 @@
 {
-  "_name_or_path": "openai/clip-vit-large-patch14",
+  "_name_or_path": "./lambdalabs/sd-image-variations-diffusers/image_encoder",
   "architectures": [
-    "CLIPModel"
+    "CLIPVisionModelWithProjection"
   ],
+  "attention_dropout": 0.0,
+  "dropout": 0.0,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 1024,
+  "image_size": 224,
   "initializer_factor": 1.0,
-  "logit_scale_init_value": 2.6592,
-  "model_type": "clip",
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "model_type": "clip_vision_model",
+  "num_attention_heads": 16,
+  "num_channels": 3,
+  "num_hidden_layers": 24,
+  "patch_size": 14,
   "projection_dim": 768,
-  "text_config": {
-    "_name_or_path": "",
-    "add_cross_attention": false,
-    "architectures": null,
-    "attention_dropout": 0.0,
-    "bad_words_ids": null,
-    "bos_token_id": 0,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
-    "dropout": 0.0,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": 2,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
-    "hidden_act": "quick_gelu",
-    "hidden_size": 768,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
-    "initializer_factor": 1.0,
-    "initializer_range": 0.02,
-    "intermediate_size": 3072,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
-    "layer_norm_eps": 1e-05,
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "max_position_embeddings": 77,
-    "min_length": 0,
-    "model_type": "clip_text_model",
-    "no_repeat_ngram_size": 0,
-    "num_attention_heads": 12,
-    "num_beam_groups": 1,
-    "num_beams": 1,
-    "num_hidden_layers": 12,
-    "num_return_sequences": 1,
-    "output_attentions": false,
-    "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": 1,
-    "prefix": null,
-    "problem_type": null,
-    "pruned_heads": {},
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
-    "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
-    "torch_dtype": null,
-    "torchscript": false,
-    "transformers_version": "4.21.3",
-    "typical_p": 1.0,
-    "use_bfloat16": false,
-    "vocab_size": 49408
-  },
-  "text_config_dict": {
-    "hidden_size": 768,
-    "intermediate_size": 3072,
-    "num_attention_heads": 12,
-    "num_hidden_layers": 12
-  },
   "torch_dtype": "float32",
-  "transformers_version": null,
-  "vision_config": {
-    "_name_or_path": "",
-    "add_cross_attention": false,
-    "architectures": null,
-    "attention_dropout": 0.0,
-    "bad_words_ids": null,
-    "bos_token_id": null,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
-    "dropout": 0.0,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": null,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
-    "hidden_act": "quick_gelu",
-    "hidden_size": 1024,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
-    "image_size": 224,
-    "initializer_factor": 1.0,
-    "initializer_range": 0.02,
-    "intermediate_size": 4096,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
-    "layer_norm_eps": 1e-05,
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "min_length": 0,
-    "model_type": "clip_vision_model",
-    "no_repeat_ngram_size": 0,
-    "num_attention_heads": 16,
-    "num_beam_groups": 1,
-    "num_beams": 1,
-    "num_channels": 3,
-    "num_hidden_layers": 24,
-    "num_return_sequences": 1,
-    "output_attentions": false,
-    "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": null,
-    "patch_size": 14,
-    "prefix": null,
-    "problem_type": null,
-    "pruned_heads": {},
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
-    "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
-    "torch_dtype": null,
-    "torchscript": false,
-    "transformers_version": "4.21.3",
-    "typical_p": 1.0,
-    "use_bfloat16": false
-  },
-  "vision_config_dict": {
-    "hidden_size": 1024,
-    "intermediate_size": 4096,
-    "num_attention_heads": 16,
-    "num_hidden_layers": 24,
-    "patch_size": 14
-  }
+  "transformers_version": "4.25.0.dev0"
 }
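With only the vision tower left in the config, the encoder loads as a CLIPVisionModelWithProjection (added in transformers 4.25). Its image_embeds output is the 768-dimensional projected CLIP embedding ("projection_dim": 768) that this pipeline conditions the UNet on. A minimal sketch, with a random tensor standing in for preprocessed pixels:

    # Sketch: load the vision-only encoder and project pixels to a CLIP embedding.
    # Assumes transformers >= 4.25; the random input stands in for processor output.
    import torch
    from transformers import CLIPVisionModelWithProjection

    encoder = CLIPVisionModelWithProjection.from_pretrained(
        "lambdalabs/sd-image-variations-diffusers", subfolder="image_encoder"
    )
    with torch.no_grad():
        out = encoder(pixel_values=torch.randn(1, 3, 224, 224))
    print(out.image_embeds.shape)  # torch.Size([1, 768])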
image_encoder/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e33cd4b4a78c8a2f4b361665db761ab9971705bbd27706ee9299889dffd586ef
-size 1710660257
+oid sha256:89d2aa29b5fdf64f3ad4f45fb4227ea98bc45156bbae673b85be1af7783dbabb
+size 1215993967
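The LFS pointer shows the checkpoint shrinking from 1,710,660,257 to 1,215,993,967 bytes; the roughly 0.46 GiB saved is consistent with dropping the ~123M-parameter text tower in float32 and keeping only the ViT-L/14 vision weights and visual projection.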
model_index.json CHANGED
@@ -1,13 +1,13 @@
 {
-  "_class_name": "StableDiffusionImageEmbedPipeline",
-  "_diffusers_version": "0.4.0.dev0",
+  "_class_name": "StableDiffusionImageVariationPipeline",
+  "_diffusers_version": "0.8.0.dev0",
   "feature_extractor": [
     "transformers",
-    "CLIPFeatureExtractor"
+    "CLIPImageProcessor"
   ],
   "image_encoder": [
     "transformers",
-    "CLIPModel"
+    "CLIPVisionModelWithProjection"
   ],
   "safety_checker": [
     "stable_diffusion",
safety_checker/config.json CHANGED
@@ -1,5 +1,6 @@
 {
-  "_name_or_path": "CompVis/stable-diffusion-safety-checker",
+  "_commit_hash": null,
+  "_name_or_path": "./lambdalabs/sd-image-variations-diffusers/safety_checker",
   "architectures": [
     "StableDiffusionSafetyChecker"
   ],
@@ -13,6 +14,7 @@
   "architectures": null,
   "attention_dropout": 0.0,
   "bad_words_ids": null,
+  "begin_suppress_tokens": null,
   "bos_token_id": 0,
   "chunk_size_feed_forward": 0,
   "cross_attention_hidden_size": null,
@@ -60,12 +62,14 @@
   "pad_token_id": 1,
   "prefix": null,
   "problem_type": null,
+  "projection_dim": 512,
   "pruned_heads": {},
   "remove_invalid_values": false,
   "repetition_penalty": 1.0,
   "return_dict": true,
   "return_dict_in_generate": false,
   "sep_token_id": null,
+  "suppress_tokens": null,
   "task_specific_params": null,
   "temperature": 1.0,
   "tf_legacy_loss": false,
@@ -76,7 +80,7 @@
   "top_p": 1.0,
   "torch_dtype": null,
   "torchscript": false,
-  "transformers_version": "4.21.3",
+  "transformers_version": "4.25.0.dev0",
   "typical_p": 1.0,
   "use_bfloat16": false,
   "vocab_size": 49408
@@ -95,6 +99,7 @@
   "architectures": null,
   "attention_dropout": 0.0,
   "bad_words_ids": null,
+  "begin_suppress_tokens": null,
   "bos_token_id": null,
   "chunk_size_feed_forward": 0,
   "cross_attention_hidden_size": null,
@@ -144,12 +149,14 @@
   "patch_size": 14,
   "prefix": null,
   "problem_type": null,
+  "projection_dim": 512,
   "pruned_heads": {},
   "remove_invalid_values": false,
   "repetition_penalty": 1.0,
   "return_dict": true,
   "return_dict_in_generate": false,
   "sep_token_id": null,
+  "suppress_tokens": null,
   "task_specific_params": null,
   "temperature": 1.0,
   "tf_legacy_loss": false,
@@ -160,7 +167,7 @@
   "top_p": 1.0,
   "torch_dtype": null,
   "torchscript": false,
-  "transformers_version": "4.21.3",
+  "transformers_version": "4.25.0.dev0",
   "typical_p": 1.0,
   "use_bfloat16": false
 },
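None of the safety_checker changes are functional: "begin_suppress_tokens", "suppress_tokens", and the explicit "projection_dim": 512 appear to be defaults newly serialized by transformers 4.25.0.dev0, and the rest is commit-hash, path, and version bookkeeping.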
scheduler/scheduler_config.json CHANGED
@@ -1,11 +1,12 @@
 {
   "_class_name": "PNDMScheduler",
-  "_diffusers_version": "0.4.0.dev0",
+  "_diffusers_version": "0.8.0.dev0",
   "beta_end": 0.012,
   "beta_schedule": "scaled_linear",
   "beta_start": 0.00085,
   "num_train_timesteps": 1000,
+  "set_alpha_to_one": false,
   "skip_prk_steps": true,
-  "trained_betas": null,
-  "steps_offset": 1
+  "steps_offset": 1,
+  "trained_betas": null
 }
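Beyond the version bump and alphabetical re-ordering of keys, the only new scheduler field is "set_alpha_to_one": false, which newer diffusers writes out explicitly; it selects the alpha product used for the final denoising step, and false matches the original Stable Diffusion setup. A quick round-trip check, assuming a recent diffusers where schedulers expose from_pretrained:

    # Sketch: the updated config loads through the standard scheduler API.
    from diffusers import PNDMScheduler

    scheduler = PNDMScheduler.from_pretrained(
        "lambdalabs/sd-image-variations-diffusers", subfolder="scheduler"
    )
    print(scheduler.config.set_alpha_to_one)  # False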
unet/config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "_class_name": "UNet2DConditionModel",
-  "_diffusers_version": "0.4.0.dev0",
+  "_diffusers_version": "0.8.0.dev0",
+  "_name_or_path": "./lambdalabs/sd-image-variations-diffusers/unet",
   "act_fn": "silu",
   "attention_head_dim": 8,
   "block_out_channels": [
vae/config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "_class_name": "AutoencoderKL",
-  "_diffusers_version": "0.4.0.dev0",
+  "_diffusers_version": "0.8.0.dev0",
+  "_name_or_path": "./lambdalabs/sd-image-variations-diffusers/vae",
   "act_fn": "silu",
   "block_out_channels": [
     128,
@@ -17,6 +18,7 @@
   "in_channels": 3,
   "latent_channels": 4,
   "layers_per_block": 2,
+  "norm_num_groups": 32,
   "out_channels": 3,
   "sample_size": 512,
   "up_block_types": [