Commit e1f717d
Duplicate from nota-ai/bk-sdm-base
Co-authored-by: Bo-Kyeong Kim <[email protected]>
- .gitattributes +35 -0
- README.md +215 -0
- feature_extractor/preprocessor_config.json +20 -0
- model_index.json +32 -0
- safety_checker/config.json +171 -0
- safety_checker/model.fp16.safetensors +3 -0
- safety_checker/model.safetensors +3 -0
- safety_checker/pytorch_model.bin +3 -0
- safety_checker/pytorch_model.fp16.bin +3 -0
- scheduler/.ipynb_checkpoints/scheduler_config-checkpoint.json +9 -0
- scheduler/scheduler_config.json +13 -0
- text_encoder/config.json +24 -0
- text_encoder/model.fp16.safetensors +3 -0
- text_encoder/model.safetensors +3 -0
- text_encoder/pytorch_model.bin +3 -0
- text_encoder/pytorch_model.fp16.bin +3 -0
- tokenizer/merges.txt +0 -0
- tokenizer/special_tokens_map.json +24 -0
- tokenizer/tokenizer_config.json +34 -0
- tokenizer/vocab.json +0 -0
- unet/config.json +58 -0
- unet/diffusion_pytorch_model.bin +3 -0
- unet/diffusion_pytorch_model.safetensors +3 -0
- vae/config.json +29 -0
- vae/diffusion_pytorch_model.bin +3 -0
- vae/diffusion_pytorch_model.fp16.bin +3 -0
- vae/diffusion_pytorch_model.fp16.safetensors +3 -0
- vae/diffusion_pytorch_model.safetensors +3 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
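These attribute rules route large binaries (weights, checkpoints, archives) through Git LFS, so a checkout without LFS support contains small pointer files rather than the actual weights. As a minimal sketch (not part of the repo), a pointer file can be recognized by its first line:

```python
# Sketch: detect a Git LFS pointer file by its spec header. The pointer
# format is exactly what appears in the weight-file diffs below.
def is_lfs_pointer(path: str) -> bool:
    with open(path, "rb") as f:
        return f.read(64).startswith(b"version https://git-lfs.github.com/spec/v1")
```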
README.md
ADDED
@@ -0,0 +1,215 @@
---
license: creativeml-openrail-m
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
library_name: diffusers
pipeline_tag: text-to-image
extra_gated_prompt: >-
  This model is open access and available to all, with a CreativeML OpenRAIL-M
  license further specifying rights and usage.

  The CreativeML OpenRAIL License specifies:


  1. You can't use the model to deliberately produce nor share illegal or
  harmful outputs or content

  2. The authors claim no rights on the outputs you generate, you are free to
  use them and are accountable for their use which must not go against the
  provisions set in the license

  3. You may re-distribute the weights and use the model commercially and/or as
  a service. If you do, please be aware you have to include the same use
  restrictions as the ones in the license and share a copy of the CreativeML
  OpenRAIL-M to all your users (please read the license entirely and carefully)

  Please read the full license carefully here:
  https://huggingface.co/spaces/CompVis/stable-diffusion-license

extra_gated_heading: Please read the LICENSE to access this model
duplicated_from: nota-ai/bk-sdm-base
---
# BK-SDM Model Card

Block-removed Knowledge-distilled Stable Diffusion Model (BK-SDM) is an architecturally compressed SDM for efficient general-purpose text-to-image synthesis. This model is built by (i) removing several residual and attention blocks from the U-Net of [Stable Diffusion v1.4](https://huggingface.co/CompVis/stable-diffusion-v1-4) and (ii) distillation pretraining on only 0.22M LAION pairs (fewer than 0.1% of the full training set). Despite being trained with very limited resources, our compact model can imitate the original SDM by benefiting from transferred knowledge.
- **Resources for more information**: [Paper](https://arxiv.org/abs/2305.15798), [Demo](https://huggingface.co/spaces/nota-ai/compressed-stable-diffusion).


## Examples with 🤗[Diffusers library](https://github.com/huggingface/diffusers)

Inference code using the default PNDM scheduler with 50 denoising steps is as follows.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("nota-ai/bk-sdm-base", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "a tropical bird sitting on a branch of a tree"
image = pipe(prompt).images[0]

image.save("example.png")
```

The following code is also runnable, because we compressed only the U-Net of [Stable Diffusion v1.4](https://huggingface.co/CompVis/stable-diffusion-v1-4) while keeping the other parts (i.e., the text encoder and image decoder) unchanged:

```python
import torch
from diffusers import StableDiffusionPipeline, UNet2DConditionModel

pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
pipe.unet = UNet2DConditionModel.from_pretrained("nota-ai/bk-sdm-base", subfolder="unet", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "a tropical bird sitting on a branch of a tree"
image = pipe(prompt).images[0]

image.save("example.png")
```

The above examples have been tested on a single NVIDIA GeForce RTX 3090 GPU with the following versions:

```
torch 1.13.1+cu117
transformers 4.29.2
diffusers 0.15.0
```

## Compression Method

### U-Net Architecture
We removed several residual and attention blocks from the 0.86B-parameter U-Net in the 1.04B-parameter SDM-v1.4; our compressed models are summarized as follows (a quick parameter-count check appears below the list).
- 0.76B-param **BK-SDM-Base** (0.58B-param U-Net): obtained with ① fewer blocks in outer stages.
- 0.66B-param **BK-SDM-Small** (0.49B-param U-Net): obtained with ① and ② mid-stage removal.
- 0.50B-param **BK-SDM-Tiny** (0.33B-param U-Net): obtained with ①, ②, and ③ further inner-stage removal.
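The quoted parameter counts can be verified directly from the released weights; a minimal sketch using diffusers:

```python
# Sketch: count the U-Net parameters of BK-SDM-Base (expected ~0.58B).
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("nota-ai/bk-sdm-base", subfolder="unet")
print(f"U-Net parameters: {sum(p.numel() for p in unet.parameters()) / 1e9:.2f}B")
```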


### Distillation Pretraining
The compact U-Net was trained to mimic the behavior of the original U-Net. We leveraged feature-level and output-level distillation, along with the denoising task loss.
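As a rough illustration of this objective (a sketch, not the authors' training code; the feature lists and unit loss weights are assumptions), the combined loss can be written as:

```python
# Sketch of the distillation objective: denoising task loss plus output-level
# and feature-level KD. `feats_s`/`feats_t` stand for per-block feature maps
# that would be collected with forward hooks; unit loss weights are assumed.
import torch.nn.functional as F

def bk_sdm_objective(eps_student, eps_teacher, noise, feats_s, feats_t):
    task_loss = F.mse_loss(eps_student, noise)        # standard denoising loss
    output_kd = F.mse_loss(eps_student, eps_teacher)  # match the teacher's output
    feature_kd = sum(F.mse_loss(s, t) for s, t in zip(feats_s, feats_t))  # match intermediate features
    return task_loss + output_kd + feature_kd
```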


<center>
<img alt="U-Net architectures and KD-based pretraining" src="https://huggingface.co/spaces/nota-ai/compressed-stable-diffusion/resolve/e6fb31631f0b2948cf6ec54006ea050d6c83e940/docs/fig_model.png" width="100%">
</center>


<br/>

- **Training Data**: 212,776 image-text pairs (i.e., 0.22M pairs) from [LAION-Aesthetics V2 6.5+](https://laion.ai/blog/laion-aesthetics/).
- **Hardware:** A single NVIDIA A100 80GB GPU
- **Gradient Accumulations**: 4
- **Batch:** 256 (= 4 × 64)
- **Optimizer:** AdamW
- **Learning Rate:** a constant learning rate of 5e-5 for 50K-iteration pretraining


## Experimental Results

The following table shows the zero-shot results on 30K samples from the MS-COCO validation split. After generating 512×512 images with the PNDM scheduler and 25 denoising steps, we downsampled them to 256×256 for evaluating generation scores. Our checkpoints were taken at the 50K-th training iteration.

| Model | FID↓ | IS↑ | CLIP Score↑<br>(ViT-g/14) | # Params,<br>U-Net | # Params,<br>Whole SDM |
|:---:|:---:|:---:|:---:|:---:|:---:|
| Stable Diffusion v1.4 | 13.05 | 36.76 | 0.2958 | 0.86B | 1.04B |
| BK-SDM-Base (Ours) | 15.76 | 33.79 | 0.2878 | 0.58B | 0.76B |
| BK-SDM-Small (Ours) | 16.98 | 31.68 | 0.2677 | 0.49B | 0.66B |
| BK-SDM-Tiny (Ours) | 17.12 | 30.09 | 0.2653 | 0.33B | 0.50B |
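The generation side of this protocol can be approximated as follows (a sketch; the FID/IS/CLIP-score computation is omitted):

```python
# Sketch of the evaluation-time generation setup: 512x512 images with the
# default PNDM scheduler and 25 denoising steps, downsampled to 256x256.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "nota-ai/bk-sdm-base", torch_dtype=torch.float16
).to("cuda")

image = pipe("a tropical bird sitting on a branch of a tree",
             num_inference_steps=25).images[0]  # 512x512 by default
image = image.resize((256, 256))                # evaluation resolution
```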

<br/>

The following figure depicts synthesized images for some MS-COCO captions.

<center>
<img alt="Visual results" src="https://huggingface.co/spaces/nota-ai/compressed-stable-diffusion/resolve/e6fb31631f0b2948cf6ec54006ea050d6c83e940/docs/fig_results.png" width="100%">
</center>


<br/>


# Uses
_Note: This section is taken from the [Stable Diffusion v1 model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) (which was based on the [DALLE-MINI model card](https://huggingface.co/dalle-mini/dalle-mini)) and applies in the same way to BK-SDMs_.

## Direct Use
The model is intended for research purposes only. Possible research areas and tasks include

- Safe deployment of models which have the potential to generate harmful content.
- Probing and understanding the limitations and biases of generative models.
- Generation of artworks and use in design and other artistic processes.
- Applications in educational or creative tools.
- Research on generative models.

Excluded uses are described below.

### Misuse, Malicious Use, and Out-of-Scope Use
The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.

#### Out-of-Scope Use
The model was not trained to produce factual or true representations of people or events, so using the model to generate such content is out of scope for its abilities.

#### Misuse and Malicious Use
Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to:

- Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc.
- Intentionally promoting or propagating discriminatory content or harmful stereotypes.
- Impersonating individuals without their consent.
- Sexual content without consent of the people who might see it.
- Mis- and disinformation.
- Representations of egregious violence and gore.
- Sharing of copyrighted or licensed material in violation of its terms of use.
- Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use.

## Limitations and Bias

### Limitations

- The model does not achieve perfect photorealism.
- The model cannot render legible text.
- The model does not perform well on more difficult tasks which involve compositionality, such as rendering an image corresponding to "A red cube on top of a blue sphere".
- Faces and people in general may not be generated properly.
- The model was trained mainly with English captions and will not work as well in other languages.
- The autoencoding part of the model is lossy.
- The model was trained on the large-scale dataset [LAION-5B](https://laion.ai/blog/laion-5b/), which contains adult material and is not fit for product use without additional safety mechanisms and considerations.
- No additional measures were used to deduplicate the dataset. As a result, we observe some degree of memorization for images that are duplicated in the training data. The training data can be searched at [https://rom1504.github.io/clip-retrieval/](https://rom1504.github.io/clip-retrieval/) to possibly assist in the detection of memorized images.

### Bias

While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases. Stable Diffusion v1 was trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/), which consists of images that are primarily limited to English descriptions. Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for. This affects the overall output of the model, as white and western cultures are often set as the default. Further, the ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts.

### Safety Module

The intended use of this model is with the [Safety Checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) in Diffusers. This checker works by checking model outputs against known hard-coded NSFW concepts. The concepts are intentionally hidden to reduce the likelihood of reverse-engineering this filter. Specifically, the checker compares the class probability of harmful concepts in the embedding space of the `CLIPTextModel` *after generation* of the images. The concepts are passed into the model with the generated image and compared to a hand-engineered weight for each NSFW concept.
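The checker ships with this repo and runs by default when the pipeline is loaded as shown earlier; each generated image comes back with a flag (a sketch):

```python
# Sketch: the pipeline output exposes one boolean per image from the
# safety checker.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "nota-ai/bk-sdm-base", torch_dtype=torch.float16
).to("cuda")

out = pipe("a tropical bird sitting on a branch of a tree")
print(out.nsfw_content_detected)  # e.g., [False]
```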


# Acknowledgments
- We express our gratitude to [Microsoft for Startups Founders Hub](https://www.microsoft.com/en-us/startups) for generously providing the Azure credits used during pretraining.
- We deeply appreciate the pioneering research on Latent/Stable Diffusion conducted by [CompVis](https://github.com/CompVis/latent-diffusion) and [Runway](https://runwayml.com/).
- Special thanks to the contributors to [Diffusers](https://github.com/huggingface/diffusers) for their valuable support.


# Citation
```bibtex
@article{kim2023architectural,
  title={On Architectural Compression of Text-to-Image Diffusion Models},
  author={Kim, Bo-Kyeong and Song, Hyoung-Kyu and Castells, Thibault and Choi, Shinkook},
  journal={arXiv preprint arXiv:2305.15798},
  year={2023},
  url={https://arxiv.org/abs/2305.15798}
}
```
```bibtex
@article{Kim_2023_ICMLW,
  title={BK-SDM: Architecturally Compressed Stable Diffusion for Efficient Text-to-Image Generation},
  author={Kim, Bo-Kyeong and Song, Hyoung-Kyu and Castells, Thibault and Choi, Shinkook},
  journal={ICML Workshop on Efficient Systems for Foundation Models (ES-FoMo)},
  year={2023},
  url={https://openreview.net/forum?id=bOVydU0XKC}
}
```

*This model card was written by Bo-Kyeong Kim and is based on the [Stable Diffusion v1 model card](https://huggingface.co/CompVis/stable-diffusion-v1-4).*
feature_extractor/preprocessor_config.json
ADDED
@@ -0,0 +1,20 @@
{
  "crop_size": 224,
  "do_center_crop": true,
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_resize": true,
  "feature_extractor_type": "CLIPFeatureExtractor",
  "image_mean": [
    0.48145466,
    0.4578275,
    0.40821073
  ],
  "image_std": [
    0.26862954,
    0.26130258,
    0.27577711
  ],
  "resample": 3,
  "size": 224
}
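This is the standard CLIP image preprocessing (resize and center-crop to 224, normalize with the CLIP mean/std) used to prepare generated images for the safety checker. It can be loaded on its own; a minimal sketch:

```python
# Sketch: load the preprocessing config above as a standalone processor.
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained(
    "nota-ai/bk-sdm-base", subfolder="feature_extractor"
)
print(processor.image_mean, processor.image_std)
```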
model_index.json
ADDED
@@ -0,0 +1,32 @@
{
  "_class_name": "StableDiffusionPipeline",
  "_diffusers_version": "0.2.2",
  "feature_extractor": [
    "transformers",
    "CLIPImageProcessor"
  ],
  "safety_checker": [
    "stable_diffusion",
    "StableDiffusionSafetyChecker"
  ],
  "scheduler": [
    "diffusers",
    "PNDMScheduler"
  ],
  "text_encoder": [
    "transformers",
    "CLIPTextModel"
  ],
  "tokenizer": [
    "transformers",
    "CLIPTokenizer"
  ],
  "unet": [
    "diffusers",
    "UNet2DConditionModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
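`model_index.json` tells the generic pipeline loader which library and class to instantiate for each component subfolder; loading through `DiffusionPipeline` resolves to the classes listed above. A minimal sketch:

```python
# Sketch: the generic loader resolves component classes from model_index.json.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("nota-ai/bk-sdm-base")
print(type(pipe).__name__)      # StableDiffusionPipeline
print(sorted(pipe.components))  # feature_extractor, safety_checker, scheduler, ...
```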
safety_checker/config.json
ADDED
@@ -0,0 +1,171 @@
{
  "_name_or_path": "./safety_module",
  "architectures": [
    "StableDiffusionSafetyChecker"
  ],
  "initializer_factor": 1.0,
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "projection_dim": 768,
  "text_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": null,
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "bos_token_id": 0,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 2,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "quick_gelu",
    "hidden_size": 768,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 3072,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-05,
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 77,
    "min_length": 0,
    "model_type": "clip_text_model",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 12,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 12,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": 1,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.21.0.dev0",
    "typical_p": 1.0,
    "use_bfloat16": false,
    "vocab_size": 49408
  },
  "text_config_dict": {
    "hidden_size": 768,
    "intermediate_size": 3072,
    "num_attention_heads": 12,
    "num_hidden_layers": 12
  },
  "torch_dtype": "float32",
  "transformers_version": null,
  "vision_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": null,
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "quick_gelu",
    "hidden_size": 1024,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "image_size": 224,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 4096,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-05,
    "length_penalty": 1.0,
    "max_length": 20,
    "min_length": 0,
    "model_type": "clip_vision_model",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 16,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 24,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "patch_size": 14,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.21.0.dev0",
    "typical_p": 1.0,
    "use_bfloat16": false
  },
  "vision_config_dict": {
    "hidden_size": 1024,
    "intermediate_size": 4096,
    "num_attention_heads": 16,
    "num_hidden_layers": 24,
    "patch_size": 14
  }
}
safety_checker/model.fp16.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:08902f19b1cfebd7c989f152fc0507bef6898c706a91d666509383122324b511
size 608018440
safety_checker/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9d6a233ff6fd5ccb9f76fd99618d73369c52dd3d8222376384d0e601911089e8
size 1215981830
safety_checker/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:193490b58ef62739077262e833bf091c66c29488058681ac25cf7df3d8190974
size 1216061799
safety_checker/pytorch_model.fp16.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:22ba87205445ad5def13e54919b038dcfb7321ec1c3f4b12487d4fba6036125f
size 608103564
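Several components ship both fp32 and `.fp16.` weight files. With diffusers versions that support the `variant` argument (an assumption; older releases used other mechanisms), the fp16 files can be selected explicitly; a sketch:

```python
# Sketch: load the .fp16. weight variants shipped in this repo (assumes a
# diffusers version that supports the `variant` keyword).
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "nota-ai/bk-sdm-base", torch_dtype=torch.float16, variant="fp16"
)
```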
scheduler/.ipynb_checkpoints/scheduler_config-checkpoint.json
ADDED
@@ -0,0 +1,9 @@
{
  "_class_name": "PNDMScheduler",
  "_diffusers_version": "0.2.2",
  "beta_end": 0.012,
  "beta_schedule": "scaled_linear",
  "beta_start": 0.00085,
  "num_train_timesteps": 1000,
  "skip_prk_steps": true
}
scheduler/scheduler_config.json
ADDED
@@ -0,0 +1,13 @@
{
  "_class_name": "PNDMScheduler",
  "_diffusers_version": "0.7.0.dev0",
  "beta_end": 0.012,
  "beta_schedule": "scaled_linear",
  "beta_start": 0.00085,
  "num_train_timesteps": 1000,
  "set_alpha_to_one": false,
  "skip_prk_steps": true,
  "steps_offset": 1,
  "trained_betas": null,
  "clip_sample": false
}
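The pipeline defaults to PNDM with the settings above; a different scheduler can be built from the same config and swapped in, as a sketch:

```python
# Sketch: replace the default PNDM scheduler with one built from its config.
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

pipe = StableDiffusionPipeline.from_pretrained("nota-ai/bk-sdm-base")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
```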
text_encoder/config.json
ADDED
@@ -0,0 +1,24 @@
{
  "_name_or_path": "openai/clip-vit-large-patch14",
  "architectures": [
    "CLIPTextModel"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 0,
  "dropout": 0.0,
  "eos_token_id": 2,
  "hidden_act": "quick_gelu",
  "hidden_size": 768,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 77,
  "model_type": "clip_text_model",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "torch_dtype": "float32",
  "transformers_version": "4.21.0.dev0",
  "vocab_size": 49408
}
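This config matches the unchanged CLIP ViT-L/14 text encoder inherited from SD-v1.4 (`openai/clip-vit-large-patch14`); it loads as a plain `CLIPTextModel`, as a sketch:

```python
# Sketch: load the (uncompressed) text encoder on its own.
from transformers import CLIPTextModel

text_encoder = CLIPTextModel.from_pretrained("nota-ai/bk-sdm-base", subfolder="text_encoder")
print(text_encoder.config.hidden_size)  # 768
```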
text_encoder/model.fp16.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:77795e2023adcf39bc29a884661950380bd093cf0750a966d473d1718dc9ef4e
size 246144864
text_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7b3a12df205cb3c74dd4eae4354d93f606ae6b3bc29d5d06fd97921cb9ad8a81
size 492265879
text_encoder/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:770a47a9ffdcfda0b05506a7888ed714d06131d60267e6cf52765d61cf59fd67
size 492305335
text_encoder/pytorch_model.fp16.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:05eee911f195625deeab86f0b22b115d7d8bc3adbfc1404f03557f7e4e6a8fd7
size 246187076
tokenizer/merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer/special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<|endoftext|>",
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1,34 @@
{
  "add_prefix_space": false,
  "bos_token": {
    "__type": "AddedToken",
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "do_lower_case": true,
  "eos_token": {
    "__type": "AddedToken",
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "errors": "replace",
  "model_max_length": 77,
  "name_or_path": "openai/clip-vit-large-patch14",
  "pad_token": "<|endoftext|>",
  "special_tokens_map_file": "./special_tokens_map.json",
  "tokenizer_class": "CLIPTokenizer",
  "unk_token": {
    "__type": "AddedToken",
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
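The tokenizer is the stock CLIP BPE tokenizer with a 77-token context window; a minimal sketch:

```python
# Sketch: load the tokenizer and confirm the 77-token context length.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("nota-ai/bk-sdm-base", subfolder="tokenizer")
print(tokenizer.model_max_length)  # 77
```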
tokenizer/vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
unet/config.json
ADDED
@@ -0,0 +1,58 @@
{
  "_class_name": "UNet2DConditionModel",
  "_diffusers_version": "0.15.0",
  "_name_or_path": "/ssd2/bkkim/sdm_paper_checkpoints/BK-SDM-Base/checkpoint-50000",
  "act_fn": "silu",
  "attention_head_dim": 8,
  "block_out_channels": [
    320,
    640,
    1280,
    1280
  ],
  "center_input_sample": false,
  "class_embed_type": null,
  "class_embeddings_concat": false,
  "conv_in_kernel": 3,
  "conv_out_kernel": 3,
  "cross_attention_dim": 768,
  "cross_attention_norm": null,
  "down_block_types": [
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "DownBlock2D"
  ],
  "downsample_padding": 1,
  "dual_cross_attention": false,
  "encoder_hid_dim": null,
  "flip_sin_to_cos": true,
  "freq_shift": 0,
  "in_channels": 4,
  "layers_per_block": 1,
  "mid_block_only_cross_attention": null,
  "mid_block_scale_factor": 1,
  "mid_block_type": "UNetMidBlock2DCrossAttn",
  "norm_eps": 1e-05,
  "norm_num_groups": 32,
  "num_class_embeds": null,
  "only_cross_attention": false,
  "out_channels": 4,
  "projection_class_embeddings_input_dim": null,
  "resnet_out_scale_factor": 1.0,
  "resnet_skip_time_act": false,
  "resnet_time_scale_shift": "default",
  "sample_size": 64,
  "time_cond_proj_dim": null,
  "time_embedding_act_fn": null,
  "time_embedding_type": "positional",
  "timestep_post_act": null,
  "up_block_types": [
    "UpBlock2D",
    "CrossAttnUpBlock2D",
    "CrossAttnUpBlock2D",
    "CrossAttnUpBlock2D"
  ],
  "upcast_attention": false,
  "use_linear_projection": false
}
unet/diffusion_pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0a374bc04cabffeddc470bc482cbce6932501a2b7a4394f95a015c4e96124323
size 2317727757
unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2249092ca3c85f70d0ce26577718c0296417bfd9da292dded42957bc232a0b49
size 2317593424
vae/config.json
ADDED
@@ -0,0 +1,29 @@
{
  "_class_name": "AutoencoderKL",
  "_diffusers_version": "0.2.2",
  "act_fn": "silu",
  "block_out_channels": [
    128,
    256,
    512,
    512
  ],
  "down_block_types": [
    "DownEncoderBlock2D",
    "DownEncoderBlock2D",
    "DownEncoderBlock2D",
    "DownEncoderBlock2D"
  ],
  "in_channels": 3,
  "latent_channels": 4,
  "layers_per_block": 2,
  "out_channels": 3,
  "sample_size": 512,
  "scaling_factor": 0.18215,
  "up_block_types": [
    "UpDecoderBlock2D",
    "UpDecoderBlock2D",
    "UpDecoderBlock2D",
    "UpDecoderBlock2D"
  ]
}
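The VAE is likewise unchanged from SD-v1.4, including the standard latent scaling factor; a minimal sketch:

```python
# Sketch: load the (uncompressed) VAE and check the latent scaling factor.
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("nota-ai/bk-sdm-base", subfolder="vae")
print(vae.config.scaling_factor)  # 0.18215
```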
vae/diffusion_pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1b134cded8eb78b184aefb8805b6b572f36fa77b255c483665dda931fa0130c5
size 334707217
vae/diffusion_pytorch_model.fp16.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b7643b3e40b9f128eda5fe174fea73c3ef3903562651fb344a79439709c2e503
size 167405651
vae/diffusion_pytorch_model.fp16.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4fbcf0ebe55a0984f5a5e00d8c4521d52359af7229bb4d81890039d2aa16dd7c
size 167335342
vae/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a2b5134f4dbc140d9c11f11cba3233099e00af40f262f136c691fb7d38d2194c
size 334643276