Commit 239314a

Duplicate from Eugeoter/artiwaifu-diffusion-1.0

Co-authored-by: Euge <[email protected]>
- .gitattributes +41 -0
- README.md +231 -0
- artiwaifu-diffusion-v1.safetensors +3 -0
- model_index.json +41 -0
- references/artist.csv +0 -0
- references/character.csv +0 -0
- references/style.csv +23 -0
- scheduler/scheduler_config.json +21 -0
- text_encoder/config.json +24 -0
- text_encoder/model.safetensors +3 -0
- text_encoder_2/config.json +24 -0
- text_encoder_2/model.safetensors +3 -0
- tokenizer/merges.txt +0 -0
- tokenizer/special_tokens_map.json +24 -0
- tokenizer/tokenizer_config.json +33 -0
- tokenizer/vocab.json +0 -0
- tokenizer_2/merges.txt +0 -0
- tokenizer_2/special_tokens_map.json +24 -0
- tokenizer_2/tokenizer_config.json +33 -0
- tokenizer_2/vocab.json +0 -0
- unet/config.json +72 -0
- unet/diffusion_pytorch_model.safetensors +3 -0
- vae/config.json +33 -0
- vae/diffusion_pytorch_model.safetensors +3 -0
.gitattributes
ADDED
@@ -0,0 +1,41 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
images/1.png filter=lfs diff=lfs merge=lfs -text
images/2.png filter=lfs diff=lfs merge=lfs -text
images/4.png filter=lfs diff=lfs merge=lfs -text
assets/images/1.png filter=lfs diff=lfs merge=lfs -text
assets/images/2.png filter=lfs diff=lfs merge=lfs -text
assets/images/4.png filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,231 @@
---
license: other
license_name: faipl-1.0-sd
license_link: https://freedevproject.org/faipl-1.0-sd/
language:
- en
tags:
- text-to-image
- stable-diffusion
- safetensors
- stable-diffusion-xl
- diffusers
base_model: stabilityai/stable-diffusion-xl-base-1.0
pipeline_tag: text-to-image
---

<h1 align="center"><strong style="font-size: 48px;">ArtiWaifu Diffusion 1.0</strong></h1>

<p align="center">
<img src="https://i.postimg.cc/RFN05PW0/1.png" alt="Cover" title="Cover" width="450"/>
</p>

We have released **A**rti**Wa**ifu Diffusion v1.0, a model designed to generate aesthetically pleasing, faithful anime-style illustrations.
AWA Diffusion is an iteration of the Stable Diffusion XL model that has mastered over 6,000 artistic styles and more than 4,000 anime characters, each invoked through [trigger words](#trigger-words).
As an image generation model specialized for anime, it excels at producing high-quality anime images, especially ones with highly recognizable styles and characters, while maintaining consistently strong aesthetic expression.

## Model Details

AWA Diffusion is fine-tuned from Stable Diffusion XL on a curated dataset of 1.5M high-quality anime images, covering a wide range of both popular and niche anime concepts up to April 15, 2024.
AWA Diffusion employs our most advanced training strategies, enabling users to easily steer the model toward specific characters or styles while maintaining high image quality and aesthetic expression.

**Model Information**

- Developed by: [Euge](https://civitai.com/user/Euge_)
- Funded by: [Neta.art](https://nieta.art/)
- Model type: Generative text-to-image model
- Finetuned from model: [SDXL 1.0 Base](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
- License: [Fair AI Public License 1.0-SD](https://freedevproject.org/faipl-1.0-sd/)

## Usage Guide

This guide (i) introduces the model's recommended usage and prompt-writing strategies, offering suggestions for generation, and (ii) serves as a reference document for model usage, detailing the writing patterns and strategies for trigger words, quality tags, rating tags, style tags, and character tags.

### Basic Usage

- **CFG scale**: <span style="color:cyan">5-11</span>
- **Resolution**: total area (width × height) around 1024x1024; no lower than 256x256, with both width and height multiples of 32.
- **Sampling method**: Euler A (<span style="color:cyan">50+</span> steps) or DPM++ 2M Karras (<span style="color:cyan">~35</span> steps)

Because of its special training method, AWA's optimal inference step count is higher than usual: as the number of inference steps increases, the quality of the generated images can continue to improve.

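For reference, here is a minimal text-to-image sketch with diffusers applying the settings above. The repo id matches this repository; the exact prompt, dtype, and scheduler swap are illustrative assumptions, not prescribed by the model card:

```python
# Minimal sketch (assumes diffusers >= 0.27 and a CUDA GPU).
import torch
from diffusers import EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "Eugeoter/artiwaifu-diffusion-1.0", torch_dtype=torch.float16
).to("cuda")
# The repo ships an EulerDiscreteScheduler; switch to Euler Ancestral ("Euler A").
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

image = pipe(
    prompt="by yoneyama mai, 1 frieren, 1girl, solo, fantasy theme, smile, "
    "holding a magic wand, beautiful color, amazing quality",
    negative_prompt="signature, artist name, worst quality, lowres",
    width=1024, height=1024,   # area ~1024x1024, multiples of 32
    num_inference_steps=50,    # Euler A works best with 50+ steps
    guidance_scale=7.0,        # CFG within the recommended 5-11 range
).images[0]
image.save("example.png")
```
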
❓ **Question:** Why not use the standard SDXL resolutions?

💡 **Answer:** Because the bucketing algorithm used in training does not adhere to a fixed set of buckets. Although this does not strictly match the resolutions assumed by the positional encoding, we have not observed any adverse effects.

### Prompting Strategies

Like all text-to-image diffusion models, AWA Diffusion is notoriously sensitive to prompts. Even a misspelling in the prompt, or replacing spaces with underscores, can change the generated results.
AWA Diffusion encourages users to write prompts as **tags** separated by **comma + space (`, `)**. Although the model also supports natural language descriptions as prompts, or a mix of both, the tag-by-tag format is more stable and user-friendly.

When describing a specific ACG concept, such as a character, style, or scene, we recommend choosing tags from the [Danbooru tags](https://danbooru.donmai.us/tags) and replacing their underscores with spaces so the model accurately understands your intent. For example, `bishop_(chess)` should be written as `bishop (chess)`; and in inference tools such as AUTOMATIC1111 WebUI that use parentheses to weight prompts, all parentheses within tags should be escaped, i.e., `bishop \(chess\)`.

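A small illustrative helper (hypothetical, not part of this repo) that applies these two normalization rules:

```python
# Hypothetical helper implementing the rules above: underscores -> spaces,
# plus optional parenthesis escaping for UIs that weight with parentheses.
def normalize_tag(tag: str, escape_parens: bool = True) -> str:
    tag = tag.replace("_", " ")
    if escape_parens:
        tag = tag.replace("(", r"\(").replace(")", r"\)")
    return tag

print(normalize_tag("bishop_(chess)"))                       # bishop \(chess\)
print(normalize_tag("bishop_(chess)", escape_parens=False))  # bishop (chess)
```
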
#### Tag Ordering

Most diffusion models, AWA Diffusion included, understand logically ordered tags better. Tag ordering is not mandatory, but it helps the model understand your intent. Generally, the earlier a tag appears, the greater its impact on generation.

Here is an example of tag ordering. It places [art style tags](#style-tags) and [character tags](#character-tags) first, because style and subject matter most to the image. Other tags follow in order of importance. Finally, [aesthetic tags](#aesthetic-tags) and [quality tags](#quality-tags) are positioned at the end to further emphasize the aesthetics of the image.

art style (<span style="color:red">_by xxx_</span>) -> character (<span style="color:orange">_1 frieren (sousou no frieren)_</span>) -> race (elf) -> composition (cowboy shot) -> painting style (<span style="color:green">_impasto_</span>) -> theme (fantasy theme) -> main environment (in the forest, at day) -> background (gradient background) -> action (sitting on ground) -> expression (expressionless) -> main characteristics (white hair) -> other characteristics (twintails, green eyes, parted lips) -> clothing (wearing a white dress) -> clothing accessories (frills) -> other items (holding a magic wand) -> secondary environment (grass, sunshine) -> aesthetics (<span style="color:blue">_beautiful color_</span>, <span style="color:cyan">_detailed_</span>) -> quality (<span style="color:purple">_best_</span> quality) -> secondary description (birds, cloud, butterfly)

Tag order is not set in stone; writing prompts flexibly can yield better results. For example, if a concept (such as a style) is too strong and detracts from the aesthetic appeal of the image, you can move its tag to a later position to reduce its impact.

#### Negative Prompt

Negative prompts are not necessary for AWA Diffusion. If you do use them, more is not better: they should be **as concise as possible and easily recognizable by the model**. Too many negative words may lead to poorer generation results.
Here are some recommended uses of negative prompts:

1. Watermark: `signature`, `logo`, `artist name`;
2. Quality: `worst quality`, `lowres`, `ugly`, `abstract`;
3. Style: `real life`, `3d`, `celluloid`, `sketch`, `draft`;
4. Human anatomy: `deformed hand`, `fused fingers`, `extra limbs`, `extra arms`, `missing arm`, `extra legs`, `missing leg`, `extra digits`, `fewer digits`.

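If you script your generations, one illustrative (purely hypothetical) way to assemble these groups into a single negative prompt string:

```python
# Illustrative only: join the recommended negative tag groups into one string.
NEGATIVE_GROUPS = {
    "watermark": ["signature", "logo", "artist name"],
    "quality": ["worst quality", "lowres", "ugly", "abstract"],
    "style": ["real life", "3d", "celluloid", "sketch", "draft"],
}
negative_prompt = ", ".join(t for group in NEGATIVE_GROUPS.values() for t in group)
print(negative_prompt)  # signature, logo, artist name, worst quality, ...
```
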
### Trigger Words

Add trigger words to your prompts to tell the model which concept you want to generate. Trigger words can include character names, artistic styles, scenes, actions, quality, etc.

**Tips for Trigger Words**

1. **Typos**: The model is very sensitive to the spelling of trigger words. Even a single-letter difference can cause a trigger to fail or lead to unexpected results.
2. **Bracket Escaping**: In inference tools that rely on parentheses for prompt weighting, such as AUTOMATIC1111 WebUI, remember to escape the parentheses in trigger words, e.g., `1 lucy (cyberpunk)` -> `1 lucy \(cyberpunk\)`.
3. **Trigger Effect Preview**: Search for a tag on [Danbooru](https://danbooru.donmai.us/tags) to preview it and better understand its meaning and usage.

#### Style Tags

Style tags come in two types: <span style="color:red">Painting Style Tags</span> and <span style="color:blue">Artistic Style Tags</span>. <span style="color:red">Painting Style Tags</span> describe the painting techniques or media used in the image, such as oil painting, watercolor, flat color, and impasto. <span style="color:blue">Artistic Style Tags</span> represent the artistic style of the artist behind the image.

AWA Diffusion supports the following <span style="color:red">Painting Style Tags</span>:

- Painting style tags available in the Danbooru tags, such as `oil painting`, `watercolor`, `flat color`, etc.;
- All painting style tags supported by [AID XL 0.8](https://civitai.com/models/124189/anime-illust-diffusion-xl), such as `flat-pasto`, etc.;
- All style tags supported by [Neta Art XL 1.0](https://civitai.com/models/410737/neta-art-xl), such as `gufeng`, etc.

See the [Painting Style Tags List](https://huggingface.co/Eugeoter/artiwaifu-diffusion-1.0/blob/main/references/style.csv) for the full list of painting style tags.

AWA Diffusion supports the following <span style="color:blue">Artistic Style Tags</span>:

- Artistic style tags available in the Danbooru tags, such as `by yoneyama mai`, `by wlop`, etc.;
- All artistic style tags supported by [AID XL 0.8](https://civitai.com/models/124189/anime-illust-diffusion-xl), such as `by antifreeze3`, `by 7thknights`, etc.

See the [Artistic Style Tags List](https://huggingface.co/Eugeoter/artiwaifu-diffusion-1.0/blob/main/references/artist.csv) for the full list of artistic style tags.

The higher a tag's count in the tag repository, the more thoroughly that artistic style has been trained, and the higher the fidelity in generation. Typically, artistic style tags with a count above **50** yield better results.

**Tips for Style Tags**

1. **Intensity Adjustment**: You can adjust the intensity of a style by changing the position or weight of its tag in your prompt. Placing a style tag earlier enhances its effect; placing it later reduces it.

❓ **Question:** Why include the prefix `by` in artistic style tags?

💡 **Answer:** To clearly inform the model that you want a specific artistic style rather than something else. Including the prefix `by` differentiates `by xxx` from `xxx`, especially when `xxx` itself carries other meanings; `dino`, for instance, could mean either a dinosaur or an artist's identifier.
Similarly, when triggering characters, add a `1 ` prefix to the character trigger word.

#### Character Tags

Character tags describe the character IP in the generated image. Using a character tag guides the model to generate the character's **appearance features**.

Character tags must be sourced from the [Character Tag List](https://huggingface.co/Eugeoter/artiwaifu-diffusion-1.0/blob/main/references/character.csv). To generate a specific character, find the corresponding trigger word in the tag repository, replace all underscores `_` in the trigger word with spaces ` `, and prepend `1 ` to the character name.
For example, `1 ayanami rei` triggers the model to generate Rei Ayanami from the anime "EVA," corresponding to the Danbooru tag `ayanami_rei`; `1 asuna (sao)` triggers Asuna from "Sword Art Online," corresponding to the Danbooru tag `asuna_(sao)`.

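A tiny illustrative helper (hypothetical) that derives such a trigger word from a Danbooru tag:

```python
# Hypothetical: build an AWA character trigger word from a Danbooru tag
# (underscores -> spaces, prepend "1 ").
def character_trigger(danbooru_tag: str) -> str:
    return "1 " + danbooru_tag.replace("_", " ")

assert character_trigger("ayanami_rei") == "1 ayanami rei"
assert character_trigger("asuna_(sao)") == "1 asuna (sao)"
```
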
[More examples](#examples)

The higher a tag's count in the tag repository, the more thoroughly the character has been trained, and the higher the fidelity in generation. Typically, character tags with a count above **100** yield better results.

**Tips for Character Tags**

1. **Character Costuming**: To allow flexible costuming, character tags do not deliberately guide the model to draw a character's official attire. To generate a character in a specific official outfit, include a description of the attire in the prompt in addition to the trigger word, e.g., "1 lucy (cyberpunk), <span style="color:cyan">wearing a white cropped jacket, underneath bodysuit, shorts, thighhighs, hip vent</span>".
2. **Series Annotations**: Some character tags include a parenthesized annotation after the character name. The parentheses and annotation cannot be omitted, e.g., `1 lucy (cyberpunk)` cannot be written as `1 lucy`. Beyond that, no extra annotations are needed; for example, you DON'T need to append the series tag the character belongs to after the character tag.
3. **Known Issue 1**: Certain characters may show mysterious feature deformations, e.g., `1 asui tsuyu`, which triggers Tsuyu Asui from "My Hero Academia," may produce an extra black line between the eyes. This is because the model misinterprets the large round eyes as glasses; include `glasses` in the negative prompt to avoid the issue.
4. **Known Issue 2**: For less popular characters, AWA Diffusion might restore features incompletely due to insufficient data/training. In such cases, we recommend extending the character description in your prompt beyond just the name, detailing the character's origin, race, hair color, attire, etc.

**Character Tag Trigger Examples**

| Trigger Word | Note |
| ------------------------------- | --------------------------------------------------- |
| 1 lucy (cyberpunk) | ✅ Correct character tag |
| 1 lucy | ❌ Missing bracket annotation |
| 1 lucy (cyber) | ❌ Incorrect bracket annotation |
| lucy (cyberpunk) | ❌ Missing prefix `1 ` |
| 1 lucy cyberpunk | ❌ Missing brackets |
| 1 lucy (cyberpunk | ❌ Bracket not closed |
| 1 lucky (cyberpunk) | ❌ Spelling error |
| 1 lucy (cyberpunk: edgerunners) | ❌ Bracket annotation differs from the required tag |

❓ **Question:** Why do some character tags contain bracket annotations, e.g., `lucy (cyberpunk)`, while others do not, e.g., `frieren`?

💡 **Answer:** Different works may feature characters with the same name, such as Asuna from "Sword Art Online" and Asuna from "Blue Archive". To distinguish them, the character's name is annotated with the work's name, abbreviated if too long. Characters with currently unique names, like `frieren`, need no annotation.

#### Quality Tags and Aesthetic Tags

For AWA Diffusion, including quality descriptors in your positive prompt is **very important**. Quality descriptions involve quality tags and aesthetic tags.

Quality tags directly describe the aesthetic quality of the generated image, affecting detail, texture, human anatomy, lighting, color, etc. Adding quality tags helps the model generate higher-quality images. Quality tags, ranked from highest to lowest:
<span style="color:orange">amazing quality</span> -> <span style="color:purple">best quality</span> -> <span style="color:blue">high quality</span> -> <span style="color:green">normal quality</span> -> low quality -> <span style="color:grey">worst quality</span>

Aesthetic tags describe the aesthetic features of the generated image, helping the model produce artistically appealing images. In addition to typical aesthetic words like `perspective` and `lighting and shadow`, AWA Diffusion has been specially trained to respond to the aesthetic trigger words `beautiful color`, `detailed`, and `aesthetic`, which express appealing colors, rich detail, and overall beauty respectively.

The recommended generic way to describe quality is: _(Your Prompt), <span style="color:orange">beautiful color, detailed, amazing quality</span>_

**Tips for Quality and Aesthetic Tags**

1. **Tag Quantity**: Only one quality tag is needed; multiple aesthetic tags can be added.
2. **Tag Position**: The position of quality and aesthetic tags is not fixed, but they are typically placed at the end of the prompt.
3. **Relative Quality**: There is no absolute hierarchy of quality; the implied quality aligns with general aesthetic standards, and different users may perceive quality differently.

#### Rating Tags

Rating tags describe the level of exposure in the generated image's content. Rating tags, from safest to most explicit:

<span style="color:green">rating: general</span> (or <span style="color:green">safe</span>) -> <span style="color:yellow">rating: suggestive</span> -> <span style="color:orange">rating: questionable</span> -> <span style="color:red">rating: explicit</span> (or <span style="color:red">nsfw</span>)

### Prompt Word Examples

#### Example 1

**A**

_<span style="color:green">by yoneyama mai</span>, <span style="color:blue">1 frieren</span>, 1girl, solo, fantasy theme, smile, holding a magic wand, <span style="color:yellow">beautiful color</span>, <span style="color:red">amazing quality</span>_

1. <span style="color:green">by yoneyama mai</span> triggers the artistic style of Yoneyama Mai, placed at the front to enhance its effect.
2. <span style="color:blue">1 frieren</span> triggers the character Frieren from "Frieren: Beyond Journey's End."
3. <span style="color:yellow">beautiful color</span> describes the beautiful colors of the generated image.
4. <span style="color:red">amazing quality</span> describes the stunning quality of the generated image.

**B**

_<span style="color:green">by nixeu</span>, <span style="color:blue">1 lucy (cyberpunk)</span>, 1girl, solo, cowboy shot, gradient background, white cropped jacket, underneath bodysuit, shorts, thighhighs, hip vent, <span style="color:yellow">detailed</span>, <span style="color:red">best quality</span>_

#### Example 2: Style Mixing

By layering multiple style tags, you can generate images with features of several styles.

**A** Simple Mixing

_**<span style="color:green">by ningen mame</span>, <span style="color:cyan">by ciloranko</span>, <span style="color:blue">by sho (sho lwlw)</span>**, 1girl, 1 hatsune miku, sitting, arm support, smile, detailed, amazing quality_

**B** Weighted Mixing

Using the AUTOMATIC1111 WebUI prompt weighting syntax (parenthesis weighting), weight the different style tags to better control the generated image's style.

_**<span style="color:green">(by ningen mame:0.8)</span>, <span style="color:cyan">(by ciloranko:1.1)</span>, <span style="color:blue">(by sho \(sho lwlw\):1.2)</span>**, 1girl, 1 hatsune miku, sitting, arm support, smile, detailed, amazing quality_

#### Example 3: Multi-Character Scenes

By adding multiple character tags to your prompt, you can generate images with several characters in the same frame. Compared to similar models, AWA performs better in multi-character scenes, but results remain unstable.

**A** Mixed Gender Scene

_**1girl and 1boy, <span style="color:blue">1 ganyu</span> girl, <span style="color:cyan">1 gojou satoru</span> boy**, beautiful color, amazing quality_

**B** Same Gender Scene

_**2girls, <span style="color:blue">1 ganyu</span> girl, <span style="color:orange">1 yoimiya</span> girl**, beautiful color, amazing quality_

## Future Work

AWA Diffusion aims to combine high-level <span style="color:purple">aesthetics</span> with comprehensive <span style="color:cyan">knowledge</span>: it should neither have the greasy, over-polished look typical of AI art nor become a pretty but knowledge-poor vase.
We will continue to explore more advanced training techniques and strategies to keep improving the model's quality.

## Support Us

Training AWA Diffusion incurs substantial costs. If you appreciate our work, please consider supporting us through [Ko-fi](https://ko-fi.com/eugeai) to aid our research and development. Thank you for your likes and support!
artiwaifu-diffusion-v1.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:73ed24bde3bd8146e386c2f9e49d463d8129fc10099559765aac5161dcbff606
size 6938040682
model_index.json
ADDED
@@ -0,0 +1,41 @@
{
  "_class_name": "StableDiffusionXLPipeline",
  "_diffusers_version": "0.27.2",
  "feature_extractor": [
    null,
    null
  ],
  "force_zeros_for_empty_prompt": true,
  "image_encoder": [
    null,
    null
  ],
  "scheduler": [
    "diffusers",
    "EulerDiscreteScheduler"
  ],
  "text_encoder": [
    "transformers",
    "CLIPTextModel"
  ],
  "text_encoder_2": [
    "transformers",
    "CLIPTextModelWithProjection"
  ],
  "tokenizer": [
    "transformers",
    "CLIPTokenizer"
  ],
  "tokenizer_2": [
    "transformers",
    "CLIPTokenizer"
  ],
  "unet": [
    "diffusers",
    "UNet2DConditionModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
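For readers unfamiliar with the diffusers layout: `model_index.json` maps each pipeline component to a (library, class) pair, which `DiffusionPipeline.from_pretrained` uses to assemble the pipeline. A minimal sketch, assuming a working diffusers install:

```python
# Sketch: diffusers reads model_index.json to pick the pipeline and components.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("Eugeoter/artiwaifu-diffusion-1.0")
print(type(pipe).__name__)            # StableDiffusionXLPipeline
print(type(pipe.scheduler).__name__)  # EulerDiscreteScheduler
```
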
references/artist.csv
ADDED
The diff for this file is too large to render.
See raw diff
references/character.csv
ADDED
The diff for this file is too large to render.
See raw diff
references/style.csv
ADDED
@@ -0,0 +1,23 @@
"style","count"
"sketch","19690"
"celluloid","14565"
"flat-pasto","14387"
"thin-pasto","7421"
"pseudo-impasto","7335"
"realistic","7001"
"impasto","4576"
"flat color","4510"
"3d","3211"
"clean color","2949"
"anime coloring","2531"
"painting (medium)","1749"
"watercolor (medium)","1171"
"cel shading","943"
"photorealistic","861"
"oil painting (medium)","747"
"acrylic paint (medium)","491"
"marker (medium)","310"
"graphite (medium)","239"
"card (medium)","228"
"draft","184"
"colored pencil (medium)","147"
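Since the README notes that higher-count tags are better trained, one illustrative way to rank the painting styles from this CSV (file path assumed relative to a local checkout):

```python
# Illustrative: rank painting style tags by training count.
import csv

with open("references/style.csv", newline="", encoding="utf-8") as f:
    styles = sorted(
        ((row["style"], int(row["count"])) for row in csv.DictReader(f)),
        key=lambda pair: pair[1],
        reverse=True,
    )
print(styles[:3])  # [('sketch', 19690), ('celluloid', 14565), ('flat-pasto', 14387)]
```
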
scheduler/scheduler_config.json
ADDED
@@ -0,0 +1,21 @@
{
  "_class_name": "EulerDiscreteScheduler",
  "_diffusers_version": "0.27.2",
  "beta_end": 0.012,
  "beta_schedule": "scaled_linear",
  "beta_start": 0.00085,
  "interpolation_type": "linear",
  "num_train_timesteps": 1000,
  "prediction_type": "epsilon",
  "rescale_betas_zero_snr": false,
  "sample_max_value": 1.0,
  "set_alpha_to_one": false,
  "sigma_max": null,
  "sigma_min": null,
  "skip_prk_steps": true,
  "steps_offset": 1,
  "timestep_spacing": "leading",
  "timestep_type": "discrete",
  "trained_betas": null,
  "use_karras_sigmas": false
}
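The shipped config is a plain Euler scheduler (`use_karras_sigmas: false`). To try the DPM++ 2M Karras setting recommended in the usage guide, a hedged sketch (assuming the pipeline from the usage example is already loaded as `pipe`):

```python
# Sketch: swap in DPM++ 2M with Karras sigmas (~35 steps recommended above).
from diffusers import DPMSolverMultistepScheduler

pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True
)
```
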
text_encoder/config.json
ADDED
@@ -0,0 +1,24 @@
{
  "architectures": [
    "CLIPTextModel"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 0,
  "dropout": 0.0,
  "eos_token_id": 2,
  "hidden_act": "quick_gelu",
  "hidden_size": 768,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 77,
  "model_type": "clip_text_model",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "projection_dim": 768,
  "torch_dtype": "float32",
  "transformers_version": "4.27.4",
  "vocab_size": 49408
}
text_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:92bc370fe6fb853e7e2cb4d9060447f7c7dc80729414f73beb1c091779f8ed0f
size 492265572
text_encoder_2/config.json
ADDED
@@ -0,0 +1,24 @@
{
  "architectures": [
    "CLIPTextModelWithProjection"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 0,
  "dropout": 0.0,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_size": 1280,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
  "intermediate_size": 5120,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 77,
  "model_type": "clip_text_model",
  "num_attention_heads": 20,
  "num_hidden_layers": 32,
  "pad_token_id": 1,
  "projection_dim": 1280,
  "torch_dtype": "float32",
  "transformers_version": "4.27.4",
  "vocab_size": 49408
}
text_encoder_2/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6776157756968af0664f7b4053a9197334c4bee34a1fb4682aa74076edddb3c9
size 2778702976
tokenizer/merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer/special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<|endoftext|>",
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1,33 @@
{
  "add_prefix_space": false,
  "bos_token": {
    "__type": "AddedToken",
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "do_lower_case": true,
  "eos_token": {
    "__type": "AddedToken",
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "errors": "replace",
  "model_max_length": 77,
  "pad_token": "<|endoftext|>",
  "special_tokens_map_file": "./special_tokens_map.json",
  "tokenizer_class": "CLIPTokenizer",
  "unk_token": {
    "__type": "AddedToken",
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer/vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_2/merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_2/special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "!",
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer_2/tokenizer_config.json
ADDED
@@ -0,0 +1,33 @@
{
  "add_prefix_space": false,
  "bos_token": {
    "__type": "AddedToken",
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "do_lower_case": true,
  "eos_token": {
    "__type": "AddedToken",
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "errors": "replace",
  "model_max_length": 77,
  "pad_token": "!",
  "special_tokens_map_file": "./special_tokens_map.json",
  "tokenizer_class": "CLIPTokenizer",
  "unk_token": {
    "__type": "AddedToken",
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer_2/vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
unet/config.json
ADDED
@@ -0,0 +1,72 @@
{
  "_class_name": "UNet2DConditionModel",
  "_diffusers_version": "0.27.2",
  "act_fn": "silu",
  "addition_embed_type": "text_time",
  "addition_embed_type_num_heads": 64,
  "addition_time_embed_dim": 256,
  "attention_head_dim": [
    5,
    10,
    20
  ],
  "attention_type": "default",
  "block_out_channels": [
    320,
    640,
    1280
  ],
  "center_input_sample": false,
  "class_embed_type": null,
  "class_embeddings_concat": false,
  "conv_in_kernel": 3,
  "conv_out_kernel": 3,
  "cross_attention_dim": 2048,
  "cross_attention_norm": null,
  "down_block_types": [
    "DownBlock2D",
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D"
  ],
  "downsample_padding": 1,
  "dropout": 0.0,
  "dual_cross_attention": false,
  "encoder_hid_dim": null,
  "encoder_hid_dim_type": null,
  "flip_sin_to_cos": true,
  "freq_shift": 0,
  "in_channels": 4,
  "layers_per_block": 2,
  "mid_block_only_cross_attention": null,
  "mid_block_scale_factor": 1,
  "mid_block_type": "UNetMidBlock2DCrossAttn",
  "norm_eps": 1e-05,
  "norm_num_groups": 32,
  "num_attention_heads": null,
  "num_class_embeds": null,
  "only_cross_attention": false,
  "out_channels": 4,
  "projection_class_embeddings_input_dim": 2816,
  "resnet_out_scale_factor": 1.0,
  "resnet_skip_time_act": false,
  "resnet_time_scale_shift": "default",
  "reverse_transformer_layers_per_block": null,
  "sample_size": 128,
  "time_cond_proj_dim": null,
  "time_embedding_act_fn": null,
  "time_embedding_dim": null,
  "time_embedding_type": "positional",
  "timestep_post_act": null,
  "transformer_layers_per_block": [
    1,
    2,
    10
  ],
  "up_block_types": [
    "CrossAttnUpBlock2D",
    "CrossAttnUpBlock2D",
    "UpBlock2D"
  ],
  "upcast_attention": false,
  "use_linear_projection": true
}
unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fb5230c94192202b1c8789b9a59e7e9c3852334cf6275be734735c7acf9f9f5f
size 10270077736
vae/config.json
ADDED
@@ -0,0 +1,33 @@
{
  "_class_name": "AutoencoderKL",
  "_diffusers_version": "0.27.2",
  "act_fn": "silu",
  "block_out_channels": [
    128,
    256,
    512,
    512
  ],
  "down_block_types": [
    "DownEncoderBlock2D",
    "DownEncoderBlock2D",
    "DownEncoderBlock2D",
    "DownEncoderBlock2D"
  ],
  "force_upcast": true,
  "in_channels": 3,
  "latent_channels": 4,
  "latents_mean": null,
  "latents_std": null,
  "layers_per_block": 2,
  "norm_num_groups": 32,
  "out_channels": 3,
  "sample_size": 1024,
  "scaling_factor": 0.13025,
  "up_block_types": [
    "UpDecoderBlock2D",
    "UpDecoderBlock2D",
    "UpDecoderBlock2D",
    "UpDecoderBlock2D"
  ]
}
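Note the `scaling_factor` of 0.13025: SDXL latents are multiplied by this factor after encoding and divided by it before decoding (diffusers pipelines handle this internally). A standalone sketch, assuming torch and diffusers are installed:

```python
# Sketch: round-trip a dummy image batch through the VAE with the scaling factor.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("Eugeoter/artiwaifu-diffusion-1.0", subfolder="vae")
pixels = torch.randn(1, 3, 256, 256)  # dummy batch in [-1, 1]

with torch.no_grad():
    latents = vae.encode(pixels).latent_dist.sample() * vae.config.scaling_factor
    decoded = vae.decode(latents / vae.config.scaling_factor).sample
print(latents.shape)  # torch.Size([1, 4, 32, 32]) -- 8x spatial downsampling
```
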
vae/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:78f6189c8492013e3cac81637a1f657f790a237387f8a9dfd6bfa5fee28eb646
size 334643268