Upload folder using huggingface_hub
Browse files- .gitattributes +23 -0
- README.md +143 -0
- david-and-the-forest.safetensors +3 -0
- david-and-the-forest_emb.safetensors +3 -0
- image-0.png +3 -0
- image-1.png +3 -0
- image-10.png +3 -0
- image-11.png +3 -0
- image-12.png +3 -0
- image-13.png +3 -0
- image-14.png +3 -0
- image-15.png +3 -0
- image-16.png +3 -0
- image-17.png +3 -0
- image-18.png +3 -0
- image-19.png +3 -0
- image-2.png +3 -0
- image-20.png +3 -0
- image-21.png +3 -0
- image-22.png +3 -0
- image-3.png +3 -0
- image-4.png +3 -0
- image-5.png +3 -0
- image-6.png +3 -0
- image-7.png +3 -0
- image-8.png +3 -0
- image-9.png +3 -0
- logs/dreambooth-lora-sd-xl/1706164162.678439/events.out.tfevents.1706164162.r-computational-mama-autotrain-david-and-the-forest-3-c0289-bg9.215.1 +3 -0
- logs/dreambooth-lora-sd-xl/1706164162.6802099/hparams.yml +74 -0
- logs/dreambooth-lora-sd-xl/events.out.tfevents.1706164162.r-computational-mama-autotrain-david-and-the-forest-3-c0289-bg9.215.0 +3 -0
- pytorch_lora_weights.safetensors +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,26 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
image-0.png filter=lfs diff=lfs merge=lfs -text
|
37 |
+
image-1.png filter=lfs diff=lfs merge=lfs -text
|
38 |
+
image-10.png filter=lfs diff=lfs merge=lfs -text
|
39 |
+
image-11.png filter=lfs diff=lfs merge=lfs -text
|
40 |
+
image-12.png filter=lfs diff=lfs merge=lfs -text
|
41 |
+
image-13.png filter=lfs diff=lfs merge=lfs -text
|
42 |
+
image-14.png filter=lfs diff=lfs merge=lfs -text
|
43 |
+
image-15.png filter=lfs diff=lfs merge=lfs -text
|
44 |
+
image-16.png filter=lfs diff=lfs merge=lfs -text
|
45 |
+
image-17.png filter=lfs diff=lfs merge=lfs -text
|
46 |
+
image-18.png filter=lfs diff=lfs merge=lfs -text
|
47 |
+
image-19.png filter=lfs diff=lfs merge=lfs -text
|
48 |
+
image-2.png filter=lfs diff=lfs merge=lfs -text
|
49 |
+
image-20.png filter=lfs diff=lfs merge=lfs -text
|
50 |
+
image-21.png filter=lfs diff=lfs merge=lfs -text
|
51 |
+
image-22.png filter=lfs diff=lfs merge=lfs -text
|
52 |
+
image-3.png filter=lfs diff=lfs merge=lfs -text
|
53 |
+
image-4.png filter=lfs diff=lfs merge=lfs -text
|
54 |
+
image-5.png filter=lfs diff=lfs merge=lfs -text
|
55 |
+
image-6.png filter=lfs diff=lfs merge=lfs -text
|
56 |
+
image-7.png filter=lfs diff=lfs merge=lfs -text
|
57 |
+
image-8.png filter=lfs diff=lfs merge=lfs -text
|
58 |
+
image-9.png filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
tags:
|
3 |
+
- stable-diffusion-xl
|
4 |
+
- stable-diffusion-xl-diffusers
|
5 |
+
- text-to-image
|
6 |
+
- diffusers
|
7 |
+
- lora
|
8 |
+
- template:sd-lora
|
9 |
+
widget:
|
10 |
+
- text: <s0><s1> a drawing of two people with ink splatters on their faces
|
11 |
+
output:
|
12 |
+
url: image-0.png
|
13 |
+
- text: <s0><s1> a drawing of a person with a black and white paint splatter
|
14 |
+
output:
|
15 |
+
url: image-1.png
|
16 |
+
- text: <s0><s1> a drawing of a person with a black and white paint splatter
|
17 |
+
output:
|
18 |
+
url: image-2.png
|
19 |
+
- text: <s0><s1> a drawing of a man with a hat and a gun
|
20 |
+
output:
|
21 |
+
url: image-3.png
|
22 |
+
- text: <s0><s1> a drawing of a man with a hat and a gun
|
23 |
+
output:
|
24 |
+
url: image-4.png
|
25 |
+
- text: <s0><s1> a drawing of a black and white drawing of rocks
|
26 |
+
output:
|
27 |
+
url: image-5.png
|
28 |
+
- text: <s0><s1> a drawing of a black and white drawing of rocks
|
29 |
+
output:
|
30 |
+
url: image-6.png
|
31 |
+
- text: <s0><s1> a drawing of a man with a face on it
|
32 |
+
output:
|
33 |
+
url: image-7.png
|
34 |
+
- text: <s0><s1> a drawing of a man with a face on it
|
35 |
+
output:
|
36 |
+
url: image-8.png
|
37 |
+
- text: <s0><s1> a drawing of a fish on a wall
|
38 |
+
output:
|
39 |
+
url: image-9.png
|
40 |
+
- text: <s0><s1> a drawing of a person on a wall with a computer
|
41 |
+
output:
|
42 |
+
url: image-10.png
|
43 |
+
- text: <s0><s1> a drawing of a man with a long body and a long head
|
44 |
+
output:
|
45 |
+
url: image-11.png
|
46 |
+
- text: <s0><s1> a drawing of a dog with its tongue out
|
47 |
+
output:
|
48 |
+
url: image-12.png
|
49 |
+
- text: <s0><s1> a drawing of a man with a face on it
|
50 |
+
output:
|
51 |
+
url: image-13.png
|
52 |
+
- text: <s0><s1> a drawing of a man in a suit with a hat
|
53 |
+
output:
|
54 |
+
url: image-14.png
|
55 |
+
- text: <s0><s1> a drawing of a man standing on a hill with a moon in the background
|
56 |
+
output:
|
57 |
+
url: image-15.png
|
58 |
+
- text: <s0><s1> a drawing of a man with a face on his back
|
59 |
+
output:
|
60 |
+
url: image-16.png
|
61 |
+
- text: <s0><s1> a drawing of a man with a knife and a knife
|
62 |
+
output:
|
63 |
+
url: image-17.png
|
64 |
+
- text: <s0><s1> a drawing of a moon and a planet
|
65 |
+
output:
|
66 |
+
url: image-18.png
|
67 |
+
- text: <s0><s1> a drawing of a spider on paper with black and white paint
|
68 |
+
output:
|
69 |
+
url: image-19.png
|
70 |
+
- text: <s0><s1> a drawing of a man falling down with a black ink pen
|
71 |
+
output:
|
72 |
+
url: image-20.png
|
73 |
+
- text: <s0><s1> a drawing of a black and white animal with a black and white pattern
|
74 |
+
output:
|
75 |
+
url: image-21.png
|
76 |
+
- text: <s0><s1> a drawing of a group of small faces on a white wall
|
77 |
+
output:
|
78 |
+
url: image-22.png
|
79 |
+
base_model: stabilityai/stable-diffusion-xl-base-1.0
|
80 |
+
instance_prompt: <s0><s1>
|
81 |
+
license: openrail++
|
82 |
+
---
|
83 |
+
|
84 |
+
# SDXL LoRA DreamBooth - computational-mama/david-and-the-forest
|
85 |
+
|
86 |
+
<Gallery />
|
87 |
+
|
88 |
+
## Model description
|
89 |
+
|
90 |
+
### These are computational-mama/david-and-the-forest LoRA adaption weights for stabilityai/stable-diffusion-xl-base-1.0.
|
91 |
+
|
92 |
+
## Download model
|
93 |
+
|
94 |
+
### Use it with UIs such as AUTOMATIC1111, Comfy UI, SD.Next, Invoke
|
95 |
+
|
96 |
+
- **LoRA**: download **[`david-and-the-forest.safetensors` here 💾](/computational-mama/david-and-the-forest/blob/main/david-and-the-forest.safetensors)**.
|
97 |
+
- Place it in your `models/Lora` folder.
|
98 |
+
- On AUTOMATIC1111, load the LoRA by adding `<lora:david-and-the-forest:1>` to your prompt. On ComfyUI just [load it as a regular LoRA](https://comfyanonymous.github.io/ComfyUI_examples/lora/).
|
99 |
+
- *Embeddings*: download **[`david-and-the-forest_emb.safetensors` here 💾](/computational-mama/david-and-the-forest/blob/main/david-and-the-forest_emb.safetensors)**.
|
100 |
+
- Place it in your `embeddings` folder
|
101 |
+
- Use it by adding `david-and-the-forest_emb` to your prompt.
|
102 |
+
(you need both the LoRA and the embeddings as they were trained together for this LoRA)
|
103 |
+
|
104 |
+
|
105 |
+
## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
|
106 |
+
|
107 |
+
```py
|
108 |
+
from diffusers import AutoPipelineForText2Image
|
109 |
+
import torch
|
110 |
+
from huggingface_hub import hf_hub_download
|
111 |
+
from safetensors.torch import load_file
|
112 |
+
|
113 |
+
pipeline = AutoPipelineForText2Image.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0', torch_dtype=torch.float16).to('cuda')
|
114 |
+
pipeline.load_lora_weights('computational-mama/david-and-the-forest', weight_name='pytorch_lora_weights.safetensors')
|
115 |
+
embedding_path = hf_hub_download(repo_id='computational-mama/david-and-the-forest', filename='david-and-the-forest_emb.safetensors', repo_type="model")
|
116 |
+
state_dict = load_file(embedding_path)
|
117 |
+
pipeline.load_textual_inversion(state_dict["clip_l"], token=["<s0>", "<s1>"], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer)
|
118 |
+
pipeline.load_textual_inversion(state_dict["clip_g"], token=["<s0>", "<s1>"], text_encoder=pipeline.text_encoder_2, tokenizer=pipeline.tokenizer_2)
|
119 |
+
|
120 |
+
image = pipeline('<s0><s1>').images[0]
|
121 |
+
```
|
122 |
+
|
123 |
+
For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
|
124 |
+
|
125 |
+
## Trigger words
|
126 |
+
|
127 |
+
To trigger image generation of the trained concept (or concepts), replace each concept identifier in your prompt with the newly inserted tokens:
|
128 |
+
|
129 |
+
to trigger concept `TOK` → use `<s0><s1>` in your prompt
|
130 |
+
|
131 |
+
|
132 |
+
|
133 |
+
## Details
|
134 |
+
All [Files & versions](/computational-mama/david-and-the-forest/tree/main).
|
135 |
+
|
136 |
+
The weights were trained using [🧨 diffusers Advanced Dreambooth Training Script](https://github.com/huggingface/diffusers/blob/main/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py).
|
137 |
+
|
138 |
+
LoRA for the text encoder was enabled: False.
|
139 |
+
|
140 |
+
Pivotal tuning was enabled: True.
|
141 |
+
|
142 |
+
Special VAE used for training: madebyollin/sdxl-vae-fp16-fix.
|
143 |
+
|
david-and-the-forest.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b96626dba29e6e4992ef27efa49b35d2742b06f48defd0f1cc1f04f29025399d
|
3 |
+
size 186046568
|
david-and-the-forest_emb.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f4b6903c7e01ef2894e322ac85bc4c4e371e2cedaacfa1f9d84dc9c3c644bd15
|
3 |
+
size 8344
|
image-0.png
ADDED
![]() |
Git LFS Details
|
image-1.png
ADDED
![]() |
Git LFS Details
|
image-10.png
ADDED
![]() |
Git LFS Details
|
image-11.png
ADDED
![]() |
Git LFS Details
|
image-12.png
ADDED
![]() |
Git LFS Details
|
image-13.png
ADDED
![]() |
Git LFS Details
|
image-14.png
ADDED
![]() |
Git LFS Details
|
image-15.png
ADDED
![]() |
Git LFS Details
|
image-16.png
ADDED
![]() |
Git LFS Details
|
image-17.png
ADDED
![]() |
Git LFS Details
|
image-18.png
ADDED
![]() |
Git LFS Details
|
image-19.png
ADDED
![]() |
Git LFS Details
|
image-2.png
ADDED
![]() |
Git LFS Details
|
image-20.png
ADDED
![]() |
Git LFS Details
|
image-21.png
ADDED
![]() |
Git LFS Details
|
image-22.png
ADDED
![]() |
Git LFS Details
|
image-3.png
ADDED
![]() |
Git LFS Details
|
image-4.png
ADDED
![]() |
Git LFS Details
|
image-5.png
ADDED
![]() |
Git LFS Details
|
image-6.png
ADDED
![]() |
Git LFS Details
|
image-7.png
ADDED
![]() |
Git LFS Details
|
image-8.png
ADDED
![]() |
Git LFS Details
|
image-9.png
ADDED
![]() |
Git LFS Details
|
logs/dreambooth-lora-sd-xl/1706164162.678439/events.out.tfevents.1706164162.r-computational-mama-autotrain-david-and-the-forest-3-c0289-bg9.215.1
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:dcdb810d7256ee606d2a2cd844c3662c838ed3843d3fc4fef497aa89477987ad
|
3 |
+
size 3522
|
logs/dreambooth-lora-sd-xl/1706164162.6802099/hparams.yml
ADDED
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
adam_beta1: 0.9
|
2 |
+
adam_beta2: 0.999
|
3 |
+
adam_epsilon: 1.0e-08
|
4 |
+
adam_weight_decay: 0.0001
|
5 |
+
adam_weight_decay_text_encoder: null
|
6 |
+
allow_tf32: false
|
7 |
+
cache_dir: null
|
8 |
+
cache_latents: true
|
9 |
+
caption_column: prompt
|
10 |
+
center_crop: false
|
11 |
+
checkpointing_steps: 100000
|
12 |
+
checkpoints_total_limit: null
|
13 |
+
class_data_dir: null
|
14 |
+
class_prompt: null
|
15 |
+
crops_coords_top_left_h: 0
|
16 |
+
crops_coords_top_left_w: 0
|
17 |
+
dataloader_num_workers: 0
|
18 |
+
dataset_config_name: null
|
19 |
+
dataset_name: ./3b59d000-4cda-4a8c-ae44-0fe76f71fdf9
|
20 |
+
enable_xformers_memory_efficient_attention: false
|
21 |
+
gradient_accumulation_steps: 1
|
22 |
+
gradient_checkpointing: true
|
23 |
+
hub_model_id: null
|
24 |
+
hub_token: null
|
25 |
+
image_column: image
|
26 |
+
instance_data_dir: null
|
27 |
+
instance_prompt: <s0><s1>
|
28 |
+
learning_rate: 1.0
|
29 |
+
local_rank: -1
|
30 |
+
logging_dir: logs
|
31 |
+
lr_num_cycles: 1
|
32 |
+
lr_power: 1.0
|
33 |
+
lr_scheduler: constant
|
34 |
+
lr_warmup_steps: 0
|
35 |
+
max_grad_norm: 1.0
|
36 |
+
max_train_steps: 1500
|
37 |
+
mixed_precision: bf16
|
38 |
+
num_class_images: 100
|
39 |
+
num_new_tokens_per_abstraction: 2
|
40 |
+
num_train_epochs: 66
|
41 |
+
num_validation_images: 4
|
42 |
+
optimizer: prodigy
|
43 |
+
output_dir: david-and-the-forest
|
44 |
+
pretrained_model_name_or_path: stabilityai/stable-diffusion-xl-base-1.0
|
45 |
+
pretrained_vae_model_name_or_path: madebyollin/sdxl-vae-fp16-fix
|
46 |
+
prior_generation_precision: null
|
47 |
+
prior_loss_weight: 1.0
|
48 |
+
prodigy_beta3: null
|
49 |
+
prodigy_decouple: true
|
50 |
+
prodigy_safeguard_warmup: true
|
51 |
+
prodigy_use_bias_correction: true
|
52 |
+
push_to_hub: false
|
53 |
+
rank: 32
|
54 |
+
repeats: 2
|
55 |
+
report_to: tensorboard
|
56 |
+
resolution: 1024
|
57 |
+
resume_from_checkpoint: null
|
58 |
+
revision: null
|
59 |
+
sample_batch_size: 4
|
60 |
+
scale_lr: false
|
61 |
+
seed: 42
|
62 |
+
snr_gamma: null
|
63 |
+
text_encoder_lr: 1.0
|
64 |
+
token_abstraction: TOK
|
65 |
+
train_batch_size: 2
|
66 |
+
train_text_encoder: false
|
67 |
+
train_text_encoder_frac: 1.0
|
68 |
+
train_text_encoder_ti: true
|
69 |
+
train_text_encoder_ti_frac: 0.5
|
70 |
+
use_8bit_adam: false
|
71 |
+
validation_epochs: 50
|
72 |
+
validation_prompt: null
|
73 |
+
variant: null
|
74 |
+
with_prior_preservation: false
|
logs/dreambooth-lora-sd-xl/events.out.tfevents.1706164162.r-computational-mama-autotrain-david-and-the-forest-3-c0289-bg9.215.0
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:83809d918048b123f22224c5eb0f877cf855e270d6a6b1fe2c2f0d95f91f729c
|
3 |
+
size 125834
|
pytorch_lora_weights.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:4f525a5983a81b8f91ce62cc7bec161e41f2e3e8a01233032151801adfbce19f
|
3 |
+
size 185963768
|