End of training
- README.md +2 -2
- feature_extractor/preprocessor_config.json +1 -1
- logs/dreambooth/1695967749.9487479/events.out.tfevents.1695967749.linke5.3308851.1 +3 -0
- logs/dreambooth/1695967749.952343/hparams.yml +59 -0
- logs/dreambooth/1695967923.7254755/events.out.tfevents.1695967923.linke5.3363193.1 +3 -0
- logs/dreambooth/1695967923.7288308/hparams.yml +59 -0
- logs/dreambooth/events.out.tfevents.1695967749.linke5.3308851.0 +3 -0
- logs/dreambooth/events.out.tfevents.1695967923.linke5.3363193.0 +3 -0
- model_index.json +2 -2
- safety_checker/config.json +1 -3
- scheduler/scheduler_config.json +0 -4
- text_encoder/config.json +1 -1
- text_encoder/model.safetensors +1 -1
- unet/config.json +1 -1
- unet/diffusion_pytorch_model.safetensors +1 -1
- vae/config.json +1 -1
- vae/diffusion_pytorch_model.safetensors +1 -1
README.md CHANGED
@@ -1,7 +1,7 @@
 
 ---
 license: creativeml-openrail-m
-base_model:
+base_model: SG161222/Realistic_Vision_V2.0
 instance_prompt: a realistic photo of lyf woman
 tags:
 - stable-diffusion
@@ -14,7 +14,7 @@ inference: true
 
 # DreamBooth - zhengzhou/checkpoints_lyf
 
-This is a dreambooth model derived from
+This is a dreambooth model derived from SG161222/Realistic_Vision_V2.0. The weights were trained on a realistic photo of lyf woman using [DreamBooth](https://dreambooth.github.io/).
 You can find some example images in the following.
 
 
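With the card updated, the trained checkpoint can be loaded like any other Stable Diffusion pipeline. A minimal usage sketch, assuming the repository zhengzhou/checkpoints_lyf is reachable on the Hub and a CUDA device is available:

```python
# Load the DreamBooth-tuned pipeline and sample with the instance prompt
# from the model card above.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "zhengzhou/checkpoints_lyf", torch_dtype=torch.float16
).to("cuda")

image = pipe("a realistic photo of lyf woman", num_inference_steps=30).images[0]
image.save("lyf_sample.png")
```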
feature_extractor/preprocessor_config.json CHANGED
@@ -14,7 +14,7 @@
     0.4578275,
     0.40821073
   ],
-  "image_processor_type": "
+  "image_processor_type": "CLIPImageProcessor",
   "image_std": [
     0.26862954,
     0.26130258,
logs/dreambooth/1695967749.9487479/events.out.tfevents.1695967749.linke5.3308851.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77b21825566631aa47fb68f751f6186d3ca92e1b1398b1243cdbb3324b9d1b7b
+size 2766
logs/dreambooth/1695967749.952343/hparams.yml ADDED
@@ -0,0 +1,59 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+center_crop: false
+checkpointing_steps: 500
+checkpoints_total_limit: null
+class_data_dir: null
+class_labels_conditioning: null
+class_prompt: null
+dataloader_num_workers: 0
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+hub_model_id: null
+hub_token: null
+instance_data_dir: ./lyf
+instance_prompt: a realistic photo of lyf woman
+learning_rate: 5.0e-06
+local_rank: 0
+logging_dir: logs
+lr_num_cycles: 1
+lr_power: 1.0
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_steps: 400
+mixed_precision: null
+num_class_images: 1000
+num_train_epochs: 80
+num_validation_images: 4
+offset_noise: false
+output_dir: ./checkpoints_lyf
+pre_compute_text_embeddings: false
+pretrained_model_name_or_path: SG161222/Realistic_Vision_V2.0
+prior_generation_precision: null
+prior_loss_weight: 1.0
+push_to_hub: true
+report_to: tensorboard
+resolution: 512
+resume_from_checkpoint: null
+revision: null
+sample_batch_size: 4
+scale_lr: false
+seed: null
+set_grads_to_none: false
+skip_save_text_encoder: false
+snr_gamma: null
+text_encoder_use_attention_mask: false
+tokenizer_max_length: null
+tokenizer_name: null
+train_batch_size: 1
+train_text_encoder: false
+use_8bit_adam: false
+validation_prompt: null
+validation_scheduler: DPMSolverMultistepScheduler
+validation_steps: 100
+with_prior_preservation: false
logs/dreambooth/1695967923.7254755/events.out.tfevents.1695967923.linke5.3363193.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae74900ce416b85c99e5c19b167f32b11c64f18eaf3a8dd26635fc6388f5f79f
+size 2766
logs/dreambooth/1695967923.7288308/hparams.yml ADDED
@@ -0,0 +1,59 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+center_crop: false
+checkpointing_steps: 500
+checkpoints_total_limit: null
+class_data_dir: null
+class_labels_conditioning: null
+class_prompt: null
+dataloader_num_workers: 0
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+hub_model_id: null
+hub_token: null
+instance_data_dir: ./lyf
+instance_prompt: a realistic photo of lyf woman
+learning_rate: 5.0e-06
+local_rank: 0
+logging_dir: logs
+lr_num_cycles: 1
+lr_power: 1.0
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_steps: 400
+mixed_precision: null
+num_class_images: 1000
+num_train_epochs: 80
+num_validation_images: 4
+offset_noise: false
+output_dir: ./checkpoints_lyf
+pre_compute_text_embeddings: false
+pretrained_model_name_or_path: SG161222/Realistic_Vision_V2.0
+prior_generation_precision: null
+prior_loss_weight: 1.0
+push_to_hub: true
+report_to: tensorboard
+resolution: 256
+resume_from_checkpoint: null
+revision: null
+sample_batch_size: 4
+scale_lr: false
+seed: null
+set_grads_to_none: false
+skip_save_text_encoder: false
+snr_gamma: null
+text_encoder_use_attention_mask: false
+tokenizer_max_length: null
+tokenizer_name: null
+train_batch_size: 1
+train_text_encoder: false
+use_8bit_adam: false
+validation_prompt: null
+validation_scheduler: DPMSolverMultistepScheduler
+validation_steps: 100
+with_prior_preservation: false
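The two hparams.yml files record two training runs with otherwise identical settings; the only field that differs is resolution (512 in the first run, 256 in the second). A quick sketch to confirm this, assuming the repository has been cloned locally with these paths intact:

```python
# Compare the two logged hyper-parameter files; only differing keys print.
import yaml

with open("logs/dreambooth/1695967749.952343/hparams.yml") as f:
    run_a = yaml.safe_load(f)
with open("logs/dreambooth/1695967923.7288308/hparams.yml") as f:
    run_b = yaml.safe_load(f)

for key in sorted(run_a.keys() | run_b.keys()):
    if run_a.get(key) != run_b.get(key):
        print(f"{key}: {run_a.get(key)} -> {run_b.get(key)}")
# Expected output: resolution: 512 -> 256
```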
logs/dreambooth/events.out.tfevents.1695967749.linke5.3308851.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f0a36a49f8d5cd71b1e1b5e48db9a14b0d8c387e06dbef3a751df10afdba3d1
+size 170
logs/dreambooth/events.out.tfevents.1695967923.linke5.3363193.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c8c21d480197efe62ee450e0ef4fbfb464caee5d844ca8bcbdc60632c13a617
+size 33434
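These event files are committed as three-line Git LFS pointers (version, oid, size); the actual TensorBoard data lives in LFS storage. A sketch for pulling one event file down and listing its logged scalar tags, assuming huggingface_hub and tensorboard are installed:

```python
# Download a TensorBoard event file from the Hub and list its scalar tags.
from huggingface_hub import hf_hub_download
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

path = hf_hub_download(
    repo_id="zhengzhou/checkpoints_lyf",
    filename="logs/dreambooth/events.out.tfevents.1695967923.linke5.3363193.0",
)
acc = EventAccumulator(path)
acc.Reload()
print(acc.Tags()["scalars"])  # e.g. loss and learning-rate series
```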
model_index.json CHANGED
@@ -1,10 +1,10 @@
 {
   "_class_name": "StableDiffusionPipeline",
   "_diffusers_version": "0.21.2",
-  "_name_or_path": "
+  "_name_or_path": "SG161222/Realistic_Vision_V2.0",
   "feature_extractor": [
     "transformers",
-    "
+    "CLIPImageProcessor"
   ],
   "requires_safety_checker": true,
   "safety_checker": [
safety_checker/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "SG161222/Realistic_Vision_V2.0/safety_checker",
   "architectures": [
     "StableDiffusionSafetyChecker"
   ],
@@ -8,9 +8,7 @@
   "model_type": "clip",
   "projection_dim": 768,
   "text_config": {
-    "bos_token_id": 0,
     "dropout": 0.0,
-    "eos_token_id": 2,
     "hidden_size": 768,
     "intermediate_size": 3072,
     "model_type": "clip_text_model",
scheduler/scheduler_config.json CHANGED
@@ -5,15 +5,11 @@
   "beta_schedule": "scaled_linear",
   "beta_start": 0.00085,
   "clip_sample": false,
-  "clip_sample_range": 1.0,
-  "dynamic_thresholding_ratio": 0.995,
   "num_train_timesteps": 1000,
   "prediction_type": "epsilon",
-  "sample_max_value": 1.0,
   "set_alpha_to_one": false,
   "skip_prk_steps": true,
   "steps_offset": 1,
-  "thresholding": false,
   "timestep_spacing": "leading",
   "trained_betas": null
 }
text_encoder/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "SG161222/Realistic_Vision_V2.0",
   "architectures": [
     "CLIPTextModel"
   ],
text_encoder/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:22ce0d12fcc400d5eb3ff053435d0a8b47e442ea288170a1a715318fdb52fbc5
 size 492265168
unet/config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "_class_name": "UNet2DConditionModel",
   "_diffusers_version": "0.21.2",
-  "_name_or_path": "
+  "_name_or_path": "SG161222/Realistic_Vision_V2.0",
   "act_fn": "silu",
   "addition_embed_type": null,
   "addition_embed_type_num_heads": 64,
unet/diffusion_pytorch_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0fb90080648113b3414278418136cee90f93f81133b2579649bfe3aaacc8d807
 size 3438167536
vae/config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "_class_name": "AutoencoderKL",
   "_diffusers_version": "0.21.2",
-  "_name_or_path": "
+  "_name_or_path": "SG161222/Realistic_Vision_V2.0/vae",
   "act_fn": "silu",
   "block_out_channels": [
     128,
vae/diffusion_pytorch_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:86a6170968f284b6c0c111e2611c806a04102e2a48790ebd1d490ca78040d2bb
 size 334643268
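Each updated .safetensors entry above replaces its LFS pointer's oid with the sha256 digest of the new weights, so a downloaded file can be checked against the recorded hash. A sketch using the VAE oid from the diff above:

```python
# Verify a downloaded weight file against the sha256 oid in its LFS pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

expected = "86a6170968f284b6c0c111e2611c806a04102e2a48790ebd1d490ca78040d2bb"
print(sha256_of("vae/diffusion_pytorch_model.safetensors") == expected)
```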