hpwang committed
Commit 7e83ee4
1 Parent(s): 734cf95

Upload 45 files

Files changed (46)
  1. .gitattributes +1 -0
  2. tools/Fooocus/models/checkpoints/juggernautXL_v8Rundiffusion.safetensors +3 -0
  3. tools/Fooocus/models/checkpoints/put_checkpoints_here +0 -0
  4. tools/Fooocus/models/clip/put_clip_or_text_encoder_models_here +0 -0
  5. tools/Fooocus/models/clip_vision/put_clip_vision_models_here +0 -0
  6. tools/Fooocus/models/clip_vision/wd-v1-4-moat-tagger-v2.csv +0 -0
  7. tools/Fooocus/models/configs/anything_v3.yaml +73 -0
  8. tools/Fooocus/models/configs/v1-inference.yaml +70 -0
  9. tools/Fooocus/models/configs/v1-inference_clip_skip_2.yaml +73 -0
  10. tools/Fooocus/models/configs/v1-inference_clip_skip_2_fp16.yaml +74 -0
  11. tools/Fooocus/models/configs/v1-inference_fp16.yaml +71 -0
  12. tools/Fooocus/models/configs/v1-inpainting-inference.yaml +71 -0
  13. tools/Fooocus/models/configs/v2-inference-v.yaml +68 -0
  14. tools/Fooocus/models/configs/v2-inference-v_fp32.yaml +68 -0
  15. tools/Fooocus/models/configs/v2-inference.yaml +67 -0
  16. tools/Fooocus/models/configs/v2-inference_fp32.yaml +67 -0
  17. tools/Fooocus/models/configs/v2-inpainting-inference.yaml +158 -0
  18. tools/Fooocus/models/controlnet/put_controlnets_and_t2i_here +0 -0
  19. tools/Fooocus/models/diffusers/put_diffusers_models_here +0 -0
  20. tools/Fooocus/models/embeddings/put_embeddings_or_textual_inversion_concepts_here +0 -0
  21. tools/Fooocus/models/gligen/put_gligen_models_here +0 -0
  22. tools/Fooocus/models/hypernetworks/put_hypernetworks_here +0 -0
  23. tools/Fooocus/models/inpaint/fooocus_inpaint_head.pth +3 -0
  24. tools/Fooocus/models/inpaint/inpaint_v26.fooocus.patch +3 -0
  25. tools/Fooocus/models/inpaint/put_inpaint_here +0 -0
  26. tools/Fooocus/models/loras/put_loras_here +0 -0
  27. tools/Fooocus/models/loras/sd_xl_offset_example-lora_1.0.safetensors +3 -0
  28. tools/Fooocus/models/prompt_expansion/fooocus_expansion/config.json +40 -0
  29. tools/Fooocus/models/prompt_expansion/fooocus_expansion/merges.txt +0 -0
  30. tools/Fooocus/models/prompt_expansion/fooocus_expansion/positive.txt +642 -0
  31. tools/Fooocus/models/prompt_expansion/fooocus_expansion/pytorch_model.bin +3 -0
  32. tools/Fooocus/models/prompt_expansion/fooocus_expansion/special_tokens_map.json +5 -0
  33. tools/Fooocus/models/prompt_expansion/fooocus_expansion/tokenizer.json +0 -0
  34. tools/Fooocus/models/prompt_expansion/fooocus_expansion/tokenizer_config.json +10 -0
  35. tools/Fooocus/models/prompt_expansion/fooocus_expansion/vocab.json +0 -0
  36. tools/Fooocus/models/prompt_expansion/put_prompt_expansion_here +0 -0
  37. tools/Fooocus/models/safety_checker/put_safety_checker_models_here +0 -0
  38. tools/Fooocus/models/style_models/put_t2i_style_model_here +0 -0
  39. tools/Fooocus/models/unet/put_unet_files_here +0 -0
  40. tools/Fooocus/models/upscale_models/fooocus_upscaler_s409985e5.bin +3 -0
  41. tools/Fooocus/models/upscale_models/put_esrgan_and_other_upscale_models_here +0 -0
  42. tools/Fooocus/models/vae/put_vae_here +0 -0
  43. tools/Fooocus/models/vae_approx/put_taesd_encoder_pth_and_taesd_decoder_pth_here +0 -0
  44. tools/Fooocus/models/vae_approx/vaeapp_sd15.pth +3 -0
  45. tools/Fooocus/models/vae_approx/xl-to-v1_interposer-v4.0.safetensors +3 -0
  46. tools/Fooocus/models/vae_approx/xlvaeapp.pth +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tools/Fooocus/models/inpaint/inpaint_v26.fooocus.patch filter=lfs diff=lfs merge=lfs -text
tools/Fooocus/models/checkpoints/juggernautXL_v8Rundiffusion.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeb7e9e6897a1e58b10494bd989d001e3d4bc9b634633cd7b559838f612c2867
+ size 7105348592
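Note for reviewers: every large binary in this commit is checked in as a Git LFS pointer stub like the three lines above, not the binary itself. A minimal sketch of reading such a stub back into its fields (the helper name is mine):

```python
# Parse a Git LFS pointer stub into its (version, oid, size) fields.
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    """Each pointer line is 'key value'; return them as a dict."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

ptr = parse_lfs_pointer("tools/Fooocus/models/checkpoints/juggernautXL_v8Rundiffusion.safetensors")
print(ptr["oid"], int(ptr["size"]))  # sha256:aeb7e9e6...  7105348592 bytes (~7.1 GB)
```

The `oid` is the SHA-256 of the real object, which the verification sketch further down reuses.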
tools/Fooocus/models/checkpoints/put_checkpoints_here ADDED
File without changes
tools/Fooocus/models/clip/put_clip_or_text_encoder_models_here ADDED
File without changes
tools/Fooocus/models/clip_vision/put_clip_vision_models_here ADDED
File without changes
tools/Fooocus/models/clip_vision/wd-v1-4-moat-tagger-v2.csv ADDED
The diff for this file is too large to render. See raw diff
 
tools/Fooocus/models/configs/anything_v3.yaml ADDED
@@ -0,0 +1,73 @@
+ model:
+   base_learning_rate: 1.0e-04
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false # Note: different from the one we trained before
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False
+
+     scheduler_config: # 10000 warmup steps
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps: [ 10000 ]
+         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+         f_start: [ 1.e-6 ]
+         f_max: [ 1. ]
+         f_min: [ 1. ]
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32 # unused
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_heads: 8
+         use_spatial_transformer: True
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: True
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+       params:
+         layer: "hidden"
+         layer_idx: -2
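These LDM YAMLs are written for the instantiate-from-config pattern used by the latent-diffusion codebase: each `target` names a class and `params` holds its constructor kwargs. A minimal sketch that mirrors `ldm.util.instantiate_from_config` (assumes PyYAML and the `ldm` package are on the import path):

```python
# Build the model a config like anything_v3.yaml describes.
import importlib
import yaml

def instantiate_from_config(config: dict):
    """Resolve config["target"] to a class and call it with config["params"]."""
    module_name, cls_name = config["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_name), cls_name)
    return cls(**config.get("params", {}))

with open("tools/Fooocus/models/configs/anything_v3.yaml") as f:
    cfg = yaml.safe_load(f)

model = instantiate_from_config(cfg["model"])  # a LatentDiffusion instance
```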
tools/Fooocus/models/configs/v1-inference.yaml ADDED
@@ -0,0 +1,70 @@
+ model:
+   base_learning_rate: 1.0e-04
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false # Note: different from the one we trained before
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False
+
+     scheduler_config: # 10000 warmup steps
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps: [ 10000 ]
+         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+         f_start: [ 1.e-6 ]
+         f_max: [ 1. ]
+         f_min: [ 1. ]
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32 # unused
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_heads: 8
+         use_spatial_transformer: True
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: True
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
tools/Fooocus/models/configs/v1-inference_clip_skip_2.yaml ADDED
@@ -0,0 +1,73 @@
+ model:
+   base_learning_rate: 1.0e-04
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false # Note: different from the one we trained before
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False
+
+     scheduler_config: # 10000 warmup steps
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps: [ 10000 ]
+         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+         f_start: [ 1.e-6 ]
+         f_max: [ 1. ]
+         f_min: [ 1. ]
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32 # unused
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_heads: 8
+         use_spatial_transformer: True
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: True
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+       params:
+         layer: "hidden"
+         layer_idx: -2
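The only difference from plain `v1-inference.yaml` is the tail of `cond_stage_config`: `layer: "hidden"` with `layer_idx: -2` is "clip skip 2", i.e. conditioning on the penultimate hidden state of the CLIP text encoder instead of its final layer. A minimal sketch of what that selects, using the standard SD 1.x text encoder from transformers (the model id is an assumption; some implementations additionally apply the final layer norm to this state):

```python
# Pull the penultimate CLIP hidden state, as layer_idx: -2 requests.
import torch
from transformers import CLIPTokenizer, CLIPTextModel

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

tokens = tokenizer("a photo of a cat", return_tensors="pt",
                   padding="max_length", max_length=tokenizer.model_max_length)
with torch.no_grad():
    out = encoder(**tokens, output_hidden_states=True)

penultimate = out.hidden_states[-2]  # this, not out.last_hidden_state, feeds the UNet
```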
tools/Fooocus/models/configs/v1-inference_clip_skip_2_fp16.yaml ADDED
@@ -0,0 +1,74 @@
+ model:
+   base_learning_rate: 1.0e-04
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false # Note: different from the one we trained before
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False
+
+     scheduler_config: # 10000 warmup steps
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps: [ 10000 ]
+         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+         f_start: [ 1.e-6 ]
+         f_max: [ 1. ]
+         f_min: [ 1. ]
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         use_fp16: True
+         image_size: 32 # unused
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_heads: 8
+         use_spatial_transformer: True
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: True
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+       params:
+         layer: "hidden"
+         layer_idx: -2
tools/Fooocus/models/configs/v1-inference_fp16.yaml ADDED
@@ -0,0 +1,71 @@
+ model:
+   base_learning_rate: 1.0e-04
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false # Note: different from the one we trained before
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False
+
+     scheduler_config: # 10000 warmup steps
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps: [ 10000 ]
+         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+         f_start: [ 1.e-6 ]
+         f_max: [ 1. ]
+         f_min: [ 1. ]
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         use_fp16: True
+         image_size: 32 # unused
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_heads: 8
+         use_spatial_transformer: True
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: True
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
tools/Fooocus/models/configs/v1-inpainting-inference.yaml ADDED
@@ -0,0 +1,71 @@
+ model:
+   base_learning_rate: 7.5e-05
+   target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false # Note: different from the one we trained before
+     conditioning_key: hybrid # important
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     finetune_keys: null
+
+     scheduler_config: # 10000 warmup steps
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
+         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+         f_start: [ 1.e-6 ]
+         f_max: [ 1. ]
+         f_min: [ 1. ]
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32 # unused
+         in_channels: 9 # 4 data + 4 downscaled image + 1 mask
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_heads: 8
+         use_spatial_transformer: True
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: True
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+
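The inline comment on `in_channels: 9` is the whole story: the inpainting UNet sees the noisy latent concatenated with the VAE-encoded masked image and the downscaled mask. A minimal sketch with illustrative shapes (SD 1.x latents: 4 channels at 1/8 resolution, so 64×64 for a 512 px image):

```python
# Assemble the 9-channel input of the inpainting UNet.
import torch

noisy_latent = torch.randn(1, 4, 64, 64)         # x_t, the sample being denoised
masked_image_latent = torch.randn(1, 4, 64, 64)  # VAE encoding of the masked image
mask = torch.rand(1, 1, 64, 64)                  # inpaint mask, resized to latent size

unet_input = torch.cat([noisy_latent, masked_image_latent, mask], dim=1)
assert unet_input.shape[1] == 9  # matches in_channels: 9 above
```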
tools/Fooocus/models/configs/v2-inference-v.yaml ADDED
@@ -0,0 +1,68 @@
+ model:
+   base_learning_rate: 1.0e-4
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     parameterization: "v"
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False # we set this to false because this is an inference only config
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         use_checkpoint: True
+         use_fp16: True
+         image_size: 32 # unused
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_head_channels: 64 # need to fix for flash-attn
+         use_spatial_transformer: True
+         use_linear_in_transformer: True
+         transformer_depth: 1
+         context_dim: 1024
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           #attn_type: "vanilla-xformers"
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
+       params:
+         freeze: True
+         layer: "penultimate"
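`parameterization: "v"` switches the training target from the noise ε to v-prediction (Salimans & Ho, progressive distillation): the network predicts v = αₜ·ε − σₜ·x₀ instead of ε itself. A minimal worked sketch for one noise level (the scalar schedule values are illustrative):

```python
# Compute the v-prediction target for a single timestep.
import torch

x0 = torch.randn(1, 4, 64, 64)     # clean latent
eps = torch.randn_like(x0)         # sampled Gaussian noise
alpha_t = 0.9                      # illustrative schedule value
sigma_t = (1 - alpha_t**2) ** 0.5  # so that alpha_t^2 + sigma_t^2 = 1

x_t = alpha_t * x0 + sigma_t * eps           # the noised sample fed to the UNet
v_target = alpha_t * eps - sigma_t * x0      # what a v-parameterized model predicts
```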
tools/Fooocus/models/configs/v2-inference-v_fp32.yaml ADDED
@@ -0,0 +1,68 @@
+ model:
+   base_learning_rate: 1.0e-4
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     parameterization: "v"
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False # we set this to false because this is an inference only config
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         use_checkpoint: True
+         use_fp16: False
+         image_size: 32 # unused
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_head_channels: 64 # need to fix for flash-attn
+         use_spatial_transformer: True
+         use_linear_in_transformer: True
+         transformer_depth: 1
+         context_dim: 1024
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           #attn_type: "vanilla-xformers"
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
+       params:
+         freeze: True
+         layer: "penultimate"
tools/Fooocus/models/configs/v2-inference.yaml ADDED
@@ -0,0 +1,67 @@
+ model:
+   base_learning_rate: 1.0e-4
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False # we set this to false because this is an inference only config
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         use_checkpoint: True
+         use_fp16: True
+         image_size: 32 # unused
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_head_channels: 64 # need to fix for flash-attn
+         use_spatial_transformer: True
+         use_linear_in_transformer: True
+         transformer_depth: 1
+         context_dim: 1024
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           #attn_type: "vanilla-xformers"
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
+       params:
+         freeze: True
+         layer: "penultimate"
tools/Fooocus/models/configs/v2-inference_fp32.yaml ADDED
@@ -0,0 +1,67 @@
+ model:
+   base_learning_rate: 1.0e-4
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False # we set this to false because this is an inference only config
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         use_checkpoint: True
+         use_fp16: False
+         image_size: 32 # unused
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_head_channels: 64 # need to fix for flash-attn
+         use_spatial_transformer: True
+         use_linear_in_transformer: True
+         transformer_depth: 1
+         context_dim: 1024
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           #attn_type: "vanilla-xformers"
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
+       params:
+         freeze: True
+         layer: "penultimate"
tools/Fooocus/models/configs/v2-inpainting-inference.yaml ADDED
@@ -0,0 +1,158 @@
+ model:
+   base_learning_rate: 5.0e-05
+   target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false
+     conditioning_key: hybrid
+     scale_factor: 0.18215
+     monitor: val/loss_simple_ema
+     finetune_keys: null
+     use_ema: False
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         use_checkpoint: True
+         image_size: 32 # unused
+         in_channels: 9
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_head_channels: 64 # need to fix for flash-attn
+         use_spatial_transformer: True
+         use_linear_in_transformer: True
+         transformer_depth: 1
+         context_dim: 1024
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           #attn_type: "vanilla-xformers"
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: [ ]
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
+       params:
+         freeze: True
+         layer: "penultimate"
+
+
+ data:
+   target: ldm.data.laion.WebDataModuleFromConfig
+   params:
+     tar_base: null # for concat as in LAION-A
+     p_unsafe_threshold: 0.1
+     filter_word_list: "data/filters.yaml"
+     max_pwatermark: 0.45
+     batch_size: 8
+     num_workers: 6
+     multinode: True
+     min_size: 512
+     train:
+       shards:
+         - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-0/{00000..18699}.tar -"
+         - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-1/{00000..18699}.tar -"
+         - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-2/{00000..18699}.tar -"
+         - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-3/{00000..18699}.tar -"
+         - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-4/{00000..18699}.tar -" #{00000-94333}.tar"
+       shuffle: 10000
+       image_key: jpg
+       image_transforms:
+       - target: torchvision.transforms.Resize
+         params:
+           size: 512
+           interpolation: 3
+       - target: torchvision.transforms.RandomCrop
+         params:
+           size: 512
+       postprocess:
+         target: ldm.data.laion.AddMask
+         params:
+           mode: "512train-large"
+           p_drop: 0.25
+     # NOTE use enough shards to avoid empty validation loops in workers
+     validation:
+       shards:
+         - "pipe:aws s3 cp s3://deep-floyd-s3/datasets/laion_cleaned-part5/{93001..94333}.tar - "
+       shuffle: 0
+       image_key: jpg
+       image_transforms:
+       - target: torchvision.transforms.Resize
+         params:
+           size: 512
+           interpolation: 3
+       - target: torchvision.transforms.CenterCrop
+         params:
+           size: 512
+       postprocess:
+         target: ldm.data.laion.AddMask
+         params:
+           mode: "512train-large"
+           p_drop: 0.25
+
+ lightning:
+   find_unused_parameters: True
+   modelcheckpoint:
+     params:
+       every_n_train_steps: 5000
+
+   callbacks:
+     metrics_over_trainsteps_checkpoint:
+       params:
+         every_n_train_steps: 10000
+
+     image_logger:
+       target: main.ImageLogger
+       params:
+         enable_autocast: False
+         disabled: False
+         batch_frequency: 1000
+         max_images: 4
+         increase_log_steps: False
+         log_first_step: False
+         log_images_kwargs:
+           use_ema_scope: False
+           inpaint: False
+           plot_progressive_rows: False
+           plot_diffusion_rows: False
+           N: 4
+           unconditional_guidance_scale: 5.0
+           unconditional_guidance_label: [""]
+           ddim_steps: 50 # todo check these out for depth2img,
+           ddim_eta: 0.0 # todo check these out for depth2img,
+
+   trainer:
+     benchmark: True
+     val_check_interval: 5000000
+     num_sanity_val_steps: 0
+     accumulate_grad_batches: 1
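The `data` and `lightning` sections above only matter for training; Fooocus ships this config for inference, where they are ignored. For reference, shard specs of the form `pipe:aws s3 cp ... {00000..18699}.tar -` are consumed by the webdataset library, roughly like this (a sketch; assumes `webdataset` is installed and AWS credentials are configured — the buckets themselves are not publicly readable):

```python
# Iterate a brace-expanded set of tar shards the way the data section implies.
import webdataset as wds

shards = "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-0/{00000..18699}.tar -"
dataset = (
    wds.WebDataset(shards)   # WebDataset brace-expands the {..} range itself
    .shuffle(10000)          # matches shuffle: 10000 above
    .decode("pil")
    .to_tuple("jpg", "txt")  # image_key: jpg, plus its caption
)
```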
tools/Fooocus/models/controlnet/put_controlnets_and_t2i_here ADDED
File without changes
tools/Fooocus/models/diffusers/put_diffusers_models_here ADDED
File without changes
tools/Fooocus/models/embeddings/put_embeddings_or_textual_inversion_concepts_here ADDED
File without changes
tools/Fooocus/models/gligen/put_gligen_models_here ADDED
File without changes
tools/Fooocus/models/hypernetworks/put_hypernetworks_here ADDED
File without changes
tools/Fooocus/models/inpaint/fooocus_inpaint_head.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32f7f838e0c6d8f13437ba8411e77a4688d77a2e34df8857e4ef4d51f6b97692
+ size 52602
tools/Fooocus/models/inpaint/inpaint_v26.fooocus.patch ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8657a025104e22d70f9c060635d8e8c2196f433871a2f68dc40abd2171f0d59
+ size 1323362033
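This 1.3 GB patch is the largest LFS object in the commit, and the one the new `.gitattributes` rule covers. A minimal sketch for verifying a fetched copy against the pointer's `oid` (the local path is illustrative):

```python
# Check a downloaded LFS object against the sha256 recorded in its pointer.
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):  # stream in 1 MB chunks
            h.update(block)
    return h.hexdigest()

expected = "f8657a025104e22d70f9c060635d8e8c2196f433871a2f68dc40abd2171f0d59"
assert sha256_of("tools/Fooocus/models/inpaint/inpaint_v26.fooocus.patch") == expected
```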
tools/Fooocus/models/inpaint/put_inpaint_here ADDED
File without changes
tools/Fooocus/models/loras/put_loras_here ADDED
File without changes
tools/Fooocus/models/loras/sd_xl_offset_example-lora_1.0.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4852686128f953d0277d0793e2f0335352f96a919c9c16a09787d77f55cbdf6f
+ size 49553604
tools/Fooocus/models/prompt_expansion/fooocus_expansion/config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "_name_or_path": "gpt2",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "pad_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.23.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
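This config is a stock 12-layer, 768-dim GPT-2, so the prompt-expansion model loads with the standard transformers classes. A minimal sketch (the `generate` arguments echo the `task_specific_params` above; the exact sampling logic Fooocus applies on top is not part of this commit):

```python
# Load the fooocus_expansion GPT-2 from this directory and sample from it.
from transformers import GPT2LMHeadModel, GPT2Tokenizer

path = "tools/Fooocus/models/prompt_expansion/fooocus_expansion"
tokenizer = GPT2Tokenizer.from_pretrained(path)
model = GPT2LMHeadModel.from_pretrained(path)

ids = tokenizer("a castle on a hill", return_tensors="pt").input_ids
out = model.generate(ids, max_length=50, do_sample=True,  # matches the config
                     pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(out[0]))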
tools/Fooocus/models/prompt_expansion/fooocus_expansion/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tools/Fooocus/models/prompt_expansion/fooocus_expansion/positive.txt ADDED
@@ -0,0 +1,642 @@
+ abundant
+ accelerated
+ accepted
+ accepting
+ acclaimed
+ accomplished
+ acknowledged
+ activated
+ adapted
+ adjusted
+ admirable
+ adorable
+ adorned
+ advanced
+ adventurous
+ advocated
+ aesthetic
+ affirmed
+ affluent
+ agile
+ aimed
+ aligned
+ alive
+ altered
+ amazing
+ ambient
+ amplified
+ analytical
+ animated
+ appealing
+ applauded
+ appreciated
+ ardent
+ aromatic
+ arranged
+ arresting
+ articulate
+ artistic
+ associated
+ assured
+ astonishing
+ astounding
+ atmosphere
+ attempted
+ attentive
+ attractive
+ authentic
+ authoritative
+ awarded
+ awesome
+ backed
+ background
+ baked
+ balance
+ balanced
+ balancing
+ beaten
+ beautiful
+ beloved
+ beneficial
+ benevolent
+ best
+ bestowed
+ blazing
+ blended
+ blessed
+ boosted
+ borne
+ brave
+ breathtaking
+ brewed
+ bright
+ brilliant
+ brought
+ built
+ burning
+ calm
+ calmed
+ candid
+ caring
+ carried
+ catchy
+ celebrated
+ celestial
+ certain
+ championed
+ changed
+ charismatic
+ charming
+ chased
+ cheered
+ cheerful
+ cherished
+ chic
+ chosen
+ cinematic
+ clad
+ classic
+ classy
+ clear
+ coached
+ coherent
+ collected
+ color
+ colorful
+ colors
+ colossal
+ combined
+ comforting
+ commanding
+ committed
+ compassionate
+ compatible
+ complete
+ complex
+ complimentary
+ composed
+ composition
+ comprehensive
+ conceived
+ conferred
+ confident
+ connected
+ considerable
+ considered
+ consistent
+ conspicuous
+ constructed
+ constructive
+ contemplated
+ contemporary
+ content
+ contrasted
+ conveyed
+ cooked
+ cool
+ coordinated
+ coupled
+ courageous
+ coveted
+ cozy
+ created
+ creative
+ credited
+ crisp
+ critical
+ cultivated
+ cured
+ curious
+ current
+ customized
+ cute
+ daring
+ darling
+ dazzling
+ decorated
+ decorative
+ dedicated
+ deep
+ defended
+ definitive
+ delicate
+ delightful
+ delivered
+ depicted
+ designed
+ desirable
+ desired
+ destined
+ detail
+ detailed
+ determined
+ developed
+ devoted
+ devout
+ diligent
+ direct
+ directed
+ discovered
+ dispatched
+ displayed
+ distilled
+ distinct
+ distinctive
+ distinguished
+ diverse
+ divine
+ dramatic
+ draped
+ dreamed
+ driven
+ dynamic
+ earnest
+ eased
+ ecstatic
+ educated
+ effective
+ elaborate
+ elegant
+ elevated
+ elite
+ eminent
+ emotional
+ empowered
+ empowering
+ enchanted
+ encouraged
+ endorsed
+ endowed
+ enduring
+ energetic
+ engaging
+ enhanced
+ enigmatic
+ enlightened
+ enormous
+ enticing
+ envisioned
+ epic
+ esteemed
+ eternal
+ everlasting
+ evolved
+ exalted
+ examining
+ excellent
+ exceptional
+ exciting
+ exclusive
+ exemplary
+ exotic
+ expansive
+ exposed
+ expressive
+ exquisite
+ extended
+ extraordinary
+ extremely
+ fabulous
+ facilitated
+ fair
+ faithful
+ famous
+ fancy
+ fantastic
+ fascinating
+ fashionable
+ fashioned
+ favorable
+ favored
+ fearless
+ fermented
+ fertile
+ festive
+ fiery
+ fine
+ finest
+ firm
+ fixed
+ flaming
+ flashing
+ flashy
+ flavored
+ flawless
+ flourishing
+ flowing
+ focus
+ focused
+ formal
+ formed
+ fortunate
+ fostering
+ frank
+ fresh
+ fried
+ friendly
+ fruitful
+ fulfilled
+ full
+ futuristic
+ generous
+ gentle
+ genuine
+ gifted
+ gigantic
+ glamorous
+ glorious
+ glossy
+ glowing
+ gorgeous
+ graceful
+ gracious
+ grand
+ granted
+ grateful
+ great
+ grilled
+ grounded
+ grown
+ guarded
+ guided
+ hailed
+ handsome
+ healing
+ healthy
+ heartfelt
+ heavenly
+ heroic
+ highly
+ historic
+ holistic
+ holy
+ honest
+ honored
+ hoped
+ hopeful
+ iconic
+ ideal
+ illuminated
+ illuminating
+ illumination
+ illustrious
+ imaginative
+ imagined
+ immense
+ immortal
+ imposing
+ impressive
+ improved
+ incredible
+ infinite
+ informed
+ ingenious
+ innocent
+ innovative
+ insightful
+ inspirational
+ inspired
+ inspiring
+ instructed
+ integrated
+ intense
+ intricate
+ intriguing
+ invaluable
+ invented
+ investigative
+ invincible
+ inviting
+ irresistible
+ joined
+ joyful
+ keen
+ kindly
+ kinetic
+ knockout
+ laced
+ lasting
+ lauded
+ lavish
+ legendary
+ lifted
+ light
+ limited
+ linked
+ lively
+ located
+ logical
+ loved
+ lovely
+ loving
+ loyal
+ lucid
+ lucky
+ lush
+ luxurious
+ luxury
+ magic
+ magical
+ magnificent
+ majestic
+ marked
+ marvelous
+ massive
+ matched
+ matured
+ meaningful
+ memorable
+ merged
+ merry
+ meticulous
+ mindful
+ miraculous
+ modern
+ modified
+ monstrous
+ monumental
+ motivated
+ motivational
+ moved
+ moving
+ mystical
+ mythical
+ naive
+ neat
+ new
+ nice
+ nifty
+ noble
+ notable
+ noteworthy
+ novel
+ nuanced
+ offered
+ open
+ optimal
+ optimistic
+ orderly
+ organized
+ original
+ originated
+ outstanding
+ overwhelming
+ paired
+ palpable
+ passionate
+ peaceful
+ perfect
+ perfected
+ perpetual
+ persistent
+ phenomenal
+ pious
+ pivotal
+ placed
+ planned
+ pleasant
+ pleased
+ pleasing
+ plentiful
+ plotted
+ plush
+ poetic
+ poignant
+ polished
+ positive
+ praised
+ precious
+ precise
+ premier
+ premium
+ presented
+ preserved
+ prestigious
+ pretty
+ priceless
+ prime
+ pristine
+ probing
+ productive
+ professional
+ profound
+ progressed
+ progressive
+ prominent
+ promoted
+ pronounced
+ propelled
+ proportional
+ prosperous
+ protected
+ provided
+ provocative
+ pure
+ pursued
+ pushed
+ quaint
+ quality
+ questioning
+ quiet
+ radiant
+ rare
+ rational
+ real
+ reborn
+ reclaimed
+ recognized
+ recovered
+ refined
+ reflected
+ refreshed
+ refreshing
+ related
+ relaxed
+ relentless
+ reliable
+ relieved
+ remarkable
+ renewed
+ renowned
+ representative
+ rescued
+ resilient
+ respected
+ respectful
+ restored
+ retrieved
+ revealed
+ revealing
+ revered
+ revived
+ rewarded
+ rich
+ roasted
+ robust
+ romantic
+ royal
+ sacred
+ salient
+ satisfied
+ satisfying
+ saturated
+ saved
+ scenic
+ scientific
+ select
+ sensational
+ serious
+ set
+ shaped
+ sharp
+ shielded
+ shining
+ shiny
+ shown
+ significant
+ silent
+ sincere
+ singular
+ situated
+ sleek
+ slick
+ smart
+ snug
+ solemn
+ solid
+ soothing
+ sophisticated
+ sought
+ sparkling
+ special
+ spectacular
+ sped
+ spirited
+ spiritual
+ splendid
+ spread
+ stable
+ steady
+ still
+ stimulated
+ stimulating
+ stirred
+ straightforward
+ striking
+ strong
+ structured
+ stunning
+ sturdy
+ stylish
+ sublime
+ successful
+ sunny
+ superb
+ superior
+ supplied
+ supported
+ supportive
+ supreme
+ sure
+ surreal
+ sweet
+ symbolic
+ symmetry
+ synchronized
+ systematic
+ tailored
+ taking
+ targeted
+ taught
+ tempting
+ tender
+ terrific
+ thankful
+ theatrical
+ thought
+ thoughtful
+ thrilled
+ thrilling
+ thriving
+ tidy
+ timeless
+ touching
+ tough
+ trained
+ tranquil
+ transformed
+ translucent
+ transparent
+ transported
+ tremendous
+ trendy
+ tried
+ trim
+ true
+ trustworthy
+ unbelievable
+ unconditional
+ uncovered
+ unified
+ unique
+ united
+ universal
+ unmatched
+ unparalleled
+ upheld
+ valiant
+ valued
+ varied
+ very
+ vibrant
+ virtuous
+ vivid
+ warm
+ wealthy
+ whole
+ winning
+ wished
+ witty
+ wonderful
+ worshipped
+ worthy
tools/Fooocus/models/prompt_expansion/fooocus_expansion/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd54cc90d95d2c72b97830e4b38f44a6521847284d5b9dbcfd16ba82779cdeb3
+ size 351283802
tools/Fooocus/models/prompt_expansion/fooocus_expansion/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tools/Fooocus/models/prompt_expansion/fooocus_expansion/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tools/Fooocus/models/prompt_expansion/fooocus_expansion/tokenizer_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "name_or_path": "gpt2",
+   "special_tokens_map_file": null,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
tools/Fooocus/models/prompt_expansion/fooocus_expansion/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
tools/Fooocus/models/prompt_expansion/put_prompt_expansion_here ADDED
File without changes
tools/Fooocus/models/safety_checker/put_safety_checker_models_here ADDED
File without changes
tools/Fooocus/models/style_models/put_t2i_style_model_here ADDED
File without changes
tools/Fooocus/models/unet/put_unet_files_here ADDED
File without changes
tools/Fooocus/models/upscale_models/fooocus_upscaler_s409985e5.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2a66d21d2e44d2b59c53414419279763a423a61f05bc43d7c24e0489aeca5a3
+ size 33636613
tools/Fooocus/models/upscale_models/put_esrgan_and_other_upscale_models_here ADDED
File without changes
tools/Fooocus/models/vae/put_vae_here ADDED
File without changes
tools/Fooocus/models/vae_approx/put_taesd_encoder_pth_and_taesd_decoder_pth_here ADDED
File without changes
tools/Fooocus/models/vae_approx/vaeapp_sd15.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f88c9078bb2238cdd0d8864671dd33e3f42e091e41f08903f3c15e4a54a9b39
+ size 213777
tools/Fooocus/models/vae_approx/xl-to-v1_interposer-v4.0.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da4c2d772c621134ba1a2d961a439cbc4b4c85c43aae1197ab3c024e2a3efa57
+ size 5667280
tools/Fooocus/models/vae_approx/xlvaeapp.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c8db511f910184436a613807ce06a729f4cf96bbf7977c7c6c40d5b3e4a8333
+ size 213777