dikdimon committed
Commit 34b2da7 · verified · 1 Parent(s): d610680

Upload Stable-diffusion using SD-Hub extension

Files changed (35)
  1. .gitattributes +1 -0
  2. Stable-diffusion/Put Stable Diffusion checkpoints here.txt +0 -0
  3. Stable-diffusion/ui/.gitattributes +62 -0
  4. Stable-diffusion/ui/1.wav +0 -0
  5. Stable-diffusion/ui/2.wav +0 -0
  6. Stable-diffusion/ui/3.wav +0 -0
  7. Stable-diffusion/ui/4x-AnimeSharp.pth +3 -0
  8. Stable-diffusion/ui/4x-UltraSharp.pth +3 -0
  9. Stable-diffusion/ui/4x_NMKD-Superscale-SP_178000_G.pth +3 -0
  10. Stable-diffusion/ui/4x_RealisticRescaler_100000_G.pth +3 -0
  11. Stable-diffusion/ui/4x_foolhardy_Remacri.pth +3 -0
  12. Stable-diffusion/ui/8x_RealESRGAN.pth +3 -0
  13. Stable-diffusion/ui/ADetailer.zip +3 -0
  14. Stable-diffusion/ui/BackToNature.mp3 +3 -0
  15. Stable-diffusion/ui/README.md +3 -0
  16. Stable-diffusion/ui/asd.zip +3 -0
  17. Stable-diffusion/ui/ass.zip +3 -0
  18. Stable-diffusion/ui/cloudflared.py +10 -0
  19. Stable-diffusion/ui/config.json +440 -0
  20. Stable-diffusion/ui/custom_hires_fix.py +416 -0
  21. Stable-diffusion/ui/embeddings.zip +3 -0
  22. Stable-diffusion/ui/encrypt_image.py +206 -0
  23. Stable-diffusion/ui/encrypt_images_info.js +27 -0
  24. Stable-diffusion/ui/hashes.py +81 -0
  25. Stable-diffusion/ui/lora_block_weight.py +1152 -0
  26. Stable-diffusion/ui/nenen88.py +52 -0
  27. Stable-diffusion/ui/venv.py +77 -0
  28. Stable-diffusion/ui/venv161.py +97 -0
  29. Stable-diffusion/ui/venv180.py +105 -0
  30. Stable-diffusion/ui/venv220.py +62 -0
  31. Stable-diffusion/ui/venv_19-6-2024.py +62 -0
  32. Stable-diffusion/ui/venvv.py +62 -0
  33. Stable-diffusion/ui/zzzzzz.safetensors +3 -0
  34. Stable-diffusion/venv-fusion.tar.lz4 +3 -0
  35. Stable-diffusion/venv-sd-trainer.tar.lz4 +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ Stable-diffusion/ui/BackToNature.mp3 filter=lfs diff=lfs merge=lfs -text
Stable-diffusion/Put Stable Diffusion checkpoints here.txt ADDED
File without changes
Stable-diffusion/ui/.gitattributes ADDED
@@ -0,0 +1,62 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ BLIP/BLIP.gif filter=lfs diff=lfs merge=lfs -text
+ generative-models/assets/001_with_eval.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-inpainting/merged-leopards.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/depth2img/d2i.gif filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/depth2img/depth2img01.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/depth2img/depth2img02.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/depth2img/merged-0000.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/depth2img/merged-0004.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/depth2img/merged-0005.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/img2img/upscaling-in.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/img2img/upscaling-out.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/stable-unclip/unclip-variations.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/stable-unclip/unclip-variations_noise.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/txt2img/768/merged-0001.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/txt2img/768/merged-0002.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/txt2img/768/merged-0003.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/txt2img/768/merged-0004.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/txt2img/768/merged-0005.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/txt2img/768/merged-0006.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/txt2img/merged-0001.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/txt2img/merged-0003.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/txt2img/merged-0005.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/txt2img/merged-0006.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/txt2img/merged-0007.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/upscaling/merged-dog.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/upscaling/sampled-bear-x4.png filter=lfs diff=lfs merge=lfs -text
+ stable-diffusion-stability-ai/assets/stable-samples/upscaling/snow-leopard-x4.png filter=lfs diff=lfs merge=lfs -text
+ BackToNature.mp3 filter=lfs diff=lfs merge=lfs -text
Stable-diffusion/ui/1.wav ADDED
Binary file (326 kB).
 
Stable-diffusion/ui/2.wav ADDED
Binary file (326 kB).
 
Stable-diffusion/ui/3.wav ADDED
Binary file (326 kB).
 
Stable-diffusion/ui/4x-AnimeSharp.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7a7de2dafd7331c1992862bbbcd9e9712a9f9f8e6303f0aaa59b4341d359bab
+ size 67010245
Stable-diffusion/ui/4x-UltraSharp.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5812231fc936b42af08a5edba784195495d303d5b3248c24489ef0c4021fe01
+ size 66961958
Stable-diffusion/ui/4x_NMKD-Superscale-SP_178000_G.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d1b0078fe71446e0469d8d4df59e96baa80d83cda600d68237d655830821bcc
+ size 66958607
Stable-diffusion/ui/4x_RealisticRescaler_100000_G.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7381a1229143c9301a94421b610d95eb312e2555743cc9e80099a0e15ac5bd3b
+ size 134051293
Stable-diffusion/ui/4x_foolhardy_Remacri.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1a73bd89c2da1ae494774746398689048b5a892bd9653e146713f9df8bca86a
+ size 67025055
Stable-diffusion/ui/8x_RealESRGAN.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b72fb469d12f05a4770813d2603eb1b550f40df6fb8b37d6c7bc2db3d2bff5e
+ size 67189359
Stable-diffusion/ui/ADetailer.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:54efbdaea342f5fef3aad4f5a9c967b0ddd4db1087991529e355f3cb5ffb0b15
+ size 148437591
Stable-diffusion/ui/BackToNature.mp3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca2d3fc1121250e3c98019ea7a3f96168c9d547884e2fa316f305dfba8c26a42
+ size 4902735
Stable-diffusion/ui/README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ license: creativeml-openrail-m
+ ---
Stable-diffusion/ui/asd.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e22e725217abb4b6d8079b6be20c401393ab0945cc60c274a73e7c30b2139cf4
+ size 35111
Stable-diffusion/ui/ass.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f91ef37e889b2a5ee158c155afe6636bfce6fa5c2ccce2a06e872b0945680d6d
+ size 32885
Stable-diffusion/ui/cloudflared.py ADDED
@@ -0,0 +1,10 @@
+ import subprocess, sys, re, cloudpickle, shlex
+ from pathlib import Path
+
+ port = 7860
+ tunnel_class = cloudpickle.load(open("/kaggle/working/new_tunnel", "rb"), encoding="utf-8")
+ tunnel = tunnel_class(port)
+ tunnel.add_tunnel(command=f"cl tunnel --url localhost:{port}", name="cl", pattern=re.compile(r"[\w-]+\.trycloudflare\.com"))
+ asd = f'/kaggle/venv/bin/python3 launch.py {" ".join(sys.argv[1:])}'
+ with tunnel:
+     subprocess.call(shlex.split(asd))
Stable-diffusion/ui/config.json ADDED
@@ -0,0 +1,440 @@
1
+ {
2
+ "gradio_theme": "NoCrypt/miku",
3
+ "lora_preferred_name": "Filename",
4
+ "samples_filename_pattern": "[datetime<%M%S>]",
5
+ "show_progressbar": true,
6
+ "show_progress_grid": true,
7
+ "show_progress_every_n_steps": 2,
8
+ "live_previews_enable": true,
9
+ "live_previews_image_format": "webp",
10
+ "live_preview_allow_lowvram_full": false,
11
+ "live_preview_content": "Prompt",
12
+ "live_preview_refresh_period": 100.0,
13
+ "live_preview_fast_interrupt": true,
14
+ "quicksettings_list": [
15
+ "sd_model_checkpoint",
16
+ "sd_vae",
17
+ "CLIP_stop_at_last_layers"
18
+ ],
19
+ "ui_tab_order": [
20
+ "txt2img",
21
+ "img2img",
22
+ "Extras",
23
+ "Fast PNG Info",
24
+ "SuperMerger"
25
+ ],
26
+ "hidden_tabs": [
27
+ "PNG Info",
28
+ "Train",
29
+ "Checkpoint Merger"
30
+ ],
31
+ "ldsr_steps": 100,
32
+ "ldsr_cached": false,
33
+ "SCUNET_tile": 256,
34
+ "SCUNET_tile_overlap": 8,
35
+ "SWIN_tile": 192,
36
+ "SWIN_tile_overlap": 8,
37
+ "SWIN_torch_compile": false,
38
+ "hypertile_enable_unet": false,
39
+ "hypertile_enable_unet_secondpass": false,
40
+ "hypertile_max_depth_unet": 3,
41
+ "hypertile_max_tile_unet": 256,
42
+ "hypertile_swap_size_unet": 3,
43
+ "hypertile_enable_vae": false,
44
+ "hypertile_max_depth_vae": 3,
45
+ "hypertile_max_tile_vae": 128,
46
+ "hypertile_swap_size_vae": 3,
47
+ "bmab_debug_print": false,
48
+ "bmab_debug_logging": false,
49
+ "bmab_show_extends": false,
50
+ "bmab_test_function": false,
51
+ "bmab_keep_original_setting": false,
52
+ "bmab_save_image_before_process": false,
53
+ "bmab_save_image_after_process": false,
54
+ "bmab_for_developer": false,
55
+ "bmab_use_dino_predict": false,
56
+ "bmab_max_detailing_element": 0,
57
+ "bmab_detail_full": true,
58
+ "bmab_optimize_vram": "None",
59
+ "bmab_mask_model": "sam_vit_b",
60
+ "bmab_use_specific_model": false,
61
+ "bmab_model": "",
62
+ "bmab_cn_openpose": "control_v11p_sd15_openpose_fp16 [73c2b67d]",
63
+ "bmab_cn_lineart": "control_v11p_sd15_lineart_fp16 [5c23b17d]",
64
+ "bmab_cn_inpaint": "control_v11p_sd15_inpaint_fp16 [be8bc0ed]",
65
+ "bmab_cn_tile_resample": "control_v11f1e_sd15_tile_fp16 [3b860298]",
66
+ "bmab_cn_inpaint_depth_hand": "control_sd15_inpaint_depth_hand_fp16 [09456e54]",
67
+ "bmab_cn_ipadapter": "ip-adapter-plus_sd15 [836b5c2e]",
68
+ "bmab_additional_checkpoint_path": "",
69
+ "ad_max_models": 4,
70
+ "ad_extra_models_dir": "",
71
+ "ad_save_images_dir": "",
72
+ "ad_save_previews": false,
73
+ "ad_save_images_before": false,
74
+ "ad_only_selected_scripts": true,
75
+ "ad_script_names": "dynamic_prompting,dynamic_thresholding,lora_block_weight,negpip,wildcard_recursive,wildcards",
76
+ "ad_bbox_sortby": "None",
77
+ "ad_same_seed_for_each_tab": false,
78
+ "ad_dynamic_denoise_power": 0,
79
+ "ad_match_inpaint_bbox_size": "Off",
80
+ "arh_javascript_aspect_ratio_show": true,
81
+ "arh_javascript_aspect_ratio": "1:1, 3:2, 4:3, 5:4, 16:9",
82
+ "arh_ui_javascript_selection_method": "Aspect Ratios Dropdown",
83
+ "arh_hide_accordion_by_default": true,
84
+ "arh_expand_by_default": false,
85
+ "arh_ui_component_order_key": "MaxDimensionScaler, MinDimensionScaler, PredefinedAspectRatioButtons, PredefinedPercentageButtons",
86
+ "arh_show_max_width_or_height": false,
87
+ "arh_max_width_or_height": 1024.0,
88
+ "arh_show_min_width_or_height": false,
89
+ "arh_min_width_or_height": 1024.0,
90
+ "arh_show_predefined_aspect_ratios": false,
91
+ "arh_predefined_aspect_ratio_use_max_dim": false,
92
+ "arh_predefined_aspect_ratios": "1:1, 4:3, 16:9, 9:16, 21:9",
93
+ "arh_show_predefined_percentages": false,
94
+ "arh_predefined_percentages": "25, 50, 75, 125, 150, 175, 200",
95
+ "arh_predefined_percentages_display_key": "Incremental/decremental percentage (-50%, +50%)",
96
+ "control_net_detectedmap_dir": "detected_maps",
97
+ "control_net_models_path": "",
98
+ "control_net_modules_path": "",
99
+ "control_net_unit_count": 3,
100
+ "control_net_model_cache_size": 2,
101
+ "control_net_inpaint_blur_sigma": 7,
102
+ "control_net_no_detectmap": false,
103
+ "control_net_detectmap_autosaving": false,
104
+ "control_net_allow_script_control": false,
105
+ "control_net_sync_field_args": true,
106
+ "controlnet_show_batch_images_in_ui": false,
107
+ "controlnet_increment_seed_during_batch": false,
108
+ "controlnet_disable_openpose_edit": false,
109
+ "controlnet_disable_photopea_edit": false,
110
+ "controlnet_photopea_warning": true,
111
+ "controlnet_ignore_noninpaint_mask": false,
112
+ "controlnet_clip_detector_on_cpu": false,
113
+ "controlnet_control_type_dropdown": false,
114
+ "encrypt_image_is_enable": "Yes",
115
+ "sd_model_checkpoint": "speciosa25D_v12.safetensors [e1431355af]",
116
+ "sd_checkpoint_hash": "e1431355afb5d23c727748fafe357ad7359822fb7b4312059a1a583cb468a885",
117
+ "sd_vae": "sdxl_vae.safetensors",
118
+ "emphasis": "No norm",
119
+ "multiple_tqdm": true,
120
+ "outdir_samples": "",
121
+ "outdir_txt2img_samples": "outputs/txt2img-images",
122
+ "outdir_img2img_samples": "outputs/img2img-images",
123
+ "outdir_extras_samples": "outputs/extras-images",
124
+ "outdir_grids": "",
125
+ "outdir_txt2img_grids": "outputs/txt2img-grids",
126
+ "outdir_img2img_grids": "outputs/img2img-grids",
127
+ "outdir_save": "log/images",
128
+ "outdir_init_images": "outputs/init-images",
129
+ "samples_save": true,
130
+ "samples_format": "png",
131
+ "save_images_add_number": true,
132
+ "save_images_replace_action": "Replace",
133
+ "grid_save": true,
134
+ "grid_format": "png",
135
+ "grid_extended_filename": false,
136
+ "grid_only_if_multiple": true,
137
+ "grid_prevent_empty_spots": false,
138
+ "grid_zip_filename_pattern": "",
139
+ "n_rows": -1,
140
+ "font": "",
141
+ "grid_text_active_color": "#000000",
142
+ "grid_text_inactive_color": "#999999",
143
+ "grid_background_color": "#ffffff",
144
+ "save_images_before_face_restoration": false,
145
+ "save_images_before_highres_fix": false,
146
+ "save_images_before_color_correction": false,
147
+ "save_mask": false,
148
+ "save_mask_composite": false,
149
+ "jpeg_quality": 80,
150
+ "webp_lossless": false,
151
+ "export_for_4chan": true,
152
+ "img_downscale_threshold": 4.0,
153
+ "target_side_length": 4000.0,
154
+ "img_max_size_mp": 200.0,
155
+ "use_original_name_batch": true,
156
+ "use_upscaler_name_as_suffix": false,
157
+ "save_selected_only": true,
158
+ "save_write_log_csv": true,
159
+ "save_init_img": false,
160
+ "temp_dir": "",
161
+ "clean_temp_dir_at_start": false,
162
+ "save_incomplete_images": true,
163
+ "notification_audio": true,
164
+ "notification_volume": 100,
165
+ "save_to_dirs": true,
166
+ "grid_save_to_dirs": true,
167
+ "use_save_to_dirs_for_ui": false,
168
+ "directories_filename_pattern": "[date]",
169
+ "directories_max_prompt_words": 8,
170
+ "auto_backcompat": true,
171
+ "use_old_emphasis_implementation": false,
172
+ "use_old_karras_scheduler_sigmas": false,
173
+ "no_dpmpp_sde_batch_determinism": false,
174
+ "use_old_hires_fix_width_height": false,
175
+ "hires_fix_use_firstpass_conds": false,
176
+ "use_old_scheduling": false,
177
+ "use_downcasted_alpha_bar": false,
178
+ "refiner_switch_by_sample_steps": false,
179
+ "lora_functional": false,
180
+ "extra_networks_show_hidden_directories": true,
181
+ "extra_networks_dir_button_function": false,
182
+ "extra_networks_hidden_models": "When searched",
183
+ "extra_networks_default_multiplier": 1,
184
+ "extra_networks_card_width": 120.0,
185
+ "extra_networks_card_height": 120.0,
186
+ "extra_networks_card_text_scale": 0.6,
187
+ "extra_networks_card_show_desc": false,
188
+ "extra_networks_card_description_is_html": false,
189
+ "extra_networks_card_order_field": "Path",
190
+ "extra_networks_card_order": "Ascending",
191
+ "extra_networks_tree_view_style": "Dirs",
192
+ "extra_networks_tree_view_default_enabled": true,
193
+ "extra_networks_tree_view_default_width": 180.0,
194
+ "extra_networks_add_text_separator": " ",
195
+ "ui_extra_networks_tab_reorder": "",
196
+ "textual_inversion_print_at_load": false,
197
+ "textual_inversion_add_hashes_to_infotext": true,
198
+ "sd_hypernetwork": "None",
199
+ "sd_lora": "None",
200
+ "lora_add_hashes_to_infotext": true,
201
+ "lora_bundled_ti_to_infotext": true,
202
+ "lora_show_all": false,
203
+ "lora_hide_unknown_for_versions": [],
204
+ "lora_in_memory_limit": 0,
205
+ "lora_not_found_warning_console": false,
206
+ "lora_not_found_gradio_warning": false,
207
+ "cross_attention_optimization": "Automatic",
208
+ "s_min_uncond": 0,
209
+ "s_min_uncond_all": false,
210
+ "token_merging_ratio": 0,
211
+ "token_merging_ratio_img2img": 0,
212
+ "token_merging_ratio_hr": 0,
213
+ "pad_cond_uncond": false,
214
+ "pad_cond_uncond_v0": false,
215
+ "persistent_cond_cache": true,
216
+ "batch_cond_uncond": true,
217
+ "fp8_storage": "Disable",
218
+ "cache_fp16_weight": false,
219
+ "hide_samplers": [],
220
+ "eta_ddim": 0,
221
+ "eta_ancestral": 1,
222
+ "ddim_discretize": "uniform",
223
+ "s_churn": 0,
224
+ "s_tmin": 0,
225
+ "s_tmax": 0,
226
+ "s_noise": 1,
227
+ "sigma_min": 0.0,
228
+ "sigma_max": 0.0,
229
+ "rho": 0.0,
230
+ "eta_noise_seed_delta": 0,
231
+ "always_discard_next_to_last_sigma": false,
232
+ "sgm_noise_multiplier": false,
233
+ "uni_pc_variant": "bh1",
234
+ "uni_pc_skip_type": "time_uniform",
235
+ "uni_pc_order": 3,
236
+ "uni_pc_lower_order_final": true,
237
+ "sd_noise_schedule": "Default",
238
+ "skip_early_cond": 0,
239
+ "beta_dist_alpha": 0.6,
240
+ "beta_dist_beta": 0.6,
241
+ "sd_checkpoints_limit": 1,
242
+ "sd_checkpoints_keep_in_cpu": true,
243
+ "sd_checkpoint_cache": 0,
244
+ "sd_unet": "Automatic",
245
+ "enable_quantization": false,
246
+ "enable_batch_seeds": true,
247
+ "comma_padding_backtrack": 20,
248
+ "sdxl_clip_l_skip": false,
249
+ "upcast_attn": false,
250
+ "randn_source": "GPU",
251
+ "tiling": false,
252
+ "hires_fix_refiner_pass": "second pass",
253
+ "enable_prompt_comments": true,
254
+ "sd3_enable_t5": false,
255
+ "sdxl_crop_top": 0.0,
256
+ "sdxl_crop_left": 0.0,
257
+ "sdxl_refiner_low_aesthetic_score": 2.5,
258
+ "sdxl_refiner_high_aesthetic_score": 6.0,
259
+ "sd_vae_checkpoint_cache": 0,
260
+ "sd_vae_overrides_per_model_preferences": true,
261
+ "auto_vae_precision_bfloat16": false,
262
+ "auto_vae_precision": true,
263
+ "sd_vae_encode_method": "Full",
264
+ "sd_vae_decode_method": "Full",
265
+ "inpainting_mask_weight": 1,
266
+ "initial_noise_multiplier": 1,
267
+ "img2img_extra_noise": 0,
268
+ "img2img_color_correction": false,
269
+ "img2img_fix_steps": false,
270
+ "img2img_background_color": "#ffffff",
271
+ "img2img_editor_height": 720,
272
+ "img2img_sketch_default_brush_color": "#ffffff",
273
+ "img2img_inpaint_mask_brush_color": "#ffffff",
274
+ "img2img_inpaint_sketch_default_brush_color": "#ffffff",
275
+ "return_mask": false,
276
+ "return_mask_composite": false,
277
+ "img2img_batch_show_results_limit": 32,
278
+ "overlay_inpaint": true,
279
+ "return_grid": true,
280
+ "do_not_show_images": false,
281
+ "js_modal_lightbox": true,
282
+ "js_modal_lightbox_initially_zoomed": true,
283
+ "js_modal_lightbox_gamepad": false,
284
+ "js_modal_lightbox_gamepad_repeat": 250.0,
285
+ "sd_webui_modal_lightbox_icon_opacity": 1,
286
+ "sd_webui_modal_lightbox_toolbar_opacity": 0.9,
287
+ "gallery_height": "",
288
+ "open_dir_button_choice": "Subdirectory",
289
+ "enable_pnginfo": true,
290
+ "save_txt": false,
291
+ "add_model_name_to_info": true,
292
+ "add_model_hash_to_info": true,
293
+ "add_vae_name_to_info": true,
294
+ "add_vae_hash_to_info": true,
295
+ "add_user_name_to_info": false,
296
+ "add_version_to_infotext": true,
297
+ "disable_weights_auto_swap": true,
298
+ "infotext_skip_pasting": [],
299
+ "infotext_styles": "Apply if any",
300
+ "show_progress_type": "TAESD",
301
+ "js_live_preview_in_modal_lightbox": true,
302
+ "prevent_screen_sleep_during_generation": true,
303
+ "keyedit_precision_attention": 0.1,
304
+ "keyedit_precision_extra": 0.05,
305
+ "keyedit_delimiters": ".,\\/!?%^*;:{}=`~() ",
306
+ "keyedit_delimiters_whitespace": [
307
+ "Tab",
308
+ "Carriage Return",
309
+ "Line Feed"
310
+ ],
311
+ "keyedit_move": true,
312
+ "disable_token_counters": false,
313
+ "include_styles_into_token_counters": true,
314
+ "extra_options_txt2img": [],
315
+ "extra_options_img2img": [],
316
+ "extra_options_cols": 1,
317
+ "extra_options_accordion": false,
318
+ "compact_prompt_box": false,
319
+ "samplers_in_dropdown": true,
320
+ "dimensions_and_batch_together": true,
321
+ "sd_checkpoint_dropdown_use_short": false,
322
+ "hires_fix_show_sampler": false,
323
+ "hires_fix_show_prompts": false,
324
+ "txt2img_settings_accordion": false,
325
+ "img2img_settings_accordion": false,
326
+ "interrupt_after_current": false,
327
+ "localization": "None",
328
+ "ui_reorder_list": [],
329
+ "gradio_themes_cache": true,
330
+ "show_progress_in_title": true,
331
+ "send_seed": true,
332
+ "send_size": true,
333
+ "enable_reloading_ui_scripts": false,
334
+ "api_enable_requests": true,
335
+ "api_forbid_local_requests": true,
336
+ "api_useragent": "",
337
+ "prioritized_callbacks_app_started": [],
338
+ "prioritized_callbacks_model_loaded": [],
339
+ "prioritized_callbacks_ui_tabs": [],
340
+ "prioritized_callbacks_ui_settings": [],
341
+ "prioritized_callbacks_after_component": [],
342
+ "prioritized_callbacks_infotext_pasted": [],
343
+ "prioritized_callbacks_script_unloaded": [],
344
+ "prioritized_callbacks_before_ui": [],
345
+ "prioritized_callbacks_on_reload": [],
346
+ "prioritized_callbacks_list_optimizers": [],
347
+ "prioritized_callbacks_before_token_counter": [],
348
+ "prioritized_callbacks_script_before_process": [],
349
+ "prioritized_callbacks_script_process": [],
350
+ "prioritized_callbacks_script_before_process_batch": [],
351
+ "prioritized_callbacks_script_process_batch": [],
352
+ "prioritized_callbacks_script_postprocess": [],
353
+ "prioritized_callbacks_script_postprocess_batch": [],
354
+ "prioritized_callbacks_script_post_sample": [],
355
+ "prioritized_callbacks_script_on_mask_blend": [],
356
+ "prioritized_callbacks_script_postprocess_image": [],
357
+ "prioritized_callbacks_script_postprocess_maskoverlay": [],
358
+ "prioritized_callbacks_script_after_component": [],
359
+ "profiling_enable": false,
360
+ "profiling_activities": [
361
+ "CPU"
362
+ ],
363
+ "profiling_record_shapes": true,
364
+ "profiling_profile_memory": true,
365
+ "profiling_with_stack": true,
366
+ "profiling_filename": "trace.json",
367
+ "auto_launch_browser": "Local",
368
+ "enable_console_prompts": false,
369
+ "show_warnings": false,
370
+ "show_gradio_deprecation_warnings": true,
371
+ "memmon_poll_rate": 8,
372
+ "samples_log_stdout": false,
373
+ "enable_upscale_progressbar": true,
374
+ "print_hypernet_extra": false,
375
+ "list_hidden_files": true,
376
+ "disable_mmap_load_safetensors": false,
377
+ "hide_ldm_prints": true,
378
+ "dump_stacks_on_signal": false,
379
+ "face_restoration": false,
380
+ "face_restoration_model": "CodeFormer",
381
+ "code_former_weight": 0.5,
382
+ "face_restoration_unload": false,
383
+ "postprocessing_enable_in_main_ui": [],
384
+ "postprocessing_disable_in_extras": [],
385
+ "postprocessing_operation_order": [],
386
+ "upscaling_max_images_in_cache": 5,
387
+ "postprocessing_existing_caption_action": "Ignore",
388
+ "ESRGAN_tile": 192,
389
+ "ESRGAN_tile_overlap": 8,
390
+ "realesrgan_enabled_models": [
391
+ "R-ESRGAN 4x+",
392
+ "R-ESRGAN 4x+ Anime6B"
393
+ ],
394
+ "dat_enabled_models": [
395
+ "DAT x2",
396
+ "DAT x3",
397
+ "DAT x4"
398
+ ],
399
+ "DAT_tile": 192,
400
+ "DAT_tile_overlap": 8,
401
+ "set_scale_by_when_changing_upscaler": false,
402
+ "unload_models_when_training": false,
403
+ "pin_memory": false,
404
+ "save_optimizer_state": false,
405
+ "save_training_settings_to_txt": true,
406
+ "dataset_filename_word_regex": "",
407
+ "dataset_filename_join_string": " ",
408
+ "training_image_repeats_per_epoch": 1,
409
+ "training_write_csv_every": 500.0,
410
+ "training_xattention_optimizations": false,
411
+ "training_enable_tensorboard": false,
412
+ "training_tensorboard_save_images": false,
413
+ "training_tensorboard_flush_every": 120.0,
414
+ "canvas_hotkey_zoom": "Alt",
415
+ "canvas_hotkey_adjust": "Ctrl",
416
+ "canvas_hotkey_shrink_brush": "Q",
417
+ "canvas_hotkey_grow_brush": "W",
418
+ "canvas_hotkey_move": "F",
419
+ "canvas_hotkey_fullscreen": "S",
420
+ "canvas_hotkey_reset": "R",
421
+ "canvas_hotkey_overlap": "O",
422
+ "canvas_show_tooltip": true,
423
+ "canvas_auto_expand": true,
424
+ "canvas_blur_prompt": false,
425
+ "canvas_disabled_functions": [
426
+ "Overlap"
427
+ ],
428
+ "interrogate_keep_models_in_memory": false,
429
+ "interrogate_return_ranks": false,
430
+ "interrogate_clip_num_beams": 1,
431
+ "interrogate_clip_min_length": 24,
432
+ "interrogate_clip_max_length": 48,
433
+ "interrogate_clip_dict_limit": 1500.0,
434
+ "interrogate_clip_skip_categories": [],
435
+ "interrogate_deepbooru_score_threshold": 0.5,
436
+ "deepbooru_sort_alpha": true,
437
+ "deepbooru_use_spaces": true,
438
+ "deepbooru_escape": true,
439
+ "deepbooru_filter_tags": ""
440
+ }
Stable-diffusion/ui/custom_hires_fix.py ADDED
@@ -0,0 +1,416 @@
1
+ import math
2
+ from os.path import exists
3
+
4
+ from tqdm import trange
5
+ from modules import scripts, shared, processing, sd_samplers, script_callbacks, rng
6
+ from modules import devices, prompt_parser, sd_models, extra_networks
7
+ import modules.images as images
8
+ import k_diffusion
9
+
10
+ import gradio as gr
11
+ import numpy as np
12
+ from PIL import Image, ImageEnhance
13
+ import torch
14
+ import importlib
15
+
16
+
17
+ def safe_import(import_name, pkg_name = None):
18
+ try:
19
+ __import__(import_name)
20
+ except Exception:
21
+ pkg_name = pkg_name or import_name
22
+ import pip
23
+ if hasattr(pip, 'main'):
24
+ pip.main(['install', pkg_name])
25
+ else:
26
+ pip._internal.main(['install', pkg_name])
27
+ __import__(import_name)
28
+
29
+
30
+ safe_import('kornia')
31
+ safe_import('omegaconf')
32
+ safe_import('pathlib')
33
+ from omegaconf import DictConfig, OmegaConf
34
+ from pathlib import Path
35
+ import kornia
36
+ from skimage import exposure
37
+
38
+ config_path = Path(__file__).parent.resolve() / '../config.yaml'
39
+
40
+
41
+ class CustomHiresFix(scripts.Script):
42
+ def __init__(self):
43
+ super().__init__()
44
+ if not exists(config_path):
45
+ open(config_path, 'w').close()
46
+ self.config: DictConfig = OmegaConf.load(config_path)
47
+ self.callback_set = False
48
+ self.orig_clip_skip = None
49
+ self.orig_cfg = None
50
+ self.p: processing.StableDiffusionProcessing = None
51
+ self.pp = None
52
+ self.sampler = []
53
+ self.cond = None
54
+ self.uncond = None
55
+ self.step = None
56
+ self.tv = None
57
+ self.width = None
58
+ self.height = None
59
+ self.use_cn = False
60
+ self.external_code = None
61
+ self.cn_image = None
62
+ self.cn_units = []
63
+
64
+ def title(self):
65
+ return "Custom Hires Fix"
66
+
67
+ def show(self, is_img2img):
68
+ return scripts.AlwaysVisible
69
+
70
+ def ui(self, is_img2img):
71
+ with gr.Accordion(label='Custom hires fix', open=False):
72
+ enable = gr.Checkbox(label='Enable extension', value=self.config.get('enable', False))
73
+ with gr.Row():
74
+ width = gr.Slider(minimum=512, maximum=2048, step=8,
75
+ label="Upscale width to",
76
+ value=self.config.get('width', 1024), allow_flagging='never', show_progress=False)
77
+ height = gr.Slider(minimum=512, maximum=2048, step=8,
78
+ label="Upscale height to",
79
+ value=self.config.get('height', 0), allow_flagging='never', show_progress=False)
80
+ steps = gr.Slider(minimum=8, maximum=25, step=1,
81
+ label="Steps",
82
+ value=self.config.get('steps', 15))
83
+ with gr.Row():
84
+ prompt = gr.Textbox(label='Prompt for upscale (added to generation prompt)',
85
+ placeholder='Leave empty for using generation prompt',
86
+ value=self.config.get('prompt', ''))
87
+ with gr.Row():
88
+ negative_prompt = gr.Textbox(label='Negative prompt for upscale (replaces generation prompt)',
89
+ placeholder='Leave empty for using generation negative prompt',
90
+ value=self.config.get('negative_prompt', ''))
91
+ with gr.Row():
92
+ first_upscaler = gr.Dropdown([*[x.name for x in shared.sd_upscalers
93
+ if x.name not in ['None', 'Nearest', 'LDSR']]],
94
+ label='First upscaler',
95
+ value=self.config.get('first_upscaler', 'R-ESRGAN 4x+'))
96
+ second_upscaler = gr.Dropdown([*[x.name for x in shared.sd_upscalers
97
+ if x.name not in ['None', 'Nearest', 'LDSR']]],
98
+ label='Second upscaler',
99
+ value=self.config.get('second_upscaler', 'R-ESRGAN 4x+'))
100
+ with gr.Row():
101
+ first_latent = gr.Slider(minimum=0.0, maximum=1.0, step=0.01,
102
+ label="Latent upscale ratio (1)",
103
+ value=self.config.get('first_latent', 0.3))
104
+ second_latent = gr.Slider(minimum=0.0, maximum=1.0, step=0.01,
105
+ label="Latent upscale ratio (2)",
106
+ value=self.config.get('second_latent', 0.1))
107
+ with gr.Row():
108
+ filter = gr.Dropdown(['Noise sync (sharp)', 'Morphological (smooth)', 'Combined (balanced)'],
109
+ label='Filter mode',
110
+ value=self.config.get('filter', 'Noise sync (sharp)'))
111
+ strength = gr.Slider(minimum=1.0, maximum=3.5, step=0.1, label="Generation strength",
112
+ value=self.config.get('strength', 2.0))
113
+ denoise_offset = gr.Slider(minimum=-0.05, maximum=0.15, step=0.01,
114
+ label="Denoise offset",
115
+ value=self.config.get('denoise_offset', 0.05))
116
+ with gr.Accordion(label='Extra', open=False):
117
+ with gr.Row():
118
+ filter_offset = gr.Slider(minimum=-1.0, maximum=1.0, step=0.1,
119
+ label="Filter offset (higher - smoother)",
120
+ value=self.config.get('filter_offset', 0.0))
121
+ clip_skip = gr.Slider(minimum=0, maximum=5, step=1,
122
+ label="Clip skip for upscale (0 - not change)",
123
+ value=self.config.get('clip_skip', 0))
124
+ with gr.Row():
125
+ start_control_at = gr.Slider(minimum=0.0, maximum=0.7, step=0.01,
126
+ label="CN start for enabled units",
127
+ value=self.config.get('start_control_at', 0.0))
128
+ cn_ref = gr.Checkbox(label='Use last image for reference', value=self.config.get('cn_ref', False))
129
+ with gr.Row():
130
+ sampler = gr.Dropdown(['Restart', 'DPM++ 2M', 'DPM++ 2M Karras', 'DPM++ 2M SDE', 'DPM++ 2M SDE Karras', 'DPM++ 2M SDE Heun', 'DPM++ 2M SDE Heun Karras', 'DPM++ 3M SDE', 'DPM++ 3M SDE Karras', 'Restart + DPM++ 3M SDE'],
131
+ label='Sampler',
132
+ value=self.config.get('sampler', 'DPM++ 2M Karras'))
133
+
134
+ if is_img2img:
135
+ width.change(fn=lambda x: gr.update(value=0), inputs=width, outputs=height)
136
+ height.change(fn=lambda x: gr.update(value=0), inputs=height, outputs=width)
137
+ else:
138
+ width.change(fn=lambda x: gr.update(value=0), inputs=width, outputs=height)
139
+ height.change(fn=lambda x: gr.update(value=0), inputs=height, outputs=width)
140
+
141
+ ui = [enable, width, height, steps, first_upscaler, second_upscaler, first_latent, second_latent, prompt,
142
+ negative_prompt, strength, filter, filter_offset, denoise_offset, clip_skip, sampler, cn_ref, start_control_at]
143
+ for elem in ui:
144
+ setattr(elem, "do_not_save_to_config", True)
145
+ return ui
146
+
147
+ def process(self, p, *args, **kwargs):
148
+ self.p = p
149
+ self.cn_units = []
150
+ try:
151
+ self.external_code = importlib.import_module('extensions.sd-webui-controlnet.scripts.external_code', 'external_code')
152
+ cn_units = self.external_code.get_all_units_in_processing(p)
153
+ for unit in cn_units:
154
+ self.cn_units += [unit]
155
+ self.use_cn = len(self.cn_units) > 0
156
+ except ImportError:
157
+ self.use_cn = False
158
+
159
+ def postprocess_image(self, p, pp: scripts.PostprocessImageArgs,
160
+ enable, width, height, steps, first_upscaler, second_upscaler, first_latent, second_latent, prompt,
161
+ negative_prompt, strength, filter, filter_offset, denoise_offset, clip_skip, sampler, cn_ref, start_control_at
162
+ ):
163
+ if not enable:
164
+ return
165
+ self.step = 0
166
+ self.pp = pp
167
+ self.config.width = width
168
+ self.config.height = height
169
+ self.config.prompt = prompt.strip()
170
+ self.config.negative_prompt = negative_prompt.strip()
171
+ self.config.steps = steps
172
+ self.config.first_upscaler = first_upscaler
173
+ self.config.second_upscaler = second_upscaler
174
+ self.config.first_latent = first_latent
175
+ self.config.second_latent = second_latent
176
+ self.config.strength = strength
177
+ self.config.filter = filter
178
+ self.config.filter_offset = filter_offset
179
+ self.config.denoise_offset = denoise_offset
180
+ self.config.clip_skip = clip_skip
181
+ self.config.sampler = sampler
182
+ self.config.cn_ref = cn_ref
183
+ self.config.start_control_at = start_control_at
184
+ self.orig_clip_skip = shared.opts.CLIP_stop_at_last_layers
185
+ self.orig_cfg = p.cfg_scale
186
+
187
+ if clip_skip > 0:
188
+ shared.opts.CLIP_stop_at_last_layers = clip_skip
189
+ if 'Restart' in self.config.sampler:
190
+ self.sampler = sd_samplers.create_sampler('Restart', p.sd_model)
191
+ else:
192
+ self.sampler = sd_samplers.create_sampler(sampler, p.sd_model)
193
+
194
+ def denoise_callback(params: script_callbacks.CFGDenoiserParams):
195
+ if params.sampling_step > 0:
196
+ p.cfg_scale = self.orig_cfg
197
+ if self.step == 1 and self.config.strength != 1.0:
198
+ params.sigma[-1] = params.sigma[0] * (1 - (1 - self.config.strength) / 100)
199
+ elif self.step == 2 and self.config.filter == 'Noise sync (sharp)':
200
+ params.sigma[-1] = params.sigma[0] * (1 - (self.tv - 1 + self.config.filter_offset - (self.config.denoise_offset * 5)) / 50)
201
+ elif self.step == 2 and self.config.filter == 'Combined (balanced)':
202
+ params.sigma[-1] = params.sigma[0] * (1 - (self.tv - 1 + self.config.filter_offset - (self.config.denoise_offset * 5)) / 100)
203
+
204
+ if self.callback_set is False:
205
+ script_callbacks.on_cfg_denoiser(denoise_callback)
206
+ self.callback_set = True
207
+
208
+ _, loras_act = extra_networks.parse_prompt(prompt)
209
+ extra_networks.activate(p, loras_act)
210
+ _, loras_deact = extra_networks.parse_prompt(negative_prompt)
211
+ extra_networks.deactivate(p, loras_deact)
212
+
213
+ self.cn_image = pp.image
214
+
215
+ with devices.autocast():
216
+ shared.state.nextjob()
217
+ x = self.gen(pp.image)
218
+ shared.state.nextjob()
219
+ x = self.filter(x)
220
+ shared.opts.CLIP_stop_at_last_layers = self.orig_clip_skip
221
+ sd_models.apply_token_merging(p.sd_model, p.get_token_merging_ratio())
222
+ pp.image = x
223
+ extra_networks.deactivate(p, loras_act)
224
+ OmegaConf.save(self.config, config_path)
225
+
226
+ def enable_cn(self, image: np.ndarray):
227
+ for unit in self.cn_units:
228
+ if unit.model != 'None':
229
+ unit.guidance_start = self.config.start_control_at if unit.enabled else unit.guidance_start
230
+ unit.processor_res = min(image.shape[0], image.shape[1])
231
+ unit.enabled = True
232
+ if unit.image is None:
233
+ unit.image = image
234
+ self.p.width = image.shape[1]
235
+ self.p.height = image.shape[0]
236
+ self.external_code.update_cn_script_in_processing(self.p, self.cn_units)
237
+ for script in self.p.scripts.alwayson_scripts:
238
+ if script.title().lower() == 'controlnet':
239
+ script.controlnet_hack(self.p)
240
+
241
+ def process_prompt(self):
242
+ prompt = self.p.prompt.strip().split('AND', 1)[0]
243
+ if self.config.prompt != '':
244
+ prompt = f'{prompt} {self.config.prompt}'
245
+
246
+ if self.config.negative_prompt != '':
247
+ negative_prompt = self.config.negative_prompt
248
+ else:
249
+ negative_prompt = self.p.negative_prompt.strip()
250
+
251
+ with devices.autocast():
252
+ if self.width is not None and self.height is not None and hasattr(prompt_parser, 'SdConditioning'):
253
+ c = prompt_parser.SdConditioning([prompt], False, self.width, self.height)
254
+ uc = prompt_parser.SdConditioning([negative_prompt], False, self.width, self.height)
255
+ else:
256
+ c = [prompt]
257
+ uc = [negative_prompt]
258
+ self.cond = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, c, self.config.steps)
259
+ self.uncond = prompt_parser.get_learned_conditioning(shared.sd_model, uc, self.config.steps)
260
+
261
+ def gen(self, x):
262
+ self.step = 1
263
+ ratio = x.width / x.height
264
+ self.width = self.config.width if self.config.width > 0 else int(self.config.height * ratio)
265
+ self.height = self.config.height if self.config.height > 0 else int(self.config.width / ratio)
266
+ self.width = int((self.width - x.width) // 2 + x.width)
267
+ self.height = int((self.height - x.height) // 2 + x.height)
268
+ sd_models.apply_token_merging(self.p.sd_model, self.p.get_token_merging_ratio(for_hr=True) / 2)
269
+
270
+ if self.use_cn:
271
+ self.enable_cn(np.array(self.cn_image.resize((self.width, self.height))))
272
+
273
+ with devices.autocast(), torch.inference_mode():
274
+ self.process_prompt()
275
+
276
+ x_big = None
277
+ if self.config.first_latent > 0:
278
+ image = np.array(x).astype(np.float32) / 255.0
279
+ image = np.moveaxis(image, 2, 0)
280
+ decoded_sample = torch.from_numpy(image)
281
+ decoded_sample = decoded_sample.to(shared.device).to(devices.dtype_vae)
282
+ decoded_sample = 2.0 * decoded_sample - 1.0
283
+ encoded_sample = shared.sd_model.encode_first_stage(decoded_sample.unsqueeze(0).to(devices.dtype_vae))
284
+ sample = shared.sd_model.get_first_stage_encoding(encoded_sample)
285
+ x_big = torch.nn.functional.interpolate(sample, (self.height // 8, self.width // 8), mode='nearest')
286
+
287
+ if self.config.first_latent < 1:
288
+ x = images.resize_image(0, x, self.width, self.height,
289
+ upscaler_name=self.config.first_upscaler)
290
+ image = np.array(x).astype(np.float32) / 255.0
291
+ image = np.moveaxis(image, 2, 0)
292
+ decoded_sample = torch.from_numpy(image)
293
+ decoded_sample = decoded_sample.to(shared.device).to(devices.dtype_vae)
294
+ decoded_sample = 2.0 * decoded_sample - 1.0
295
+ encoded_sample = shared.sd_model.encode_first_stage(decoded_sample.unsqueeze(0).to(devices.dtype_vae))
296
+ sample = shared.sd_model.get_first_stage_encoding(encoded_sample)
297
+ else:
298
+ sample = x_big
299
+ if x_big is not None and self.config.first_latent != 1:
300
+ sample = (sample * (1 - self.config.first_latent)) + (x_big * self.config.first_latent)
301
+ image_conditioning = self.p.img2img_image_conditioning(decoded_sample, sample)
302
+
303
+ noise = torch.zeros_like(sample)
304
+ noise = kornia.augmentation.RandomGaussianNoise(mean=0.0, std=1.0, p=1.0)(noise)
305
+ steps = int(max(((self.p.steps - self.config.steps) / 2) + self.config.steps, self.config.steps))
306
+ self.p.denoising_strength = 0.45 + self.config.denoise_offset * 0.2
307
+ self.p.cfg_scale = self.orig_cfg + 0
308
+
309
+ def denoiser_override(n):
310
+ sigmas = k_diffusion.sampling.get_sigmas_polyexponential(n, 0.01, 15, 0.5, devices.device)
311
+ return sigmas
312
+
313
+ self.p.rng = rng.ImageRNG(sample.shape[1:], self.p.seeds, subseeds=self.p.subseeds,
314
+ subseed_strength=self.p.subseed_strength,
315
+ seed_resize_from_h=self.p.seed_resize_from_h, seed_resize_from_w=self.p.seed_resize_from_w)
316
+
317
+ self.p.sampler_noise_scheduler_override = denoiser_override
318
+ self.p.batch_size = 1
319
+ sample = self.sampler.sample_img2img(self.p, sample.to(devices.dtype), noise, self.cond, self.uncond,
320
+ steps=steps, image_conditioning=image_conditioning).to(devices.dtype_vae)
321
+ b, c, w, h = sample.size()
322
+ self.tv = kornia.losses.TotalVariation()(sample).mean() / (w * h)
323
+ devices.torch_gc()
324
+ decoded_sample = processing.decode_first_stage(shared.sd_model, sample)
325
+ if math.isnan(decoded_sample.min()):
326
+ devices.torch_gc()
327
+ sample = torch.clamp(sample, -3, 3)
328
+ decoded_sample = processing.decode_first_stage(shared.sd_model, sample)
329
+ decoded_sample = torch.clamp((decoded_sample + 1.0) / 2.0, min=0.0, max=1.0).squeeze()
330
+ x_sample = 255. * np.moveaxis(decoded_sample.cpu().numpy(), 0, 2)
331
+ x_sample = x_sample.astype(np.uint8)
332
+ image = Image.fromarray(x_sample)
333
+ return image
334
+
335
+ def filter(self, x):
336
+ if 'Restart' == self.config.sampler:
337
+ self.sampler = sd_samplers.create_sampler('Restart', shared.sd_model)
338
+ elif 'Restart + DPM++ 3M SDE' == self.config.sampler:
339
+ self.sampler = sd_samplers.create_sampler('DPM++ 3M SDE', shared.sd_model)
340
+ self.step = 2
341
+ ratio = x.width / x.height
342
+ self.width = self.config.width if self.config.width > 0 else int(self.config.height * ratio)
343
+ self.height = self.config.height if self.config.height > 0 else int(self.config.width / ratio)
344
+ sd_models.apply_token_merging(self.p.sd_model, self.p.get_token_merging_ratio(for_hr=True))
345
+
346
+ if self.use_cn:
347
+ self.cn_image = x if self.config.cn_ref else self.cn_image
348
+ self.enable_cn(np.array(self.cn_image.resize((self.width, self.height))))
349
+
350
+ with devices.autocast(), torch.inference_mode():
351
+ self.process_prompt()
352
+
353
+ x_big = None
354
+ if self.config.second_latent > 0:
355
+ image = np.array(x).astype(np.float32) / 255.0
356
+ image = np.moveaxis(image, 2, 0)
357
+ decoded_sample = torch.from_numpy(image)
358
+ decoded_sample = decoded_sample.to(shared.device).to(devices.dtype_vae)
359
+ decoded_sample = 2.0 * decoded_sample - 1.0
360
+ encoded_sample = shared.sd_model.encode_first_stage(decoded_sample.unsqueeze(0).to(devices.dtype_vae))
361
+ sample = shared.sd_model.get_first_stage_encoding(encoded_sample)
362
+ x_big = torch.nn.functional.interpolate(sample, (self.height // 8, self.width // 8), mode='nearest')
363
+
364
+ if self.config.second_latent < 1:
365
+ x = images.resize_image(0, x, self.width, self.height, upscaler_name=self.config.second_upscaler)
366
+ image = np.array(x).astype(np.float32) / 255.0
367
+ image = np.moveaxis(image, 2, 0)
368
+ decoded_sample = torch.from_numpy(image)
369
+ decoded_sample = decoded_sample.to(shared.device).to(devices.dtype_vae)
370
+ decoded_sample = 2.0 * decoded_sample - 1.0
371
+ encoded_sample = shared.sd_model.encode_first_stage(decoded_sample.unsqueeze(0).to(devices.dtype_vae))
372
+ sample = shared.sd_model.get_first_stage_encoding(encoded_sample)
373
+ else:
374
+ sample = x_big
375
+ if x_big is not None and self.config.second_latent != 1:
376
+ sample = (sample * (1 - self.config.second_latent)) + (x_big * self.config.second_latent)
377
+ image_conditioning = self.p.img2img_image_conditioning(decoded_sample, sample)
378
+
379
+ noise = torch.zeros_like(sample)
380
+ noise = kornia.augmentation.RandomGaussianNoise(mean=0.0, std=1.0, p=1.0)(noise)
381
+ self.p.denoising_strength = 0.45 + self.config.denoise_offset
382
+ self.p.cfg_scale = self.orig_cfg + 3
383
+
384
+ if self.config.filter == 'Morphological (smooth)':
385
+ noise_mask = kornia.morphology.gradient(sample, torch.ones(5, 5).to(devices.device))
386
+ noise_mask = kornia.filters.median_blur(noise_mask, (3, 3))
387
+ noise_mask = (0.1 + noise_mask / noise_mask.max()) * (max(
388
+ (1.75 - (self.tv - 1) * 4), 1.75) - self.config.filter_offset)
389
+ noise = noise * noise_mask
390
+ elif self.config.filter == 'Combined (balanced)':
391
+ noise_mask = kornia.morphology.gradient(sample, torch.ones(5, 5).to(devices.device))
392
+ noise_mask = kornia.filters.median_blur(noise_mask, (3, 3))
393
+ noise_mask = (0.1 + noise_mask / noise_mask.max()) * (max(
394
+ (1.75 - (self.tv - 1) / 2), 1.75) - self.config.filter_offset)
395
+ noise = noise * noise_mask
396
+
397
+ def denoiser_override(n):
398
+ return k_diffusion.sampling.get_sigmas_polyexponential(n, 0.01, 7, 0.5, devices.device)
399
+
400
+ self.p.sampler_noise_scheduler_override = denoiser_override
401
+ self.p.batch_size = 1
402
+ samples = self.sampler.sample_img2img(self.p, sample.to(devices.dtype), noise, self.cond, self.uncond,
403
+ steps=self.config.steps, image_conditioning=image_conditioning
404
+ ).to(devices.dtype_vae)
405
+ devices.torch_gc()
406
+ self.p.iteration += 1
407
+ decoded_sample = processing.decode_first_stage(shared.sd_model, samples)
408
+ if math.isnan(decoded_sample.min()):
409
+ devices.torch_gc()
410
+ samples = torch.clamp(samples, -3, 3)
411
+ decoded_sample = processing.decode_first_stage(shared.sd_model, samples)
412
+ decoded_sample = torch.clamp((decoded_sample + 1.0) / 2.0, min=0.0, max=1.0).squeeze()
413
+ x_sample = 255. * np.moveaxis(decoded_sample.cpu().numpy(), 0, 2)
414
+ x_sample = x_sample.astype(np.uint8)
415
+ image = Image.fromarray(x_sample)
416
+ return image
Stable-diffusion/ui/embeddings.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:793d2c9d60623bb3e7ff229d17777d95cef08c73c98d23ea3565de0de8519208
+ size 5621788
Stable-diffusion/ui/encrypt_image.py ADDED
@@ -0,0 +1,206 @@
1
+ import base64
2
+ import io
3
+ from pathlib import Path
4
+ from modules import shared,script_callbacks,scripts as md_scripts,images
5
+ from modules.api import api
6
+ from modules.shared import opts
7
+ from scripts.core.core import get_sha256,dencrypt_image,dencrypt_image_v2,encrypt_image_v2
8
+ from PIL import PngImagePlugin,_util,ImagePalette
9
+ from PIL import Image as PILImage
10
+ from io import BytesIO
11
+ from typing import Optional
12
+ from fastapi import FastAPI
13
+ from gradio import Blocks
14
+ from fastapi import FastAPI, Request, Response
15
+ import sys
16
+ from urllib.parse import unquote
17
+ from colorama import Fore, Back, Style
18
+
19
+ repo_dir = md_scripts.basedir()
20
+ password = getattr(shared.cmd_opts, 'encrypt_pass', None)
21
+
22
+
23
+ def hook_http_request(app: FastAPI):
24
+ @app.middleware("http")
25
+ async def image_dencrypt(req: Request, call_next):
26
+ endpoint:str = req.scope.get('path', 'err')
27
+ endpoint='/'+endpoint.strip('/')
28
+ # Compatibility with the Infinite Image Browsing extension
29
+ if endpoint.startswith('/infinite_image_browsing/image-thumbnail') or endpoint.startswith('/infinite_image_browsing/file'):
30
+ query_string:str = req.scope.get('query_string').decode('utf-8')
31
+ query_string = unquote(query_string)
32
+ if query_string and query_string.index('path=')>=0:
33
+ query = query_string.split('&')
34
+ path = ''
35
+ for sub in query:
36
+ if sub.startswith('path='):
37
+ path = sub[sub.index('=')+1:]
38
+ if path:
39
+ endpoint = '/file=' + path
40
+ # Model preview thumbnails
41
+ if endpoint.startswith('/sd_extra_networks/thumb'):
42
+ query_string:str = req.scope.get('query_string').decode('utf-8')
43
+ query_string = unquote(query_string)
44
+ if query_string and query_string.index('filename=')>=0:
45
+ query = query_string.split('&')
46
+ path = ''
47
+ for sub in query:
48
+ if sub.startswith('filename='):
49
+ path = sub[sub.index('=')+1:]
50
+ if path:
51
+ endpoint = '/file=' + path
52
+ if endpoint.startswith('/file='):
53
+ file_path = endpoint[6:] or ''
54
+ if not file_path: return await call_next(req)
55
+ if file_path.rfind('.') == -1: return await call_next(req)
56
+ if not file_path[file_path.rfind('.'):]: return await call_next(req)
57
+ if file_path[file_path.rfind('.'):].lower() in ['.png','.jpg','.jpeg','.webp','.abcd']:
58
+ image = PILImage.open(file_path)
59
+ pnginfo = image.info or {}
60
+ if 'Encrypt' in pnginfo:
61
+ buffered = BytesIO()
62
+ info = PngImagePlugin.PngInfo()
63
+ for key in pnginfo.keys():
64
+ if pnginfo[key]:
65
+ info.add_text(key,pnginfo[key])
66
+ image.save(buffered, format=PngImagePlugin.PngImageFile.format, pnginfo=info)
67
+ decrypted_image_data = buffered.getvalue()
68
+ response: Response = Response(content=decrypted_image_data, media_type="image/png")
69
+ return response
70
+
71
+ return await call_next(req)
72
+
73
+ def set_shared_options():
74
+ # Expose the extension's enabled state to the frontend
75
+ section = ("encrypt_image_is_enable",'图片加密' if shared.opts.localization == 'zh_CN' else "encrypt image" )
76
+ option = shared.OptionInfo(
77
+ default="是",
78
+ label='是否启用了加密插件' if shared.opts.localization == 'zh_CN' else "Whether the encryption plug-in is enabled",
79
+ section=section,
80
+ )
81
+ option.do_not_save = True
82
+ shared.opts.add_option(
83
+ "encrypt_image_is_enable",
84
+ option,
85
+ )
86
+ shared.opts.data['encrypt_image_is_enable'] = "是"
87
+
88
+ def app_started_callback(_: Blocks, app: FastAPI):
89
+ set_shared_options()
90
+
91
+
92
+ if PILImage.Image.__name__ != 'EncryptedImage':
93
+ super_open = PILImage.open
94
+ super_encode_pil_to_base64 = api.encode_pil_to_base64
95
+ super_modules_images_save_image = images.save_image
96
+ super_api_middleware = api.api_middleware
97
+ class EncryptedImage(PILImage.Image):
98
+ __name__ = "EncryptedImage"
99
+
100
+ @staticmethod
101
+ def from_image(image:PILImage.Image):
102
+ image = image.copy()
103
+ img = EncryptedImage()
104
+ img.im = image.im
105
+ img._mode = image.mode
106
+ if image.im.mode:
107
+ try:
108
+ img.mode = image.im.mode
109
+ except Exception as e:
110
+ ''
111
+ img._size = image.size
112
+ img.format = image.format
113
+ if image.mode in ("P", "PA"):
114
+ if image.palette:
115
+ img.palette = image.palette.copy()
116
+ else:
117
+ img.palette = ImagePalette.ImagePalette()
118
+ img.info = image.info.copy()
119
+ return img
120
+
121
+ def save(self, fp, format=None, **params):
122
+ filename = ""
123
+ if isinstance(fp, Path):
124
+ filename = str(fp)
125
+ elif _util.is_path(fp):
126
+ filename = fp
127
+ elif fp == sys.stdout:
128
+ try:
129
+ fp = sys.stdout.buffer
130
+ except AttributeError:
131
+ pass
132
+ if not filename and hasattr(fp, "name") and _util.is_path(fp.name):
133
+ # only set the name for metadata purposes
134
+ filename = fp.name
135
+
136
+ if not filename or not password:
137
+ # If no password is set or the image is not being written to disk, save it as-is
138
+ super().save(fp, format = format, **params)
139
+ return
140
+
141
+ if 'Encrypt' in self.info and (self.info['Encrypt'] == 'pixel_shuffle' or self.info['Encrypt'] == 'pixel_shuffle_2'):
142
+ super().save(fp, format = format, **params)
143
+ return
144
+
145
+ encrypt_image_v2(self, get_sha256(password))
146
+ self.format = PngImagePlugin.PngImageFile.format
147
+ pnginfo = params.get('pnginfo', PngImagePlugin.PngInfo())
148
+ if not pnginfo:
149
+ pnginfo = PngImagePlugin.PngInfo()
150
+ pnginfo.add_text('Encrypt', 'pixel_shuffle_2')
151
+ pnginfo.add_text('EncryptPwdSha', get_sha256(f'{get_sha256(password)}Encrypt'))
152
+ for key in (self.info or {}).keys():
153
+ if self.info[key]:
154
+ pnginfo.add_text(key,str(self.info[key]))
155
+ params.update(pnginfo=pnginfo)
156
+ super().save(fp, format=self.format, **params)
157
+ # After writing to file, decrypt the in-memory image so it stays usable when accessed directly from memory
158
+ dencrypt_image_v2(self, get_sha256(password))
159
+
160
+
161
+
162
+ def open(fp,*args, **kwargs):
163
+ image = super_open(fp,*args, **kwargs)
164
+ if password and image.format.lower() == PngImagePlugin.PngImageFile.format.lower():
165
+ pnginfo = image.info or {}
166
+ if 'Encrypt' in pnginfo and pnginfo["Encrypt"] == 'pixel_shuffle':
167
+ dencrypt_image(image, get_sha256(password))
168
+ pnginfo["Encrypt"] = None
169
+ image = EncryptedImage.from_image(image=image)
170
+ return image
171
+ if 'Encrypt' in pnginfo and pnginfo["Encrypt"] == 'pixel_shuffle_2':
172
+ dencrypt_image_v2(image, get_sha256(password))
173
+ pnginfo["Encrypt"] = None
174
+ image = EncryptedImage.from_image(image=image)
175
+ return image
176
+ return EncryptedImage.from_image(image=image)
177
+
178
+ def encode_pil_to_base64(image:PILImage.Image):
179
+ with io.BytesIO() as output_bytes:
180
+ image.save(output_bytes, format="PNG", quality=opts.jpeg_quality)
181
+ pnginfo = image.info or {}
182
+ if 'Encrypt' in pnginfo and pnginfo["Encrypt"] == 'pixel_shuffle':
183
+ dencrypt_image(image, get_sha256(password))
184
+ pnginfo["Encrypt"] = None
185
+ if 'Encrypt' in pnginfo and pnginfo["Encrypt"] == 'pixel_shuffle_2':
186
+ dencrypt_image_v2(image, get_sha256(password))
187
+ pnginfo["Encrypt"] = None
188
+ bytes_data = output_bytes.getvalue()
189
+ return base64.b64encode(bytes_data)
190
+
191
+ def api_middleware(app: FastAPI):
192
+ super_api_middleware(app)
193
+ hook_http_request(app)
194
+
195
+ if password:
196
+ PILImage.Image = EncryptedImage
197
+ PILImage.open = open
198
+ api.encode_pil_to_base64 = encode_pil_to_base64
199
+ api.api_middleware = api_middleware
200
+
201
+ if password:
202
+ script_callbacks.on_app_started(app_started_callback)
203
+ print(f'{Fore.GREEN}[-] Image Encryption started.{Style.RESET_ALL}')
204
+
205
+ else:
206
+ print(f'{Fore.RED}[-] Image Encryption DISABLED.{Style.RESET_ALL}')
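
The `save`/`open` overrides above mark encrypted PNGs with `Encrypt` and `EncryptPwdSha` text chunks. Below is a minimal standalone sketch (not part of the uploaded file) that reads those tags with plain Pillow, assuming `get_sha256(s)` in `encrypt_image.py` is simply the hex SHA-256 of the UTF-8 string; the file path is illustrative.

```python
# Hedged sketch: check whether a PNG was written by this extension and whether a
# given password matches its EncryptPwdSha tag.
import hashlib
from PIL import Image

def sha256_hex(s: str) -> str:
    # Assumption: mirrors get_sha256() in encrypt_image.py
    return hashlib.sha256(s.encode("utf-8")).hexdigest()

def check_encrypted_png(path: str, password: str) -> str:
    info = Image.open(path).info or {}
    if info.get("Encrypt") not in ("pixel_shuffle", "pixel_shuffle_2"):
        return "not encrypted by this extension"
    expected = sha256_hex(f"{sha256_hex(password)}Encrypt")
    return "password matches" if info.get("EncryptPwdSha") == expected else "password mismatch"

# print(check_encrypted_png("outputs/00001.png", "my-password"))
```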
Stable-diffusion/ui/encrypt_images_info.js ADDED
@@ -0,0 +1,27 @@
1
+ lsLoad = false;
2
+ onUiUpdate(function () {
3
+ if (lsLoad) return;
4
+ if (!opts) return;
5
+ if (!opts["encrypt_image_is_enable"]) return;
6
+ lsLoad = true;
7
+ let enable = opts["encrypt_image_is_enable"] == "是";
8
+
9
+ function renderInfo(txtOrImg2img, isEnable) {
10
+ let info = document.getElementById("encrypt_image_" + txtOrImg2img + "2img_info");
11
+ let top = document.getElementById(txtOrImg2img + "2img_neg_prompt");
12
+ if (!top) return;
13
+
14
+ if (!info) {
15
+ let parent = top.parentNode;
16
+ info = document.createElement("div");
17
+ info.style.minWidth = "100%";
18
+ info.style.textAlign = "center";
19
+ info.style.opacity = 0.5;
20
+ info.style.fontSize = ".89em";
21
+ info.id = "encrypt_image_" + txtOrImg2img + "2img_info";
22
+ parent.insertBefore(info, top.nextSibling);
23
+ }
24
+ }
25
+ renderInfo("txt", enable);
26
+ renderInfo("img", enable);
27
+ });
Stable-diffusion/ui/hashes.py ADDED
@@ -0,0 +1,81 @@
1
+ import hashlib
2
+ import os.path
3
+
4
+ from modules import shared
5
+ import modules.cache
6
+
7
+ dump_cache = modules.cache.dump_cache
8
+ cache = modules.cache.cache
9
+
10
+
11
+ def calculate_sha256(filename):
12
+ hash_sha256 = hashlib.sha256()
13
+ blksize = 1024 * 1024
14
+
15
+ with open(filename, "rb") as f:
16
+ for chunk in iter(lambda: f.read(blksize), b""):
17
+ hash_sha256.update(chunk)
18
+
19
+ return hash_sha256.hexdigest()
20
+
21
+
22
+ def sha256_from_cache(filename, title, use_addnet_hash=False):
23
+ hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes")
24
+ try:
25
+ ondisk_mtime = os.path.getmtime(filename)
26
+ except FileNotFoundError:
27
+ return None
28
+
29
+ if title not in hashes:
30
+ return None
31
+
32
+ cached_sha256 = hashes[title].get("sha256", None)
33
+ cached_mtime = hashes[title].get("mtime", 0)
34
+
35
+ if ondisk_mtime > cached_mtime or cached_sha256 is None:
36
+ return None
37
+
38
+ return cached_sha256
39
+
40
+
41
+ def sha256(filename, title, use_addnet_hash=False):
42
+ hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes")
43
+
44
+ sha256_value = sha256_from_cache(filename, title, use_addnet_hash)
45
+ if sha256_value is not None:
46
+ return sha256_value
47
+
48
+ if shared.cmd_opts.no_hashing:
49
+ return None
50
+
51
+ if use_addnet_hash:
52
+ with open(filename, "rb") as file:
53
+ sha256_value = addnet_hash_safetensors(file)
54
+ else:
55
+ sha256_value = calculate_sha256(filename)
56
+
57
+ hashes[title] = {
58
+ "mtime": os.path.getmtime(filename),
59
+ "sha256": sha256_value,
60
+ }
61
+
62
+ dump_cache()
63
+
64
+ return sha256_value
65
+
66
+
67
+ def addnet_hash_safetensors(b):
68
+ """kohya-ss hash for safetensors from https://github.com/kohya-ss/sd-scripts/blob/main/library/train_util.py"""
69
+ hash_sha256 = hashlib.sha256()
70
+ blksize = 1024 * 1024
71
+
72
+ b.seek(0)
73
+ header = b.read(8)
74
+ n = int.from_bytes(header, "little")
75
+
76
+ offset = n + 8
77
+ b.seek(offset)
78
+ for chunk in iter(lambda: b.read(blksize), b""):
79
+ hash_sha256.update(chunk)
80
+
81
+ return hash_sha256.hexdigest()
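
A short usage sketch for the helpers above (the file path is a placeholder): `addnet_hash_safetensors` reads the 8-byte little-endian header-length prefix of a `.safetensors` file, skips the JSON header, and hashes only the tensor payload, so the hash is unaffected by metadata edits, while `calculate_sha256` hashes the whole file.

```python
# Hedged usage sketch; "model.safetensors" is an illustrative path.
path = "model.safetensors"

with open(path, "rb") as f:
    addnet_hash = addnet_hash_safetensors(f)   # hash of the tensor payload only

full_hash = calculate_sha256(path)             # hash of the entire file
print(addnet_hash[:12], full_hash[:10])        # shortened for display
```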
Stable-diffusion/ui/lora_block_weight.py ADDED
@@ -0,0 +1,1152 @@
1
+ import cv2
2
+ import json
3
+ import os
4
+ import gc
5
+ import re
6
+ import sys
7
+ import torch
8
+ import shutil
9
+ import math
10
+ import importlib
11
+ import numpy as np
12
+ import gradio as gr
13
+ import os.path
14
+ import random
15
+ from pprint import pprint
16
+ import modules.ui
17
+ import modules.scripts as scripts
18
+ from PIL import Image, ImageFont, ImageDraw
19
+ import modules.shared as shared
20
+ from modules import devices, sd_models, images,cmd_args, extra_networks, sd_hijack
21
+ from modules.shared import cmd_opts, opts, state
22
+ from modules.processing import process_images, Processed
23
+ from modules.script_callbacks import CFGDenoiserParams, on_cfg_denoiser
24
+
25
+ LBW_T = "customscript/lora_block_weight.py/txt2img/Active/value"
26
+ LBW_I = "customscript/lora_block_weight.py/img2img/Active/value"
27
+
28
+ if os.path.exists(cmd_opts.ui_config_file):
29
+ with open(cmd_opts.ui_config_file, 'r', encoding="utf-8") as json_file:
30
+ ui_config = json.load(json_file)
31
+ else:
32
+ print("ui config file not found, using default values")
33
+ ui_config = {}
34
+
35
+ startup_t = ui_config[LBW_T] if LBW_T in ui_config else None
36
+ startup_i = ui_config[LBW_I] if LBW_I in ui_config else None
37
+ active_t = "Active" if startup_t else "Not Active"
38
+ active_i = "Active" if startup_i else "Not Active"
39
+
40
+ lxyz = ""
41
+ lzyx = ""
42
+ prompts = ""
43
+ xyelem = ""
44
+ princ = False
45
+
46
+ try:
47
+ from ldm_patched.modules import model_management
48
+ forge = True
49
+ except:
50
+ forge = False
51
+
52
+ BLOCKID26=["BASE","IN00","IN01","IN02","IN03","IN04","IN05","IN06","IN07","IN08","IN09","IN10","IN11","M00","OUT00","OUT01","OUT02","OUT03","OUT04","OUT05","OUT06","OUT07","OUT08","OUT09","OUT10","OUT11"]
53
+ BLOCKID17=["BASE","IN01","IN02","IN04","IN05","IN07","IN08","M00","OUT03","OUT04","OUT05","OUT06","OUT07","OUT08","OUT09","OUT10","OUT11"]
54
+ BLOCKID12=["BASE","IN04","IN05","IN07","IN08","M00","OUT00","OUT01","OUT02","OUT03","OUT04","OUT05"]
55
+ BLOCKID20=["BASE","IN00","IN01","IN02","IN03","IN04","IN05","IN06","IN07","IN08","M00","OUT00","OUT01","OUT02","OUT03","OUT04","OUT05","OUT06","OUT07","OUT08"]
56
+ BLOCKNUMS = [12,17,20,26]
57
+ BLOCKIDS=[BLOCKID12,BLOCKID17,BLOCKID20,BLOCKID26]
58
+
59
+ BLOCKS=["encoder",
60
+ "diffusion_model_input_blocks_0_",
61
+ "diffusion_model_input_blocks_1_",
62
+ "diffusion_model_input_blocks_2_",
63
+ "diffusion_model_input_blocks_3_",
64
+ "diffusion_model_input_blocks_4_",
65
+ "diffusion_model_input_blocks_5_",
66
+ "diffusion_model_input_blocks_6_",
67
+ "diffusion_model_input_blocks_7_",
68
+ "diffusion_model_input_blocks_8_",
69
+ "diffusion_model_input_blocks_9_",
70
+ "diffusion_model_input_blocks_10_",
71
+ "diffusion_model_input_blocks_11_",
72
+ "diffusion_model_middle_block_",
73
+ "diffusion_model_output_blocks_0_",
74
+ "diffusion_model_output_blocks_1_",
75
+ "diffusion_model_output_blocks_2_",
76
+ "diffusion_model_output_blocks_3_",
77
+ "diffusion_model_output_blocks_4_",
78
+ "diffusion_model_output_blocks_5_",
79
+ "diffusion_model_output_blocks_6_",
80
+ "diffusion_model_output_blocks_7_",
81
+ "diffusion_model_output_blocks_8_",
82
+ "diffusion_model_output_blocks_9_",
83
+ "diffusion_model_output_blocks_10_",
84
+ "diffusion_model_output_blocks_11_",
85
+ "embedders"]
86
+
87
+ loopstopper = True
88
+
89
+ ATYPES =["none","Block ID","values","seed","Original Weights","elements"]
90
+
91
+ DEF_WEIGHT_PRESET = "\
92
+ NONE:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n\
93
+ ALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1\n\
94
+ INS:1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0\n\
95
+ IND:1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0\n\
96
+ INALL:1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0\n\
97
+ MIDD:1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0\n\
98
+ OUTD:1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0\n\
99
+ OUTS:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1\n\
100
+ OUTALL:1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1\n\
101
+ ALL0.5:0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5"
102
+
103
+ scriptpath = os.path.dirname(os.path.abspath(__file__))
104
+
105
+ class Script(modules.scripts.Script):
106
+ def __init__(self):
107
+ self.log = {}
108
+ self.stops = {}
109
+ self.starts = {}
110
+ self.active = False
111
+ self.lora = {}
112
+ self.lycoris = {}
113
+ self.networks = {}
114
+
115
+ self.stopsf = []
116
+ self.startsf = []
117
+ self.uf = []
118
+ self.lf = []
119
+ self.ef = []
120
+
121
+ def title(self):
122
+ return "LoRA Block Weight"
123
+
124
+ def show(self, is_img2img):
125
+ return modules.scripts.AlwaysVisible
126
+
127
+ def ui(self, is_img2img):
128
+ LWEIGHTSPRESETS = DEF_WEIGHT_PRESET
129
+
130
+ runorigin = scripts.scripts_txt2img.run
131
+ runorigini = scripts.scripts_img2img.run
132
+
133
+ scriptpath = os.path.dirname(os.path.abspath(__file__))
134
+ path_root = scripts.basedir()
135
+
136
+ extpath = os.path.join(scriptpath, "lbwpresets.txt")
137
+ extpathe = os.path.join(scriptpath, "elempresets.txt")
138
+ filepath = os.path.join(path_root,"scripts", "lbwpresets.txt")
139
+ filepathe = os.path.join(path_root,"scripts", "elempresets.txt")
140
+
141
+ if os.path.isfile(filepath) and not os.path.isfile(extpath):
142
+ shutil.move(filepath,extpath)
143
+
144
+ if os.path.isfile(filepathe) and not os.path.isfile(extpathe):
145
+ shutil.move(filepathe,extpathe)
146
+
147
+ lbwpresets=""
148
+
149
+ try:
150
+ with open(extpath,encoding="utf-8") as f:
151
+ lbwpresets = f.read()
152
+ except OSError as e:
153
+ lbwpresets=LWEIGHTSPRESETS
154
+ if not os.path.isfile(extpath):
155
+ try:
156
+ with open(extpath,mode = 'w',encoding="utf-8") as f:
157
+ f.write(lbwpresets)
158
+ except:
159
+ pass
160
+
161
+ try:
162
+ with open(extpathe,encoding="utf-8") as f:
163
+ elempresets = f.read()
164
+ except OSError as e:
165
+ elempresets=ELEMPRESETS
166
+ if not os.path.isfile(extpathe):
167
+ try:
168
+ with open(extpathe,mode = 'w',encoding="utf-8") as f:
169
+ f.write(elempresets)
170
+ except:
171
+ pass
172
+
173
+ loraratios=lbwpresets.splitlines()
174
+ lratios={}
175
+ for i,l in enumerate(loraratios):
176
+ if checkloadcond(l) : continue
177
+ lratios[l.split(":")[0]]=l.split(":")[1]
178
+ ratiostags = [k for k in lratios.keys()]
179
+ ratiostags = ",".join(ratiostags)
180
+
181
+ if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None:
182
+ args = cmd_args.parser.parse_args()
183
+ else:
184
+ args, _ = cmd_args.parser.parse_known_args()
185
+ if args.api:
186
+ register()
187
+
188
+ with gr.Accordion(f"LoRA Block Weight : {active_i if is_img2img else active_t}",open = False) as acc:
189
+ with gr.Row():
190
+ with gr.Column(min_width = 50, scale=1):
191
+ lbw_useblocks = gr.Checkbox(value = True,label="Active",interactive =True,elem_id="lbw_active")
192
+ debug = gr.Checkbox(value = False,label="Debug",interactive =True,elem_id="lbw_debug")
193
+ with gr.Column(scale=5):
194
+ bw_ratiotags= gr.TextArea(label="",value=ratiostags,visible =True,interactive =True,elem_id="lbw_ratios")
195
+ with gr.Accordion("XYZ plot",open = False):
196
+ gr.HTML(value='<p style= "word-wrap:break-word;">changeable blocks : BASE,IN00,IN01,IN02,IN03,IN04,IN05,IN06,IN07,IN08,IN09,IN10,IN11,M00,OUT00,OUT01,OUT02,OUT03,OUT04,OUT05,OUT06,OUT07,OUT08,OUT09,OUT10,OUT11</p>')
197
+ xyzsetting = gr.Radio(label = "Active",choices = ["Disable","XYZ plot","Effective Block Analyzer"], value ="Disable",type = "index")
198
+ with gr.Row(visible = False) as esets:
199
+ diffcol = gr.Radio(label = "diff image color",choices = ["black","white"], value ="black",type = "value",interactive =True)
200
+ revxy = gr.Checkbox(value = False,label="change X-Y",interactive =True,elem_id="lbw_changexy")
201
+ thresh = gr.Textbox(label="difference threshold",lines=1,value="20",interactive =True,elem_id="diff_thr")
202
+ xtype = gr.Dropdown(label="X Types", choices=[x for x in ATYPES], value=ATYPES [2],interactive =True,elem_id="lbw_xtype")
203
+ xmen = gr.Textbox(label="X Values",lines=1,value="0,0.25,0.5,0.75,1",interactive =True,elem_id="lbw_xmen")
204
+ ytype = gr.Dropdown(label="Y Types", choices=[y for y in ATYPES], value=ATYPES [1],interactive =True,elem_id="lbw_ytype")
205
+ ymen = gr.Textbox(label="Y Values" ,lines=1,value="IN05-OUT05",interactive =True,elem_id="lbw_ymen")
206
+ ztype = gr.Dropdown(label="Z type", choices=[z for z in ATYPES], value=ATYPES[0],interactive =True,elem_id="lbw_ztype")
207
+ zmen = gr.Textbox(label="Z values",lines=1,value="",interactive =True,elem_id="lbw_zmen")
208
+
209
+ exmen = gr.Textbox(label="Range",lines=1,value="0.5,1",interactive =True,elem_id="lbw_exmen",visible = False)
210
+ eymen = gr.Textbox(label="Blocks (12ALL,17ALL,20ALL,26ALL also can be used)" ,lines=1,value="BASE,IN00,IN01,IN02,IN03,IN04,IN05,IN06,IN07,IN08,IN09,IN10,IN11,M00,OUT00,OUT01,OUT02,OUT03,OUT04,OUT05,OUT06,OUT07,OUT08,OUT09,OUT10,OUT11",interactive =True,elem_id="lbw_eymen",visible = False)
211
+ ecount = gr.Number(value=1, label="number of seed", interactive=True, visible = True)
212
+
213
+ with gr.Accordion("Weights setting",open = True):
214
+ with gr.Row():
215
+ reloadtext = gr.Button(value="Reload Presets",variant='primary',elem_id="lbw_reload")
216
+ reloadtags = gr.Button(value="Reload Tags",variant='primary',elem_id="lbw_reload")
217
+ savetext = gr.Button(value="Save Presets",variant='primary',elem_id="lbw_savetext")
218
+ openeditor = gr.Button(value="Open TextEditor",variant='primary',elem_id="lbw_openeditor")
219
+ lbw_loraratios = gr.TextArea(label="",value=lbwpresets,visible =True,interactive = True,elem_id="lbw_ratiospreset")
220
+
221
+ with gr.Accordion("Elemental",open = False):
222
+ with gr.Row():
223
+ e_reloadtext = gr.Button(value="Reload Presets",variant='primary',elem_id="lbw_reload")
224
+ e_savetext = gr.Button(value="Save Presets",variant='primary',elem_id="lbw_savetext")
225
+ e_openeditor = gr.Button(value="Open TextEditor",variant='primary',elem_id="lbw_openeditor")
226
+ elemsets = gr.Checkbox(value = False,label="print change",interactive =True,elem_id="lbw_print_change")
227
+ elemental = gr.TextArea(label="Identifer:BlockID:Elements:Ratio,...,separated by empty line ",value = elempresets,interactive =True,elem_id="element")
228
+
229
+ d_true = gr.Checkbox(value = True,visible = False)
230
+ d_false = gr.Checkbox(value = False,visible = False)
231
+
232
+ lbw_useblocks.change(fn=lambda x:gr.update(label = f"LoRA Block Weight : {'Active' if x else 'Not Active'}"),inputs=lbw_useblocks, outputs=[acc])
233
+
234
+ import subprocess
235
+ def openeditors(b):
236
+ path = extpath if b else extpathe
237
+ subprocess.Popen(['start', path], shell=True)
238
+
239
+ def reloadpresets(isweight):
240
+ if isweight:
241
+ try:
242
+ with open(extpath,encoding="utf-8") as f:
243
+ return f.read()
244
+ except OSError as e:
245
+ pass
246
+ else:
247
+ try:
248
+ with open(extpath,encoding="utf-8") as f:
249
+ return f.read()
250
+ except OSError as e:
251
+ pass
252
+
253
+ def tagdicter(presets):
254
+ presets=presets.splitlines()
255
+ wdict={}
256
+ for l in presets:
257
+ if checkloadcond(l) : continue
258
+ w=[]
259
+ if ":" in l :
260
+ key = l.split(":",1)[0]
261
+ w = l.split(":",1)[1]
262
+ if any(len([w for w in w.split(",")]) == x for x in BLOCKNUMS):
263
+ wdict[key.strip()]=w
264
+ return ",".join(list(wdict.keys()))
265
+
266
+ def savepresets(text,isweight):
267
+ if isweight:
268
+ with open(extpath,mode = 'w',encoding="utf-8") as f:
269
+ f.write(text)
270
+ else:
271
+ with open(extpathe,mode = 'w',encoding="utf-8") as f:
272
+ f.write(text)
273
+
274
+ reloadtext.click(fn=reloadpresets,inputs=[d_true],outputs=[lbw_loraratios])
275
+ reloadtags.click(fn=tagdicter,inputs=[lbw_loraratios],outputs=[bw_ratiotags])
276
+ savetext.click(fn=savepresets,inputs=[lbw_loraratios,d_true],outputs=[])
277
+ openeditor.click(fn=openeditors,inputs=[d_true],outputs=[])
278
+
279
+ e_reloadtext.click(fn=reloadpresets,inputs=[d_false],outputs=[elemental])
280
+ e_savetext.click(fn=savepresets,inputs=[elemental,d_false],outputs=[])
281
+ e_openeditor.click(fn=openeditors,inputs=[d_false],outputs=[])
282
+
283
+ def urawaza(active):
284
+ if active > 0:
285
+ register()
286
+ scripts.scripts_txt2img.run = newrun
287
+ scripts.scripts_img2img.run = newrun
288
+ if active == 1:return [*[gr.update(visible = True) for x in range(6)],*[gr.update(visible = False) for x in range(4)]]
289
+ else:return [*[gr.update(visible = False) for x in range(6)],*[gr.update(visible = True) for x in range(4)]]
290
+ else:
291
+ scripts.scripts_txt2img.run = runorigin
292
+ scripts.scripts_img2img.run = runorigini
293
+ return [*[gr.update(visible = True) for x in range(6)],*[gr.update(visible = False) for x in range(4)]]
294
+
295
+ xyzsetting.change(fn=urawaza,inputs=[xyzsetting],outputs =[xtype,xmen,ytype,ymen,ztype,zmen,exmen,eymen,ecount,esets])
296
+
297
+ return lbw_loraratios,lbw_useblocks,xyzsetting,xtype,xmen,ytype,ymen,ztype,zmen,exmen,eymen,ecount,diffcol,thresh,revxy,elemental,elemsets,debug
298
+
299
+ def process(self, p, loraratios,useblocks,xyzsetting,xtype,xmen,ytype,ymen,ztype,zmen,exmen,eymen,ecount,diffcol,thresh,revxy,elemental,elemsets,debug):
300
+ #print("self =",self,"p =",p,"presets =",loraratios,"useblocks =",useblocks,"xyzsettings =",xyzsetting,"xtype =",xtype,"xmen =",xmen,"ytype =",ytype,"ymen =",ymen,"ztype =",ztype,"zmen =",zmen)
301
+ #Note that this does not use the default arg syntax because the default args are supposed to be at the end of the function
302
+ if(loraratios == None):
303
+ loraratios = DEF_WEIGHT_PRESET
304
+ if(useblocks == None):
305
+ useblocks = True
306
+
307
+ lorachecker(self)
308
+ self.log["enable LBW"] = useblocks
309
+ self.log["registerd"] = registerd
310
+
311
+ if useblocks:
312
+ self.active = True
313
+ loraratios=loraratios.splitlines()
314
+ elemental = elemental.split("\n\n") if elemental is not None else []
315
+ lratios={}
316
+ elementals={}
317
+ for l in loraratios:
318
+ if checkloadcond(l) : continue
319
+ l0=l.split(":",1)[0]
320
+ lratios[l0.strip()]=l.split(":",1)[1]
321
+ for e in elemental:
322
+ if ":" not in e: continue
323
+ e0=e.split(":",1)[0]
324
+ elementals[e0.strip()]=e.split(":",1)[1]
325
+ if elemsets : print(xyelem)
326
+ if xyzsetting and "XYZ" in p.prompt:
327
+ lratios["XYZ"] = lxyz
328
+ lratios["ZYX"] = lzyx
329
+ if xyelem != "":
330
+ if "XYZ" in elementals.keys():
331
+ elementals["XYZ"] = elementals["XYZ"] + ","+ xyelem
332
+ else:
333
+ elementals["XYZ"] = xyelem
334
+ self.lratios = lratios
335
+ self.elementals = elementals
336
+ global princ
337
+ princ = elemsets
338
+
339
+ if not hasattr(self,"lbt_dr_callbacks"):
340
+ self.lbt_dr_callbacks = on_cfg_denoiser(self.denoiser_callback)
341
+
342
+ def denoiser_callback(self, params: CFGDenoiserParams):
343
+ def setparams(self, key, te, u ,sets):
344
+ for dicts in [self.lora,self.lycoris,self.networks]:
345
+ for lora in dicts:
346
+ if lora.name.split("_in_LBW_")[0] == key:
347
+ lora.te_multiplier = te
348
+ lora.unet_multiplier = u
349
+ sets.append(key)
350
+
351
+ if forge and self.active:
352
+ if params.sampling_step in self.startsf:
353
+ shared.sd_model.forge_objects.unet.unpatch_model(device_to=devices.device)
354
+ for key, vals in shared.sd_model.forge_objects.unet.patches.items():
355
+ n_vals = []
356
+ lvals = [val for val in vals if val[1][0] in LORAS]
357
+ for s, v, m, l, e in zip(self.startsf, lvals, self.uf, self.lf, self.ef):
358
+ if s is not None and s == params.sampling_step:
359
+ ratio, errormodules = ratiodealer(key.replace(".","_"), l, e)
360
+ n_vals.append((ratio * m, *v[1:]))
361
+ else:
362
+ n_vals.append(v)
363
+ shared.sd_model.forge_objects.unet.patches[key] = n_vals
364
+ shared.sd_model.forge_objects.unet.patch_model()
365
+
366
+ if params.sampling_step in self.stopsf:
367
+ shared.sd_model.forge_objects.unet.unpatch_model(device_to=devices.device)
368
+ for key, vals in shared.sd_model.forge_objects.unet.patches.items():
369
+ n_vals = []
370
+ lvals = [val for val in vals if val[1][0] in LORAS]
371
+ for s, v, m, l, e in zip(self.stopsf, lvals, self.uf, self.lf, self.ef):
372
+ if s is not None and s == params.sampling_step:
373
+ n_vals.append((0, *v[1:]))
374
+ else:
375
+ n_vals.append(v)
376
+ shared.sd_model.forge_objects.unet.patches[key] = n_vals
377
+ shared.sd_model.forge_objects.unet.patch_model()
378
+
379
+ elif self.active:
380
+ if self.starts and params.sampling_step == 0:
381
+ for key, step_te_u in self.starts.items():
382
+ setparams(self, key, 0, 0, [])
383
+ #print("\nstart 0", self, key, 0, 0, [])
384
+
385
+ if self.starts:
386
+ sets = []
387
+ for key, step_te_u in self.starts.items():
388
+ step, te, u = step_te_u
389
+ if params.sampling_step > step - 2:
390
+ setparams(self, key, te, u, sets)
391
+ #print("\nstart", self, key, u, te, sets)
392
+ for key in sets:
393
+ del self.starts[key]
394
+
395
+ if self.stops:
396
+ sets = []
397
+ for key, step in self.stops.items():
398
+ if params.sampling_step > step - 2:
399
+ setparams(self, key, 0, 0, sets)
400
+ #print("\nstop", self, key, 0, 0, sets)
401
+ for key in sets:
402
+ del self.stops[key]
403
+
404
+ def before_process_batch(self, p, loraratios,useblocks,*args,**kwargs):
405
+ if useblocks:
406
+ resetmemory()
407
+ if not self.isnet: p.disable_extra_networks = False
408
+ global prompts
409
+ prompts = kwargs["prompts"].copy()
410
+
411
+ def process_batch(self, p, loraratios,useblocks,*args,**kwargs):
412
+ if useblocks:
413
+ if not self.isnet: p.disable_extra_networks = True
414
+
415
+ o_prompts = [p.prompt]
416
+ for prompt in prompts:
417
+ if "<lora" in prompt or "<lyco" in prompt:
418
+ o_prompts = prompts.copy()
419
+ if not self.isnet: loradealer(self, o_prompts ,self.lratios,self.elementals)
420
+
421
+ def postprocess(self, p, processed, presets,useblocks,xyzsetting,xtype,xmen,ytype,ymen,ztype,zmen,exmen,eymen,ecount,diffcol,thresh,revxy,elemental,elemsets,debug,*args):
422
+ if not useblocks:
423
+ return
424
+ lora = importer(self)
425
+ emb_db = sd_hijack.model_hijack.embedding_db
426
+
427
+ for net in lora.loaded_loras:
428
+ if hasattr(net,"bundle_embeddings"):
429
+ for emb_name, embedding in net.bundle_embeddings.items():
430
+ if embedding.loaded:
431
+ emb_db.register_embedding_by_name(None, shared.sd_model, emb_name)
432
+
433
+ lora.loaded_loras.clear()
434
+
435
+ if forge:
436
+ sd_models.model_data.get_sd_model().current_lora_hash = None
437
+ shared.sd_model.forge_objects_after_applying_lora.unet.unpatch_model()
438
+ shared.sd_model.forge_objects_after_applying_lora.clip.patcher.unpatch_model()
439
+
440
+ global lxyz,lzyx,xyelem
441
+ lxyz = lzyx = xyelem = ""
442
+ if debug:
443
+ print(self.log)
444
+ gc.collect()
445
+
446
+ def after_extra_networks_activate(self, p, presets,useblocks, *args, **kwargs):
447
+ if useblocks:
448
+ loradealer(self, kwargs["prompts"] ,self.lratios,self.elementals,kwargs["extra_network_data"])
449
+
450
+ def run(self,p,presets,useblocks,xyzsetting,xtype,xmen,ytype,ymen,ztype,zmen,exmen,eymen,ecount,diffcol,thresh,revxy,elemental,elemsets,debug):
451
+ if not useblocks:
452
+ return
453
+ self.__init__()
454
+ self.log["pass XYZ"] = True
455
+ self.log["XYZsets"] = xyzsetting
456
+ self.log["enable LBW"] = useblocks
457
+
458
+ if xyzsetting >0:
459
+ lorachecker(self)
460
+ lora = importer(self)
461
+ loraratios=presets.splitlines()
462
+ lratios={}
463
+ for l in loraratios:
464
+ if checkloadcond(l) : continue
465
+ l0=l.split(":",1)[0]
466
+ lratios[l0.strip()]=l.split(":",1)[1]
467
+
468
+ if "XYZ" in p.prompt:
469
+ base = lratios["XYZ"] if "XYZ" in lratios.keys() else "1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1"
470
+ else: return
471
+
472
+ for i, all in enumerate(["12ALL","17ALL","20ALL","26ALL"]):
473
+ if eymen == all:
474
+ eymen = ",".join(BLOCKIDS[i])
475
+
476
+ if xyzsetting > 1:
477
+ xmen,ymen = exmen,eymen
478
+ xtype,ytype = "values","ID"
479
+ ebase = xmen.split(",")[1]
480
+ ebase = [ebase.strip()]*26
481
+ base = ",".join(ebase)
482
+ ztype = ""
483
+ if ecount > 1:
484
+ ztype = "seed"
485
+ zmen = ",".join([str(random.randrange(4294967294)) for x in range(int(ecount))])
486
+
487
+ #ATYPES =["none","Block ID","values","seed","Base Weights"]
488
+
489
+ def dicedealer(am):
490
+ for i,a in enumerate(am):
491
+ if a =="-1": am[i] = str(random.randrange(4294967294))
492
+ print(f"the die was thrown : {am}")
493
+
494
+ if p.seed == -1: p.seed = str(random.randrange(4294967294))
495
+
496
+ #print(f"xs:{xmen},ys:{ymen},zs:{zmen}")
497
+
498
+ def adjuster(a,at):
499
+ if "none" in at:a = ""
500
+ a = [a.strip() for a in a.split(',')]
501
+ if "seed" in at:dicedealer(a)
502
+ return a
503
+
504
+ xs = adjuster(xmen,xtype)
505
+ ys = adjuster(ymen,ytype)
506
+ zs = adjuster(zmen,ztype)
507
+
508
+ ids = alpha =seed = ""
509
+ p.batch_size = 1
510
+
511
+ print(f"xs:{xs},ys:{ys},zs:{zs}")
512
+
513
+ images = []
514
+
515
+ def weightsdealer(alpha,ids,base):
516
+ #print(f"weights from : {base}")
517
+ ids = [z.strip() for z in ids.split(' ')]
518
+ weights_t = [w.strip() for w in base.split(',')]
519
+ blockid = BLOCKIDS[BLOCKNUMS.index(len(weights_t))]
520
+ if ids[0]!="NOT":
521
+ flagger=[False]*len(weights_t)
522
+ changer = True
523
+ else:
524
+ flagger=[True]*len(weights_t)
525
+ changer = False
526
+ for id in ids:
527
+ if id =="NOT":continue
528
+ if "-" in id:
529
+ it = [it.strip() for it in id.split('-')]
530
+ if blockid.index(it[1]) > blockid.index(it[0]):
531
+ flagger[blockid.index(it[0]):blockid.index(it[1])+1] = [changer]*(blockid.index(it[1])-blockid.index(it[0])+1)
532
+ else:
533
+ flagger[blockid.index(it[1]):blockid.index(it[0])+1] = [changer]*(blockid.index(it[0])-blockid.index(it[1])+1)
534
+ else:
535
+ flagger[blockid.index(id)] =changer
536
+ for i,f in enumerate(flagger):
537
+ if f:weights_t[i]=alpha
538
+ outext = ",".join(weights_t)
539
+ #print(f"weights changed: {outext}")
540
+ return outext
541
+
542
+ generatedbases=[]
543
+ def xyzdealer(a,at):
544
+ nonlocal ids,alpha,p,base,c_base,generatedbases
545
+ if "ID" in at:return
546
+ if "values" in at:alpha = a
547
+ if "seed" in at:
548
+ p.seed = int(a)
549
+ generatedbases=[]
550
+ if "Weights" in at:base =c_base = lratios[a]
551
+ if "elements" in at:
552
+ global xyelem
553
+ xyelem = a
554
+
555
+ def imagedupewatcher(baselist,basetocheck,currentiteration):
556
+ for idx,alreadygenerated in enumerate(baselist):
557
+ if (basetocheck == alreadygenerated):
558
+ # E.g., we already generated IND+OUTS and this is now OUTS+IND with identical weights.
559
+ baselist.insert(currentiteration-1, basetocheck)
560
+ return idx
561
+ return -1
562
+
563
+ def strThree(someNumber): # Returns 1.12345 as 1.123 and 1.0000 as 1
564
+ return format(someNumber, ".3f").rstrip('0').rstrip('.')
565
+
566
+ # Adds X and Y together using array addition.
567
+ # If both X and Y have a value in the same block then Y's is set to 0;
568
+ # both values are used due to both XY and YX being generated, but the diagonal then only show the first value.
569
+ # imagedupwatcher prevents duplicate images from being generated;
570
+ # when X and Y have non-overlapping blocks then the upper triangular images are identical to the lower ones.
571
+ def xyoriginalweightsdealer(x,y):
572
+ xweights = np.asarray(lratios[x].split(','), dtype=np.float32) # np array easier to add later
573
+ yweights = np.asarray(lratios[y].split(','), dtype=np.float32)
574
+ for idx,xval in np.ndenumerate(xweights):
575
+ yval = yweights[idx]
576
+ if xval != 0 and yval != 0:
577
+ yweights[idx] = 0
578
+ # Add xweights to yweights, round to 3 places,
579
+ # map floats to string with format of 3 decimals trailing zeroes and decimal stripped
580
+ baseListToStrings = list(map(strThree, np.around(np.add(xweights,yweights,),3).tolist()))
581
+ return ",".join(baseListToStrings)
582
+
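
The comment above describes the X+Y combination rule: the two weight vectors are added element-wise, but wherever both presets set a non-zero value for the same block, Y's value is zeroed so only X's survives. A tiny standalone illustration (small vectors for brevity, not part of the uploaded script):

```python
# Sketch of the X+Y combination described above.
x = [1.0, 0.0, 0.5, 0.0]
y = [0.2, 1.0, 1.0, 0.0]
combined = [xv if xv != 0 and yv != 0 else xv + yv for xv, yv in zip(x, y)]
print(combined)  # [1.0, 1.0, 0.5, 0.0] -> the colliding third slot keeps X's 0.5
```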
583
+ grids = []
584
+ images =[]
585
+
586
+ totalcount = len(xs)*len(ys)*len(zs) if xyzsetting < 2 else len(xs)*len(ys)*len(zs) //2 +1
587
+ shared.total_tqdm.updateTotal(totalcount)
588
+ xc = yc =zc = 0
589
+ state.job_count = totalcount
590
+ totalcount = len(xs)*len(ys)*len(zs)
591
+ c_base = base
592
+
593
+ for z in zs:
594
+ generatedbases=[]
595
+ images = []
596
+ yc = 0
597
+ xyzdealer(z,ztype)
598
+ for y in ys:
599
+ xc = 0
600
+ xyzdealer(y,ytype)
601
+ for x in xs:
602
+ xyzdealer(x,xtype)
603
+ if "Weights" in xtype and "Weights" in ytype:
604
+ c_base = xyoriginalweightsdealer(x,y)
605
+ else:
606
+ if "ID" in xtype:
607
+ if "values" in ytype:c_base = weightsdealer(y,x,base)
608
+ if "values" in ztype:c_base = weightsdealer(z,x,base)
609
+ if "ID" in ytype:
610
+ if "values" in xtype:c_base = weightsdealer(x,y,base)
611
+ if "values" in ztype:c_base = weightsdealer(z,y,base)
612
+ if "ID" in ztype:
613
+ if "values" in xtype:c_base = weightsdealer(x,z,base)
614
+ if "values" in ytype:c_base = weightsdealer(y,z,base)
615
+
616
+ iteration = len(xs)*len(ys)*zc + yc*len(xs) +xc +1
617
+ print(f"X:{xtype}, {x},Y: {ytype},{y}, Z:{ztype},{z}, base:{c_base} ({iteration}/{totalcount})")
618
+
619
+ dupe_index = imagedupewatcher(generatedbases,c_base,iteration)
620
+ if dupe_index > -1:
621
+ print(f"Skipping generation of duplicate base:{c_base}")
622
+ images.append(images[dupe_index].copy())
623
+ xc += 1
624
+ continue
625
+
626
+ global lxyz,lzyx
627
+ lxyz = c_base
628
+
629
+ cr_base = c_base.split(",")
630
+ cr_base_t=[]
631
+ for x in cr_base:
632
+ if not identifier(x):
633
+ cr_base_t.append(str(1-float(x)))
634
+ else:
635
+ cr_base_t.append(x)
636
+ lzyx = ",".join(cr_base_t)
637
+
638
+ if not(xc == 1 and not (yc ==0 ) and xyzsetting >1):
639
+ lora.loaded_loras.clear()
640
+ p.cached_c = [None,None]
641
+ p.cached_uc = [None,None]
642
+ p.cached_hr_c = [None, None]
643
+ p.cached_hr_uc = [None, None]
644
+ processed:Processed = process_images(p)
645
+ images.append(processed.images[0])
646
+ generatedbases.insert(iteration-1, c_base)
647
+ xc += 1
648
+ yc += 1
649
+ zc += 1
650
+ origin = loranames(processed.all_prompts) + ", "+ znamer(ztype,z,base)
651
+ images,xst,yst = effectivechecker(images,xs.copy(),ys.copy(),diffcol,thresh,revxy) if xyzsetting >1 else (images,xs.copy(),ys.copy())
652
+ grids.append(smakegrid(images,xst,yst,origin,p))
653
+ processed.images= grids
654
+ lora.loaded_loras.clear()
655
+ return processed
656
+
657
+ def identifier(char):
658
+ return char[0] in ["R", "U", "X"]
659
+
660
+ def znamer(at,a,base):
661
+ if "ID" in at:return f"Block : {a}"
662
+ if "values" in at:return f"value : {a}"
663
+ if "seed" in at:return f"seed : {a}"
664
+ if "Weights" in at:return f"original weights :\n {base}"
665
+ else: return ""
666
+
667
+ def loranames(all_prompts):
668
+ _, extra_network_data = extra_networks.parse_prompts(all_prompts[0:1])
669
+ calledloras = extra_network_data["lora"] if "lyco" not in extra_network_data.keys() else extra_network_data["lyco"]
670
+ names = ""
671
+ for called in calledloras:
672
+ if len(called.items) <3:continue
673
+ names += called.items[0]
674
+ return names
675
+
676
+ def lorachecker(self):
677
+ try:
678
+ import networks
679
+ self.isnet = True
680
+ self.layer_name = "network_layer_name"
681
+ except:
682
+ self.isnet = False
683
+ self.layer_name = "lora_layer_name"
684
+ try:
685
+ import lora
686
+ self.islora = True
687
+ except:
688
+ pass
689
+ try:
690
+ import lycoris
691
+ self.islyco = True
692
+ except:
693
+ pass
694
+ self.onlyco = (not self.islora) and self.islyco
695
+ self.isxl = hasattr(shared.sd_model,"conditioner")
696
+
697
+ self.log["isnet"] = self.isnet
698
+ self.log["isxl"] = self.isxl
699
+ self.log["islora"] = self.islora
700
+
701
+ def resetmemory():
702
+ try:
703
+ import networks as nets
704
+ nets.networks_in_memory = {}
705
+ gc.collect()
706
+
707
+ except:
708
+ pass
709
+
710
+ def importer(self):
711
+ if self.onlyco:
712
+ # Dynamically import the lycoris module
713
+ lora_module = importlib.import_module("lycoris")
714
+ return lora_module
715
+ else:
716
+ # Dynamically import the lora module
717
+ lora_module = importlib.import_module("lora")
718
+ return lora_module
719
+
720
+ def loradealer(self, prompts,lratios,elementals, extra_network_data = None):
721
+ if extra_network_data is None:
722
+ _, extra_network_data = extra_networks.parse_prompts(prompts)
723
+ moduletypes = extra_network_data.keys()
724
+
725
+ for ltype in moduletypes:
726
+ lorans = []
727
+ lorars = []
728
+ te_multipliers = []
729
+ unet_multipliers = []
730
+ elements = []
731
+ starts = []
732
+ stops = []
733
+ fparams = []
734
+ load = False
735
+ go_lbw = False
736
+
737
+ if not (ltype == "lora" or ltype == "lyco") : continue
738
+ for called in extra_network_data[ltype]:
739
+ items = called.items
740
+ setnow = False
741
+ name = items[0]
742
+ te = syntaxdealer(items,"te=",1)
743
+ unet = syntaxdealer(items,"unet=",2)
744
+ te,unet = multidealer(te,unet)
745
+
746
+ weights = syntaxdealer(items,"lbw=",2) if syntaxdealer(items,"lbw=",2) is not None else syntaxdealer(items,"w=",2)
747
+ elem = syntaxdealer(items, "lbwe=",3)
748
+ start = syntaxdealer(items,"start=",None)
749
+ stop = syntaxdealer(items,"stop=",None)
750
+ start, stop = stepsdealer(syntaxdealer(items,"step=",None), start, stop)
751
+
752
+ if weights is not None and (weights in lratios or any(weights.count(",") == x - 1 for x in BLOCKNUMS)):
753
+ wei = lratios[weights] if weights in lratios else weights
754
+ ratios = [w.strip() for w in wei.split(",")]
755
+ for i,r in enumerate(ratios):
756
+ if r =="R":
757
+ ratios[i] = round(random.random(),3)
758
+ elif r == "U":
759
+ ratios[i] = round(random.uniform(-0.5,1.5),3)
760
+ elif r[0] == "X":
761
+ base = syntaxdealer(items,"x=", 3) if len(items) >= 4 else 1
762
+ ratios[i] = getinheritedweight(base, r)
763
+ else:
764
+ ratios[i] = float(r)
765
+
766
+ if len(ratios) != 26:
767
+ ratios = to26(ratios)
768
+ setnow = True
769
+ else:
770
+ ratios = [1] * 26
771
+
772
+ if elem in elementals:
773
+ setnow = True
774
+ elem = elementals[elem]
775
+ else:
776
+ elem = ""
777
+
778
+ if setnow:
779
+ go_lbw = True
780
+ fparams.append([unet,ratios,elem])
781
+ settolist([lorans,te_multipliers,unet_multipliers,lorars,elements,starts,stops],[name,te,unet,ratios,elem,start,stop])
782
+
783
+ if start:
784
+ self.starts[name] = [int(start),te,unet]
785
+ self.log["starts"] = load = True
786
+
787
+ if stop:
788
+ self.stops[name] = int(stop)
789
+ self.log["stops"] = load = True
790
+
791
+ self.startsf = [int(s) if s is not None else None for s in starts]
792
+ self.stopsf = [int(s) if s is not None else None for s in stops]
793
+ self.uf = unet_multipliers
794
+ self.lf = lorars
795
+ self.ef = elements
796
+
797
+ if self.isnet: ltype = "nets"
798
+ if forge: ltype = "forge"
799
+ if go_lbw or load: load_loras_blocks(self, lorans,lorars,te_multipliers,unet_multipliers,elements,ltype, starts=starts)
800
+
801
+ def stepsdealer(step, start, stop):
802
+ if step is None or "-" not in step:
803
+ return start, stop
804
+ return step.split("-")
805
+
806
+ def settolist(ls,vs):
807
+ for l, v in zip(ls,vs):
808
+ l.append(v)
809
+
810
+ def syntaxdealer(items,target,index): #target: "unet=", "x=", "lbwe="
811
+ for item in items:
812
+ if target in item:
813
+ return item.replace(target,"")
814
+ if index is None or index + 1> len(items): return None
815
+ if "=" in items[index]:return None
816
+ return items[index] if "@" not in items[index] else 1
817
+
818
+ def isfloat(t):
819
+ try:
820
+ float(t)
821
+ return True
822
+ except:
823
+ return False
824
+
825
+ def multidealer(t, u):
826
+ if t is None and u is None:
827
+ return 1,1
828
+ elif t is None:
829
+ return float(u),float(u)
830
+ elif u is None:
831
+ return float(t), float(t)
832
+ else:
833
+ return float(t),float(u)
834
+
835
+ re_inherited_weight = re.compile(r"X([+-])?([\d.]+)?")
836
+
837
+ def getinheritedweight(weight, offset):
838
+ match = re_inherited_weight.search(offset)
839
+ if match.group(1) == "+":
840
+ return float(weight) + float(match.group(2))
841
+ elif match.group(1) == "-":
842
+ return float(weight) - float(match.group(2))
843
+ else:
844
+ return float(weight)
845
+
846
+ def load_loras_blocks(self, names, lwei,te,unet,elements,ltype = "lora", starts = None):
847
+ oldnew=[]
848
+ if "lora" == ltype:
849
+ lora = importer(self)
850
+ self.lora = lora.loaded_loras
851
+ for loaded in lora.loaded_loras:
852
+ for n, name in enumerate(names):
853
+ if name == loaded.name:
854
+ if lwei[n] == [1] * 26 and elements[n] == "": continue
855
+ lbw(loaded,lwei[n],elements[n])
856
+ setall(loaded,te[n],unet[n])
857
+ newname = loaded.name +"_in_LBW_"+ str(round(random.random(),3))
858
+ oldname = loaded.name
859
+ loaded.name = newname
860
+ oldnew.append([oldname,newname])
861
+
862
+ elif "lyco" == ltype:
863
+ import lycoris as lycomo
864
+ self.lycoris = lycomo.loaded_lycos
865
+ for loaded in lycomo.loaded_lycos:
866
+ for n, name in enumerate(names):
867
+ if name == loaded.name:
868
+ lbw(loaded,lwei[n],elements[n])
869
+ setall(loaded,te[n],unet[n])
870
+
871
+ elif "nets" == ltype:
872
+ import networks as nets
873
+ self.networks = nets.loaded_networks
874
+ for loaded in nets.loaded_networks:
875
+ for n, name in enumerate(names):
876
+ if name == loaded.name:
877
+ lbw(loaded,lwei[n],elements[n])
878
+ setall(loaded,te[n],unet[n])
879
+
880
+ elif "forge" == ltype:
881
+ lbwf(te, unet, lwei, elements, starts)
882
+
883
+ try:
884
+ import lora_ctl_network as ctl
885
+ for old,new in oldnew:
886
+ if old in ctl.lora_weights.keys():
887
+ ctl.lora_weights[new] = ctl.lora_weights[old]
888
+ except:
889
+ pass
890
+
891
+ def setall(m,te,unet):
892
+ m.name = m.name + "_in_LBW_"+ str(round(random.random(),3))
893
+ m.te_multiplier = te
894
+ m.unet_multiplier = unet
895
+ m.multiplier = unet
896
+
897
+ def smakegrid(imgs,xs,ys,currentmodel,p):
898
+ ver_texts = [[images.GridAnnotation(y)] for y in ys]
899
+ hor_texts = [[images.GridAnnotation(x)] for x in xs]
900
+
901
+ w, h = imgs[0].size
902
+ grid = Image.new('RGB', size=(len(xs) * w, len(ys) * h), color='black')
903
+
904
+ for i, img in enumerate(imgs):
905
+ grid.paste(img, box=(i % len(xs) * w, i // len(xs) * h))
906
+
907
+ grid = images.draw_grid_annotations(grid,w, h, hor_texts, ver_texts)
908
+ grid = draw_origin(grid, currentmodel,w*len(xs),h*len(ys),w)
909
+ if opts.grid_save:
910
+ images.save_image(grid, opts.outdir_txt2img_grids, "xy_grid", extension=opts.grid_format, prompt=p.prompt, seed=p.seed, grid=True, p=p)
911
+
912
+ return grid
913
+
914
+ def get_font(fontsize):
915
+ fontpath = os.path.join(scriptpath, "Roboto-Regular.ttf")
916
+ try:
917
+ return ImageFont.truetype(opts.font or fontpath, fontsize)
918
+ except Exception:
919
+ return ImageFont.truetype(fontpath, fontsize)
920
+
921
+ def draw_origin(grid, text,width,height,width_one):
922
+ grid_d= Image.new("RGB", (grid.width,grid.height), "white")
923
+ grid_d.paste(grid,(0,0))
924
+
925
+ d= ImageDraw.Draw(grid_d)
926
+ color_active = (0, 0, 0)
927
+ fontsize = (width+height)//25
928
+ fnt = get_font(fontsize)
929
+
930
+ if grid.width != width_one:
931
+ while d.multiline_textsize(text, font=fnt)[0] > width_one*0.75 and fontsize > 0:
932
+ fontsize -=1
933
+ fnt = get_font(fontsize)
934
+ d.multiline_text((0,0), text, font=fnt, fill=color_active,align="center")
935
+ return grid_d
936
+
937
+ def newrun(p, *args):
938
+ script_index = args[0]
939
+
940
+ if args[0] ==0:
941
+ script = None
942
+ for obj in scripts.scripts_txt2img.alwayson_scripts:
943
+ if "lora_block_weight" in obj.filename:
944
+ script = obj
945
+ script_args = args[script.args_from:script.args_to]
946
+ else:
947
+ script = scripts.scripts_txt2img.selectable_scripts[script_index-1]
948
+
949
+ if script is None:
950
+ return None
951
+
952
+ script_args = args[script.args_from:script.args_to]
953
+
954
+ processed = script.run(p, *script_args)
955
+
956
+ shared.total_tqdm.clear()
957
+
958
+ return processed
959
+
960
+ registerd = False
961
+
962
+ def register():
963
+ global registerd
964
+ registerd = True
965
+ for obj in scripts.scripts_txt2img.alwayson_scripts:
966
+ if "lora_block_weight" in obj.filename:
967
+ if obj not in scripts.scripts_txt2img.selectable_scripts:
968
+ scripts.scripts_txt2img.selectable_scripts.append(obj)
969
+ scripts.scripts_txt2img.titles.append("LoRA Block Weight")
970
+ for obj in scripts.scripts_img2img.alwayson_scripts:
971
+ if "lora_block_weight" in obj.filename:
972
+ if obj not in scripts.scripts_img2img.selectable_scripts:
973
+ scripts.scripts_img2img.selectable_scripts.append(obj)
974
+ scripts.scripts_img2img.titles.append("LoRA Block Weight")
975
+
976
+ def effectivechecker(imgs,ss,ls,diffcol,thresh,revxy):
977
+ orig = imgs[1]
978
+ imgs = imgs[::2]
979
+ diffs = []
980
+ outnum =[]
981
+
982
+ for img in imgs:
983
+ abs_diff = cv2.absdiff(np.array(img) , np.array(orig))
984
+
985
+ abs_diff_t = cv2.threshold(abs_diff, int(thresh), 255, cv2.THRESH_BINARY)[1]
986
+ res = abs_diff_t.astype(np.uint8)
987
+ percentage = (np.count_nonzero(res) * 100)/ res.size
988
+ if "white" in diffcol: abs_diff = cv2.bitwise_not(abs_diff)
989
+ outnum.append(percentage)
990
+
991
+ abs_diff = Image.fromarray(abs_diff)
992
+
993
+ diffs.append(abs_diff)
994
+
995
+ outs = []
996
+ for i in range(len(ls)):
997
+ ls[i] = ls[i] + "\n Diff : " + str(round(outnum[i],3)) + "%"
998
+
999
+ if not revxy:
1000
+ for diff,img in zip(diffs,imgs):
1001
+ outs.append(diff)
1002
+ outs.append(img)
1003
+ outs.append(orig)
1004
+ ss = ["diff",ss[0],"source"]
1005
+ return outs,ss,ls
1006
+ else:
1007
+ outs = [orig]*len(diffs) + imgs + diffs
1008
+ ss = ["source",ss[0],"diff"]
1009
+ return outs,ls,ss
1010
+
1011
+ def lbw(lora,lwei,elemental):
1012
+ elemental = elemental.split(",")
1013
+ for key in lora.modules.keys():
1014
+ ratio, errormodules = ratiodealer(key, lwei, elemental)
1015
+
1016
+ ltype = type(lora.modules[key]).__name__
1017
+ set = False
1018
+ if ltype in LORAANDSOON.keys():
1019
+ if "OFT" not in ltype:
1020
+ setattr(lora.modules[key],LORAANDSOON[ltype],torch.nn.Parameter(getattr(lora.modules[key],LORAANDSOON[ltype]) * ratio))
1021
+ else:
1022
+ setattr(lora.modules[key],LORAANDSOON[ltype],getattr(lora.modules[key],LORAANDSOON[ltype]) * ratio)
1023
+ set = True
1024
+ else:
1025
+ if hasattr(lora.modules[key],"up_model"):
1026
+ lora.modules[key].up_model.weight= torch.nn.Parameter(lora.modules[key].up_model.weight *ratio)
1027
+ #print("LoRA using LoCON")
1028
+ set = True
1029
+ else:
1030
+ lora.modules[key].up.weight= torch.nn.Parameter(lora.modules[key].up.weight *ratio)
1031
+ #print("LoRA")
1032
+ set = True
1033
+ if not set :
1034
+ print("unknown LoRA")
1035
+
1036
+ if len(errormodules) > 0:
1037
+ print(errormodules)
1038
+ return lora
1039
+
1040
+ LORAS = ["lora", "loha", "lokr"]
1041
+
1042
+ def lbwf(mt, mu, lwei, elemental, starts):
1043
+ for key, vals in shared.sd_model.forge_objects_after_applying_lora.unet.patches.items():
1044
+ n_vals = []
1045
+ lvals = [val for val in vals if val[1][0] in LORAS]
1046
+ for v, m, l, e ,s in zip(lvals, mu, lwei, elemental, starts):
1047
+ ratio, errormodules = ratiodealer(key.replace(".","_"), l, e)
1048
+ n_vals.append((ratio * m if s is None else 0, *v[1:]))
1049
+ shared.sd_model.forge_objects_after_applying_lora.unet.patches[key] = n_vals
1050
+
1051
+ for key, vals in shared.sd_model.forge_objects_after_applying_lora.clip.patcher.patches.items():
1052
+ n_vals = []
1053
+ lvals = [val for val in vals if val[1][0] in LORAS]
1054
+ for v, m, l, e in zip(lvals, mt, lwei, elemental):
1055
+ ratio, errormodules = ratiodealer(key.replace(".","_"), l, e)
1056
+ n_vals.append((ratio * m, *v[1:]))
1057
+ shared.sd_model.forge_objects_after_applying_lora.clip.patcher.patches[key] = n_vals
1058
+
1059
+ def ratiodealer(key, lwei, elemental):
1060
+ ratio = 1
1061
+ picked = False
1062
+ errormodules = []
1063
+ currentblock = 0
1064
+
1065
+ for i,block in enumerate(BLOCKS):
1066
+ if block in key:
1067
+ if i == 26:
1068
+ i = 0
1069
+ ratio = lwei[i]
1070
+ picked = True
1071
+ currentblock = i
1072
+
1073
+ if not picked:
1074
+ errormodules.append(key)
1075
+
1076
+ if len(elemental) > 0:
1077
+ skey = key + BLOCKID26[currentblock]
1078
+ for d in elemental:
1079
+ if d.count(":") != 2 :continue
1080
+ dbs,dws,dr = (hyphener(d.split(":")[0]),d.split(":")[1],d.split(":")[2])
1081
+ dbs,dws = (dbs.split(" "), dws.split(" "))
1082
+ dbn,dbs = (True,dbs[1:]) if dbs[0] == "NOT" else (False,dbs)
1083
+ dwn,dws = (True,dws[1:]) if dws[0] == "NOT" else (False,dws)
1084
+ flag = dbn
1085
+ for db in dbs:
1086
+ if db in skey:
1087
+ flag = not dbn
1088
+ if flag:flag = dwn
1089
+ else:continue
1090
+ for dw in dws:
1091
+ if dw in skey:
1092
+ flag = not dwn
1093
+ if flag:
1094
+ dr = float(dr)
1095
+ if princ :print(dbs,dws,key,dr)
1096
+ ratio = dr
1097
+
1098
+ return ratio, errormodules
1099
+
1100
+ LORAANDSOON = {
1101
+ "LoraHadaModule" : "w1a",
1102
+ "LycoHadaModule" : "w1a",
1103
+ "NetworkModuleHada": "w1a",
1104
+ "FullModule" : "weight",
1105
+ "NetworkModuleFull": "weight",
1106
+ "IA3Module" : "w",
1107
+ "NetworkModuleIa3" : "w",
1108
+ "LoraKronModule" : "w1",
1109
+ "LycoKronModule" : "w1",
1110
+ "NetworkModuleLokr": "w1",
1111
+ "NetworkModuleGLora": "w1a",
1112
+ "NetworkModuleNorm": "w_norm",
1113
+ "NetworkModuleOFT": "scale"
1114
+ }
1115
+
1116
+ def hyphener(t):
1117
+ t = t.split(" ")
1118
+ for i,e in enumerate(t):
1119
+ if "-" in e:
1120
+ e = e.split("-")
1121
+ if BLOCKID26.index(e[1]) > BLOCKID26.index(e[0]):
1122
+ t[i] = " ".join(BLOCKID26[BLOCKID26.index(e[0]):BLOCKID26.index(e[1])+1])
1123
+ else:
1124
+ t[i] = " ".join(BLOCKID26[BLOCKID26.index(e[1]):BLOCKID26.index(e[0])+1])
1125
+ return " ".join(t)
1126
+
1127
+ ELEMPRESETS="\
1128
+ ATTNDEEPON:IN05-OUT05:attn:1\n\n\
1129
+ ATTNDEEPOFF:IN05-OUT05:attn:0\n\n\
1130
+ PROJDEEPOFF:IN05-OUT05:proj:0\n\n\
1131
+ XYZ:::1"
1132
+
1133
+ def to26(ratios):
1134
+ ids = BLOCKIDS[BLOCKNUMS.index(len(ratios))]
1135
+ output = [0]*26
1136
+ for i, id in enumerate(ids):
1137
+ output[BLOCKID26.index(id)] = ratios[i]
1138
+ return output
1139
+
1140
+ def checkloadcond(l:str)->bool:
1141
+ # This conditional branch checks whether a loaded line conforms to the Block Weight preset format.
+ # The line must contain ":" and hold one of the supported comma counts (11, 16, 19 or 25,
+ # i.e. 12/17/20/26 block weights, covering LoRA, LyCORIS and SDXL layouts), and it must not be
+ # a comment line (e.g., "# foobar"). In other words, to comment a line out it must start with "# ".
1150
+ res=(":" not in l) or (not any(l.count(",") == x - 1 for x in BLOCKNUMS)) or ("#" in l)
1151
+ #print("[debug]", res,repr(l))
1152
+ return res
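
`checkloadcond()` above accepts a preset line only if it contains ":" and one of the supported comma counts (11, 16, 19 or 25, i.e. 12/17/20/26 weights). The sketch below (standalone, not part of the uploaded script) shows how a 17-weight preset such as `MIDD` is expanded onto the 26-block layout, mirroring what `to26()` does with `BLOCKID17`/`BLOCKID26`.

```python
# Hedged sketch: expand a 17-value preset line onto the 26 block IDs, as to26() does.
BLOCKID26 = ["BASE","IN00","IN01","IN02","IN03","IN04","IN05","IN06","IN07","IN08","IN09","IN10","IN11",
             "M00","OUT00","OUT01","OUT02","OUT03","OUT04","OUT05","OUT06","OUT07","OUT08","OUT09","OUT10","OUT11"]
BLOCKID17 = ["BASE","IN01","IN02","IN04","IN05","IN07","IN08","M00",
             "OUT03","OUT04","OUT05","OUT06","OUT07","OUT08","OUT09","OUT10","OUT11"]

preset = "MIDD:1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0"      # 17 weights -> SD1.x layout
name, weights = preset.split(":", 1)
ratios = [float(w) for w in weights.split(",")]

full = [0.0] * 26                                       # blocks not covered by the preset stay at 0
for block_id, ratio in zip(BLOCKID17, ratios):
    full[BLOCKID26.index(block_id)] = ratio

print(name, full)
```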
Stable-diffusion/ui/nenen88.py ADDED
@@ -0,0 +1,52 @@
1
+ import os, sys, time, shlex, subprocess
2
+ from IPython import get_ipython
3
+ from pathlib import Path
4
+
5
+ xxx = Path("/kaggle/working")
6
+ zzz = Path("/kaggle/working/asd")
7
+ tmp = Path("/kaggle/temp")
8
+
9
+ pantat = f"curl -sLo {xxx}/pantat88.py https://raw.githubusercontent.com/gutris1/segsmaker/main/kaggle/script/pantat88.py"
10
+ get_ipython().system(pantat)
11
+
12
+ sys.path.append(str(xxx))
13
+ from pantat88 import pull, say, download
14
+
15
+ def nenen():
16
+ os.chdir(zzz)
17
+ say("【 {red} Installing Stable Diffusion {d} 】 {red}")
18
+
19
+ time.sleep(2)
20
+ pull(f"https://github.com/gutris1/segsmaker asd {zzz}")
21
+
22
+ req_list = [
23
+ f"https://huggingface.co/pantat88/ui/resolve/main/embeddings.zip {zzz}",
24
+ f"https://huggingface.co/pantat88/ui/resolve/main/4x-UltraSharp.pth {zzz}/models/ESRGAN",
25
+ f"https://huggingface.co/pantat88/ui/resolve/main/4x-AnimeSharp.pth {zzz}/models/ESRGAN",
26
+ f"https://huggingface.co/pantat88/ui/resolve/main/4x_NMKD-Superscale-SP_178000_G.pth {zzz}/models/ESRGAN",
27
+ f"https://huggingface.co/pantat88/ui/resolve/main/4x_RealisticRescaler_100000_G.pth {zzz}/models/ESRGAN",
28
+ f"https://huggingface.co/pantat88/ui/resolve/main/8x_RealESRGAN.pth {zzz}/models/ESRGAN",
29
+ f"https://huggingface.co/pantat88/ui/resolve/main/4x_foolhardy_Remacri.pth {zzz}/models/ESRGAN",
30
+ f"https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors {zzz}/models/VAE"]
31
+
32
+ for lines in req_list:
33
+ download(lines)
34
+
35
+ unzip = f"unzip -qo {zzz}/embeddings.zip -d {zzz}/embeddings && rm {zzz}/embeddings.zip"
36
+ get_ipython().system(unzip)
37
+
38
+ cmd = [
39
+ f"rm -rf {tmp}/* {zzz}/models/Stable-diffusion/tmp_ckpt {zzz}/models/Lora/tmp_lora {zzz}/outputs",
40
+ f"mkdir -p {zzz}/models/Lora",
41
+ f"ln -vs {tmp}/ckpt {zzz}/models/Stable-diffusion/tmp_ckpt",
42
+ f"ln -vs {tmp}/lora {zzz}/models/Lora/tmp_lora",
43
+ f"ln -vs {tmp}/outputs {zzz}/outputs",
44
+ f"mkdir -p {tmp}/ckpt {tmp}/lora {tmp}/outputs {tmp}/svd {tmp}/z123"]
45
+
46
+ for lines in cmd:
47
+ subprocess.run(shlex.split(lines), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
48
+
49
+ nenen()
50
+ (xxx / 'nenen88.py').unlink()
51
+ get_ipython().run_line_magic('run', f'{xxx}/pantat88.py')
52
+ os.chdir(xxx)
Stable-diffusion/ui/venv.py ADDED
@@ -0,0 +1,77 @@
1
+ import subprocess, sys, os, time, errno, shlex
2
+ from IPython.display import clear_output, Image, display
3
+ from IPython import get_ipython
4
+ from pathlib import Path
5
+
6
+ xxx = Path('/kaggle/working')
7
+ script = xxx / 'venv.py'
8
+ img = xxx / "loading.png"
9
+ vnv = Path('/kaggle/venv')
10
+ url = 'https://huggingface.co/pantat88/back_up/resolve/main/venv-torch241-cu121.tar.lz4'
11
+ fn = Path(url).name
12
+
13
+ os.chdir(xxx)
14
+ sys.path.append(str(xxx))
15
+
16
+ get_ipython().system('curl -sLO https://raw.githubusercontent.com/gutris1/segsmaker/main/script/loading.png')
17
+ get_ipython().system('curl -sLO https://raw.githubusercontent.com/gutris1/segsmaker/main/kaggle/script/pantat88.py')
18
+ display(Image(filename=str(img)))
19
+ time.sleep(1)
20
+
21
+ from pantat88 import say, download
22
+ say('【{red} Installing VENV{d} 】{red}')
23
+
24
+ req_list = [
25
+ "curl -LO https://github.com/DEX-1101/sd-webui-notebook/raw/main/res/new_tunnel",
26
+ "curl -Lo /usr/bin/cl https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64",
27
+ "apt-get update",
28
+ "apt -y install pv",
29
+ "pip install -q aria2 cloudpickle",
30
+ "chmod +x /usr/bin/cl"
31
+ ]
32
+
33
+ for items in req_list:
34
+ subprocess.run(shlex.split(items), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
35
+
36
+ def she_bang():
37
+ vnv_bin = vnv / 'bin'
38
+ old_shebang = b'#!/home/studio-lab-user/tmp/venv/bin/python3\n'
39
+ new_shebang = f"#!{vnv}/bin/python3\n"
40
+
41
+ for script in vnv_bin.glob('*'):
42
+ if script.is_file():
43
+ try:
44
+ with open(script, 'r+b') as file:
45
+ lines = file.readlines()
46
+ if lines and lines[0] == old_shebang:
47
+ lines[0] = new_shebang.encode('utf-8')
48
+ file.seek(0)
49
+ file.writelines(lines)
50
+ file.truncate()
51
+ print(f"Updated shebang in {script.name} to {new_shebang.strip()}")
52
+
53
+ except OSError as e:
54
+ if e.errno == 26:
55
+ print(f"Skipped {script.name}")
56
+ else:
57
+ print(f"Failed to update {script.name}: {e}")
58
+
59
+ def venv_install():
60
+ os.chdir('/kaggle')
61
+ download(url)
62
+
63
+ extract_venv = f'pv {fn} | lz4 -d | tar xf -'
64
+ get_ipython().system(extract_venv)
65
+ Path(fn).unlink()
66
+
67
+ get_ipython().system(f'rm -rf {vnv / "bin" / "pip*"}')
68
+ get_ipython().system(f'rm -rf {vnv / "bin" / "python*"}')
69
+ get_ipython().system(f'python3 -m venv {vnv}')
70
+ get_ipython().system(f'{vnv}/bin/python3 -m pip install -q --upgrade --force-reinstall pip')
71
+
72
+ venv_install()
73
+ she_bang()
74
+
75
+ clear_output(wait=True)
76
+ script.unlink()
77
+ os.chdir(xxx)
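
`she_bang()` above patches the shebangs of the scripts in the relocated venv's `bin/` directory so console entry points keep working after the tarball is extracted to `/kaggle/venv`. A minimal standalone sketch of the same idea (paths are illustrative, not part of the uploaded file):

```python
# Hedged sketch: rewrite the absolute-path shebang baked into a venv script
# after the environment has been moved to a new location.
from pathlib import Path

def fix_shebang(script: Path, new_python: str) -> None:
    lines = script.read_bytes().splitlines(keepends=True)
    if lines and lines[0].startswith(b"#!") and b"python" in lines[0]:
        lines[0] = f"#!{new_python}\n".encode()
        script.write_bytes(b"".join(lines))

# fix_shebang(Path("/kaggle/venv/bin/pip"), "/kaggle/venv/bin/python3")
```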
Stable-diffusion/ui/venv161.py ADDED
@@ -0,0 +1,97 @@
1
+ import subprocess
2
+ import select
3
+ import errno
4
+ import pty
5
+ import sys
6
+ import os
7
+ import re
8
+
9
+ xxx = "/kaggle/working"
10
+ zzz = "/kaggle/working/asd"
11
+
12
+ os.system(f'wget -q https://raw.githubusercontent.com/gutris1/segsmaker/main/kaggle/script/pantat88.py -O {xxx}/pantat88.py')
13
+ sys.path.append(xxx)
14
+
15
+ def venv_in():
16
+ from pantat88 import say
17
+ os.chdir('/kaggle')
18
+ say('【 {red} Installing VENV {d} 】 {red}')
19
+ os.system('apt -y install lz4 pv aria2 > /dev/null 2>&1')
20
+ url = 'https://huggingface.co/pantat88/back_up/resolve/main/venv-1_6_1.tar.lz4'
21
+ fn = 'venv-1_6_1.tar.lz4'
22
+ fc = f"aria2c --console-log-level=error --summary-interval=1 -c -x16 -s16 -k1M -j5 '{url}' -o '{fn}'"
23
+ woiii, appaa = pty.openpty()
24
+ qqqqq = subprocess.Popen(fc, shell=True, stdin=appaa, stdout=appaa, stderr=subprocess.STDOUT, close_fds=True)
25
+ os.close(appaa)
26
+
27
+ malam = ""
28
+ while True:
29
+ r, _, _ = select.select([woiii], [], [], 0.1)
30
+ if woiii in r:
31
+ try:
32
+ petualangan = os.read(woiii, 8192).decode()
33
+ malam += petualangan
34
+ for minggu in petualangan.splitlines():
35
+ if re.match(r'\[#\w{6}\s.*\]', minggu):
36
+ sys.stdout.write("\r" + " "*80 + "\r")
37
+ sys.stdout.write(f" {minggu}")
38
+ sys.stdout.flush()
39
+ break
40
+
41
+ except OSError as e:
42
+ if e.errno == errno.EIO:
43
+ break
44
+
45
+ if qqqqq.poll() is not None and not r:
46
+ break
47
+
48
+ kemarin = malam.find("Download Results:")
49
+ if kemarin != -1:
50
+ hhhhh = malam[kemarin:]
51
+ jjjjj = hhhhh.splitlines()
52
+ kkkkk = False
53
+ for ggggg in jjjjj:
54
+ if ggggg.strip().startswith("======+====+==========="):
55
+ kkkkk = True
56
+ print("\n" + f" {ggggg}")
57
+ continue
58
+ elif ggggg.strip().startswith("Status Legend:"):
59
+ break
60
+ elif kkkkk:
61
+ print(f" {ggggg}")
62
+
63
+ qqqqq.wait()
64
+ os.close(woiii)
65
+
66
+ extract_tar = f'pv {fn} | lz4 -d | tar xf -'
67
+
68
+ ikan, asin = pty.openpty()
69
+ proc = subprocess.Popen(extract_tar, shell=True, stdin=asin, stdout=asin, stderr=asin, close_fds=True)
70
+ os.close(asin)
71
+
72
+ while True:
73
+ r, _, _ = select.select([ikan], [], [], 0.1)
74
+ if ikan in r:
75
+ try:
76
+ jemuran = os.read(ikan, 1024).decode('utf-8', 'ignore')
77
+ print(jemuran, end='')
78
+ except OSError as e:
79
+ if e.errno == errno.EIO:
80
+ break
81
+ if proc.poll() is not None:
82
+ break
83
+
84
+ proc.wait()
85
+ os.close(ikan)
86
+ os.remove(fn)
87
+
88
+ oppai = '/kaggle/venv'
89
+ os.system(f'rm -rf {os.path.join(oppai, "bin", "pip*")}')
90
+ os.system(f'rm -rf {os.path.join(oppai, "bin", "python*")}')
91
+ os.system(f'python -m venv {oppai}')
92
+ say('【 {red} Setup Completed {d} 】 {red}')
93
+
94
+ if __name__ == '__main__':
95
+ venv_in()
96
+ assu = os.path.join(xxx, 'venv.py')
97
+ os.remove(assu)
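venv161.py drives aria2c and the tar pipeline through a pseudo-terminal and polls it with select, so download progress renders live inside a notebook cell; the regex \[#\w{6}\s.*\] picks out aria2c's transfer-status lines and rewrites them onto a single carriage-returned line. A stripped-down sketch of that pty-plus-select pattern (the echoed command is a placeholder, not something this file runs):

import errno, os, pty, select, subprocess

def run_live(cmd):
    # Run cmd under a pty and mirror its output as it arrives.
    master, slave = pty.openpty()
    proc = subprocess.Popen(cmd, shell=True, stdin=slave,
                            stdout=slave, stderr=slave, close_fds=True)
    os.close(slave)
    while True:
        ready, _, _ = select.select([master], [], [], 0.1)
        if master in ready:
            try:
                chunk = os.read(master, 1024).decode('utf-8', 'ignore')
            except OSError as e:
                if e.errno == errno.EIO:   # pty closed on the child's side
                    break
                raise
            print(chunk, end='')
        if proc.poll() is not None and not ready:
            break
    os.close(master)
    return proc.wait()

run_live("echo 'hello from a pty'")  # placeholder command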
Stable-diffusion/ui/venv180.py ADDED
@@ -0,0 +1,105 @@
+ import subprocess
+ import select
+ import errno
+ import pty
+ import sys
+ import os
+ import re
+
+ xxx = "/kaggle/working"
+ zzz = "/kaggle/working/asd"
+
+ os.system('apt-get update > /dev/null 2>&1')
+ os.system('apt -y install lz4 pv aria2 > /dev/null 2>&1')
+
+ os.system(f'curl -sLo {xxx}/pantat88.py https://raw.githubusercontent.com/gutris1/segsmaker/main/kaggle/script/pantat88.py')
+ os.system(f'curl -sLo {xxx}/nenen88.py https://huggingface.co/pantat88/ui/resolve/main/nenen88.py')
+ sys.path.append(xxx)
+
+ def venv_in():
+     from pantat88 import say
+     os.chdir('/kaggle')
+     say('【 {red} Installing VENV {d} 】 {red}')
+
+     url = 'https://huggingface.co/pantat88/back_up/resolve/main/venv-1_8_0.tar.lz4'
+     fn = 'venv-1_8_0.tar.lz4'
+     fc = f"aria2c --console-log-level=error --summary-interval=1 -c -x16 -s16 -k1M -j5 '{url}' -o '{fn}'"
+     woiii, appaa = pty.openpty()
+     qqqqq = subprocess.Popen(fc, shell=True, stdin=appaa, stdout=appaa, stderr=subprocess.STDOUT, close_fds=True)
+     os.close(appaa)
+
+     malam = ""
+     while True:
+         r, _, _ = select.select([woiii], [], [], 0.1)
+         if woiii in r:
+             try:
+                 petualangan = os.read(woiii, 8192).decode()
+                 malam += petualangan
+                 for minggu in petualangan.splitlines():
+                     if re.match(r'\[#\w{6}\s.*\]', minggu):
+                         sys.stdout.write("\r" + " "*80 + "\r")
+                         sys.stdout.write(f" {minggu}")
+                         sys.stdout.flush()
+                         break
+
+             except OSError as e:
+                 if e.errno == errno.EIO:
+                     break
+
+         if qqqqq.poll() is not None and not r:
+             break
+
+     kemarin = malam.find("Download Results:")
+     if kemarin != -1:
+         hhhhh = malam[kemarin:]
+         jjjjj = hhhhh.splitlines()
+         kkkkk = False
+         for ggggg in jjjjj:
+             if ggggg.strip().startswith("======+====+==========="):
+                 kkkkk = True
+                 print("\n" + f" {ggggg}")
+                 continue
+             elif ggggg.strip().startswith("Status Legend:"):
+                 break
+             elif kkkkk:
+                 print(f" {ggggg}")
+
+     qqqqq.wait()
+     os.close(woiii)
+
+     extract_tar = f'pv {fn} | lz4 -d | tar xf -'
+
+     ikan, asin = pty.openpty()
+     proc = subprocess.Popen(extract_tar, shell=True, stdin=asin, stdout=asin, stderr=asin, close_fds=True)
+     os.close(asin)
+
+     while True:
+         r, _, _ = select.select([ikan], [], [], 0.1)
+         if ikan in r:
+             try:
+                 jemuran = os.read(ikan, 1024).decode('utf-8', 'ignore')
+                 print(jemuran, end='')
+             except OSError as e:
+                 if e.errno == errno.EIO:
+                     break
+         if proc.poll() is not None:
+             break
+
+     proc.wait()
+     os.close(ikan)
+     if os.path.exists(fn):
+         os.remove(fn)
+     else:
+         print(f"Warning: File {fn} not found, skipping removal.")
+
+
+     oppai = '/kaggle/venv'
+     os.system(f'rm -rf {os.path.join(oppai, "bin", "pip*")}')
+     os.system(f'rm -rf {os.path.join(oppai, "bin", "python*")}')
+     os.system(f'python -m venv {oppai}')
+     say('【 {red} Setup Completed {d} 】 {red}')
+
+ if __name__ == '__main__':
+     venv_in()
+     assu = os.path.join(xxx, 'venv.py')
+     os.remove(assu)
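venv161.py and venv180.py both finish by deleting bin/pip* and bin/python* from the unpacked environment and re-running python -m venv over the same directory. Without --clear, venv only regenerates pyvenv.cfg, the interpreter symlinks and (via ensurepip) the pip launchers against the host interpreter, leaving lib/.../site-packages untouched, which is why the restored packages keep working. A hedged sketch of just that step (/kaggle/venv is the path these scripts use):

import glob, os, subprocess, sys

venv_dir = '/kaggle/venv'
for pattern in ('pip*', 'python*'):
    for path in glob.glob(os.path.join(venv_dir, 'bin', pattern)):
        os.remove(path)  # drop launchers and symlinks baked for the old interpreter path
# Rebuild pyvenv.cfg and the bin/ entries for the current host Python;
# site-packages is left in place because --clear is not passed.
subprocess.run([sys.executable, '-m', 'venv', venv_dir], check=True)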
Stable-diffusion/ui/venv220.py ADDED
@@ -0,0 +1,62 @@
+ import subprocess, sys, os, time, select, pty, errno
+
+ xxx = '/kaggle/working'
+
+ assu = os.path.join(xxx, 'venv.py')
+ os.chdir(xxx)
+ os.system(f'curl -sLO https://raw.githubusercontent.com/gutris1/segsmaker/main/kaggle/script/pantat88.py')
+ sys.path.append(xxx)
+ time.sleep(1)
+ from pantat88 import say, download
+ say('【{red} Installing VENV{d} 】{red}')
+
+ auuuwooo = [
+     f"curl -LO https://github.com/DEX-1101/sd-webui-notebook/raw/main/res/get_ip.py",
+     f"curl -LO https://github.com/DEX-1101/sd-webui-notebook/raw/main/res/new_tunnel",
+     f"curl -Lo /usr/bin/cl https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64",
+     f"apt-get update",
+     f"apt -y install lz4 pv aria2",
+     f"pip install -q git+https://github.com/DEX-1101/colablib",
+     f"npm install -g localtunnel",
+     f"chmod +x /usr/bin/cl"]
+
+ for tarzan in auuuwooo:
+     subprocess.run(tarzan.split(), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+
+ def venv_in():
+     os.chdir('/kaggle')
+     url = 'https://huggingface.co/pantat88/back_up/resolve/main/venv_torch220.tar.lz4'
+     fn = 'venv_torch220.tar.lz4'
+     download(url)
+
+     extract_tar = f'pv {fn} | lz4 -d | tar xf -'
+     ikan, asin = pty.openpty()
+     proc = subprocess.Popen(extract_tar, shell=True, stdin=asin, stdout=asin, stderr=asin, close_fds=True)
+     os.close(asin)
+     while True:
+         r, _, _ = select.select([ikan], [], [], 0.1)
+         if ikan in r:
+             try:
+                 jemuran = os.read(ikan, 1024).decode('utf-8', 'ignore')
+                 print(jemuran, end='')
+
+             except OSError as e:
+                 if e.errno == errno.EIO:
+                     break
+
+         if proc.poll() is not None:
+             break
+
+     proc.wait()
+     os.close(ikan)
+     os.remove(fn)
+
+     oppai = '/kaggle/venv'
+     os.system(f'rm -rf {os.path.join(oppai, "bin", "pip*")}')
+     os.system(f'rm -rf {os.path.join(oppai, "bin", "python*")}')
+     os.system(f'python -m venv {oppai}')
+
+ venv_in()
+ os.chdir(xxx)
+ say('【{red} VENV Setup Completed{d} 】{red}')
+ os.remove(assu)
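Every installer in this commit extracts its archive with the same shell pipeline, pv <archive> | lz4 -d | tar xf -: pv streams the file while reporting throughput, lz4 -d decompresses to stdout, and tar unpacks from stdin into the current directory (the scripts chdir to /kaggle first, so the archive's venv/ tree evidently lands at /kaggle/venv, which the cleanup steps assume). The pty plumbing exists only to keep pv's progress visible; without it the step reduces to roughly this (archive name is illustrative):

import subprocess

archive = 'venv_torch220.tar.lz4'  # illustrative; each script derives the name from its download URL
# pv writes progress to stderr, lz4 -d decompresses to stdout, tar reads the stream from stdin
subprocess.run(f'pv {archive} | lz4 -d | tar xf -', shell=True, check=True, cwd='/kaggle')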
Stable-diffusion/ui/venv_19-6-2024.py ADDED
@@ -0,0 +1,62 @@
+ import subprocess, sys, os, time, select, pty, errno
+
+ xxx = '/kaggle/working'
+
+ assu = os.path.join(xxx, 'venv.py')
+ os.chdir(xxx)
+ os.system(f'curl -sLO https://raw.githubusercontent.com/gutris1/segsmaker/main/kaggle/script/pantat88.py')
+ sys.path.append(xxx)
+ time.sleep(1)
+ from pantat88 import say, download
+ say('【{red} Installing VENV{d} 】{red}')
+
+ auuuwooo = [
+     f"curl -LO https://github.com/DEX-1101/sd-webui-notebook/raw/main/res/get_ip.py",
+     f"curl -LO https://github.com/DEX-1101/sd-webui-notebook/raw/main/res/new_tunnel",
+     f"curl -Lo /usr/bin/cl https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64",
+     f"apt-get update",
+     f"apt -y install lz4 pv aria2",
+     f"pip install -q git+https://github.com/DEX-1101/colablib",
+     f"npm install -g localtunnel",
+     f"chmod +x /usr/bin/cl"]
+
+ for tarzan in auuuwooo:
+     subprocess.run(tarzan.split(), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+
+ def venv_in():
+     os.chdir('/kaggle')
+     url = 'https://huggingface.co/pantat88/back_up/resolve/main/venv_19-6-2024.tar.lz4'
+     fn = 'venv_19-6-2024.tar.lz4'
+     download(url)
+
+     extract_tar = f'pv {fn} | lz4 -d | tar xf -'
+     ikan, asin = pty.openpty()
+     proc = subprocess.Popen(extract_tar, shell=True, stdin=asin, stdout=asin, stderr=asin, close_fds=True)
+     os.close(asin)
+     while True:
+         r, _, _ = select.select([ikan], [], [], 0.1)
+         if ikan in r:
+             try:
+                 jemuran = os.read(ikan, 1024).decode('utf-8', 'ignore')
+                 print(jemuran, end='')
+
+             except OSError as e:
+                 if e.errno == errno.EIO:
+                     break
+
+         if proc.poll() is not None:
+             break
+
+     proc.wait()
+     os.close(ikan)
+     os.remove(fn)
+
+     oppai = '/kaggle/venv'
+     os.system(f'rm -rf {os.path.join(oppai, "bin", "pip*")}')
+     os.system(f'rm -rf {os.path.join(oppai, "bin", "python*")}')
+     os.system(f'python -m venv {oppai}')
+
+ venv_in()
+ os.chdir(xxx)
+ say('【{red} VENV Setup Completed{d} 】{red}')
+ os.remove(assu)
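venv220.py, venv_19-6-2024.py and venvv.py run their setup commands with subprocess.run(cmd.split(), ...). Plain str.split works here only because none of the commands contain quoted or space-containing arguments; shlex.split is the safer general form. A small hedged sketch:

import shlex
import subprocess

cmd = "curl -Lo /usr/bin/cl https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64"
# shlex.split honours shell quoting rules, so arguments containing spaces stay intact
subprocess.run(shlex.split(cmd), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)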
Stable-diffusion/ui/venvv.py ADDED
@@ -0,0 +1,62 @@
+ import subprocess, sys, os, time, select, pty, errno
+
+ xxx = '/kaggle/working'
+
+ assu = os.path.join(xxx, 'venv.py')
+ os.chdir(xxx)
+ os.system(f'curl -sLO https://raw.githubusercontent.com/gutris1/segsmaker/main/kaggle/script/pantat88.py')
+ sys.path.append(xxx)
+ time.sleep(1)
+ from pantat88 import say, download
+ say('【{red} Installing VENV{d} 】{red}')
+
+ auuuwooo = [
+     f"curl -LO https://github.com/DEX-1101/sd-webui-notebook/raw/main/res/get_ip.py",
+     f"curl -LO https://github.com/DEX-1101/sd-webui-notebook/raw/main/res/new_tunnel",
+     f"curl -Lo /usr/bin/cl https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64",
+     f"apt-get update",
+     f"apt -y install lz4 pv aria2",
+     f"pip install -q git+https://github.com/DEX-1101/colablib",
+     f"npm install -g localtunnel",
+     f"chmod +x /usr/bin/cl"]
+
+ for tarzan in auuuwooo:
+     subprocess.run(tarzan.split(), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+
+ def venv_in():
+     os.chdir('/kaggle')
+     url = 'https://huggingface.co/pantat88/back_up/resolve/main/venv_torch220.tar.lz4'
+     fn = 'venv_torch220.tar.lz4'
+     download(url)
+
+     extract_tar = f'pv {fn} | lz4 -d | tar xf -'
+     ikan, asin = pty.openpty()
+     proc = subprocess.Popen(extract_tar, shell=True, stdin=asin, stdout=asin, stderr=asin, close_fds=True)
+     os.close(asin)
+     while True:
+         r, _, _ = select.select([ikan], [], [], 0.1)
+         if ikan in r:
+             try:
+                 jemuran = os.read(ikan, 1024).decode('utf-8', 'ignore')
+                 print(jemuran, end='')
+
+             except OSError as e:
+                 if e.errno == errno.EIO:
+                     break
+
+         if proc.poll() is not None:
+             break
+
+     proc.wait()
+     os.close(ikan)
+     os.remove(fn)
+
+     oppai = '/kaggle/venv'
+     os.system(f'rm -rf {os.path.join(oppai, "bin", "pip*")}')
+     os.system(f'rm -rf {os.path.join(oppai, "bin", "python*")}')
+     os.system(f'python -m venv {oppai}')
+     say('【{red} VENV Setup Completed{d} 】{red}')
+
+ venv_in()
+ os.chdir(xxx)
+ os.remove(assu)
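All of these venv*.py variants resolve assu to /kaggle/working/venv.py and delete it as their final step, which suggests each one is meant to be fetched into /kaggle/working under the name venv.py and executed once from a notebook cell. A hedged usage sketch (the source URL is a placeholder, not confirmed by this commit):

import os

# Placeholder URL: substitute the raw link of whichever venv*.py variant is wanted.
os.system('curl -sLo /kaggle/working/venv.py <raw-url-of-the-chosen-variant>')
exec(open('/kaggle/working/venv.py').read())  # the script deletes itself when it finishes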
Stable-diffusion/ui/zzzzzz.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:054e950e72181bb45ddbc7106d3625de406477725b5b313a91fe4522f03dbe0a
+ size 6865699
Stable-diffusion/venv-fusion.tar.lz4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2186577551c3e55c2c1c8420cf3cb8bab234aceb7ca5f2463181a1abf95c745
+ size 4731403914
Stable-diffusion/venv-sd-trainer.tar.lz4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73a61eabc0e539b54d42dbfdea652279a9744df660e1841e62a29938138ed2fb
+ size 5481814036
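The last three entries are Git LFS pointer stubs rather than the binaries themselves: the repository stores only the spec version, a sha256 object ID and the byte size, while the actual payload lives in LFS storage. Parsing such a stub is straightforward; a small sketch using the venv-sd-trainer pointer above:

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "<key> <value>"; oid carries the hash algorithm as a prefix.
    fields = dict(line.split(' ', 1) for line in text.strip().splitlines())
    algo, digest = fields['oid'].split(':', 1)
    return {'algo': algo, 'oid': digest, 'size': int(fields['size'])}

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:73a61eabc0e539b54d42dbfdea652279a9744df660e1841e62a29938138ed2fb\n"
    "size 5481814036\n"
)
print(parse_lfs_pointer(pointer))  # sha256 digest plus a payload of about 5.5 GB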