moistdio committed on
Commit 6831a54
1 Parent(s): c6ee596

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .eslintignore +5 -0
  2. .eslintrc.js +98 -0
  3. .git-blame-ignore-revs +2 -0
  4. .gitattributes +2 -0
  5. .gitignore +53 -0
  6. .ipynb_checkpoints/requirements_versions-checkpoint.txt +42 -0
  7. .ipynb_checkpoints/webui-checkpoint.bat +98 -0
  8. .ipynb_checkpoints/webui-checkpoint.py +179 -0
  9. .ipynb_checkpoints/webui-checkpoint.sh +304 -0
  10. .ipynb_checkpoints/webui-user-checkpoint.bat +18 -0
  11. .ipynb_checkpoints/webui-user-checkpoint.sh +48 -0
  12. .pylintrc +3 -0
  13. CHANGELOG.md +1085 -0
  14. CITATION.cff +7 -0
  15. CODEOWNERS +1 -0
  16. LICENSE.txt +688 -0
  17. README.md +199 -8
  18. _typos.toml +5 -0
  19. backend/README.md +1 -0
  20. backend/args.py +67 -0
  21. backend/attention.py +501 -0
  22. backend/diffusion_engine/base.py +87 -0
  23. backend/diffusion_engine/flux.py +106 -0
  24. backend/diffusion_engine/sd15.py +81 -0
  25. backend/diffusion_engine/sd20.py +81 -0
  26. backend/diffusion_engine/sdxl.py +133 -0
  27. backend/huggingface/Kwai-Kolors/Kolors/model_index.json +25 -0
  28. backend/huggingface/Kwai-Kolors/Kolors/scheduler/scheduler_config.json +22 -0
  29. backend/huggingface/Kwai-Kolors/Kolors/text_encoder/config.json +42 -0
  30. backend/huggingface/Kwai-Kolors/Kolors/text_encoder/pytorch_model.bin.index.json +207 -0
  31. backend/huggingface/Kwai-Kolors/Kolors/text_encoder/tokenizer_config.json +12 -0
  32. backend/huggingface/Kwai-Kolors/Kolors/text_encoder/vocab.txt +3 -0
  33. backend/huggingface/Kwai-Kolors/Kolors/tokenizer/tokenizer_config.json +12 -0
  34. backend/huggingface/Kwai-Kolors/Kolors/tokenizer/vocab.txt +3 -0
  35. backend/huggingface/Kwai-Kolors/Kolors/unet/config.json +72 -0
  36. backend/huggingface/Kwai-Kolors/Kolors/vae/config.json +31 -0
  37. backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/model_index.json +41 -0
  38. backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/scheduler/scheduler_config.json +21 -0
  39. backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/text_encoder/config.json +33 -0
  40. backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/text_encoder_2/config.json +32 -0
  41. backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/text_encoder_2/model.safetensors.index.json +226 -0
  42. backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/tokenizer/special_tokens_map.json +37 -0
  43. backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/tokenizer/tokenizer_config.json +57 -0
  44. backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/tokenizer/vocab.txt +0 -0
  45. backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/tokenizer_2/special_tokens_map.json +23 -0
  46. backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/tokenizer_2/tokenizer_config.json +39 -0
  47. backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/transformer/config.json +20 -0
  48. backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/vae/config.json +33 -0
  49. backend/huggingface/black-forest-labs/FLUX.1-dev/model_index.json +32 -0
  50. backend/huggingface/black-forest-labs/FLUX.1-dev/scheduler/scheduler_config.json +11 -0
.eslintignore ADDED
@@ -0,0 +1,5 @@
+ extensions
+ extensions-disabled
+ extensions-builtin/sd_forge_controlnet
+ repositories
+ venv
.eslintrc.js ADDED
@@ -0,0 +1,98 @@
+ /* global module */
+ module.exports = {
+     env: {
+         browser: true,
+         es2021: true,
+     },
+     extends: "eslint:recommended",
+     parserOptions: {
+         ecmaVersion: "latest",
+     },
+     rules: {
+         "arrow-spacing": "error",
+         "block-spacing": "error",
+         "brace-style": "error",
+         "comma-dangle": ["error", "only-multiline"],
+         "comma-spacing": "error",
+         "comma-style": ["error", "last"],
+         "curly": ["error", "multi-line", "consistent"],
+         "eol-last": "error",
+         "func-call-spacing": "error",
+         "function-call-argument-newline": ["error", "consistent"],
+         "function-paren-newline": ["error", "consistent"],
+         "indent": ["error", 4],
+         "key-spacing": "error",
+         "keyword-spacing": "error",
+         "linebreak-style": ["error", "unix"],
+         "no-extra-semi": "error",
+         "no-mixed-spaces-and-tabs": "error",
+         "no-multi-spaces": "error",
+         "no-redeclare": ["error", {builtinGlobals: false}],
+         "no-trailing-spaces": "error",
+         "no-unused-vars": "off",
+         "no-whitespace-before-property": "error",
+         "object-curly-newline": ["error", {consistent: true, multiline: true}],
+         "object-curly-spacing": ["error", "never"],
+         "operator-linebreak": ["error", "after"],
+         "quote-props": ["error", "consistent-as-needed"],
+         "semi": ["error", "always"],
+         "semi-spacing": "error",
+         "semi-style": ["error", "last"],
+         "space-before-blocks": "error",
+         "space-before-function-paren": ["error", "never"],
+         "space-in-parens": ["error", "never"],
+         "space-infix-ops": "error",
+         "space-unary-ops": "error",
+         "switch-colon-spacing": "error",
+         "template-curly-spacing": ["error", "never"],
+         "unicode-bom": "error",
+     },
+     globals: {
+         //script.js
+         gradioApp: "readonly",
+         executeCallbacks: "readonly",
+         onAfterUiUpdate: "readonly",
+         onOptionsChanged: "readonly",
+         onUiLoaded: "readonly",
+         onUiUpdate: "readonly",
+         uiCurrentTab: "writable",
+         uiElementInSight: "readonly",
+         uiElementIsVisible: "readonly",
+         //ui.js
+         opts: "writable",
+         all_gallery_buttons: "readonly",
+         selected_gallery_button: "readonly",
+         selected_gallery_index: "readonly",
+         switch_to_txt2img: "readonly",
+         switch_to_img2img_tab: "readonly",
+         switch_to_img2img: "readonly",
+         switch_to_sketch: "readonly",
+         switch_to_inpaint: "readonly",
+         switch_to_inpaint_sketch: "readonly",
+         switch_to_extras: "readonly",
+         get_tab_index: "readonly",
+         create_submit_args: "readonly",
+         restart_reload: "readonly",
+         updateInput: "readonly",
+         onEdit: "readonly",
+         //extraNetworks.js
+         requestGet: "readonly",
+         popup: "readonly",
+         // profilerVisualization.js
+         createVisualizationTable: "readonly",
+         // from python
+         localization: "readonly",
+         // progressbar.js
+         randomId: "readonly",
+         requestProgress: "readonly",
+         // imageviewer.js
+         modalPrevImage: "readonly",
+         modalNextImage: "readonly",
+         // localStorage.js
+         localSet: "readonly",
+         localGet: "readonly",
+         localRemove: "readonly",
+         // resizeHandle.js
+         setupResizeHandle: "writable"
+     }
+ };
.git-blame-ignore-revs ADDED
@@ -0,0 +1,2 @@
+ # Apply ESlint
+ 9c54b78d9dde5601e916f308d9a9d6953ec39430
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ backend/huggingface/Kwai-Kolors/Kolors/text_encoder/vocab.txt filter=lfs diff=lfs merge=lfs -text
+ backend/huggingface/Kwai-Kolors/Kolors/tokenizer/vocab.txt filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,53 @@
+ huggingface_space_mirror/
+ random_test.py
+ __pycache__
+ *.ckpt
+ *.safetensors
+ *.pth
+ *.dev.js
+ .DS_Store
+ /output/
+ /outputs/
+ /ESRGAN/*
+ /SwinIR/*
+ /repositories
+ /venv
+ /tmp
+ /output
+ /model.ckpt
+ /models/**/*
+ /GFPGANv1.3.pth
+ /gfpgan/weights/*.pth
+ /ui-config.json
+ /outputs
+ /config.json
+ /log
+ /webui.settings.bat
+ /embeddings
+ /styles.csv
+ /params.txt
+ /styles.csv.bak
+ /webui-user.bat
+ /webui-user.sh
+ /interrogate
+ /user.css
+ /.idea
+ notification.mp3
+ /SwinIR
+ /textual_inversion
+ .vscode
+ /extensions
+ /test/stdout.txt
+ /test/stderr.txt
+ /cache.json*
+ /config_states/
+ /node_modules
+ /package-lock.json
+ /.coverage*
+ /test/test_outputs
+ /cache
+ trace.json
+ /sysinfo-????-??-??-??-??.json
+ /test/results.xml
+ coverage.xml
+ **/tests/**/expectations
.ipynb_checkpoints/requirements_versions-checkpoint.txt ADDED
@@ -0,0 +1,42 @@
+ setuptools==69.5.1 # temp fix for compatibility with some old packages
+ GitPython==3.1.32
+ Pillow==9.5.0
+ accelerate==0.21.0
+ blendmodes==2022
+ clean-fid==0.1.35
+ diskcache==5.6.3
+ einops==0.4.1
+ facexlib==0.3.0
+ fastapi==0.104.1
+ gradio==4.40.0
+ httpcore==0.15
+ inflection==0.5.1
+ jsonmerge==1.8.0
+ kornia==0.6.7
+ lark==1.1.2
+ numpy==1.26.2
+ omegaconf==2.2.3
+ open-clip-torch==2.20.0
+ piexif==1.1.3
+ protobuf==3.20.0
+ psutil==5.9.5
+ pytorch_lightning==1.9.4
+ resize-right==0.0.2
+ safetensors==0.4.2
+ scikit-image==0.21.0
+ spandrel==0.3.4
+ spandrel-extra-arches==0.1.1
+ tomesd==0.1.3
+ torch
+ torchdiffeq==0.2.3
+ torchsde==0.2.6
+ transformers==4.44.0
+ httpx==0.24.1
+ pillow-avif-plugin==1.4.3
+ diffusers==0.29.2
+ gradio_rangeslider==0.0.6
+ gradio_imageslider==0.0.20
+ loadimg==0.1.2
+ tqdm==4.66.1
+ peft==0.12.0
+ pydantic==2.8.2
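
The pins above only help if the installed environment actually matches them. As a quick sanity check (a minimal sketch, not part of the uploaded files; the package names and pin values are copied from the list above), the standard-library importlib.metadata can compare a few installed versions against their pins:

# Minimal sketch: check a few of the pins above against the active
# environment. importlib.metadata is stdlib on Python 3.8+.
from importlib.metadata import PackageNotFoundError, version

pins = {"gradio": "4.40.0", "transformers": "4.44.0", "diffusers": "0.29.2"}
for name, expected in pins.items():
    try:
        installed = version(name)
    except PackageNotFoundError:
        print(f"{name}: not installed (pinned to {expected})")
        continue
    status = "OK" if installed == expected else f"differs from pin {expected}"
    print(f"{name} {installed}: {status}")
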
.ipynb_checkpoints/webui-checkpoint.bat ADDED
@@ -0,0 +1,98 @@
+ @echo off
+
+ if exist webui.settings.bat (
+     call webui.settings.bat
+ )
+
+ if not defined PYTHON (set PYTHON=python)
+ if defined GIT (set "GIT_PYTHON_GIT_EXECUTABLE=%GIT%")
+ if not defined VENV_DIR (set "VENV_DIR=%~dp0%venv")
+
+ set SD_WEBUI_RESTART=tmp/restart
+ set ERROR_REPORTING=FALSE
+
+ mkdir tmp 2>NUL
+
+ %PYTHON% -c "" >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :check_pip
+ echo Couldn't launch python
+ goto :show_stdout_stderr
+
+ :check_pip
+ %PYTHON% -mpip --help >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :start_venv
+ if "%PIP_INSTALLER_LOCATION%" == "" goto :show_stdout_stderr
+ %PYTHON% "%PIP_INSTALLER_LOCATION%" >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :start_venv
+ echo Couldn't install pip
+ goto :show_stdout_stderr
+
+ :start_venv
+ if ["%VENV_DIR%"] == ["-"] goto :skip_venv
+ if ["%SKIP_VENV%"] == ["1"] goto :skip_venv
+
+ dir "%VENV_DIR%\Scripts\Python.exe" >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :activate_venv
+
+ for /f "delims=" %%i in ('CALL %PYTHON% -c "import sys; print(sys.executable)"') do set PYTHON_FULLNAME="%%i"
+ echo Creating venv in directory %VENV_DIR% using python %PYTHON_FULLNAME%
+ %PYTHON_FULLNAME% -m venv "%VENV_DIR%" >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :upgrade_pip
+ echo Unable to create venv in directory "%VENV_DIR%"
+ goto :show_stdout_stderr
+
+ :upgrade_pip
+ "%VENV_DIR%\Scripts\Python.exe" -m pip install --upgrade pip
+ if %ERRORLEVEL% == 0 goto :activate_venv
+ echo Warning: Failed to upgrade PIP version
+
+ :activate_venv
+ set PYTHON="%VENV_DIR%\Scripts\Python.exe"
+ call "%VENV_DIR%\Scripts\activate.bat"
+ echo venv %PYTHON%
+
+ :skip_venv
+ if [%ACCELERATE%] == ["True"] goto :accelerate
+ goto :launch
+
+ :accelerate
+ echo Checking for accelerate
+ set ACCELERATE="%VENV_DIR%\Scripts\accelerate.exe"
+ if EXIST %ACCELERATE% goto :accelerate_launch
+
+ :launch
+ %PYTHON% launch.py %*
+ if EXIST tmp/restart goto :skip_venv
+ pause
+ exit /b
+
+ :accelerate_launch
+ echo Accelerating
+ %ACCELERATE% launch --num_cpu_threads_per_process=6 launch.py
+ if EXIST tmp/restart goto :skip_venv
+ pause
+ exit /b
+
+ :show_stdout_stderr
+
+ echo.
+ echo exit code: %errorlevel%
+
+ for /f %%i in ("tmp\stdout.txt") do set size=%%~zi
+ if %size% equ 0 goto :show_stderr
+ echo.
+ echo stdout:
+ type tmp\stdout.txt
+
+ :show_stderr
+ for /f %%i in ("tmp\stderr.txt") do set size=%%~zi
+ if %size% equ 0 goto :endofscript
+ echo.
+ echo stderr:
+ type tmp\stderr.txt
+
+ :endofscript
+
+ echo.
+ echo Launch unsuccessful. Exiting.
+ pause
.ipynb_checkpoints/webui-checkpoint.py ADDED
@@ -0,0 +1,179 @@
+ from __future__ import annotations
+
+ import os
+ import time
+
+ from modules import timer
+ from modules import initialize_util
+ from modules import initialize
+ from threading import Thread
+ from modules_forge.initialization import initialize_forge
+ from modules_forge import main_thread
+
+
+ startup_timer = timer.startup_timer
+ startup_timer.record("launcher")
+
+ initialize_forge()
+
+ initialize.imports()
+
+ initialize.check_versions()
+
+ initialize.initialize()
+
+
+ def create_api(app):
+     from modules.api.api import Api
+     from modules.call_queue import queue_lock
+
+     api = Api(app, queue_lock)
+     return api
+
+
+ def api_only_worker():
+     from fastapi import FastAPI
+     from modules.shared_cmd_options import cmd_opts
+
+     app = FastAPI()
+     initialize_util.setup_middleware(app)
+     api = create_api(app)
+
+     from modules import script_callbacks
+     script_callbacks.before_ui_callback()
+     script_callbacks.app_started_callback(None, app)
+
+     print(f"Startup time: {startup_timer.summary()}.")
+     api.launch(
+         server_name=initialize_util.gradio_server_name(),
+         port=cmd_opts.port if cmd_opts.port else 7861,
+         root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else ""
+     )
+
+
+ def webui_worker():
+     from modules.shared_cmd_options import cmd_opts
+
+     launch_api = cmd_opts.api
+
+     from modules import shared, ui_tempdir, script_callbacks, ui, progress, ui_extra_networks
+
+     while 1:
+         if shared.opts.clean_temp_dir_at_start:
+             ui_tempdir.cleanup_tmpdr()
+             startup_timer.record("cleanup temp dir")
+
+         script_callbacks.before_ui_callback()
+         startup_timer.record("scripts before_ui_callback")
+
+         shared.demo = ui.create_ui()
+         startup_timer.record("create ui")
+
+         if not cmd_opts.no_gradio_queue:
+             shared.demo.queue(64)
+
+         gradio_auth_creds = list(initialize_util.get_gradio_auth_creds()) or None
+
+         auto_launch_browser = False
+         if os.getenv('SD_WEBUI_RESTARTING') != '1':
+             if shared.opts.auto_launch_browser == "Remote" or cmd_opts.autolaunch:
+                 auto_launch_browser = True
+             elif shared.opts.auto_launch_browser == "Local":
+                 auto_launch_browser = not cmd_opts.webui_is_non_local
+
+         from modules_forge.forge_canvas.canvas import canvas_js_root_path
+
+         app, local_url, share_url = shared.demo.launch(
+             share=cmd_opts.share,
+             server_name=initialize_util.gradio_server_name(),
+             server_port=cmd_opts.port,
+             ssl_keyfile=cmd_opts.tls_keyfile,
+             ssl_certfile=cmd_opts.tls_certfile,
+             ssl_verify=cmd_opts.disable_tls_verify,
+             debug=cmd_opts.gradio_debug,
+             auth=gradio_auth_creds,
+             inbrowser=auto_launch_browser,
+             prevent_thread_lock=True,
+             allowed_paths=cmd_opts.gradio_allowed_path + [canvas_js_root_path],
+             app_kwargs={
+                 "docs_url": "/docs",
+                 "redoc_url": "/redoc",
+             },
+             root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else "",
+         )
+
+         startup_timer.record("gradio launch")
+
+         # gradio uses a very open CORS policy via app.user_middleware, which makes it possible for
+         # an attacker to trick the user into opening a malicious HTML page, which makes a request to the
+         # running web ui and do whatever the attacker wants, including installing an extension and
+         # running its code. We disable this here. Suggested by RyotaK.
+         app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']
+
+         initialize_util.setup_middleware(app)
+
+         progress.setup_progress_api(app)
+         ui.setup_ui_api(app)
+
+         if launch_api:
+             create_api(app)
+
+         ui_extra_networks.add_pages_to_demo(app)
+
+         startup_timer.record("add APIs")
+
+         with startup_timer.subcategory("app_started_callback"):
+             script_callbacks.app_started_callback(shared.demo, app)
+
+         timer.startup_record = startup_timer.dump()
+         print(f"Startup time: {startup_timer.summary()}.")
+
+         try:
+             while True:
+                 server_command = shared.state.wait_for_server_command(timeout=5)
+                 if server_command:
+                     if server_command in ("stop", "restart"):
+                         break
+                     else:
+                         print(f"Unknown server command: {server_command}")
+         except KeyboardInterrupt:
+             print('Caught KeyboardInterrupt, stopping...')
+             server_command = "stop"
+
+         if server_command == "stop":
+             print("Stopping server...")
+             # If we catch a keyboard interrupt, we want to stop the server and exit.
+             shared.demo.close()
+             break
+
+         # disable auto launch webui in browser for subsequent UI Reload
+         os.environ.setdefault('SD_WEBUI_RESTARTING', '1')
+
+         print('Restarting UI...')
+         shared.demo.close()
+         time.sleep(0.5)
+         startup_timer.reset()
+         script_callbacks.app_reload_callback()
+         startup_timer.record("app reload callback")
+         script_callbacks.script_unloaded_callback()
+         startup_timer.record("scripts unloaded callback")
+         initialize.initialize_rest(reload_script_modules=True)
+
+
+ def api_only():
+     Thread(target=api_only_worker, daemon=True).start()
+
+
+ def webui():
+     Thread(target=webui_worker, daemon=True).start()
+
+
+ if __name__ == "__main__":
+     from modules.shared_cmd_options import cmd_opts
+
+     if cmd_opts.nowebui:
+         api_only()
+     else:
+         webui()
+
+     main_thread.loop()
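
The CORS hardening in webui_worker above relies on Starlette exposing the not-yet-built middleware list as app.user_middleware, where each entry's cls attribute is the middleware class. A self-contained sketch of the same pattern, assuming a plain FastAPI app; only the filter expression is taken from the file above, the rest is illustrative scaffolding:

# Sketch of the CORS-middleware stripping used in webui_worker above,
# demonstrated on a standalone FastAPI app.
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()
# An overly permissive CORS policy, similar in spirit to what gradio installs.
app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"])

# Strip it again before the app serves requests, so a malicious page in the
# user's browser cannot call the locally running API cross-origin.
app.user_middleware = [m for m in app.user_middleware if m.cls.__name__ != "CORSMiddleware"]
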
.ipynb_checkpoints/webui-checkpoint.sh ADDED
@@ -0,0 +1,304 @@
+ #!/usr/bin/env bash
+ #################################################
+ # Please do not make any changes to this file, #
+ # change the variables in webui-user.sh instead #
+ #################################################
+
+ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+
+ # If run from macOS, load defaults from webui-macos-env.sh
+ if [[ "$OSTYPE" == "darwin"* ]]; then
+     if [[ -f "$SCRIPT_DIR"/webui-macos-env.sh ]]
+     then
+         source "$SCRIPT_DIR"/webui-macos-env.sh
+     fi
+ fi
+
+ # Read variables from webui-user.sh
+ # shellcheck source=/dev/null
+ if [[ -f "$SCRIPT_DIR"/webui-user.sh ]]
+ then
+     source "$SCRIPT_DIR"/webui-user.sh
+ fi
+
+ # If $venv_dir is "-", then disable venv support
+ use_venv=1
+ if [[ $venv_dir == "-" ]]; then
+     use_venv=0
+ fi
+
+ # Set defaults
+ # Install directory without trailing slash
+ if [[ -z "${install_dir}" ]]
+ then
+     install_dir="$SCRIPT_DIR"
+ fi
+
+ # Name of the subdirectory (defaults to stable-diffusion-webui)
+ if [[ -z "${clone_dir}" ]]
+ then
+     clone_dir="stable-diffusion-webui"
+ fi
+
+ # python3 executable
+ if [[ -z "${python_cmd}" ]]
+ then
+     python_cmd="python3.10"
+ fi
+ if [[ ! -x "$(command -v "${python_cmd}")" ]]
+ then
+     python_cmd="python3"
+ fi
+
+ # git executable
+ if [[ -z "${GIT}" ]]
+ then
+     export GIT="git"
+ else
+     export GIT_PYTHON_GIT_EXECUTABLE="${GIT}"
+ fi
+
+ # python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv)
+ if [[ -z "${venv_dir}" ]] && [[ $use_venv -eq 1 ]]
+ then
+     venv_dir="venv"
+ fi
+
+ if [[ -z "${LAUNCH_SCRIPT}" ]]
+ then
+     LAUNCH_SCRIPT="launch.py"
+ fi
+
+ # this script cannot be run as root by default
+ can_run_as_root=0
+
+ # read any command line flags to the webui.sh script
+ while getopts "f" flag > /dev/null 2>&1
+ do
+     case ${flag} in
+         f) can_run_as_root=1;;
+         *) break;;
+     esac
+ done
+
+ # Disable sentry logging
+ export ERROR_REPORTING=FALSE
+
+ # Do not reinstall existing pip packages on Debian/Ubuntu
+ export PIP_IGNORE_INSTALLED=0
+
+ # Pretty print
+ delimiter="################################################################"
+
+ printf "\n%s\n" "${delimiter}"
+ printf "\e[1m\e[32mInstall script for stable-diffusion + Web UI\n"
+ printf "\e[1m\e[34mTested on Debian 11 (Bullseye), Fedora 34+ and openSUSE Leap 15.4 or newer.\e[0m"
+ printf "\n%s\n" "${delimiter}"
+
+ # Do not run as root
+ if [[ $(id -u) -eq 0 && can_run_as_root -eq 0 ]]
+ then
+     printf "\n%s\n" "${delimiter}"
+     printf "\e[1m\e[31mERROR: This script must not be launched as root, aborting...\e[0m"
+     printf "\n%s\n" "${delimiter}"
+     exit 1
+ else
+     printf "\n%s\n" "${delimiter}"
+     printf "Running on \e[1m\e[32m%s\e[0m user" "$(whoami)"
+     printf "\n%s\n" "${delimiter}"
+ fi
+
+ if [[ $(getconf LONG_BIT) = 32 ]]
+ then
+     printf "\n%s\n" "${delimiter}"
+     printf "\e[1m\e[31mERROR: Unsupported Running on a 32bit OS\e[0m"
+     printf "\n%s\n" "${delimiter}"
+     exit 1
+ fi
+
+ if [[ -d "$SCRIPT_DIR/.git" ]]
+ then
+     printf "\n%s\n" "${delimiter}"
+     printf "Repo already cloned, using it as install directory"
+     printf "\n%s\n" "${delimiter}"
+     install_dir="${SCRIPT_DIR}/../"
+     clone_dir="${SCRIPT_DIR##*/}"
+ fi
+
+ # Check prerequisites
+ gpu_info=$(lspci 2>/dev/null | grep -E "VGA|Display")
+ case "$gpu_info" in
+     *"Navi 1"*)
+         export HSA_OVERRIDE_GFX_VERSION=10.3.0
+         if [[ -z "${TORCH_COMMAND}" ]]
+         then
+             pyv="$(${python_cmd} -c 'import sys; print(f"{sys.version_info[0]}.{sys.version_info[1]:02d}")')"
+             # Using an old nightly compiled against rocm 5.2 for Navi1, see https://github.com/pytorch/pytorch/issues/106728#issuecomment-1749511711
+             if [[ $pyv == "3.8" ]]
+             then
+                 export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp38-cp38-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp38-cp38-linux_x86_64.whl"
+             elif [[ $pyv == "3.9" ]]
+             then
+                 export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp39-cp39-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp39-cp39-linux_x86_64.whl"
+             elif [[ $pyv == "3.10" ]]
+             then
+                 export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp310-cp310-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp310-cp310-linux_x86_64.whl"
+             else
+                 printf "\e[1m\e[31mERROR: RX 5000 series GPUs python version must be between 3.8 and 3.10, aborting...\e[0m"
+                 exit 1
+             fi
+         fi
+     ;;
+     *"Navi 2"*) export HSA_OVERRIDE_GFX_VERSION=10.3.0
+     ;;
+     *"Navi 3"*) [[ -z "${TORCH_COMMAND}" ]] && \
+         export TORCH_COMMAND="pip install torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.7"
+     ;;
+     *"Renoir"*) export HSA_OVERRIDE_GFX_VERSION=9.0.0
+         printf "\n%s\n" "${delimiter}"
+         printf "Experimental support for Renoir: make sure to have at least 4GB of VRAM and 10GB of RAM or enable cpu mode: --use-cpu all --no-half"
+         printf "\n%s\n" "${delimiter}"
+     ;;
+     *)
+     ;;
+ esac
+ if ! echo "$gpu_info" | grep -q "NVIDIA";
+ then
+     if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]]
+     then
+         export TORCH_COMMAND="pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.7"
+     elif npu-smi info 2>/dev/null
+     then
+         export TORCH_COMMAND="pip install torch==2.1.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu; pip install torch_npu==2.1.0"
+     fi
+ fi
+
+ for preq in "${GIT}" "${python_cmd}"
+ do
+     if ! hash "${preq}" &>/dev/null
+     then
+         printf "\n%s\n" "${delimiter}"
+         printf "\e[1m\e[31mERROR: %s is not installed, aborting...\e[0m" "${preq}"
+         printf "\n%s\n" "${delimiter}"
+         exit 1
+     fi
+ done
+
+ if [[ $use_venv -eq 1 ]] && ! "${python_cmd}" -c "import venv" &>/dev/null
+ then
+     printf "\n%s\n" "${delimiter}"
+     printf "\e[1m\e[31mERROR: python3-venv is not installed, aborting...\e[0m"
+     printf "\n%s\n" "${delimiter}"
+     exit 1
+ fi
+
+ cd "${install_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/, aborting...\e[0m" "${install_dir}"; exit 1; }
+ if [[ -d "${clone_dir}" ]]
+ then
+     cd "${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; }
+ else
+     printf "\n%s\n" "${delimiter}"
+     printf "Clone stable-diffusion-webui"
+     printf "\n%s\n" "${delimiter}"
+     "${GIT}" clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git "${clone_dir}"
+     cd "${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; }
+ fi
+
+ if [[ $use_venv -eq 1 ]] && [[ -z "${VIRTUAL_ENV}" ]];
+ then
+     printf "\n%s\n" "${delimiter}"
+     printf "Create and activate python venv"
+     printf "\n%s\n" "${delimiter}"
+     cd "${install_dir}"/"${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; }
+     if [[ ! -d "${venv_dir}" ]]
+     then
+         "${python_cmd}" -m venv "${venv_dir}"
+         "${venv_dir}"/bin/python -m pip install --upgrade pip
+         first_launch=1
+     fi
+     # shellcheck source=/dev/null
+     if [[ -f "${venv_dir}"/bin/activate ]]
+     then
+         source "${venv_dir}"/bin/activate
+         # ensure use of python from venv
+         python_cmd="${venv_dir}"/bin/python
+     else
+         printf "\n%s\n" "${delimiter}"
+         printf "\e[1m\e[31mERROR: Cannot activate python venv, aborting...\e[0m"
+         printf "\n%s\n" "${delimiter}"
+         exit 1
+     fi
+ else
+     printf "\n%s\n" "${delimiter}"
+     printf "python venv already activate or run without venv: ${VIRTUAL_ENV}"
+     printf "\n%s\n" "${delimiter}"
+ fi
+
+ # Try using TCMalloc on Linux
+ prepare_tcmalloc() {
+     if [[ "${OSTYPE}" == "linux"* ]] && [[ -z "${NO_TCMALLOC}" ]] && [[ -z "${LD_PRELOAD}" ]]; then
+         # check glibc version
+         LIBC_VER=$(echo $(ldd --version | awk 'NR==1 {print $NF}') | grep -oP '\d+\.\d+')
+         echo "glibc version is $LIBC_VER"
+         libc_vernum=$(expr $LIBC_VER)
+         # Since 2.34 libpthread is integrated into libc.so
+         libc_v234=2.34
+         # Define Tcmalloc Libs arrays
+         TCMALLOC_LIBS=("libtcmalloc(_minimal|)\.so\.\d" "libtcmalloc\.so\.\d")
+         # Traversal array
+         for lib in "${TCMALLOC_LIBS[@]}"
+         do
+             # Determine which type of tcmalloc library the library supports
+             TCMALLOC="$(PATH=/sbin:/usr/sbin:$PATH ldconfig -p | grep -P $lib | head -n 1)"
+             TC_INFO=(${TCMALLOC//=>/})
+             if [[ ! -z "${TC_INFO}" ]]; then
+                 echo "Check TCMalloc: ${TC_INFO}"
+                 # Determine if the library is linked to libpthread and resolve undefined symbol: pthread_key_create
+                 if [ $(echo "$libc_vernum < $libc_v234" | bc) -eq 1 ]; then
+                     # glibc < 2.34 pthread_key_create into libpthread.so. check linking libpthread.so...
+                     if ldd ${TC_INFO[2]} | grep -q 'libpthread'; then
+                         echo "$TC_INFO is linked with libpthread,execute LD_PRELOAD=${TC_INFO[2]}"
+                         # set fullpath LD_PRELOAD (To be on the safe side)
+                         export LD_PRELOAD="${TC_INFO[2]}"
+                         break
+                     else
+                         echo "$TC_INFO is not linked with libpthread will trigger undefined symbol: pthread_Key_create error"
+                     fi
+                 else
+                     # Version 2.34 of libc.so (glibc) includes the pthread library IN GLIBC. (USE ubuntu 22.04 and modern linux system and WSL)
+                     # libc.so(glibc) is linked with a library that works in ALMOST ALL Linux userlands. SO NO CHECK!
+                     echo "$TC_INFO is linked with libc.so,execute LD_PRELOAD=${TC_INFO[2]}"
+                     # set fullpath LD_PRELOAD (To be on the safe side)
+                     export LD_PRELOAD="${TC_INFO[2]}"
+                     break
+                 fi
+             fi
+         done
+         if [[ -z "${LD_PRELOAD}" ]]; then
+             printf "\e[1m\e[31mCannot locate TCMalloc. Do you have tcmalloc or google-perftool installed on your system? (improves CPU memory usage)\e[0m\n"
+         fi
+     fi
+ }
+
+ KEEP_GOING=1
+ export SD_WEBUI_RESTART=tmp/restart
+ while [[ "$KEEP_GOING" -eq "1" ]]; do
+     if [[ ! -z "${ACCELERATE}" ]] && [ ${ACCELERATE}="True" ] && [ -x "$(command -v accelerate)" ]; then
+         printf "\n%s\n" "${delimiter}"
+         printf "Accelerating launch.py..."
+         printf "\n%s\n" "${delimiter}"
+         prepare_tcmalloc
+         accelerate launch --num_cpu_threads_per_process=6 "${LAUNCH_SCRIPT}" "$@"
+     else
+         printf "\n%s\n" "${delimiter}"
+         printf "Launching launch.py..."
+         printf "\n%s\n" "${delimiter}"
+         prepare_tcmalloc
+         "${python_cmd}" -u "${LAUNCH_SCRIPT}" "$@"
+     fi
+
+     if [[ ! -f tmp/restart ]]; then
+         KEEP_GOING=0
+     fi
+ done
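
prepare_tcmalloc() above hinges on one glibc detail: from glibc 2.34 onward, libpthread is merged into libc.so, so the libpthread-linkage check is only needed on older systems. A hedged Python sketch of just that version gate, where platform.libc_ver() is standard library and the 2.34 threshold comes from the script's libc_v234 variable:

# Sketch of the glibc version gate from prepare_tcmalloc() above: on
# glibc >= 2.34, pthread_key_create lives in libc.so itself, so checking
# whether the TCMalloc library links libpthread is unnecessary.
import platform

libname, ver = platform.libc_ver()
if libname == "glibc" and ver:
    major, minor = (int(part) for part in ver.split(".")[:2])
    needs_pthread_check = (major, minor) < (2, 34)
    print(f"glibc {ver}: libpthread linkage check needed: {needs_pthread_check}")
else:
    print("not glibc (or undetectable); the TCMalloc preload logic does not apply")
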
.ipynb_checkpoints/webui-user-checkpoint.bat ADDED
@@ -0,0 +1,18 @@
+ @echo off
+
+ set PYTHON=
+ set GIT=
+ set VENV_DIR=
+ set COMMANDLINE_ARGS=
+
+ @REM Uncomment following code to reference an existing A1111 checkout.
+ @REM set A1111_HOME=Your A1111 checkout dir
+ @REM
+ @REM set VENV_DIR=%A1111_HOME%/venv
+ @REM set COMMANDLINE_ARGS=%COMMANDLINE_ARGS% ^
+ @REM  --ckpt-dir %A1111_HOME%/models/Stable-diffusion ^
+ @REM  --hypernetwork-dir %A1111_HOME%/models/hypernetworks ^
+ @REM  --embeddings-dir %A1111_HOME%/embeddings ^
+ @REM  --lora-dir %A1111_HOME%/models/Lora
+
+ call webui.bat
.ipynb_checkpoints/webui-user-checkpoint.sh ADDED
@@ -0,0 +1,48 @@
+ #!/bin/bash
+ #########################################################
+ # Uncomment and change the variables below to your need:#
+ #########################################################
+
+ # Install directory without trailing slash
+ #install_dir="/home/$(whoami)"
+
+ # Name of the subdirectory
+ #clone_dir="stable-diffusion-webui"
+
+ # Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention"
+ #export COMMANDLINE_ARGS=""
+
+ # python3 executable
+ #python_cmd="python3"
+
+ # git executable
+ #export GIT="git"
+
+ # python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv)
+ #venv_dir="venv"
+
+ # script to launch to start the app
+ #export LAUNCH_SCRIPT="launch.py"
+
+ # install command for torch
+ #export TORCH_COMMAND="pip install torch==1.12.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113"
+
+ # Requirements file to use for stable-diffusion-webui
+ #export REQS_FILE="requirements_versions.txt"
+
+ # Fixed git repos
+ #export K_DIFFUSION_PACKAGE=""
+ #export GFPGAN_PACKAGE=""
+
+ # Fixed git commits
+ #export STABLE_DIFFUSION_COMMIT_HASH=""
+ #export CODEFORMER_COMMIT_HASH=""
+ #export BLIP_COMMIT_HASH=""
+
+ # Uncomment to enable accelerated launch
+ #export ACCELERATE="True"
+
+ # Uncomment to disable TCMalloc
+ #export NO_TCMALLOC="True"
+
+ ###########################################
.pylintrc ADDED
@@ -0,0 +1,3 @@
+ # See https://pylint.pycqa.org/en/latest/user_guide/messages/message_control.html
+ [MESSAGES CONTROL]
+ disable=C,R,W,E,I
CHANGELOG.md ADDED
@@ -0,0 +1,1085 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## 1.10.1
2
+
3
+ ### Bug Fixes:
4
+ * fix image upscale on cpu ([#16275](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16275))
5
+
6
+
7
+ ## 1.10.0
8
+
9
+ ### Features:
10
+ * A lot of performance improvements (see below in Performance section)
11
+ * Stable Diffusion 3 support ([#16030](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16030), [#16164](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16164), [#16212](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16212))
12
+ * Recommended Euler sampler; DDIM and other timestamp samplers currently not supported
13
+ * T5 text model is disabled by default, enable it in settings
14
+ * New schedulers:
15
+ * Align Your Steps ([#15751](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15751))
16
+ * KL Optimal ([#15608](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15608))
17
+ * Normal ([#16149](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16149))
18
+ * DDIM ([#16149](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16149))
19
+ * Simple ([#16142](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16142))
20
+ * Beta ([#16235](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16235))
21
+ * New sampler: DDIM CFG++ ([#16035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16035))
22
+
23
+ ### Minor:
24
+ * Option to skip CFG on early steps ([#15607](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15607))
25
+ * Add --models-dir option ([#15742](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15742))
26
+ * Allow mobile users to open context menu by using two fingers press ([#15682](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15682))
27
+ * Infotext: add Lora name as TI hashes for bundled Textual Inversion ([#15679](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15679))
28
+ * Check model's hash after downloading it to prevent corruped downloads ([#15602](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15602))
29
+ * More extension tag filtering options ([#15627](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15627))
30
+ * When saving AVIF, use JPEG's quality setting ([#15610](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15610))
31
+ * Add filename pattern: `[basename]` ([#15978](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15978))
32
+ * Add option to enable clip skip for clip L on SDXL ([#15992](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15992))
33
+ * Option to prevent screen sleep during generation ([#16001](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16001))
34
+ * ToggleLivePriview button in image viewer ([#16065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16065))
35
+ * Remove ui flashing on reloading and fast scrollong ([#16153](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16153))
36
+ * option to disable save button log.csv ([#16242](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16242))
37
+
38
+ ### Extensions and API:
39
+ * Add process_before_every_sampling hook ([#15984](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15984))
40
+ * Return HTTP 400 instead of 404 on invalid sampler error ([#16140](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16140))
41
+
42
+ ### Performance:
43
+ * [Performance 1/6] use_checkpoint = False ([#15803](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15803))
44
+ * [Performance 2/6] Replace einops.rearrange with torch native ops ([#15804](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15804))
45
+ * [Performance 4/6] Precompute is_sdxl_inpaint flag ([#15806](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15806))
46
+ * [Performance 5/6] Prevent unnecessary extra networks bias backup ([#15816](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15816))
47
+ * [Performance 6/6] Add --precision half option to avoid casting during inference ([#15820](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15820))
48
+ * [Performance] LDM optimization patches ([#15824](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15824))
49
+ * [Performance] Keep sigmas on CPU ([#15823](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15823))
50
+ * Check for nans in unet only once, after all steps have been completed
51
+ * Added pption to run torch profiler for image generation
52
+
53
+ ### Bug Fixes:
54
+ * Fix for grids without comprehensive infotexts ([#15958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15958))
55
+ * feat: lora partial update precede full update ([#15943](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15943))
56
+ * Fix bug where file extension had an extra '.' under some circumstances ([#15893](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15893))
57
+ * Fix corrupt model initial load loop ([#15600](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15600))
58
+ * Allow old sampler names in API ([#15656](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15656))
59
+ * more old sampler scheduler compatibility ([#15681](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15681))
60
+ * Fix Hypertile xyz ([#15831](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15831))
61
+ * XYZ CSV skipinitialspace ([#15832](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15832))
62
+ * fix soft inpainting on mps and xpu, torch_utils.float64 ([#15815](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15815))
63
+ * fix extention update when not on main branch ([#15797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15797))
64
+ * update pickle safe filenames
65
+ * use relative path for webui-assets css ([#15757](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15757))
66
+ * When creating a virtual environment, upgrade pip in webui.bat/webui.sh ([#15750](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15750))
67
+ * Fix AttributeError ([#15738](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15738))
68
+ * use script_path for webui root in launch_utils ([#15705](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15705))
69
+ * fix extra batch mode P Transparency ([#15664](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15664))
70
+ * use gradio theme colors in css ([#15680](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15680))
71
+ * Fix dragging text within prompt input ([#15657](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15657))
72
+ * Add correct mimetype for .mjs files ([#15654](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15654))
73
+ * QOL Items - handle metadata issues more cleanly for SD models, Loras and embeddings ([#15632](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15632))
74
+ * replace wsl-open with wslpath and explorer.exe ([#15968](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15968))
75
+ * Fix SDXL Inpaint ([#15976](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15976))
76
+ * multi size grid ([#15988](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15988))
77
+ * fix Replace preview ([#16118](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16118))
78
+ * Possible fix of wrong scale in weight decomposition ([#16151](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16151))
79
+ * Ensure use of python from venv on Mac and Linux ([#16116](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16116))
80
+ * Prioritize python3.10 over python3 if both are available on Linux and Mac (with fallback) ([#16092](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16092))
81
+ * stoping generation extras ([#16085](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16085))
82
+ * Fix SD2 loading ([#16078](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16078), [#16079](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16079))
83
+ * fix infotext Lora hashes for hires fix different lora ([#16062](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16062))
84
+ * Fix sampler scheduler autocorrection warning ([#16054](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16054))
85
+ * fix ui flashing on reloading and fast scrollong ([#16153](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16153))
86
+ * fix upscale logic ([#16239](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16239))
87
+ * [bug] do not break progressbar on non-job actions (add wrap_gradio_call_no_job) ([#16202](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16202))
88
+ * fix OSError: cannot write mode P as JPEG ([#16194](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16194))
89
+
90
+ ### Other:
91
+ * fix changelog #15883 -> #15882 ([#15907](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15907))
92
+ * ReloadUI backgroundColor --background-fill-primary ([#15864](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15864))
93
+ * Use different torch versions for Intel and ARM Macs ([#15851](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15851))
94
+ * XYZ override rework ([#15836](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15836))
95
+ * scroll extensions table on overflow ([#15830](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15830))
96
+ * img2img batch upload method ([#15817](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15817))
97
+ * chore: sync v1.8.0 packages according to changelog ([#15783](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15783))
98
+ * Add AVIF MIME type support to mimetype definitions ([#15739](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15739))
99
+ * Update imageviewer.js ([#15730](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15730))
100
+ * no-referrer ([#15641](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15641))
101
+ * .gitignore trace.json ([#15980](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15980))
102
+ * Bump spandrel to 0.3.4 ([#16144](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16144))
103
+ * Defunct --max-batch-count ([#16119](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16119))
104
+ * docs: update bug_report.yml ([#16102](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16102))
105
+ * Maintaining Project Compatibility for Python 3.9 Users Without Upgrade Requirements. ([#16088](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16088), [#16169](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16169), [#16192](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16192))
106
+ * Update torch for ARM Macs to 2.3.1 ([#16059](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16059))
107
+ * remove deprecated setting dont_fix_second_order_samplers_schedule ([#16061](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16061))
108
+ * chore: fix typos ([#16060](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16060))
109
+ * shlex.join launch args in console log ([#16170](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16170))
110
+ * activate venv .bat ([#16231](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16231))
111
+ * add ids to the resize tabs in img2img ([#16218](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16218))
112
+ * update installation guide linux ([#16178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16178))
113
+ * Robust sysinfo ([#16173](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16173))
114
+ * do not send image size on paste inpaint ([#16180](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16180))
115
+ * Fix noisy DS_Store files for MacOS ([#16166](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16166))
116
+
117
+
118
+ ## 1.9.4
119
+
120
+ ### Bug Fixes:
121
+ * pin setuptools version to fix the startup error ([#15882](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15882))
122
+
123
+ ## 1.9.3
124
+
125
+ ### Bug Fixes:
126
+ * fix get_crop_region_v2 ([#15594](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15594))
127
+
128
+ ## 1.9.2
129
+
130
+ ### Extensions and API:
131
+ * restore 1.8.0-style naming of scripts
132
+
133
+ ## 1.9.1
134
+
135
+ ### Minor:
136
+ * Add avif support ([#15582](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15582))
137
+ * Add filename patterns: `[sampler_scheduler]` and `[scheduler]` ([#15581](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15581))
138
+
139
+ ### Extensions and API:
140
+ * undo adding scripts to sys.modules
141
+ * Add schedulers API endpoint ([#15577](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15577))
142
+ * Remove API upscaling factor limits ([#15560](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15560))
143
+
144
+ ### Bug Fixes:
145
+ * Fix images do not match / Coordinate 'right' is less than 'left' ([#15534](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15534))
146
+ * fix: remove_callbacks_for_function should also remove from the ordered map ([#15533](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15533))
147
+ * fix x1 upscalers ([#15555](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15555))
148
+ * Fix cls.__module__ value in extension script ([#15532](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15532))
149
+ * fix typo in function call (eror -> error) ([#15531](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15531))
150
+
151
+ ### Other:
152
+ * Hide 'No Image data blocks found.' message ([#15567](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15567))
153
+ * Allow webui.sh to be runnable from arbitrary directories containing a .git file ([#15561](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15561))
154
+ * Compatibility with Debian 11, Fedora 34+ and openSUSE 15.4+ ([#15544](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15544))
155
+ * numpy DeprecationWarning product -> prod ([#15547](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15547))
156
+ * get_crop_region_v2 ([#15583](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15583), [#15587](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15587))
157
+
158
+
159
+ ## 1.9.0
160
+
161
+ ### Features:
162
+ * Make refiner switchover based on model timesteps instead of sampling steps ([#14978](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14978))
163
+ * add an option to have old-style directory view instead of tree view; stylistic changes for extra network sorting/search controls
164
+ * add UI for reordering callbacks, support for specifying callback order in extension metadata ([#15205](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15205))
165
+ * Sgm uniform scheduler for SDXL-Lightning models ([#15325](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15325))
166
+ * Scheduler selection in main UI ([#15333](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15333), [#15361](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15361), [#15394](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15394))
167
+
168
+ ### Minor:
169
+ * "open images directory" button now opens the actual dir ([#14947](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14947))
170
+ * Support inference with LyCORIS BOFT networks ([#14871](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14871), [#14973](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14973))
171
+ * make extra network card description plaintext by default, with an option to re-enable HTML as it was
172
+ * resize handle for extra networks ([#15041](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15041))
173
+ * cmd args: `--unix-filenames-sanitization` and `--filenames-max-length` ([#15031](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15031))
174
+ * show extra networks parameters in HTML table rather than raw JSON ([#15131](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15131))
175
+ * Add DoRA (weight-decompose) support for LoRA/LoHa/LoKr ([#15160](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15160), [#15283](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15283))
176
+ * Add '--no-prompt-history' cmd args for disable last generation prompt history ([#15189](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15189))
177
+ * update preview on Replace Preview ([#15201](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15201))
178
+ * only fetch updates for extensions' active git branches ([#15233](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15233))
179
+ * put upscale postprocessing UI into an accordion ([#15223](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15223))
180
+ * Support dragdrop for URLs to read infotext ([#15262](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15262))
181
+ * use diskcache library for caching ([#15287](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15287), [#15299](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15299))
182
+ * Allow PNG-RGBA for Extras Tab ([#15334](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15334))
183
+ * Support cover images embedded in safetensors metadata ([#15319](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15319))
184
+ * faster interrupt when using NN upscale ([#15380](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15380))
185
+ * Extras upscaler: an input field to limit maximul side length for the output image ([#15293](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15293), [#15415](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15415), [#15417](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15417), [#15425](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15425))
186
+ * add an option to hide postprocessing options in Extras tab
187
+
188
+ ### Extensions and API:
189
+ * ResizeHandleRow - allow overriden column scale parametr ([#15004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15004))
190
+ * call script_callbacks.ui_settings_callback earlier; fix extra-options-section built-in extension killing the ui if using a setting that doesn't exist
191
+ * make it possible to use zoom.js outside webui context ([#15286](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15286), [#15288](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15288))
192
+ * allow variants for extension name in metadata.ini ([#15290](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15290))
193
+ * make reloading UI scripts optional when doing Reload UI, and off by default
194
+ * put request: gr.Request at start of img2img function similar to txt2img
195
+ * open_folder as util ([#15442](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15442))
196
+ * make it possible to import extensions' script files as `import scripts.<filename>` ([#15423](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15423))
197
+
198
+ ### Performance:
199
+ * performance optimization for extra networks HTML pages
200
+ * optimization for extra networks filtering
201
+ * optimization for extra networks sorting
202
+
203
+ ### Bug Fixes:
204
+ * prevent escape button causing an interrupt when no generation has been made yet
205
+ * [bug] avoid doble upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966))
206
+ * possible fix for reload button not appearing in some cases for extra networks.
207
+ * fix: the `split_threshold` parameter does not work when running Split oversized images ([#15006](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15006))
208
+ * Fix resize-handle visability for vertical layout (mobile) ([#15010](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15010))
209
+ * register_tmp_file also for mtime ([#15012](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15012))
210
+ * Protect alphas_cumprod during refiner switchover ([#14979](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14979))
211
+ * Fix EXIF orientation in API image loading ([#15062](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15062))
212
+ * Only override emphasis if actually used in prompt ([#15141](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15141))
213
+ * Fix emphasis infotext missing from `params.txt` ([#15142](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15142))
214
+ * fix extract_style_text_from_prompt #15132 ([#15135](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15135))
215
+ * Fix Soft Inpaint for AnimateDiff ([#15148](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15148))
216
+ * edit-attention: deselect surrounding whitespace ([#15178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15178))
217
+ * chore: fix font not loaded ([#15183](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15183))
218
+ * use natural sort in extra networks when ordering by path
219
+ * Fix built-in lora system bugs caused by torch.nn.MultiheadAttention ([#15190](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15190))
220
+ * Avoid error from None in get_learned_conditioning ([#15191](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15191))
221
+ * Add entry to MassFileLister after writing metadata ([#15199](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15199))
222
+ * fix issue with Styles when Hires prompt is used ([#15269](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15269), [#15276](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15276))
223
+ * Strip comments from hires fix prompt ([#15263](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15263))
224
+ * Make imageviewer event listeners browser consistent ([#15261](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15261))
225
+ * Fix AttributeError in OFT when trying to get MultiheadAttention weight ([#15260](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15260))
226
+ * Add missing .mean() back ([#15239](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15239))
227
+ * fix "Restore progress" button ([#15221](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15221))
228
+ * fix ui-config for InputAccordion [custom_script_source] ([#15231](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15231))
229
+ * handle 0 wheel deltaY ([#15268](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15268))
230
+ * prevent alt menu for firefox ([#15267](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15267))
231
+ * fix: fix syntax errors ([#15179](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15179))
232
+ * restore outputs path ([#15307](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15307))
233
+ * Escape btn_copy_path filename ([#15316](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15316))
234
+ * Fix extra networks buttons when filename contains an apostrophe ([#15331](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15331))
235
+ * escape brackets in lora random prompt generator ([#15343](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15343))
236
+ * fix: Python version check for PyTorch installation compatibility ([#15390](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15390))
237
+ * fix typo in call_queue.py ([#15386](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15386))
238
+ * fix: when finding an already_loaded model, remove the loaded model by array index ([#15382](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15382))
239
+ * minor bug fix of sd model memory management ([#15350](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15350))
240
+ * Fix CodeFormer weight ([#15414](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15414))
241
+ * Fix: Remove script callbacks in ordered_callbacks_map ([#15428](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15428))
242
+ * fix limited file write (thanks, Sylwia)
243
+ * Fix extra-single-image API failing to upscale ([#15465](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15465))
244
+ * error handling paste_field callables ([#15470](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15470))
245
+
246
+ ### Hardware:
247
+ * Add training support and change lspci for Ascend NPU ([#14981](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14981))
248
+ * Update to ROCm 5.7 and PyTorch ([#14820](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14820))
249
+ * Better workaround for Navi1, removing --pre for Navi3 ([#15224](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15224))
250
+ * Ascend NPU wiki page ([#15228](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15228))
251
+
252
+ ### Other:
253
+ * Update comment for Pad prompt/negative prompt v0 to add a warning about truncation, make it override the v1 implementation
254
+ * support resizable columns for touch (tablets) ([#15002](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15002))
255
+ * Fix #14591 using translated content to do categories mapping ([#14995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14995))
256
+ * Use `absolute` path for normalized filepath ([#15035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15035))
257
+ * resizeHandle handle double tap ([#15065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15065))
258
+ * --dat-models-path cmd flag ([#15039](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15039))
259
+ * Add a direct link to the binary release ([#15059](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15059))
260
+ * upscaler_utils: Reduce logging ([#15084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15084))
261
+ * Fix various typos with crate-ci/typos ([#15116](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15116))
262
+ * fix_jpeg_live_preview ([#15102](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15102))
263
+ * [alternative fix] can't load webui if selected wrong extra option in ui ([#15121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15121))
264
+ * Error handling for unsupported transparency ([#14958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14958))
265
+ * Add model description to searched terms ([#15198](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15198))
266
+ * bump action version ([#15272](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15272))
267
+ * PEP 604 annotations ([#15259](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15259))
268
+ * Automatically Set the Scale by value when user selects an Upscale Model ([#15244](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15244))
269
+ * move postprocessing-for-training into builtin extensions ([#15222](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15222))
270
+ * type hinting in shared.py ([#15211](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15211))
271
+ * update ruff to 0.3.3
272
+ * Update pytorch lightning utilities ([#15310](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15310))
273
+ * Add Size as an XYZ Grid option ([#15354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15354))
274
+ * Use HF_ENDPOINT variable for HuggingFace domain with default ([#15443](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15443)); a usage sketch follows this list
275
+ * re-add update_file_entry ([#15446](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15446))
276
+ * create_infotext allow index and callable, re-work Hires prompt infotext ([#15460](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15460))
277
+ * update restricted_opts to include more options for --hide-ui-dir-config ([#15492](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15492))
278
+
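+ 
+ To illustrate the `HF_ENDPOINT` entry in the list above: the variable redirects HuggingFace downloads to an alternative endpoint and presumably needs to be set before the relevant libraries are imported, since the endpoint is typically read once at import time. The mirror URL below is a placeholder, not a real service:
+ 
+ ```python
+ import os
+ 
+ # Keep the stock domain unless one is already set (this mirrors the
+ # "with default" behavior described in the entry above).
+ os.environ.setdefault("HF_ENDPOINT", "https://huggingface.co")
+ 
+ # Or point at a mirror (placeholder URL):
+ # os.environ["HF_ENDPOINT"] = "https://example-hf-mirror.invalid"
+ ```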
279
+
280
+ ## 1.8.0
281
+
282
+ ### Features:
283
+ * Update torch to version 2.1.2
284
+ * Soft Inpainting ([#14208](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14208))
285
+ * FP8 support ([#14031](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14031), [#14327](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14327))
286
+ * Support for SDXL-Inpaint Model ([#14390](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14390))
287
+ * Use Spandrel for upscaling and face restoration architectures ([#14425](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14425), [#14467](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14467), [#14473](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14473), [#14474](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14474), [#14477](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14477), [#14476](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14476), [#14484](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14484), [#14500](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14500), [#14501](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14501), [#14504](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14504), [#14524](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14524), [#14809](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14809))
288
+ * Automatic backwards version compatibility (when loading infotexts from old images with program version specified, will add compatibility settings)
289
+ * Implement zero terminal SNR noise schedule option (**[SEED BREAKING CHANGE](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Seed-breaking-changes#180-dev-170-225-2024-01-01---zero-terminal-snr-noise-schedule-option)**, [#14145](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14145), [#14979](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14979))
290
+ * Add a [✨] button to run hires fix on selected image in the gallery (with help from [#14598](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14598), [#14626](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14626), [#14728](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14728))
291
+ * [Separate assets repository](https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets); serve fonts locally rather than from google's servers
292
+ * Official LCM Sampler Support ([#14583](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14583))
293
+ * Add support for DAT upscaler models ([#14690](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14690), [#15039](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15039))
294
+ * Extra Networks Tree View ([#14588](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14588), [#14900](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14900))
295
+ * NPU Support ([#14801](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14801))
296
+ * Prompt comments support
297
+
298
+ ### Minor:
299
+ * Allow pasting in WIDTHxHEIGHT strings into the width/height fields ([#14296](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14296))
300
+ * add option: Live preview in full page image viewer ([#14230](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14230), [#14307](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14307))
301
+ * Add keyboard shortcuts for generate/skip/interrupt ([#14269](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14269))
302
+ * Better TCMALLOC support on different platforms ([#14227](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14227), [#14883](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14883), [#14910](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14910))
303
+ * Lora not found warning ([#14464](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14464))
304
+ * Adding negative prompts to Loras in extra networks ([#14475](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14475))
305
+ * xyz_grid: allow varying the seed along an axis separate from axis options ([#12180](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12180))
306
+ * option to convert VAE to bfloat16 (implementation of [#9295](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9295))
307
+ * Better IPEX support ([#14229](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14229), [#14353](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14353), [#14559](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14559), [#14562](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14562), [#14597](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14597))
308
+ * Option to interrupt after current generation rather than immediately ([#13653](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13653), [#14659](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14659))
309
+ * Fullscreen Preview control fading/disable ([#14291](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14291))
310
+ * Finer settings freezing control ([#13789](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13789))
311
+ * Increase Upscaler Limits ([#14589](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14589))
312
+ * Adjust brush size with hotkeys ([#14638](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14638))
313
+ * Add checkpoint info to csv log file when saving images ([#14663](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14663))
314
+ * Make more columns resizable ([#14740](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14740), [#14884](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14884))
315
+ * Add an option to not overlay original image for inpainting for #14727
316
+ * Add Pad conds v0 option to support same generation with DDIM as before 1.6.0
317
+ * Add "Interrupting..." placeholder.
318
+ * Button for refresh extensions list ([#14857](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14857))
319
+ * Add an option to disable normalization after calculating emphasis. ([#14874](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14874))
320
+ * When counting tokens, also include enabled styles (can be disabled in settings to revert to previous behavior)
321
+ * Configuration for the [📂] button for image gallery ([#14947](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14947))
322
+ * Support inference with LyCORIS BOFT networks ([#14871](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14871), [#14973](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14973))
323
+ * support resizable columns for touch (tablets) ([#15002](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15002))
324
+
325
+ ### Extensions and API:
326
+ * Removed packages from requirements: basicsr, gfpgan, realesrgan; as well as their dependencies: absl-py, addict, beautifulsoup4, future, gdown, grpcio, importlib-metadata, lmdb, lpips, Markdown, platformdirs, PySocks, soupsieve, tb-nightly, tensorboard-data-server, tomli, Werkzeug, yapf, zipp
327
+ * Enable task ids for API ([#14314](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14314))
328
+ * add override_settings support for infotext API
329
+ * rename generation_parameters_copypaste module to infotext_utils
330
+ * prevent crash due to Script __init__ exception ([#14407](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14407))
331
+ * Bump numpy to 1.26.2 ([#14471](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14471))
332
+ * Add utility to inspect a model's dtype/device ([#14478](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14478))
333
+ * Implement general forward method for all methods in built-in lora ext ([#14547](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14547))
334
+ * Execute model_loaded_callback after moving to target device ([#14563](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14563))
335
+ * Add self to CFGDenoiserParams ([#14573](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14573))
336
+ * Allow TLS with API only mode (--nowebui) ([#14593](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14593))
337
+ * New callback: postprocess_image_after_composite ([#14657](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14657))
338
+ * modules/api/api.py: add api endpoint to refresh embeddings list ([#14715](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14715)); a usage sketch follows this list
339
+ * set_named_arg ([#14773](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14773))
340
+ * add before_token_counter callback and use it for prompt comments
341
+ * ResizeHandleRow - allow overridden column scale parameter ([#15004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15004))
342
+
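+ 
+ A small usage sketch for the embeddings-refresh endpoint mentioned in the list above, assuming a local instance started with `--api` on the default `http://127.0.0.1:7860`; host, port, and the exact route are assumptions and should be checked against the instance's `/docs` page:
+ 
+ ```python
+ import requests
+ 
+ # Ask the server to re-scan the embeddings directory so newly added
+ # textual inversion files are picked up without a restart.
+ resp = requests.post("http://127.0.0.1:7860/sdapi/v1/refresh-embeddings")
+ resp.raise_for_status()
+ ```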
343
+ ### Performance:
344
+ * Massive performance improvement for extra networks directories with a huge number of files in them in an attempt to tackle #14507 ([#14528](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14528))
345
+ * Reduce unnecessary re-indexing extra networks directory ([#14512](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14512))
346
+ * Avoid unnecessary `isfile`/`exists` calls ([#14527](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14527))
347
+
348
+ ### Bug Fixes:
349
+ * fix multiple bugs related to styles multi-file support ([#14203](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14203), [#14276](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14276), [#14707](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14707))
350
+ * Lora fixes ([#14300](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14300), [#14237](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14237), [#14546](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14546), [#14726](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14726))
351
+ * Re-add setting lost as part of e294e46 ([#14266](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14266))
352
+ * fix extras caption BLIP ([#14330](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14330))
353
+ * include infotext into saved init image for img2img ([#14452](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14452))
354
+ * xyz grid handle axis_type is None ([#14394](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14394))
355
+ * Fix IPv6 functionality in webui.py when no webui argument is passed ([#14354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14354))
356
+ * fix API thread safe issues of txt2img and img2img ([#14421](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14421))
357
+ * handle selectable script_index is None ([#14487](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14487))
358
+ * handle config.json failed to load ([#14525](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14525), [#14767](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14767))
359
+ * paste infotext cast int as float ([#14523](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14523))
360
+ * Ensure GRADIO_ANALYTICS_ENABLED is set early enough ([#14537](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14537))
361
+ * Fix logging configuration again ([#14538](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14538))
362
+ * Handle CondFunc exception when resolving attributes ([#14560](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14560))
363
+ * Fix extras big batch crashes ([#14699](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14699))
364
+ * Fix using wrong model caused by alias ([#14655](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14655))
365
+ * Add # to the invalid_filename_chars list ([#14640](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14640))
366
+ * Fix extension check for requirements ([#14639](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14639))
367
+ * Fix tab indexes are reset after restart UI ([#14637](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14637))
368
+ * Fix nested manual cast ([#14689](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14689))
369
+ * Keep postprocessing upscale selected tab after restart ([#14702](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14702))
370
+ * XYZ grid: filter out blank vals when axis is int or float type (like int axis seed) ([#14754](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14754))
371
+ * fix CLIP Interrogator topN regex ([#14775](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14775))
372
+ * Fix dtype error in MHA layer/change dtype checking mechanism for manual cast ([#14791](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14791))
373
+ * catch load style.csv error ([#14814](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14814))
374
+ * fix error when editing extra networks card
375
+ * fix extra networks metadata failing to work properly when you create the .json file with metadata for the first time.
376
+ * util.walk_files extensions case insensitive ([#14879](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14879))
377
+ * if extensions page not loaded, prevent apply ([#14873](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14873))
378
+ * call the right function for token counter in img2img
379
+ * Fix the bugs that search/reload will disappear when using other ExtraNetworks extensions ([#14939](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14939))
380
+ * Gracefully handle mtime read exception from cache ([#14933](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14933))
381
+ * Only trigger interrupt on `Esc` when interrupt button visible ([#14932](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14932))
382
+ * make the "Disable prompt token counters" option actually disable token counting rather than just hide results
383
+ * avoid double upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966))
384
+ * Fix #14591 using translated content to do categories mapping ([#14995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14995))
385
+ * fix: the `split_threshold` parameter does not work when running Split oversized images ([#15006](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15006))
386
+ * Fix resize-handle for mobile ([#15010](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15010), [#15065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15065))
387
+
388
+ ### Other:
389
+ * Assign id for "extra_options". Replace numeric field with slider. ([#14270](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14270))
390
+ * change state dict comparison to ref compare ([#14216](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14216))
391
+ * Bump torch-rocm to 5.6/5.7 ([#14293](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14293))
392
+ * Base output path off data path ([#14446](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14446))
393
+ * reorder training preprocessing modules in extras tab ([#14367](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14367))
394
+ * Remove `cleanup_models` code ([#14472](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14472))
395
+ * only rewrite ui-config when there is change ([#14352](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14352))
396
+ * Fix lint issue from 501993eb ([#14495](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14495))
397
+ * Update README.md ([#14548](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14548))
398
+ * hires button, fix seeds ()
399
+ * Logging: set formatter correctly for fallback logger too ([#14618](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14618))
400
+ * Read generation info from infotexts rather than json for internal needs (save, extract seed from generated pic) ([#14645](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14645))
401
+ * improve get_crop_region ([#14709](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14709))
402
+ * Bump safetensors' version to 0.4.2 ([#14782](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14782))
403
+ * add tooltip create_submit_box ([#14803](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14803))
404
+ * extensions tab table row hover highlight ([#14885](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14885))
405
+ * Always add timestamp to displayed image ([#14890](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14890))
406
+ * Added core.filemode=false so doesn't track changes in file permission… ([#14930](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14930))
407
+ * Normalize command-line argument paths ([#14934](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14934), [#15035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15035))
408
+ * Use original App Title in progress bar ([#14916](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14916))
409
+ * register_tmp_file also for mtime ([#15012](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15012))
410
+
411
+ ## 1.7.0
412
+
413
+ ### Features:
414
+ * settings tab rework: add search field, add categories, split UI settings page into many
415
+ * add altdiffusion-m18 support ([#13364](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13364))
416
+ * support inference with LyCORIS GLora networks ([#13610](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13610))
417
+ * add lora-embedding bundle system ([#13568](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13568))
418
+ * option to move prompt from top row into generation parameters
419
+ * add support for SSD-1B ([#13865](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13865))
420
+ * support inference with OFT networks ([#13692](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13692))
421
+ * script metadata and DAG sorting mechanism ([#13944](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13944))
422
+ * support HyperTile optimization ([#13948](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13948))
423
+ * add support for SD 2.1 Turbo ([#14170](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14170))
424
+ * remove Train->Preprocessing tab and put all its functionality into Extras tab
425
+ * initial IPEX support for Intel Arc GPU ([#14171](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14171))
426
+
427
+ ### Minor:
428
+ * allow reading model hash from images in img2img batch mode ([#12767](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12767))
429
+ * add option to align with sgm repo's sampling implementation ([#12818](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818))
430
+ * extra field for lora metadata viewer: `ss_output_name` ([#12838](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12838))
431
+ * add action in settings page to calculate all SD checkpoint hashes ([#12909](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12909))
432
+ * add button to copy prompt to style editor ([#12975](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12975))
433
+ * add --skip-load-model-at-start option ([#13253](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13253))
434
+ * write infotext to gif images
435
+ * read infotext from gif images ([#13068](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13068))
436
+ * allow configuring the initial state of InputAccordion in ui-config.json ([#13189](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13189))
437
+ * allow editing whitespace delimiters for ctrl+up/ctrl+down prompt editing ([#13444](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13444))
438
+ * prevent accidentally closing popup dialogs ([#13480](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13480))
439
+ * added an option to choose whether to play the notification sound ([#13631](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13631))
440
+ * show the preview image in the full screen image viewer if available ([#13459](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13459))
441
+ * support for webui.settings.bat ([#13638](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13638))
442
+ * add an option to not print stack traces on ctrl+c
443
+ * start/restart generation by Ctrl (Alt) + Enter ([#13644](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13644))
444
+ * update prompts_from_file script to allow concatenating entries with the general prompt ([#13733](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13733))
445
+ * added a visible checkbox to input accordion
446
+ * added an option to hide all txt2img/img2img parameters in an accordion ([#13826](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13826))
447
+ * added 'Path' sorting option for Extra network cards ([#13968](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13968))
448
+ * enable prompt hotkeys in style editor ([#13931](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13931))
449
+ * option to show batch img2img results in UI ([#14009](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14009))
450
+ * infotext updates: add option to disregard certain infotext fields, add option to not include VAE in infotext, add explanation to infotext settings page, move some options to infotext settings page
451
+ * add FP32 fallback support on sd_vae_approx ([#14046](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046))
452
+ * support XYZ scripts / split hires path from unet ([#14126](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14126))
453
+ * allow use of multiple styles csv files ([#14125](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14125))
454
+ * make extra network card description plaintext by default, with an option (Treat card description as HTML) to re-enable HTML as it was (originally by [#13241](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13241))
455
+
456
+ ### Extensions and API:
457
+ * update gradio to 3.41.2
458
+ * support installed extensions list api ([#12774](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12774))
459
+ * update pnginfo API to return dict with parsed values
460
+ * add noisy latent to `ExtraNoiseParams` for callback ([#12856](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12856))
461
+ * show extension datetime in UTC ([#12864](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12864), [#12865](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12865), [#13281](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13281))
462
+ * add an option to choose how to combine hires fix and refiner
463
+ * include program version in info response. ([#13135](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13135))
464
+ * sd_unet support for SDXL
465
+ * patch DDPM.register_betas so that users can put given_betas in model yaml ([#13276](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13276))
466
+ * xyz_grid: add prepare ([#13266](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13266))
467
+ * allow multiple localization files with same language in extensions ([#13077](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13077))
468
+ * add onEdit function for js and rework token-counter.js to use it
469
+ * fix the key error exception when processing override_settings keys ([#13567](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13567))
470
+ * ability for extensions to return custom data via api in response.images ([#13463](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13463))
471
+ * call state.jobnext() before postprocess*() ([#13762](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13762))
472
+ * add option to set notification sound volume ([#13884](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13884))
473
+ * update Ruff to 0.1.6 ([#14059](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14059))
474
+ * add Block component creation callback ([#14119](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14119))
475
+ * catch uncaught exception with ui creation scripts ([#14120](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14120))
476
+ * use extension name to determine whether an extension is installed in the index ([#14063](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14063))
477
+ * update is_installed() from launch_utils.py to fix reinstalling already installed packages ([#14192](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14192))
478
+
479
+ ### Bug Fixes:
480
+ * fix pix2pix producing bad results
481
+ * fix defaults settings page breaking when any of main UI tabs are hidden
482
+ * fix error that causes some extra networks to be disabled if both <lora:> and <lyco:> are present in the prompt
483
+ * fix for Reload UI function: if you reload UI on one tab, other opened tabs will no longer stop working
484
+ * prevent duplicate resize handler ([#12795](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12795))
485
+ * small typo: vae resolve bug ([#12797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12797))
486
+ * hide broken image crop tool ([#12792](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12792))
487
+ * don't show hidden samplers in dropdown for XYZ script ([#12780](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12780))
488
+ * fix style editing dialog breaking if it's opened in both img2img and txt2img tabs
489
+ * hide --gradio-auth and --api-auth values from /internal/sysinfo report
490
+ * add missing infotext for RNG in options ([#12819](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12819))
491
+ * fix notification not playing when built-in webui tab is inactive ([#12834](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12834))
492
+ * honor `--skip-install` for extension installers ([#12832](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12832))
493
+ * don't print blank stdout in extension installers ([#12833](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12833), [#12855](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12855))
494
+ * get progressbar to display correctly in extensions tab
495
+ * keep order in list of checkpoints when loading model that doesn't have a checksum
496
+ * fix inpainting models in txt2img creating black pictures
497
+ * fix generation params regex ([#12876](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12876))
498
+ * fix batch img2img output dir with script ([#12926](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12926))
499
+ * fix #13080 - Hypernetwork/TI preview generation ([#13084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13084))
500
+ * fix bug with sigma min/max overrides. ([#12995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12995))
501
+ * more accurate check for enabling cuDNN benchmark on 16XX cards ([#12924](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12924))
502
+ * don't use multicond parser for negative prompt counter ([#13118](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13118))
503
+ * fix data-sort-name containing spaces ([#13412](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13412))
504
+ * update card on correct tab when editing metadata ([#13411](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13411))
505
+ * fix viewing/editing metadata when filename contains an apostrophe ([#13395](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13395))
506
+ * fix: --sd_model in "Prompts from file or textbox" script is not working ([#13302](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13302))
507
+ * better Support for Portable Git ([#13231](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13231))
508
+ * fix issues when webui_dir is not work_dir ([#13210](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13210))
509
+ * fix: lora-bias-backup don't reset cache ([#13178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13178))
510
+ * account for customizable extra network separators when removing extra network text from the prompt ([#12877](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12877))
511
+ * re-fix batch img2img output dir with script ([#13170](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13170))
512
+ * fix `--ckpt-dir` path separator and option use `short name` for checkpoint dropdown ([#13139](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13139))
513
+ * consolidated allowed preview formats, fix extra network `.gif` not working as preview ([#13121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13121))
514
+ * fix venv_dir=- environment variable not working as expected on linux ([#13469](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13469))
515
+ * repair unload sd checkpoint button
516
+ * edit-attention fixes ([#13533](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13533))
517
+ * fix bug when using --gfpgan-models-path ([#13718](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13718))
518
+ * properly apply sort order for extra network cards when selected from dropdown
519
+ * fixes generation restart not working for some users when 'Ctrl+Enter' is pressed ([#13962](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13962))
520
+ * thread safe extra network list_items ([#13014](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13014))
521
+ * fix not able to exit metadata popup when pop up is too big ([#14156](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14156))
522
+ * fix auto focal point crop for opencv >= 4.8 ([#14121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14121))
523
+ * make 'use-cpu all' actually apply to 'all' ([#14131](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14131))
524
+ * extras tab batch: actually use original filename
525
+ * make webui not crash when running with --disable-all-extensions option
526
+
527
+ ### Other:
528
+ * non-local condition ([#12814](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12814))
529
+ * fix minor typos ([#12827](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12827))
530
+ * remove xformers Python version check ([#12842](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12842))
531
+ * style: file-metadata word-break ([#12837](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12837))
532
+ * revert SGM noise multiplier change for img2img because it breaks hires fix
533
+ * do not change quicksettings dropdown option when value returned is `None` ([#12854](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12854))
534
+ * [RC 1.6.0 - zoom is partly hidden] Update style.css ([#12839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12839))
535
+ * chore: change extension time format ([#12851](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12851))
536
+ * WEBUI.SH - Use torch 2.1.0 release candidate for Navi 3 ([#12929](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12929))
537
+ * add Fallback at images.read_info_from_image if exif data was invalid ([#13028](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13028))
538
+ * update cmd arg description ([#12986](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12986))
539
+ * fix: update shared.opts.data when add_option ([#12957](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12957), [#13213](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13213))
540
+ * restore missing tooltips ([#12976](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12976))
541
+ * use default dropdown padding on mobile ([#12880](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12880))
542
+ * put enable console prompts option into settings from commandline args ([#13119](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13119))
543
+ * fix some deprecated types ([#12846](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12846))
544
+ * bump to torchsde==0.2.6 ([#13418](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13418))
545
+ * update dragdrop.js ([#13372](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13372))
546
+ * use OrderedDict as LRU cache (opt/bug) ([#13313](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13313))
547
+ * XYZ: if not including sub grids, do not save sub grids ([#13282](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13282))
548
+ * initialize state.time_start before state.job_count ([#13229](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13229))
549
+ * fix fieldname regex ([#13458](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13458))
550
+ * change denoising_strength default to None. ([#13466](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13466))
551
+ * fix regression ([#13475](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13475))
552
+ * fix IndexError ([#13630](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13630))
553
+ * fix: model.load_state_dict issue when a checkpoints_loaded {checkpoint: state_dict} value is empty ([#13535](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13535))
554
+ * update bug_report.yml ([#12991](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12991))
555
+ * requirements_versions httpx==0.24.1 ([#13839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13839))
556
+ * fix parenthesis auto selection ([#13829](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13829))
557
+ * fix #13796 ([#13797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13797))
558
+ * corrected a typo in `modules/cmd_args.py` ([#13855](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13855))
559
+ * feat: fix randn found element of type float at pos 2 ([#14004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14004))
560
+ * adds tqdm handler to logging_config.py for progress bar integration ([#13996](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13996))
561
+ * hotfix: call shared.state.end() after postprocessing done ([#13977](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13977))
562
+ * fix dependency address patch 1 ([#13929](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13929))
563
+ * save sysinfo as .json ([#14035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14035))
564
+ * move exception_records related methods to errors.py ([#14084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14084))
565
+ * compatibility ([#13936](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13936))
566
+ * json.dump(ensure_ascii=False) ([#14108](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14108))
567
+ * dir buttons start with / so only the correct dir will be shown and no… ([#13957](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13957))
568
+ * alternate implementation for unet forward replacement that does not depend on hijack being applied
569
+ * re-add `keyedit_delimiters_whitespace` setting lost as part of commit e294e46 ([#14178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14178))
570
+ * fix `save_samples` being checked early when saving masked composite ([#14177](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14177))
571
+ * slight optimization for mask and mask_composite ([#14181](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14181))
572
+ * add import_hook hack to work around basicsr/torchvision incompatibility ([#14186](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14186))
573
+
574
+ ## 1.6.1
575
+
576
+ ### Bug Fixes:
577
+ * fix an error causing the webui to fail to start ([#13839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13839))
578
+
579
+ ## 1.6.0
580
+
581
+ ### Features:
582
+ * refiner support ([#12371](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371))
583
+ * add NV option for Random number generator source setting, which makes it possible to generate the same pictures on CPU/AMD/Mac as on NVidia video cards
584
+ * add style editor dialog
585
+ * hires fix: add an option to use a different checkpoint for second pass ([#12181](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12181))
586
+ * option to keep multiple loaded models in memory ([#12227](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12227))
587
+ * new samplers: Restart, DPM++ 2M SDE Exponential, DPM++ 2M SDE Heun, DPM++ 2M SDE Heun Karras, DPM++ 2M SDE Heun Exponential, DPM++ 3M SDE, DPM++ 3M SDE Karras, DPM++ 3M SDE Exponential ([#12300](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12300), [#12519](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12519), [#12542](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12542))
588
+ * rework DDIM, PLMS, UniPC to use CFG denoiser same as in k-diffusion samplers:
589
+ * makes all of them work with img2img
590
+ * makes prompt composition possible (AND)
591
+ * makes them available for SDXL
592
+ * always show extra networks tabs in the UI ([#11808](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11808))
593
+ * use less RAM when creating models ([#11958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11958), [#12599](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12599))
594
+ * textual inversion inference support for SDXL
595
+ * extra networks UI: show metadata for SD checkpoints
596
+ * checkpoint merger: add metadata support
597
+ * prompt editing and attention: add support for whitespace after the number ([ red : green : 0.5 ]) (seed breaking change) ([#12177](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12177))
598
+ * VAE: allow selecting own VAE for each checkpoint (in user metadata editor)
599
+ * VAE: add selected VAE to infotext
600
+ * options in main UI: add own separate setting for txt2img and img2img, correctly read values from pasted infotext, add setting for column count ([#12551](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12551))
601
+ * add resize handle to txt2img and img2img tabs, making it possible to change the amount of horizontal space given to generation parameters and resulting image gallery ([#12687](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12687), [#12723](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12723))
602
+ * change default behavior for batching cond/uncond -- now it's on by default, and is disabled by a UI setting (Optimizations -> Batch cond/uncond) - if you are on lowvram/medvram and are getting OOM exceptions, you will need to enable it
603
+ * show current position in queue and make it so that requests are processed in the order of arrival ([#12707](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12707))
604
+ * add `--medvram-sdxl` flag that only enables `--medvram` for SDXL models
605
+ * prompt editing timeline has separate range for first pass and hires-fix pass (seed breaking change) ([#12457](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12457))
606
+
607
+ ### Minor:
608
+ * img2img batch: RAM savings, VRAM savings, .tif, .tiff in img2img batch ([#12120](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12120), [#12514](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12514), [#12515](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12515))
609
+ * postprocessing/extras: RAM savings ([#12479](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12479))
610
+ * XYZ: in the axis labels, remove pathnames from model filenames
611
+ * XYZ: support hires sampler ([#12298](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12298))
612
+ * XYZ: new option: use text inputs instead of dropdowns ([#12491](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12491))
613
+ * add gradio version warning
614
+ * sort list of VAE checkpoints ([#12297](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12297))
615
+ * use transparent white for mask in inpainting, along with an option to select the color ([#12326](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12326))
616
+ * move some settings to their own section: img2img, VAE
617
+ * add checkbox to show/hide dirs for extra networks
618
+ * Add TAESD(or more) options for all the VAE encode/decode operation ([#12311](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12311))
619
+ * gradio theme cache, new gradio themes, along with explanation that the user can input their own values ([#12346](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12346), [#12355](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12355))
620
+ * sampler fixes/tweaks: s_tmax, s_churn, s_noise ([#12354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12354), [#12356](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12356), [#12357](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12357), [#12358](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12358), [#12375](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12375), [#12521](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12521))
621
+ * update README.md with correct instructions for Linux installation ([#12352](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12352))
622
+ * option to not save incomplete images, on by default ([#12338](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12338))
623
+ * enable cond cache by default
624
+ * git autofix for repos that are corrupted ([#12230](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12230))
625
+ * allow to open images in new browser tab by middle mouse button ([#12379](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12379))
626
+ * automatically open webui in browser when running "locally" ([#12254](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12254))
627
+ * put commonly used samplers on top, make DPM++ 2M Karras the default choice
628
+ * zoom and pan: option to auto-expand a wide image, improved integration ([#12413](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12413), [#12727](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12727))
629
+ * option to cache Lora networks in memory
630
+ * rework hires fix UI to use accordion
631
+ * face restoration and tiling moved to settings - use "Options in main UI" setting if you want them back
632
+ * change quicksettings items to have variable width
633
+ * Lora: add Norm module, add support for bias ([#12503](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12503))
634
+ * Lora: output warnings in UI rather than fail for unfitting loras; switch to logging for error output in console
635
+ * support search and display of hashes for all extra network items ([#12510](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12510))
636
+ * add extra noise param for img2img operations ([#12564](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12564))
637
+ * support for Lora with bias ([#12584](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12584))
638
+ * make interrupt quicker ([#12634](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12634))
639
+ * configurable gallery height ([#12648](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12648))
640
+ * make results column sticky ([#12645](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12645))
641
+ * more hash filename patterns ([#12639](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12639))
642
+ * make image viewer actually fit the whole page ([#12635](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12635))
643
+ * make progress bar work independently from live preview display which results in it being updated a lot more often
644
+ * forbid Full live preview method for medvram and add a setting to undo the forbidding
645
+ * make it possible to localize tooltips and placeholders
646
+ * add option to align with sgm repo's sampling implementation ([#12818](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818))
647
+ * Restore faces and Tiling generation parameters have been moved to settings out of main UI
648
+ * if you want to put them back into main UI, use `Options in main UI` setting on the UI page.
649
+
650
+ ### Extensions and API:
651
+ * gradio 3.41.2
652
+ * also bump versions for packages: transformers, GitPython, accelerate, scikit-image, timm, tomesd
653
+ * support tooltip kwarg for gradio elements: gr.Textbox(label='hello', tooltip='world')
654
+ * properly clear the total console progressbar when using txt2img and img2img from API
655
+ * add cmd_arg --disable-extra-extensions and --disable-all-extensions ([#12294](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12294))
656
+ * shared.py and webui.py split into many files
657
+ * add --loglevel commandline argument for logging
658
+ * add a custom UI element that combines accordion and checkbox
659
+ * avoid importing gradio in tests because it spams warnings
660
+ * put infotext label for setting into OptionInfo definition rather than in a separate list
661
+ * make `StableDiffusionProcessingImg2Img.mask_blur` a property, make it more in line with PIL `GaussianBlur` ([#12470](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12470))
662
+ * option to make scripts UI without gr.Group
663
+ * add a way for scripts to register a callback for before/after just a single component's creation
664
+ * use dataclass for StableDiffusionProcessing
665
+ * store patches for Lora in a specialized module instead of inside torch
666
+ * support http/https URLs in API ([#12663](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12663), [#12698](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12698))
667
+ * add extra noise callback ([#12616](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12616))
668
+ * dump current stack traces when exiting with SIGINT
669
+ * add type annotations for extra fields of shared.sd_model
670
+
671
+ ### Bug Fixes:
672
+ * Don't crash if out of local storage quota for javascript localStorage
673
+ * XYZ plot do not fail if an exception occurs
674
+ * fix missing TI hash in infotext if generation uses both negative and positive TI ([#12269](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12269))
675
+ * localization fixes ([#12307](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12307))
676
+ * fix sdxl model invalid configuration after the hijack
677
+ * correctly toggle extras checkbox for infotext paste ([#12304](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12304))
678
+ * open raw sysinfo link in new page ([#12318](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12318))
679
+ * prompt parser: Account for empty field in alternating words syntax ([#12319](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12319))
680
+ * add tab and carriage return to invalid filename chars ([#12327](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12327))
681
+ * fix api only Lora not working ([#12387](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12387))
682
+ * fix options in main UI misbehaving when there's just one element
683
+ * make it possible to use a sampler from infotext even if it's hidden in the dropdown
684
+ * fix styles missing from the prompt in infotext when making a grid of a batch of multiple images
685
+ * prevent bogus progress output in console when calculating hires fix dimensions
686
+ * fix --use-textbox-seed
687
+ * fix broken `Lora/Networks: use old method` option ([#12466](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12466))
688
+ * properly return `None` for VAE hash when using `--no-hashing` ([#12463](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12463))
689
+ * MPS/macOS fixes and optimizations ([#12526](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12526))
690
+ * add second_order to samplers that mistakenly didn't have it
691
+ * when refreshing cards in extra networks UI, do not discard user's custom resolution
692
+ * fix processing error that happens if batch_size is not a multiple of how many prompts/negative prompts there are ([#12509](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12509))
693
+ * fix inpaint upload for alpha masks ([#12588](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12588))
694
+ * fix exception when image sizes are not integers ([#12586](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12586))
695
+ * fix incorrect TAESD Latent scale ([#12596](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12596))
696
+ * auto add data-dir to gradio-allowed-path ([#12603](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12603))
697
+ * fix exception if extensions dir is missing ([#12607](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12607))
698
+ * fix issues with api model-refresh and vae-refresh ([#12638](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12638))
699
+ * fix img2img background color for transparent images option not being used ([#12633](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12633))
700
+ * attempt to resolve NaN issue with unstable VAEs in fp32 mk2 ([#12630](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12630))
701
+ * implement missing undo hijack for SDXL
702
+ * fix xyz swap axes ([#12684](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12684))
703
+ * fix errors in backup/restore tab if any of config files are broken ([#12689](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12689))
704
+ * fix SD VAE switch error after model reuse ([#12685](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12685))
705
+ * fix trying to create images too large for the chosen format ([#12667](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12667))
706
+ * create Gradio temp directory if necessary ([#12717](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12717))
707
+ * prevent possible cache loss if exiting while the cache is being written, by using an atomic operation to replace it with the new version
708
+ * set devices.dtype_unet correctly
709
+ * run RealESRGAN on GPU for non-CUDA devices ([#12737](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12737))
710
+ * prevent extra network buttons being obscured by description for very small card sizes ([#12745](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12745))
711
+ * fix error that causes some extra networks to be disabled if both `<lora:>` and `<lyco:>` are present in the prompt
712
+ * fix defaults settings page breaking when any of main UI tabs are hidden
713
+ * fix incorrect save/display of new values in Defaults page in settings
714
+ * fix for Reload UI function: if you reload UI on one tab, other opened tabs will no longer stop working
715
+ * fix an error that prevents VAE being reloaded after an option change if a VAE near the checkpoint exists ([#12797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12797))
716
+ * hide broken image crop tool ([#12792](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12792))
717
+ * don't show hidden samplers in dropdown for XYZ script ([#12780](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12780))
718
+ * fix style editing dialog breaking if it's opened in both img2img and txt2img tabs
719
+ * fix a bug allowing users to bypass gradio and API authentication (reported by vysecurity)
720
+ * fix notification not playing when built-in webui tab is inactive ([#12834](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12834))
721
+ * honor `--skip-install` for extension installers ([#12832](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12832))
722
+ * don't print blank stdout in extension installers ([#12833](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12833), [#12855](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12855))
723
+ * do not change quicksettings dropdown option when value returned is `None` ([#12854](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12854))
724
+ * get progressbar to display correctly in extensions tab
725
+
726
+
727
+ ## 1.5.2
728
+
729
+ ### Bug Fixes:
730
+ * fix memory leak when generation fails
731
+ * update doggettx cross attention optimization to not use an unreasonable amount of memory in some edge cases -- suggestion by MorkTheOrk
732
+
733
+
734
+ ## 1.5.1
735
+
736
+ ### Minor:
737
+ * support parsing text encoder blocks in some new LoRAs
738
+ * delete scale checker script due to user demand
739
+
740
+ ### Extensions and API:
741
+ * add postprocess_batch_list script callback
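+
+ A rough sketch of an extension script using this callback - exact argument types live in modules/scripts.py, and the class and field names below are illustrative:
+
+ ```python
+ # Sketch: a script that edits the batch's image list before images are saved.
+ from modules import scripts
+
+ class DropFirstImage(scripts.Script):
+     def title(self):
+         return "Drop first image (sketch)"
+
+     def show(self, is_img2img):
+         return scripts.AlwaysVisible
+
+     def postprocess_batch_list(self, p, pp, **kwargs):
+         # pp carries the list of images produced for the batch; scripts may
+         # add, remove, or reorder entries here.
+         if len(pp.images) > 1:
+             pp.images.pop(0)
+ ```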
742
+
743
+ ### Bug Fixes:
744
+ * fix TI training for SD1
745
+ * fix reload altclip model error
746
+ * prepend the pythonpath instead of overriding it
747
+ * fix typo in SD_WEBUI_RESTARTING
748
+ * if txt2img/img2img raises an exception, finally call state.end()
749
+ * fix composable diffusion weight parsing
750
+ * restyle Startup profile for black users
751
+ * fix webui not launching with --nowebui
752
+ * catch exceptions for non-git extensions
753
+ * fix some options missing from /sdapi/v1/options
754
+ * fix for extension update status always saying "unknown"
755
+ * fix display of extra network cards that have `<>` in the name
756
+ * update lora extension to work with python 3.8
757
+
758
+
759
+ ## 1.5.0
760
+
761
+ ### Features:
762
+ * SD XL support
763
+ * user metadata system for custom networks
764
+ * extended Lora metadata editor: set activation text, default weight, view tags, training info
765
+ * Lora extension rework to include other types of networks (all that were previously handled by LyCORIS extension)
766
+ * show github stars for extensions
767
+ * img2img batch mode can read extra stuff from png info
768
+ * img2img batch works with subdirectories
769
+ * hotkeys to move prompt elements: alt+left/right
770
+ * restyle time taken/VRAM display
771
+ * add textual inversion hashes to infotext
772
+ * optimization: cache git extension repo information
773
+ * move generate button next to the generated picture for mobile clients
774
+ * hide cards for networks of incompatible Stable Diffusion version in Lora extra networks interface
775
+ * skip installing packages with pip if they are all already installed - startup speedup of about 2 seconds
776
+
777
+ ### Minor:
778
+ * checkbox to check/uncheck all extensions in the Installed tab
779
+ * add gradio user to infotext and to filename patterns
780
+ * allow gif for extra network previews
781
+ * add options to change colors in grid
782
+ * use natural sort for items in extra networks
783
+ * Mac: use empty_cache() from torch 2 to clear VRAM
784
+ * added automatic support for installing the right libraries for Navi3 (AMD)
785
+ * add option SWIN_torch_compile to accelerate SwinIR upscale
786
+ * suppress printing TI embedding info at start to console by default
787
+ * speedup extra networks listing
788
+ * added `[none]` filename token.
789
+ * removed thumbs extra networks view mode (use settings tab to change width/height/scale to get thumbs)
790
+ * add always_discard_next_to_last_sigma option to XYZ plot
791
+ * automatically switch to 32-bit float VAE if the generated picture has NaNs without the need for `--no-half-vae` commandline flag.
792
+
793
+ ### Extensions and API:
794
+ * api endpoints: /sdapi/v1/server-kill, /sdapi/v1/server-restart, /sdapi/v1/server-stop (see the sketch after this list)
795
+ * allow Script to have custom metaclass
796
+ * add model-exists status check to /sdapi/v1/options
797
+ * rename --add-stop-route to --api-server-stop
798
+ * add `before_hr` script callback
799
+ * add callback `after_extra_networks_activate`
800
+ * disable rich exception output in console for API by default, use WEBUI_RICH_EXCEPTIONS env var to enable
801
+ * return http 404 when thumb file not found
802
+ * allow replacing extensions index with environment variable
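+
+ A minimal sketch of the server-control endpoints above (host/port are assumptions, and the endpoints only exist when enabled via --api-server-stop):
+
+ ```python
+ # Sketch: control a running webui instance over its API.
+ import requests
+
+ BASE = "http://127.0.0.1:7860"
+ requests.post(f"{BASE}/sdapi/v1/server-restart")   # restart the server process
+ # requests.post(f"{BASE}/sdapi/v1/server-stop")    # stop gracefully
+ # requests.post(f"{BASE}/sdapi/v1/server-kill")    # terminate immediately
+ ```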
803
+
804
+ ### Bug Fixes:
805
+ * fix: catch errors when retrieving extension index (#11290)
806
+ * fix very slow loading speed of .safetensors files when reading from network drives
807
+ * API cache cleanup
808
+ * fix UnicodeEncodeError when writing to file in CLIP Interrogator batch mode
809
+ * fix warning of 'has_mps' deprecated from PyTorch
810
+ * fix problem with extra network saving images as previews losing generation info
811
+ * fix throwing exception when trying to resize image with I;16 mode
812
+ * fix for #11534: canvas zoom and pan extension hijacking shortcut keys
813
+ * fixed launch script to be runnable from any directory
814
+ * don't add "Seed Resize: -1x-1" to API image metadata
815
+ * correctly remove end parenthesis with ctrl+up/down
816
+ * fix --subpath on newer gradio versions
817
+ * fix: check that fill size is non-zero when resizing (fixes #11425)
818
+ * use submit and blur for quick settings textbox
819
+ * save img2img batch with images.save_image()
820
+ * prevent running preload.py for disabled extensions
821
+ * fix: previously, model name was added together with directory name to infotext and to [model_name] filename pattern; directory name is now not included
822
+
823
+
824
+ ## 1.4.1
825
+
826
+ ### Bug Fixes:
827
+ * add queue lock for refresh-checkpoints
828
+
829
+ ## 1.4.0
830
+
831
+ ### Features:
832
+ * zoom controls for inpainting
833
+ * run basic torch calculation at startup in parallel to reduce the performance impact of first generation
834
+ * option to pad prompt/neg prompt to the same length
835
+ * remove taming_transformers dependency
836
+ * custom k-diffusion scheduler settings
837
+ * add an option to show selected settings in main txt2img/img2img UI
838
+ * sysinfo tab in settings
839
+ * infer styles from prompts when pasting params into the UI
840
+ * an option to control the behavior of the above
841
+
842
+ ### Minor:
843
+ * bump Gradio to 3.32.0
844
+ * bump xformers to 0.0.20
845
+ * Add option to disable token counters
846
+ * tooltip fixes & optimizations
847
+ * make it possible to configure filename for the zip download
848
+ * `[vae_filename]` pattern for filenames
849
+ * Revert discarding penultimate sigma for DPM-Solver++(2M) SDE
850
+ * change UI reorder setting to multiselect
851
+ * read version info from CHANGELOG.md if git version info is not available
852
+ * link footer API to Wiki when API is not active
853
+ * persistent conds cache (opt-in optimization)
854
+
855
+ ### Extensions:
856
+ * After installing extensions, webui properly restarts the process rather than reloading the UI
857
+ * Added VAE listing to web API via /sdapi/v1/sd-vae (see the sketch after this list)
858
+ * custom unet support
859
+ * Add onAfterUiUpdate callback
860
+ * refactor EmbeddingDatabase.register_embedding() to allow unregistering
861
+ * add before_process callback for scripts
862
+ * add ability for alwayson scripts to specify section and let user reorder those sections
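+
+ A minimal sketch of the VAE listing endpoint above (host/port are assumptions; the response shape is illustrative):
+
+ ```python
+ # Sketch: enumerate the VAEs the server knows about.
+ import requests
+
+ vaes = requests.get("http://127.0.0.1:7860/sdapi/v1/sd-vae").json()
+ for vae in vaes:
+     print(vae)  # each entry describes one available VAE
+ ```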
863
+
864
+ ### Bug Fixes:
865
+ * Fix dragging text to prompt
866
+ * fix incorrect quoting for infotext values with colon in them
867
+ * fix "hires. fix" prompt sharing same labels with txt2img_prompt
868
+ * Fix `s_min_uncond` default being of type int
869
+ * Fix for #10643 (Inpainting mask sometimes not working)
870
+ * fix bad styling for thumbs view in extra networks #10639
871
+ * fix for empty list of optimizations #10605
872
+ * small fixes to prepare_tcmalloc for Debian/Ubuntu compatibility
873
+ * fix --ui-debug-mode exit
874
+ * patch GitPython to not use leaky persistent processes
875
+ * fix duplicate Cross attention optimization after UI reload
876
+ * torch.cuda.is_available() check for SdOptimizationXformers
877
+ * fix hires fix using wrong conds in second pass if using Loras.
878
+ * handle exception when parsing generation parameters from png info
879
+ * fix upcast attention dtype error
880
+ * force Torch version to 1.13.1 for RX 5000 series GPUs
881
+ * split mask blur into X and Y components, patch Outpainting MK2 accordingly
882
+ * don't die when a LoRA is a broken symlink
883
+ * allow activation of Generate Forever during generation
884
+
885
+
886
+ ## 1.3.2
887
+
888
+ ### Bug Fixes:
889
+ * fix files served out of tmp directory even if they are saved to disk
890
+ * fix postprocessing overwriting parameters
891
+
892
+ ## 1.3.1
893
+
894
+ ### Features:
895
+ * revert default cross attention optimization to Doggettx
896
+
897
+ ### Bug Fixes:
898
+ * fix bug: LoRA not applied when selected via the sd_lora dropdown
899
+ * fix png info always added even if setting is not enabled
900
+ * fix some fields not applying in xyz plot
901
+ * fix "hires. fix" prompt sharing same labels with txt2img_prompt
902
+ * fix lora hashes not being added properly to infotext if there is only one lora
903
+ * fix --use-cpu failing to work properly at startup
904
+ * make --disable-opt-split-attention command line option work again
905
+
906
+ ## 1.3.0
907
+
908
+ ### Features:
909
+ * add UI to edit defaults
910
+ * token merging (via dbolya/tomesd)
911
+ * settings tab rework: add a lot of additional explanations and links
912
+ * load extensions' Git metadata in parallel to loading the main program to save a ton of time during startup
913
+ * update extensions table: show branch, show date in separate column, and show version from tags if available
914
+ * TAESD - another option for cheap live previews
915
+ * allow choosing sampler and prompts for second pass of hires fix - hidden by default, enabled in settings
916
+ * calculate hashes for Lora
917
+ * add lora hashes to infotext
918
+ * when pasting infotext, use infotext's lora hashes to find local loras for `<lora:xxx:1>` entries whose hashes match loras the user has
919
+ * select cross attention optimization from UI
920
+
921
+ ### Minor:
922
+ * bump Gradio to 3.31.0
923
+ * bump PyTorch to 2.0.1 for macOS and Linux AMD
924
+ * allow setting defaults for elements in extensions' tabs
925
+ * allow selecting file type for live previews
926
+ * show "Loading..." for extra networks when displaying for the first time
927
+ * suppress ENSD infotext for samplers that don't use it
928
+ * clientside optimizations
929
+ * add options to show/hide hidden files and dirs in extra networks, and to not list models/files in hidden directories
930
+ * allow whitespace in styles.csv
931
+ * add option to reorder tabs
932
+ * move some functionality (swap resolution and set seed to -1) to client
933
+ * option to specify editor height for img2img
934
+ * button to copy image resolution into img2img width/height sliders
935
+ * switch from pyngrok to ngrok-py
936
+ * lazy-load images in extra networks UI
937
+ * set "Navigate image viewer with gamepad" option to false by default, by request
938
+ * change upscalers to download models into user-specified directory (from commandline args) rather than the default models/<...>
939
+ * allow hiding buttons in ui-config.json
940
+
941
+ ### Extensions:
942
+ * add /sdapi/v1/script-info api (see the sketch after this list)
943
+ * use Ruff to lint Python code
944
+ * use ESLint to lint JavaScript code
945
+ * add/modify CFG callbacks for Self-Attention Guidance extension
946
+ * add command and endpoint for graceful server stopping
947
+ * add some locals (prompts/seeds/etc) from processing function into the Processing class as fields
948
+ * rework quoting for infotext items that have commas in them to use JSON (should be backwards compatible except for cases where it didn't work previously)
949
+ * add /sdapi/v1/refresh-loras API POST request
950
+ * tests overhaul
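+
+ A minimal sketch of the two endpoints above (host/port are assumptions):
+
+ ```python
+ # Sketch: inspect installed scripts and ask the server to rescan Loras.
+ import requests
+
+ BASE = "http://127.0.0.1:7860"
+ script_info = requests.get(f"{BASE}/sdapi/v1/script-info").json()  # scripts and their UI args
+ requests.post(f"{BASE}/sdapi/v1/refresh-loras")  # rescan the Lora directory
+ ```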
951
+
952
+ ### Bug Fixes:
953
+ * fix an issue preventing the program from starting if the user specifies a bad Gradio theme
954
+ * fix broken prompts from file script
955
+ * fix symlink scanning for extra networks
956
+ * fix --data-dir ignored when launching via webui-user.bat COMMANDLINE_ARGS
957
+ * allow web UI to be run fully offline
958
+ * fix inability to run with --freeze-settings
959
+ * fix inability to merge checkpoint without adding metadata
960
+ * fix extra networks' save preview image not adding infotext for jpeg/webm
961
+ * remove blinking effect from text in hires fix and scale resolution preview
962
+ * make links to `http://<...>.git` extensions work in the extension tab
963
+ * fix bug with webui hanging at startup due to hanging git process
964
+
965
+
966
+ ## 1.2.1
967
+
968
+ ### Features:
969
+ * add an option to always refer to LoRA by filenames
970
+
971
+ ### Bug Fixes:
972
+ * never refer to LoRA by an alias if multiple LoRAs have the same alias or the alias is called none
973
+ * fix upscalers disappearing after the user reloads UI
974
+ * allow bf16 in safe unpickler (resolves problems with loading some LoRAs)
975
+ * allow web UI to be run fully offline
976
+ * fix localizations not working
977
+ * fix error for LoRAs: `'LatentDiffusion' object has no attribute 'lora_layer_mapping'`
978
+
979
+ ## 1.2.0
980
+
981
+ ### Features:
982
+ * do not wait for Stable Diffusion model to load at startup
983
+ * add filename patterns: `[denoising]`
984
+ * directory hiding for extra networks: dirs starting with `.` will hide their cards on extra network tabs unless specifically searched for
985
+ * LoRA: for the `<...>` text in prompt, use name of LoRA that is in the metadata of the file, if present, instead of filename (both can be used to activate LoRA)
986
+ * LoRA: read infotext params from kohya-ss's extension parameters if they are present and if that extension is not active
987
+ * LoRA: fix some LoRAs not working (ones that have 3x3 convolution layer)
988
+ * LoRA: add an option to use old method of applying LoRAs (producing same results as with kohya-ss)
989
+ * add version to infotext, footer and console output when starting
990
+ * add links to wiki for filename pattern settings
991
+ * add extended info for quicksettings setting and use multiselect input instead of a text field
992
+
993
+ ### Minor:
994
+ * bump Gradio to 3.29.0
995
+ * bump PyTorch to 2.0.1
996
+ * `--subpath` option for gradio for use with reverse proxy
997
+ * Linux/macOS: use existing virtualenv if already active (the VIRTUAL_ENV environment variable)
998
+ * do not apply localizations if there are none (possible frontend optimization)
999
+ * add extra `None` option for VAE in XYZ plot
1000
+ * print error to console when batch processing in img2img fails
1001
+ * create HTML for extra network pages only on demand
1002
+ * allow directories starting with `.` to still list their models for LoRA, checkpoints, etc
1003
+ * put infotext options into their own category in settings tab
1004
+ * do not show licenses page when user selects Show all pages in settings
1005
+
1006
+ ### Extensions:
1007
+ * tooltip localization support
1008
+ * add API method to get LoRA models with prompt
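+
+ A sketch of querying that listing - the exact route (/sdapi/v1/loras here) and response keys are assumptions; check the Lora extension's API registration:
+
+ ```python
+ # Sketch: list LoRA models known to the server.
+ import requests
+
+ loras = requests.get("http://127.0.0.1:7860/sdapi/v1/loras").json()
+ for lora in loras:
+     print(lora)  # each entry describes one LoRA model
+ ```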
1009
+
1010
+ ### Bug Fixes:
1011
+ * re-add `/docs` endpoint
1012
+ * fix gamepad navigation
1013
+ * make the lightbox fullscreen image function properly
1014
+ * fix squished thumbnails in extras tab
1015
+ * keep "search" filter for extra networks when user refreshes the tab (previously it showed everything after you refreshed)
1016
+ * fix webui showing the same image if you configure the generation to always save results into same file
1017
+ * fix bug with upscalers not working properly
1018
+ * fix MPS on PyTorch 2.0.1, Intel Macs
1019
+ * make it so that custom context menu from contextMenu.js only disappears after user's click, ignoring non-user click events
1020
+ * prevent Reload UI button/link from reloading the page when it's not yet ready
1021
+ * fix prompts from file script failing to read contents from a drag/drop file
1022
+
1023
+
1024
+ ## 1.1.1
1025
+ ### Bug Fixes:
1026
+ * fix an error that prevents running webui on PyTorch<2.0 without --disable-safe-unpickle
1027
+
1028
+ ## 1.1.0
1029
+ ### Features:
1030
+ * switch to PyTorch 2.0.0 (except for AMD GPUs)
1031
+ * visual improvements to custom code scripts
1032
+ * add filename patterns: `[clip_skip]`, `[hasprompt<>]`, `[batch_number]`, `[generation_number]`
1033
+ * add support for saving init images in img2img, and record their hashes in infotext for reproducibility
1034
+ * automatically select current word when adjusting weight with ctrl+up/down
1035
+ * add dropdowns for X/Y/Z plot
1036
+ * add setting: Stable Diffusion/Random number generator source: makes it possible to make images generated from a given manual seed consistent across different GPUs
1037
+ * support Gradio's theme API
1038
+ * use TCMalloc on Linux by default; possible fix for memory leaks
1039
+ * add optimization option to remove negative conditioning at low sigma values #9177
1040
+ * embed model merge metadata in .safetensors file
1041
+ * extension settings backup/restore feature #9169
1042
+ * add "resize by" and "resize to" tabs to img2img
1043
+ * add option "keep original size" to textual inversion images preprocess
1044
+ * image viewer scrolling via analog stick
1045
+ * button to restore the progress from session lost / tab reload
1046
+
1047
+ ### Minor:
1048
+ * bump Gradio to 3.28.1
1049
+ * change "scale to" to sliders in Extras tab
1050
+ * add labels to tool buttons to make it possible to hide them
1051
+ * add tiled inference support for ScuNET
1052
+ * add branch support for extension installation
1053
+ * change Linux installation script to install into current directory rather than `/home/username`
1054
+ * sort textual inversion embeddings by name (case-insensitive)
1055
+ * allow styles.csv to be symlinked or mounted in docker
1056
+ * remove the "do not add watermark to images" option
1057
+ * make selected tab configurable with UI config
1058
+ * make the extra networks UI fixed height and scrollable
1059
+ * add `disable_tls_verify` arg for use with self-signed certs
1060
+
1061
+ ### Extensions:
1062
+ * add reload callback
1063
+ * add `is_hr_pass` field for processing
1064
+
1065
+ ### Bug Fixes:
1066
+ * fix broken batch image processing on 'Extras/Batch Process' tab
1067
+ * add "None" option to extra networks dropdowns
1068
+ * fix FileExistsError for CLIP Interrogator
1069
+ * fix /sdapi/v1/txt2img endpoint not working on Linux #9319
1070
+ * fix disappearing live previews and progressbar during slow tasks
1071
+ * fix fullscreen image view not working properly in some cases
1072
+ * prevent the alwayson_scripts args param from resizing the script_args list when entries are inserted into it
1073
+ * fix prompt schedule for second order samplers
1074
+ * fix image mask/composite for weird resolutions #9628
1075
+ * use correct images for previews when using AND (see #9491)
1076
+ * one broken image in img2img batch won't stop all processing
1077
+ * fix image orientation bug in train/preprocess
1078
+ * fix Ngrok recreating tunnels every reload
1079
+ * fix `--realesrgan-models-path` and `--ldsr-models-path` not working
1080
+ * fix `--skip-install` not working
1081
+ * use SAMPLE file format in Outpainting Mk2 & Poorman
1082
+ * do not fail all LoRAs if some have failed to load when making a picture
1083
+
1084
+ ## 1.0.0
1085
+ * everything
CITATION.cff ADDED
@@ -0,0 +1,7 @@
1
+ cff-version: 1.2.0
2
+ message: "If you use this software, please cite it as below."
3
+ authors:
4
+ - given-names: AUTOMATIC1111
5
+ title: "Stable Diffusion Web UI"
6
+ date-released: 2022-08-22
7
+ url: "https://github.com/AUTOMATIC1111/stable-diffusion-webui"
CODEOWNERS ADDED
@@ -0,0 +1 @@
1
+ * @lllyasviel
LICENSE.txt ADDED
@@ -0,0 +1,688 @@
1
+ GNU AFFERO GENERAL PUBLIC LICENSE
2
+ Version 3, 19 November 2007
3
+
4
+ Copyright (c) 2023 AUTOMATIC1111
5
+
6
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
7
+ Everyone is permitted to copy and distribute verbatim copies
8
+ of this license document, but changing it is not allowed.
9
+
10
+ Preamble
11
+
12
+ The GNU Affero General Public License is a free, copyleft license for
13
+ software and other kinds of works, specifically designed to ensure
14
+ cooperation with the community in the case of network server software.
15
+
16
+ The licenses for most software and other practical works are designed
17
+ to take away your freedom to share and change the works. By contrast,
18
+ our General Public Licenses are intended to guarantee your freedom to
19
+ share and change all versions of a program--to make sure it remains free
20
+ software for all its users.
21
+
22
+ When we speak of free software, we are referring to freedom, not
23
+ price. Our General Public Licenses are designed to make sure that you
24
+ have the freedom to distribute copies of free software (and charge for
25
+ them if you wish), that you receive source code or can get it if you
26
+ want it, that you can change the software or use pieces of it in new
27
+ free programs, and that you know you can do these things.
28
+
29
+ Developers that use our General Public Licenses protect your rights
30
+ with two steps: (1) assert copyright on the software, and (2) offer
31
+ you this License which gives you legal permission to copy, distribute
32
+ and/or modify the software.
33
+
34
+ A secondary benefit of defending all users' freedom is that
35
+ improvements made in alternate versions of the program, if they
36
+ receive widespread use, become available for other developers to
37
+ incorporate. Many developers of free software are heartened and
38
+ encouraged by the resulting cooperation. However, in the case of
39
+ software used on network servers, this result may fail to come about.
40
+ The GNU General Public License permits making a modified version and
41
+ letting the public access it on a server without ever releasing its
42
+ source code to the public.
43
+
44
+ The GNU Affero General Public License is designed specifically to
45
+ ensure that, in such cases, the modified source code becomes available
46
+ to the community. It requires the operator of a network server to
47
+ provide the source code of the modified version running there to the
48
+ users of that server. Therefore, public use of a modified version, on
49
+ a publicly accessible server, gives the public access to the source
50
+ code of the modified version.
51
+
52
+ An older license, called the Affero General Public License and
53
+ published by Affero, was designed to accomplish similar goals. This is
54
+ a different license, not a version of the Affero GPL, but Affero has
55
+ released a new version of the Affero GPL which permits relicensing under
56
+ this license.
57
+
58
+ The precise terms and conditions for copying, distribution and
59
+ modification follow.
60
+
61
+ TERMS AND CONDITIONS
62
+
63
+ 0. Definitions.
64
+
65
+ "This License" refers to version 3 of the GNU Affero General Public License.
66
+
67
+ "Copyright" also means copyright-like laws that apply to other kinds of
68
+ works, such as semiconductor masks.
69
+
70
+ "The Program" refers to any copyrightable work licensed under this
71
+ License. Each licensee is addressed as "you". "Licensees" and
72
+ "recipients" may be individuals or organizations.
73
+
74
+ To "modify" a work means to copy from or adapt all or part of the work
75
+ in a fashion requiring copyright permission, other than the making of an
76
+ exact copy. The resulting work is called a "modified version" of the
77
+ earlier work or a work "based on" the earlier work.
78
+
79
+ A "covered work" means either the unmodified Program or a work based
80
+ on the Program.
81
+
82
+ To "propagate" a work means to do anything with it that, without
83
+ permission, would make you directly or secondarily liable for
84
+ infringement under applicable copyright law, except executing it on a
85
+ computer or modifying a private copy. Propagation includes copying,
86
+ distribution (with or without modification), making available to the
87
+ public, and in some countries other activities as well.
88
+
89
+ To "convey" a work means any kind of propagation that enables other
90
+ parties to make or receive copies. Mere interaction with a user through
91
+ a computer network, with no transfer of a copy, is not conveying.
92
+
93
+ An interactive user interface displays "Appropriate Legal Notices"
94
+ to the extent that it includes a convenient and prominently visible
95
+ feature that (1) displays an appropriate copyright notice, and (2)
96
+ tells the user that there is no warranty for the work (except to the
97
+ extent that warranties are provided), that licensees may convey the
98
+ work under this License, and how to view a copy of this License. If
99
+ the interface presents a list of user commands or options, such as a
100
+ menu, a prominent item in the list meets this criterion.
101
+
102
+ 1. Source Code.
103
+
104
+ The "source code" for a work means the preferred form of the work
105
+ for making modifications to it. "Object code" means any non-source
106
+ form of a work.
107
+
108
+ A "Standard Interface" means an interface that either is an official
109
+ standard defined by a recognized standards body, or, in the case of
110
+ interfaces specified for a particular programming language, one that
111
+ is widely used among developers working in that language.
112
+
113
+ The "System Libraries" of an executable work include anything, other
114
+ than the work as a whole, that (a) is included in the normal form of
115
+ packaging a Major Component, but which is not part of that Major
116
+ Component, and (b) serves only to enable use of the work with that
117
+ Major Component, or to implement a Standard Interface for which an
118
+ implementation is available to the public in source code form. A
119
+ "Major Component", in this context, means a major essential component
120
+ (kernel, window system, and so on) of the specific operating system
121
+ (if any) on which the executable work runs, or a compiler used to
122
+ produce the work, or an object code interpreter used to run it.
123
+
124
+ The "Corresponding Source" for a work in object code form means all
125
+ the source code needed to generate, install, and (for an executable
126
+ work) run the object code and to modify the work, including scripts to
127
+ control those activities. However, it does not include the work's
128
+ System Libraries, or general-purpose tools or generally available free
129
+ programs which are used unmodified in performing those activities but
130
+ which are not part of the work. For example, Corresponding Source
131
+ includes interface definition files associated with source files for
132
+ the work, and the source code for shared libraries and dynamically
133
+ linked subprograms that the work is specifically designed to require,
134
+ such as by intimate data communication or control flow between those
135
+ subprograms and other parts of the work.
136
+
137
+ The Corresponding Source need not include anything that users
138
+ can regenerate automatically from other parts of the Corresponding
139
+ Source.
140
+
141
+ The Corresponding Source for a work in source code form is that
142
+ same work.
143
+
144
+ 2. Basic Permissions.
145
+
146
+ All rights granted under this License are granted for the term of
147
+ copyright on the Program, and are irrevocable provided the stated
148
+ conditions are met. This License explicitly affirms your unlimited
149
+ permission to run the unmodified Program. The output from running a
150
+ covered work is covered by this License only if the output, given its
151
+ content, constitutes a covered work. This License acknowledges your
152
+ rights of fair use or other equivalent, as provided by copyright law.
153
+
154
+ You may make, run and propagate covered works that you do not
155
+ convey, without conditions so long as your license otherwise remains
156
+ in force. You may convey covered works to others for the sole purpose
157
+ of having them make modifications exclusively for you, or provide you
158
+ with facilities for running those works, provided that you comply with
159
+ the terms of this License in conveying all material for which you do
160
+ not control copyright. Those thus making or running the covered works
161
+ for you must do so exclusively on your behalf, under your direction
162
+ and control, on terms that prohibit them from making any copies of
163
+ your copyrighted material outside their relationship with you.
164
+
165
+ Conveying under any other circumstances is permitted solely under
166
+ the conditions stated below. Sublicensing is not allowed; section 10
167
+ makes it unnecessary.
168
+
169
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
170
+
171
+ No covered work shall be deemed part of an effective technological
172
+ measure under any applicable law fulfilling obligations under article
173
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
174
+ similar laws prohibiting or restricting circumvention of such
175
+ measures.
176
+
177
+ When you convey a covered work, you waive any legal power to forbid
178
+ circumvention of technological measures to the extent such circumvention
179
+ is effected by exercising rights under this License with respect to
180
+ the covered work, and you disclaim any intention to limit operation or
181
+ modification of the work as a means of enforcing, against the work's
182
+ users, your or third parties' legal rights to forbid circumvention of
183
+ technological measures.
184
+
185
+ 4. Conveying Verbatim Copies.
186
+
187
+ You may convey verbatim copies of the Program's source code as you
188
+ receive it, in any medium, provided that you conspicuously and
189
+ appropriately publish on each copy an appropriate copyright notice;
190
+ keep intact all notices stating that this License and any
191
+ non-permissive terms added in accord with section 7 apply to the code;
192
+ keep intact all notices of the absence of any warranty; and give all
193
+ recipients a copy of this License along with the Program.
194
+
195
+ You may charge any price or no price for each copy that you convey,
196
+ and you may offer support or warranty protection for a fee.
197
+
198
+ 5. Conveying Modified Source Versions.
199
+
200
+ You may convey a work based on the Program, or the modifications to
201
+ produce it from the Program, in the form of source code under the
202
+ terms of section 4, provided that you also meet all of these conditions:
203
+
204
+ a) The work must carry prominent notices stating that you modified
205
+ it, and giving a relevant date.
206
+
207
+ b) The work must carry prominent notices stating that it is
208
+ released under this License and any conditions added under section
209
+ 7. This requirement modifies the requirement in section 4 to
210
+ "keep intact all notices".
211
+
212
+ c) You must license the entire work, as a whole, under this
213
+ License to anyone who comes into possession of a copy. This
214
+ License will therefore apply, along with any applicable section 7
215
+ additional terms, to the whole of the work, and all its parts,
216
+ regardless of how they are packaged. This License gives no
217
+ permission to license the work in any other way, but it does not
218
+ invalidate such permission if you have separately received it.
219
+
220
+ d) If the work has interactive user interfaces, each must display
221
+ Appropriate Legal Notices; however, if the Program has interactive
222
+ interfaces that do not display Appropriate Legal Notices, your
223
+ work need not make them do so.
224
+
225
+ A compilation of a covered work with other separate and independent
226
+ works, which are not by their nature extensions of the covered work,
227
+ and which are not combined with it such as to form a larger program,
228
+ in or on a volume of a storage or distribution medium, is called an
229
+ "aggregate" if the compilation and its resulting copyright are not
230
+ used to limit the access or legal rights of the compilation's users
231
+ beyond what the individual works permit. Inclusion of a covered work
232
+ in an aggregate does not cause this License to apply to the other
233
+ parts of the aggregate.
234
+
235
+ 6. Conveying Non-Source Forms.
236
+
237
+ You may convey a covered work in object code form under the terms
238
+ of sections 4 and 5, provided that you also convey the
239
+ machine-readable Corresponding Source under the terms of this License,
240
+ in one of these ways:
241
+
242
+ a) Convey the object code in, or embodied in, a physical product
243
+ (including a physical distribution medium), accompanied by the
244
+ Corresponding Source fixed on a durable physical medium
245
+ customarily used for software interchange.
246
+
247
+ b) Convey the object code in, or embodied in, a physical product
248
+ (including a physical distribution medium), accompanied by a
249
+ written offer, valid for at least three years and valid for as
250
+ long as you offer spare parts or customer support for that product
251
+ model, to give anyone who possesses the object code either (1) a
252
+ copy of the Corresponding Source for all the software in the
253
+ product that is covered by this License, on a durable physical
254
+ medium customarily used for software interchange, for a price no
255
+ more than your reasonable cost of physically performing this
256
+ conveying of source, or (2) access to copy the
257
+ Corresponding Source from a network server at no charge.
258
+
259
+ c) Convey individual copies of the object code with a copy of the
260
+ written offer to provide the Corresponding Source. This
261
+ alternative is allowed only occasionally and noncommercially, and
262
+ only if you received the object code with such an offer, in accord
263
+ with subsection 6b.
264
+
265
+ d) Convey the object code by offering access from a designated
266
+ place (gratis or for a charge), and offer equivalent access to the
267
+ Corresponding Source in the same way through the same place at no
268
+ further charge. You need not require recipients to copy the
269
+ Corresponding Source along with the object code. If the place to
270
+ copy the object code is a network server, the Corresponding Source
271
+ may be on a different server (operated by you or a third party)
272
+ that supports equivalent copying facilities, provided you maintain
273
+ clear directions next to the object code saying where to find the
274
+ Corresponding Source. Regardless of what server hosts the
275
+ Corresponding Source, you remain obligated to ensure that it is
276
+ available for as long as needed to satisfy these requirements.
277
+
278
+ e) Convey the object code using peer-to-peer transmission, provided
279
+ you inform other peers where the object code and Corresponding
280
+ Source of the work are being offered to the general public at no
281
+ charge under subsection 6d.
282
+
283
+ A separable portion of the object code, whose source code is excluded
284
+ from the Corresponding Source as a System Library, need not be
285
+ included in conveying the object code work.
286
+
287
+ A "User Product" is either (1) a "consumer product", which means any
288
+ tangible personal property which is normally used for personal, family,
289
+ or household purposes, or (2) anything designed or sold for incorporation
290
+ into a dwelling. In determining whether a product is a consumer product,
291
+ doubtful cases shall be resolved in favor of coverage. For a particular
292
+ product received by a particular user, "normally used" refers to a
293
+ typical or common use of that class of product, regardless of the status
294
+ of the particular user or of the way in which the particular user
295
+ actually uses, or expects or is expected to use, the product. A product
296
+ is a consumer product regardless of whether the product has substantial
297
+ commercial, industrial or non-consumer uses, unless such uses represent
298
+ the only significant mode of use of the product.
299
+
300
+ "Installation Information" for a User Product means any methods,
301
+ procedures, authorization keys, or other information required to install
302
+ and execute modified versions of a covered work in that User Product from
303
+ a modified version of its Corresponding Source. The information must
304
+ suffice to ensure that the continued functioning of the modified object
305
+ code is in no case prevented or interfered with solely because
306
+ modification has been made.
307
+
308
+ If you convey an object code work under this section in, or with, or
309
+ specifically for use in, a User Product, and the conveying occurs as
310
+ part of a transaction in which the right of possession and use of the
311
+ User Product is transferred to the recipient in perpetuity or for a
312
+ fixed term (regardless of how the transaction is characterized), the
313
+ Corresponding Source conveyed under this section must be accompanied
314
+ by the Installation Information. But this requirement does not apply
315
+ if neither you nor any third party retains the ability to install
316
+ modified object code on the User Product (for example, the work has
317
+ been installed in ROM).
318
+
319
+ The requirement to provide Installation Information does not include a
320
+ requirement to continue to provide support service, warranty, or updates
321
+ for a work that has been modified or installed by the recipient, or for
322
+ the User Product in which it has been modified or installed. Access to a
323
+ network may be denied when the modification itself materially and
324
+ adversely affects the operation of the network or violates the rules and
325
+ protocols for communication across the network.
326
+
327
+ Corresponding Source conveyed, and Installation Information provided,
328
+ in accord with this section must be in a format that is publicly
329
+ documented (and with an implementation available to the public in
330
+ source code form), and must require no special password or key for
331
+ unpacking, reading or copying.
332
+
333
+ 7. Additional Terms.
334
+
335
+ "Additional permissions" are terms that supplement the terms of this
336
+ License by making exceptions from one or more of its conditions.
337
+ Additional permissions that are applicable to the entire Program shall
338
+ be treated as though they were included in this License, to the extent
339
+ that they are valid under applicable law. If additional permissions
340
+ apply only to part of the Program, that part may be used separately
341
+ under those permissions, but the entire Program remains governed by
342
+ this License without regard to the additional permissions.
343
+
344
+ When you convey a copy of a covered work, you may at your option
345
+ remove any additional permissions from that copy, or from any part of
346
+ it. (Additional permissions may be written to require their own
347
+ removal in certain cases when you modify the work.) You may place
348
+ additional permissions on material, added by you to a covered work,
349
+ for which you have or can give appropriate copyright permission.
350
+
351
+ Notwithstanding any other provision of this License, for material you
352
+ add to a covered work, you may (if authorized by the copyright holders of
353
+ that material) supplement the terms of this License with terms:
354
+
355
+ a) Disclaiming warranty or limiting liability differently from the
356
+ terms of sections 15 and 16 of this License; or
357
+
358
+ b) Requiring preservation of specified reasonable legal notices or
359
+ author attributions in that material or in the Appropriate Legal
360
+ Notices displayed by works containing it; or
361
+
362
+ c) Prohibiting misrepresentation of the origin of that material, or
363
+ requiring that modified versions of such material be marked in
364
+ reasonable ways as different from the original version; or
365
+
366
+ d) Limiting the use for publicity purposes of names of licensors or
367
+ authors of the material; or
368
+
369
+ e) Declining to grant rights under trademark law for use of some
370
+ trade names, trademarks, or service marks; or
371
+
372
+ f) Requiring indemnification of licensors and authors of that
373
+ material by anyone who conveys the material (or modified versions of
374
+ it) with contractual assumptions of liability to the recipient, for
375
+ any liability that these contractual assumptions directly impose on
376
+ those licensors and authors.
377
+
378
+ All other non-permissive additional terms are considered "further
379
+ restrictions" within the meaning of section 10. If the Program as you
380
+ received it, or any part of it, contains a notice stating that it is
381
+ governed by this License along with a term that is a further
382
+ restriction, you may remove that term. If a license document contains
383
+ a further restriction but permits relicensing or conveying under this
384
+ License, you may add to a covered work material governed by the terms
385
+ of that license document, provided that the further restriction does
386
+ not survive such relicensing or conveying.
387
+
388
+ If you add terms to a covered work in accord with this section, you
389
+ must place, in the relevant source files, a statement of the
390
+ additional terms that apply to those files, or a notice indicating
391
+ where to find the applicable terms.
392
+
393
+ Additional terms, permissive or non-permissive, may be stated in the
394
+ form of a separately written license, or stated as exceptions;
395
+ the above requirements apply either way.
396
+
397
+ 8. Termination.
398
+
399
+ You may not propagate or modify a covered work except as expressly
400
+ provided under this License. Any attempt otherwise to propagate or
401
+ modify it is void, and will automatically terminate your rights under
402
+ this License (including any patent licenses granted under the third
403
+ paragraph of section 11).
404
+
405
+ However, if you cease all violation of this License, then your
406
+ license from a particular copyright holder is reinstated (a)
407
+ provisionally, unless and until the copyright holder explicitly and
408
+ finally terminates your license, and (b) permanently, if the copyright
409
+ holder fails to notify you of the violation by some reasonable means
410
+ prior to 60 days after the cessation.
411
+
412
+ Moreover, your license from a particular copyright holder is
413
+ reinstated permanently if the copyright holder notifies you of the
414
+ violation by some reasonable means, this is the first time you have
415
+ received notice of violation of this License (for any work) from that
416
+ copyright holder, and you cure the violation prior to 30 days after
417
+ your receipt of the notice.
418
+
419
+ Termination of your rights under this section does not terminate the
420
+ licenses of parties who have received copies or rights from you under
421
+ this License. If your rights have been terminated and not permanently
422
+ reinstated, you do not qualify to receive new licenses for the same
423
+ material under section 10.
424
+
425
+ 9. Acceptance Not Required for Having Copies.
426
+
427
+ You are not required to accept this License in order to receive or
428
+ run a copy of the Program. Ancillary propagation of a covered work
429
+ occurring solely as a consequence of using peer-to-peer transmission
430
+ to receive a copy likewise does not require acceptance. However,
431
+ nothing other than this License grants you permission to propagate or
432
+ modify any covered work. These actions infringe copyright if you do
433
+ not accept this License. Therefore, by modifying or propagating a
434
+ covered work, you indicate your acceptance of this License to do so.
435
+
436
+ 10. Automatic Licensing of Downstream Recipients.
437
+
438
+ Each time you convey a covered work, the recipient automatically
439
+ receives a license from the original licensors, to run, modify and
440
+ propagate that work, subject to this License. You are not responsible
441
+ for enforcing compliance by third parties with this License.
442
+
443
+ An "entity transaction" is a transaction transferring control of an
444
+ organization, or substantially all assets of one, or subdividing an
445
+ organization, or merging organizations. If propagation of a covered
446
+ work results from an entity transaction, each party to that
447
+ transaction who receives a copy of the work also receives whatever
448
+ licenses to the work the party's predecessor in interest had or could
449
+ give under the previous paragraph, plus a right to possession of the
450
+ Corresponding Source of the work from the predecessor in interest, if
451
+ the predecessor has it or can get it with reasonable efforts.
452
+
453
+ You may not impose any further restrictions on the exercise of the
454
+ rights granted or affirmed under this License. For example, you may
455
+ not impose a license fee, royalty, or other charge for exercise of
456
+ rights granted under this License, and you may not initiate litigation
457
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
458
+ any patent claim is infringed by making, using, selling, offering for
459
+ sale, or importing the Program or any portion of it.
460
+
461
+ 11. Patents.
462
+
463
+ A "contributor" is a copyright holder who authorizes use under this
464
+ License of the Program or a work on which the Program is based. The
465
+ work thus licensed is called the contributor's "contributor version".
466
+
467
+ A contributor's "essential patent claims" are all patent claims
468
+ owned or controlled by the contributor, whether already acquired or
469
+ hereafter acquired, that would be infringed by some manner, permitted
470
+ by this License, of making, using, or selling its contributor version,
471
+ but do not include claims that would be infringed only as a
472
+ consequence of further modification of the contributor version. For
473
+ purposes of this definition, "control" includes the right to grant
474
+ patent sublicenses in a manner consistent with the requirements of
475
+ this License.
476
+
477
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
478
+ patent license under the contributor's essential patent claims, to
479
+ make, use, sell, offer for sale, import and otherwise run, modify and
480
+ propagate the contents of its contributor version.
481
+
482
+ In the following three paragraphs, a "patent license" is any express
483
+ agreement or commitment, however denominated, not to enforce a patent
484
+ (such as an express permission to practice a patent or covenant not to
485
+ sue for patent infringement). To "grant" such a patent license to a
486
+ party means to make such an agreement or commitment not to enforce a
487
+ patent against the party.
488
+
489
+ If you convey a covered work, knowingly relying on a patent license,
490
+ and the Corresponding Source of the work is not available for anyone
491
+ to copy, free of charge and under the terms of this License, through a
492
+ publicly available network server or other readily accessible means,
493
+ then you must either (1) cause the Corresponding Source to be so
494
+ available, or (2) arrange to deprive yourself of the benefit of the
495
+ patent license for this particular work, or (3) arrange, in a manner
496
+ consistent with the requirements of this License, to extend the patent
497
+ license to downstream recipients. "Knowingly relying" means you have
498
+ actual knowledge that, but for the patent license, your conveying the
499
+ covered work in a country, or your recipient's use of the covered work
500
+ in a country, would infringe one or more identifiable patents in that
501
+ country that you have reason to believe are valid.
502
+
503
+ If, pursuant to or in connection with a single transaction or
504
+ arrangement, you convey, or propagate by procuring conveyance of, a
505
+ covered work, and grant a patent license to some of the parties
506
+ receiving the covered work authorizing them to use, propagate, modify
507
+ or convey a specific copy of the covered work, then the patent license
508
+ you grant is automatically extended to all recipients of the covered
509
+ work and works based on it.
510
+
511
+ A patent license is "discriminatory" if it does not include within
512
+ the scope of its coverage, prohibits the exercise of, or is
513
+ conditioned on the non-exercise of one or more of the rights that are
514
+ specifically granted under this License. You may not convey a covered
515
+ work if you are a party to an arrangement with a third party that is
516
+ in the business of distributing software, under which you make payment
517
+ to the third party based on the extent of your activity of conveying
518
+ the work, and under which the third party grants, to any of the
519
+ parties who would receive the covered work from you, a discriminatory
520
+ patent license (a) in connection with copies of the covered work
521
+ conveyed by you (or copies made from those copies), or (b) primarily
522
+ for and in connection with specific products or compilations that
523
+ contain the covered work, unless you entered into that arrangement,
524
+ or that patent license was granted, prior to 28 March 2007.
525
+
526
+ Nothing in this License shall be construed as excluding or limiting
527
+ any implied license or other defenses to infringement that may
528
+ otherwise be available to you under applicable patent law.
529
+
530
+ 12. No Surrender of Others' Freedom.
531
+
532
+ If conditions are imposed on you (whether by court order, agreement or
533
+ otherwise) that contradict the conditions of this License, they do not
534
+ excuse you from the conditions of this License. If you cannot convey a
535
+ covered work so as to satisfy simultaneously your obligations under this
536
+ License and any other pertinent obligations, then as a consequence you may
537
+ not convey it at all. For example, if you agree to terms that obligate you
538
+ to collect a royalty for further conveying from those to whom you convey
539
+ the Program, the only way you could satisfy both those terms and this
540
+ License would be to refrain entirely from conveying the Program.
541
+
542
+ 13. Remote Network Interaction; Use with the GNU General Public License.
543
+
544
+ Notwithstanding any other provision of this License, if you modify the
545
+ Program, your modified version must prominently offer all users
546
+ interacting with it remotely through a computer network (if your version
547
+ supports such interaction) an opportunity to receive the Corresponding
548
+ Source of your version by providing access to the Corresponding Source
549
+ from a network server at no charge, through some standard or customary
550
+ means of facilitating copying of software. This Corresponding Source
551
+ shall include the Corresponding Source for any work covered by version 3
552
+ of the GNU General Public License that is incorporated pursuant to the
553
+ following paragraph.
554
+
555
+ Notwithstanding any other provision of this License, you have
556
+ permission to link or combine any covered work with a work licensed
557
+ under version 3 of the GNU General Public License into a single
558
+ combined work, and to convey the resulting work. The terms of this
559
+ License will continue to apply to the part which is the covered work,
560
+ but the work with which it is combined will remain governed by version
561
+ 3 of the GNU General Public License.
562
+
563
+ 14. Revised Versions of this License.
564
+
565
+ The Free Software Foundation may publish revised and/or new versions of
566
+ the GNU Affero General Public License from time to time. Such new versions
567
+ will be similar in spirit to the present version, but may differ in detail to
568
+ address new problems or concerns.
569
+
570
+ Each version is given a distinguishing version number. If the
571
+ Program specifies that a certain numbered version of the GNU Affero General
572
+ Public License "or any later version" applies to it, you have the
573
+ option of following the terms and conditions either of that numbered
574
+ version or of any later version published by the Free Software
575
+ Foundation. If the Program does not specify a version number of the
576
+ GNU Affero General Public License, you may choose any version ever published
577
+ by the Free Software Foundation.
578
+
579
+ If the Program specifies that a proxy can decide which future
580
+ versions of the GNU Affero General Public License can be used, that proxy's
581
+ public statement of acceptance of a version permanently authorizes you
582
+ to choose that version for the Program.
583
+
584
+ Later license versions may give you additional or different
585
+ permissions. However, no additional obligations are imposed on any
586
+ author or copyright holder as a result of your choosing to follow a
587
+ later version.
588
+
589
+ 15. Disclaimer of Warranty.
590
+
591
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599
+
600
+ 16. Limitation of Liability.
601
+
602
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610
+ SUCH DAMAGES.
611
+
612
+ 17. Interpretation of Sections 15 and 16.
613
+
614
+ If the disclaimer of warranty and limitation of liability provided
615
+ above cannot be given local legal effect according to their terms,
616
+ reviewing courts shall apply local law that most closely approximates
617
+ an absolute waiver of all civil liability in connection with the
618
+ Program, unless a warranty or assumption of liability accompanies a
619
+ copy of the Program in return for a fee.
620
+
621
+ END OF TERMS AND CONDITIONS
622
+
623
+ How to Apply These Terms to Your New Programs
624
+
625
+ If you develop a new program, and you want it to be of the greatest
626
+ possible use to the public, the best way to achieve this is to make it
627
+ free software which everyone can redistribute and change under these terms.
628
+
629
+ To do so, attach the following notices to the program. It is safest
630
+ to attach them to the start of each source file to most effectively
631
+ state the exclusion of warranty; and each file should have at least
632
+ the "copyright" line and a pointer to where the full notice is found.
633
+
634
+ <one line to give the program's name and a brief idea of what it does.>
635
+ Copyright (C) <year> <name of author>
636
+
637
+ This program is free software: you can redistribute it and/or modify
638
+ it under the terms of the GNU Affero General Public License as published by
639
+ the Free Software Foundation, either version 3 of the License, or
640
+ (at your option) any later version.
641
+
642
+ This program is distributed in the hope that it will be useful,
643
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
644
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645
+ GNU Affero General Public License for more details.
646
+
647
+ You should have received a copy of the GNU Affero General Public License
648
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
649
+
650
+ Also add information on how to contact you by electronic and paper mail.
651
+
652
+ If your software can interact with users remotely through a computer
653
+ network, you should also make sure that it provides a way for users to
654
+ get its source. For example, if your program is a web application, its
655
+ interface could display a "Source" link that leads users to an archive
656
+ of the code. There are many ways you could offer source, and different
657
+ solutions will be better for different programs; see section 13 for the
658
+ specific requirements.
659
+
660
+ You should also get your employer (if you work as a programmer) or school,
661
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
662
+ For more information on this, and how to apply and follow the GNU AGPL, see
663
+ <https://www.gnu.org/licenses/>.
664
+
665
+
666
+ ---------------------------------Facebook BNB-------------------------------
667
+
668
+ MIT License
669
+
670
+ Copyright (c) Facebook, Inc. and its affiliates.
671
+
672
+ Permission is hereby granted, free of charge, to any person obtaining a copy
673
+ of this software and associated documentation files (the "Software"), to deal
674
+ in the Software without restriction, including without limitation the rights
675
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
676
+ copies of the Software, and to permit persons to whom the Software is
677
+ furnished to do so, subject to the following conditions:
678
+
679
+ The above copyright notice and this permission notice shall be included in all
680
+ copies or substantial portions of the Software.
681
+
682
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
683
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
684
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
685
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
686
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
687
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
688
+ SOFTWARE.
README.md CHANGED
@@ -1,12 +1,203 @@
1
  ---
2
- title: Stable Diffusion Webui Forge
3
- emoji: 📉
4
- colorFrom: red
5
- colorTo: yellow
6
  sdk: gradio
7
- sdk_version: 4.42.0
8
- app_file: app.py
9
- pinned: false
10
  ---
 
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
1
  ---
2
+ title: stable-diffusion-webui-forge
3
+ app_file: spaces.py
4
  sdk: gradio
5
+ sdk_version: 4.40.0
6
  ---
7
+ # Stable Diffusion WebUI Forge
8
 
9
+ Stable Diffusion WebUI Forge is a platform on top of [Stable Diffusion WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) (based on [Gradio](https://www.gradio.app/) <a href='https://github.com/gradio-app/gradio'><img src='https://img.shields.io/github/stars/gradio-app/gradio'></a>) to make development easier, optimize resource management, speed up inference, and study experimental features.
10
+
11
+ The name "Forge" is inspired from "Minecraft Forge". This project is aimed at becoming SD WebUI's Forge.
12
+
13
+ Forge is currently based on SD-WebUI 1.10.1 at [this commit](https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/82a973c04367123ae98bd9abdf80d9eda9b910e2). (Because the original SD-WebUI is almost static now, Forge will sync with the original WebUI every 90 days, or whenever important fixes land.)
14
+
15
+ # Quick List
16
+
17
+ [Gradio 4 UI Must Read (TLDR: You need to use RIGHT MOUSE BUTTON to move canvas!)](https://github.com/lllyasviel/stable-diffusion-webui-forge/discussions/853)
18
+
19
+ [Flux Tutorial (BitsandBytes Models, NF4, VRAM management UI, etc)](https://github.com/lllyasviel/stable-diffusion-webui-forge/discussions/981)
20
+
21
+ [Flux Tutorial 2 (Separated Full Models, GGUF, Technically Correct Comparison between GGUF and NF4, etc)](https://github.com/lllyasviel/stable-diffusion-webui-forge/discussions/1050)
22
+
23
+ [(Save Flux BitsandBytes UNet/Checkpoint)](https://github.com/lllyasviel/stable-diffusion-webui-forge/discussions/1224#discussioncomment-10384104)
24
+
25
+ [LayerDiffuse Transparent Image Editing](https://github.com/lllyasviel/stable-diffusion-webui-forge/discussions/854)
26
+
27
+ [(Policy) Soft Advertisement Removal Policy](https://github.com/lllyasviel/stable-diffusion-webui-forge/discussions/1286)
28
+
29
+ (Flux BNB NF4 / GGUF Q8_0/Q5_0/Q5_1/Q4_0/Q4_1 are all natively supported, with a GPU weight slider, a Queue/Async swap toggle, and a swap-location toggle. All Flux BNB NF4 / GGUF Q8_0/Q5_0/Q4_0 models have LoRA support.)
30
+
31
+ # Installing Forge
32
+
33
+ **Just use this one-click installation package (with git and python included).**
34
+
35
+ [>>> Click Here to Download One-Click Package (CUDA 12.1 + Pytorch 2.3.1) <<<](https://github.com/lllyasviel/stable-diffusion-webui-forge/releases/download/latest/webui_forge_cu121_torch231.7z)
36
+
37
+ Some other CUDA/Torch Versions:
38
+
39
+ [Forge with CUDA 12.1 + Pytorch 2.3.1](https://github.com/lllyasviel/stable-diffusion-webui-forge/releases/download/latest/webui_forge_cu121_torch231.7z) <- **Recommended**
40
+
41
+ [Forge with CUDA 12.4 + Pytorch 2.4](https://github.com/lllyasviel/stable-diffusion-webui-forge/releases/download/latest/webui_forge_cu124_torch24.7z) <- **Fastest**, but MSVC may be broken and xformers may not work
42
+
43
+ [Forge with CUDA 12.1 + Pytorch 2.1](https://github.com/lllyasviel/stable-diffusion-webui-forge/releases/download/latest/webui_forge_cu121_torch21.7z) <- the previously used older environment
44
+
45
+ After downloading, uncompress the package, run `update.bat` to update, and run `run.bat` to launch.
46
+
47
+ Note that running `update.bat` is important; otherwise you may be running an older version with unfixed bugs.
48
+
49
+ ![image](https://github.com/lllyasviel/stable-diffusion-webui-forge/assets/19834515/c49bd60d-82bd-4086-9859-88d472582b94)
50
+
51
+ ### Advanced Install
52
+
53
+ If you are proficient in Git and you want to install Forge as another branch of SD-WebUI, please see [here](https://github.com/continue-revolution/sd-webui-animatediff/blob/forge/master/docs/how-to-use.md#you-have-a1111-and-you-know-git). This way, you can reuse all SD checkpoints and all extensions you previously installed in your original SD-WebUI, but you should know what you are doing.
54
+
55
+ If you know what you are doing, you can also install Forge using the same method as SD-WebUI. (Install Git and Python, clone the Forge repo `https://github.com/lllyasviel/stable-diffusion-webui-forge.git`, and then run `webui-user.bat`.)
56
+
57
+ ### Previous Versions
58
+
59
+ You can download previous versions [here](https://github.com/lllyasviel/stable-diffusion-webui-forge/discussions/849).
60
+
61
+ # Forge Status
62
+
63
+ Based on manually testing each component one by one:
64
+
65
+ | Component | Status | Last Test |
66
+ |---------------------------------------------------|---------|--------------|
67
+ | Basic Diffusion | Normal | 2024 July 27 |
68
+ | GPU Memory Management System | Normal | 2024 July 27 |
69
+ | LoRAs | Normal | 2024 July 27 |
70
+ | All Preprocessors | Normal | 2024 July 27 |
71
+ | All ControlNets | Normal | 2024 July 27 |
72
+ | All IP-Adapters | Normal | 2024 July 27 |
73
+ | All Instant-IDs | Normal | 2024 July 27 |
74
+ | All Reference-only Methods | Normal | 2024 July 27 |
75
+ | All Integrated Extensions | Normal | 2024 July 27 |
76
+ | Popular Extensions (Adetailer, etc) | Normal | 2024 July 27 |
77
+ | Gradio 4 UIs | Normal | 2024 July 27 |
78
+ | Gradio 4 Forge Canvas | Normal | 2024 July 27 |
79
+ | LoRA/Checkpoint Selection UI for Gradio 4 | Normal | 2024 July 27 |
80
+ | Photopea/OpenposeEditor/etc for ControlNet | Normal | 2024 July 27 |
81
+ | Wacom 128 level touch pressure support for Canvas | Normal | 2024 July 15 |
82
+ | Microsoft Surface touch pressure support for Canvas | Broken, pending fix | 2024 July 29 |
83
+
84
+ Feel free to open an issue if anything is broken; I will take a look every few days. If I do not update this "Forge Status", it means I cannot reproduce the problem. In that case, a fresh re-install should help in most cases.
85
+
86
+ # UnetPatcher
87
+
88
+ Below is a self-contained **single file** with all the code needed to implement FreeU V2.
89
+
90
+ See also `extensions-builtin/sd_forge_freeu/scripts/forge_freeu.py`:
91
+
92
+ ```python
93
+ import torch
94
+ import gradio as gr
95
+
96
+ from modules import scripts
97
+
98
+
99
+ def Fourier_filter(x, threshold, scale):
100
+ # FFT
101
+ x_freq = torch.fft.fftn(x.float(), dim=(-2, -1))
102
+ x_freq = torch.fft.fftshift(x_freq, dim=(-2, -1))
103
+
104
+ B, C, H, W = x_freq.shape
105
+ mask = torch.ones((B, C, H, W), device=x.device)
106
+
107
+ crow, ccol = H // 2, W // 2
108
+ mask[..., crow - threshold:crow + threshold, ccol - threshold:ccol + threshold] = scale
109
+ x_freq = x_freq * mask
110
+
111
+ # IFFT
112
+ x_freq = torch.fft.ifftshift(x_freq, dim=(-2, -1))
113
+ x_filtered = torch.fft.ifftn(x_freq, dim=(-2, -1)).real
114
+
115
+ return x_filtered.to(x.dtype)
116
+
117
+
118
+ def patch_freeu_v2(unet_patcher, b1, b2, s1, s2):
119
+ model_channels = unet_patcher.model.diffusion_model.config["model_channels"]
120
+ scale_dict = {model_channels * 4: (b1, s1), model_channels * 2: (b2, s2)}
121
+ on_cpu_devices = {}
122
+
123
+ def output_block_patch(h, hsp, transformer_options):
124
+ scale = scale_dict.get(h.shape[1], None)
125
+ if scale is not None:
126
+ hidden_mean = h.mean(1).unsqueeze(1)
127
+ B = hidden_mean.shape[0]
128
+ hidden_max, _ = torch.max(hidden_mean.view(B, -1), dim=-1, keepdim=True)
129
+ hidden_min, _ = torch.min(hidden_mean.view(B, -1), dim=-1, keepdim=True)
130
+ hidden_mean = (hidden_mean - hidden_min.unsqueeze(2).unsqueeze(3)) / (hidden_max - hidden_min).unsqueeze(2).unsqueeze(3)
131
+
132
+ h[:, :h.shape[1] // 2] = h[:, :h.shape[1] // 2] * ((scale[0] - 1) * hidden_mean + 1)
133
+
134
+ if hsp.device not in on_cpu_devices:
135
+ try:
136
+ hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
137
+ except Exception:
138
+ print("Device", hsp.device, "does not support torch.fft; falling back to CPU.")
139
+ on_cpu_devices[hsp.device] = True
140
+ hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
141
+ else:
142
+ hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
143
+
144
+ return h, hsp
145
+
146
+ m = unet_patcher.clone()
147
+ m.set_model_output_block_patch(output_block_patch)
148
+ return m
149
+
150
+
151
+ class FreeUForForge(scripts.Script):
152
+ sorting_priority = 12 # It will be the 12th item on UI.
153
+
154
+ def title(self):
155
+ return "FreeU Integrated"
156
+
157
+ def show(self, is_img2img):
158
+ # make this extension visible in both txt2img and img2img tab.
159
+ return scripts.AlwaysVisible
160
+
161
+ def ui(self, *args, **kwargs):
162
+ with gr.Accordion(open=False, label=self.title()):
163
+ freeu_enabled = gr.Checkbox(label='Enabled', value=False)
164
+ freeu_b1 = gr.Slider(label='B1', minimum=0, maximum=2, step=0.01, value=1.01)
165
+ freeu_b2 = gr.Slider(label='B2', minimum=0, maximum=2, step=0.01, value=1.02)
166
+ freeu_s1 = gr.Slider(label='S1', minimum=0, maximum=4, step=0.01, value=0.99)
167
+ freeu_s2 = gr.Slider(label='S2', minimum=0, maximum=4, step=0.01, value=0.95)
168
+
169
+ return freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2
170
+
171
+ def process_before_every_sampling(self, p, *script_args, **kwargs):
172
+ # This will be called before every sampling.
173
+ # If you use highres fix, this will be called twice.
174
+
175
+ freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2 = script_args
176
+
177
+ if not freeu_enabled:
178
+ return
179
+
180
+ unet = p.sd_model.forge_objects.unet
181
+
182
+ unet = patch_freeu_v2(unet, freeu_b1, freeu_b2, freeu_s1, freeu_s2)
183
+
184
+ p.sd_model.forge_objects.unet = unet
185
+
186
+ # The lines below add some log text beneath the image outputs on the UI.
187
+ # extra_generation_params does not influence the results.
188
+ p.extra_generation_params.update(dict(
189
+ freeu_enabled=freeu_enabled,
190
+ freeu_b1=freeu_b1,
191
+ freeu_b2=freeu_b2,
192
+ freeu_s1=freeu_s1,
193
+ freeu_s2=freeu_s2,
194
+ ))
195
+
196
+ return
197
+ ```
198
+
199
+ See also [Forge's Unet Implementation](https://github.com/lllyasviel/stable-diffusion-webui-forge/blob/main/backend/nn/unet.py).
200
+
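+ The frequency filtering above is easy to sanity-check in isolation. Below is a minimal sketch (assuming only `torch` is installed and that `Fourier_filter` is the function defined in the file above):
+ 
+ ```python
+ import torch
+ 
+ x = torch.randn(1, 4, 64, 64)
+ 
+ # With scale=1 the frequency mask is all ones, so the filter is an identity (up to FFT round-off).
+ assert torch.allclose(Fourier_filter(x, threshold=1, scale=1.0), x, atol=1e-4)
+ 
+ # With scale<1 the lowest frequencies (including the DC term) are attenuated, so total energy drops.
+ filtered = Fourier_filter(x, threshold=1, scale=0.5)
+ print(x.pow(2).sum().item(), '->', filtered.pow(2).sum().item())
+ ```
+ 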
201
+ # Under Construction
202
+
203
+ WebUI Forge is currently under construction, and docs / UI / functionality may change with updates.
_typos.toml ADDED
@@ -0,0 +1,5 @@
1
+ [default.extend-words]
2
+ # Part of "RGBa" (Pillow's pre-multiplied alpha RGB mode)
3
+ Ba = "Ba"
4
+ # HSA is something AMD uses for their GPUs
5
+ HSA = "HSA"
backend/README.md ADDED
@@ -0,0 +1 @@
1
+ # WIP Backend for Forge
backend/args.py ADDED
@@ -0,0 +1,67 @@
1
+ import argparse
2
+
3
+ parser = argparse.ArgumentParser()
4
+
5
+ parser.add_argument("--gpu-device-id", type=int, default=None, metavar="DEVICE_ID")
6
+
7
+ fp_group = parser.add_mutually_exclusive_group()
8
+ fp_group.add_argument("--all-in-fp32", action="store_true")
9
+ fp_group.add_argument("--all-in-fp16", action="store_true")
10
+
11
+ fpunet_group = parser.add_mutually_exclusive_group()
12
+ fpunet_group.add_argument("--unet-in-bf16", action="store_true")
13
+ fpunet_group.add_argument("--unet-in-fp16", action="store_true")
14
+ fpunet_group.add_argument("--unet-in-fp8-e4m3fn", action="store_true")
15
+ fpunet_group.add_argument("--unet-in-fp8-e5m2", action="store_true")
16
+
17
+ fpvae_group = parser.add_mutually_exclusive_group()
18
+ fpvae_group.add_argument("--vae-in-fp16", action="store_true")
19
+ fpvae_group.add_argument("--vae-in-fp32", action="store_true")
20
+ fpvae_group.add_argument("--vae-in-bf16", action="store_true")
21
+
22
+ parser.add_argument("--vae-in-cpu", action="store_true")
23
+
24
+ fpte_group = parser.add_mutually_exclusive_group()
25
+ fpte_group.add_argument("--clip-in-fp8-e4m3fn", action="store_true")
26
+ fpte_group.add_argument("--clip-in-fp8-e5m2", action="store_true")
27
+ fpte_group.add_argument("--clip-in-fp16", action="store_true")
28
+ fpte_group.add_argument("--clip-in-fp32", action="store_true")
29
+
30
+ attn_group = parser.add_mutually_exclusive_group()
31
+ attn_group.add_argument("--attention-split", action="store_true")
32
+ attn_group.add_argument("--attention-quad", action="store_true")
33
+ attn_group.add_argument("--attention-pytorch", action="store_true")
34
+
35
+ upcast = parser.add_mutually_exclusive_group()
36
+ upcast.add_argument("--force-upcast-attention", action="store_true")
37
+ upcast.add_argument("--disable-attention-upcast", action="store_true")
38
+
39
+ parser.add_argument("--disable-xformers", action="store_true")
40
+
41
+ parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1)
42
+ parser.add_argument("--disable-ipex-hijack", action="store_true")
43
+
44
+ vram_group = parser.add_mutually_exclusive_group()
45
+ vram_group.add_argument("--always-gpu", action="store_true")
46
+ vram_group.add_argument("--always-high-vram", action="store_true")
47
+ vram_group.add_argument("--always-normal-vram", action="store_true")
48
+ vram_group.add_argument("--always-low-vram", action="store_true")
49
+ vram_group.add_argument("--always-no-vram", action="store_true")
50
+ vram_group.add_argument("--always-cpu", action="store_true")
51
+
52
+ parser.add_argument("--always-offload-from-vram", action="store_true")
53
+ parser.add_argument("--pytorch-deterministic", action="store_true")
54
+
55
+ parser.add_argument("--cuda-malloc", action="store_true")
56
+ parser.add_argument("--cuda-stream", action="store_true")
57
+ parser.add_argument("--pin-shared-memory", action="store_true")
58
+
59
+ parser.add_argument("--disable-gpu-warning", action="store_true")
60
+
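+ # parse_known_args() returns (namespace, unrecognized_args); taking [0] lets the frontend define extra flags without breaking this parser.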
61
+ args = parser.parse_known_args()[0]
62
+
63
+ # Some dynamic args that may be changed by the webui at runtime rather than by cmd flags.
64
+ dynamic_args = dict(
65
+ embedding_dir='./embeddings',
66
+ emphasis_name='original'
67
+ )
backend/attention.py ADDED
@@ -0,0 +1,501 @@
1
+ import math
2
+ import torch
3
+ import einops
4
+
5
+ from backend.args import args
6
+ from backend import memory_management
7
+ from backend.misc.sub_quadratic_attention import efficient_dot_product_attention
8
+
9
+
10
+ BROKEN_XFORMERS = False
11
+ if memory_management.xformers_enabled():
12
+ import xformers
13
+ import xformers.ops
14
+
15
+ try:
16
+ x_vers = xformers.__version__
17
+ BROKEN_XFORMERS = x_vers.startswith("0.0.2") and not x_vers.startswith("0.0.20")
18
+ except Exception:
19
+ pass
20
+
21
+
22
+ FORCE_UPCAST_ATTENTION_DTYPE = memory_management.force_upcast_attention_dtype()
23
+
24
+
25
+ def get_attn_precision(attn_precision=torch.float32):
26
+ if args.disable_attention_upcast:
27
+ return None
28
+ if FORCE_UPCAST_ATTENTION_DTYPE is not None:
29
+ return FORCE_UPCAST_ATTENTION_DTYPE
30
+ return attn_precision
31
+
32
+
33
+ def exists(val):
34
+ return val is not None
35
+
36
+
37
+ def attention_basic(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False):
38
+ attn_precision = get_attn_precision(attn_precision)
39
+
40
+ if skip_reshape:
41
+ b, _, _, dim_head = q.shape
42
+ else:
43
+ b, _, dim_head = q.shape
44
+ dim_head //= heads
45
+
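+ # Standard scaled dot-product attention: softmax(Q @ K^T * dim_head**-0.5) @ V, computed per head.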
46
+ scale = dim_head ** -0.5
47
+
48
+ h = heads
49
+ if skip_reshape:
50
+ q, k, v = map(
51
+ lambda t: t.reshape(b * heads, -1, dim_head),
52
+ (q, k, v),
53
+ )
54
+ else:
55
+ q, k, v = map(
56
+ lambda t: t.unsqueeze(3)
57
+ .reshape(b, -1, heads, dim_head)
58
+ .permute(0, 2, 1, 3)
59
+ .reshape(b * heads, -1, dim_head)
60
+ .contiguous(),
61
+ (q, k, v),
62
+ )
63
+
64
+ if attn_precision == torch.float32:
65
+ sim = torch.einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale
66
+ else:
67
+ sim = torch.einsum('b i d, b j d -> b i j', q, k) * scale
68
+
69
+ del q, k
70
+
71
+ if exists(mask):
72
+ if mask.dtype == torch.bool:
73
+ mask = einops.rearrange(mask, 'b ... -> b (...)')
74
+ max_neg_value = -torch.finfo(sim.dtype).max
75
+ mask = einops.repeat(mask, 'b j -> (b h) () j', h=h)
76
+ sim.masked_fill_(~mask, max_neg_value)
77
+ else:
78
+ if len(mask.shape) == 2:
79
+ bs = 1
80
+ else:
81
+ bs = mask.shape[0]
82
+ mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1]).expand(b, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])
83
+ sim.add_(mask)
84
+
85
+ sim = sim.softmax(dim=-1)
86
+ out = torch.einsum('b i j, b j d -> b i d', sim.to(v.dtype), v)
87
+ out = (
88
+ out.unsqueeze(0)
89
+ .reshape(b, heads, -1, dim_head)
90
+ .permute(0, 2, 1, 3)
91
+ .reshape(b, -1, heads * dim_head)
92
+ )
93
+ return out
94
+
95
+
96
+ def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None, skip_reshape=False):
97
+ attn_precision = get_attn_precision(attn_precision)
98
+
99
+ if skip_reshape:
100
+ b, _, _, dim_head = query.shape
101
+ else:
102
+ b, _, dim_head = query.shape
103
+ dim_head //= heads
104
+
105
+ scale = dim_head ** -0.5
106
+
107
+ if skip_reshape:
108
+ query = query.reshape(b * heads, -1, dim_head)
109
+ value = value.reshape(b * heads, -1, dim_head)
110
+ key = key.reshape(b * heads, -1, dim_head).movedim(1, 2)
111
+ else:
112
+ query = query.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)
113
+ value = value.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)
114
+ key = key.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 3, 1).reshape(b * heads, dim_head, -1)
115
+
116
+ dtype = query.dtype
117
+ upcast_attention = attn_precision == torch.float32 and query.dtype != torch.float32
118
+ if upcast_attention:
119
+ bytes_per_token = torch.finfo(torch.float32).bits // 8
120
+ else:
121
+ bytes_per_token = torch.finfo(query.dtype).bits // 8
122
+ batch_x_heads, q_tokens, _ = query.shape
123
+ _, _, k_tokens = key.shape
124
+ qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens
125
+
126
+ mem_free_total, mem_free_torch = memory_management.get_free_memory(query.device, True)
127
+
128
+ kv_chunk_size_min = None
129
+ kv_chunk_size = None
130
+ query_chunk_size = None
131
+
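+ # Pick the largest query chunk size for which one chunk's attention against all keys fits in free memory.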
132
+ for x in [4096, 2048, 1024, 512, 256]:
133
+ count = mem_free_total / (batch_x_heads * bytes_per_token * x * 4.0)
134
+ if count >= k_tokens:
135
+ kv_chunk_size = k_tokens
136
+ query_chunk_size = x
137
+ break
138
+
139
+ if query_chunk_size is None:
140
+ query_chunk_size = 512
141
+
142
+ if mask is not None:
143
+ if len(mask.shape) == 2:
144
+ bs = 1
145
+ else:
146
+ bs = mask.shape[0]
147
+ mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1]).expand(b, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])
148
+
149
+ hidden_states = efficient_dot_product_attention(
150
+ query,
151
+ key,
152
+ value,
153
+ query_chunk_size=query_chunk_size,
154
+ kv_chunk_size=kv_chunk_size,
155
+ kv_chunk_size_min=kv_chunk_size_min,
156
+ use_checkpoint=False,
157
+ upcast_attention=upcast_attention,
158
+ mask=mask,
159
+ )
160
+
161
+ hidden_states = hidden_states.to(dtype)
162
+
163
+ hidden_states = hidden_states.unflatten(0, (-1, heads)).transpose(1, 2).flatten(start_dim=2)
164
+ return hidden_states
165
+
166
+
167
+ def attention_split(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False):
168
+ attn_precision = get_attn_precision(attn_precision)
169
+
170
+ if skip_reshape:
171
+ b, _, _, dim_head = q.shape
172
+ else:
173
+ b, _, dim_head = q.shape
174
+ dim_head //= heads
175
+
176
+ scale = dim_head ** -0.5
177
+
178
+ h = heads
179
+ if skip_reshape:
180
+ q, k, v = map(
181
+ lambda t: t.reshape(b * heads, -1, dim_head),
182
+ (q, k, v),
183
+ )
184
+ else:
185
+ q, k, v = map(
186
+ lambda t: t.unsqueeze(3)
187
+ .reshape(b, -1, heads, dim_head)
188
+ .permute(0, 2, 1, 3)
189
+ .reshape(b * heads, -1, dim_head)
190
+ .contiguous(),
191
+ (q, k, v),
192
+ )
193
+
194
+ r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
195
+
196
+ mem_free_total = memory_management.get_free_memory(q.device)
197
+
198
+ if attn_precision == torch.float32:
199
+ element_size = 4
200
+ upcast = True
201
+ else:
202
+ element_size = q.element_size()
203
+ upcast = False
204
+
205
+ gb = 1024 ** 3
206
+ tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * element_size
207
+ modifier = 3
208
+ mem_required = tensor_size * modifier
209
+ steps = 1
210
+
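+ # If the full attention matrix does not fit, split the query dimension into the smallest power-of-two number of chunks that does.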
211
+ if mem_required > mem_free_total:
212
+ steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2)))
213
+ # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
214
+ # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")
215
+
216
+ if steps > 64:
217
+ max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
218
+ raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
219
+ f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free')
220
+
221
+ if mask is not None:
222
+ if len(mask.shape) == 2:
223
+ bs = 1
224
+ else:
225
+ bs = mask.shape[0]
226
+ mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1]).expand(b, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])
227
+
228
+ # print("steps", steps, mem_required, mem_free_total, modifier, q.element_size(), tensor_size)
229
+ first_op_done = False
230
+ cleared_cache = False
231
+ while True:
232
+ try:
233
+ slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
234
+ for i in range(0, q.shape[1], slice_size):
235
+ end = i + slice_size
236
+ if upcast:
237
+ with torch.autocast(enabled=False, device_type='cuda'):
238
+ s1 = torch.einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * scale
239
+ else:
240
+ s1 = torch.einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale
241
+
242
+ if mask is not None:
243
+ if len(mask.shape) == 2:
244
+ s1 += mask[i:end]
245
+ else:
246
+ s1 += mask[:, i:end]
247
+
248
+ s2 = s1.softmax(dim=-1).to(v.dtype)
249
+ del s1
250
+ first_op_done = True
251
+
252
+ r1[:, i:end] = torch.einsum('b i j, b j d -> b i d', s2, v)
253
+ del s2
254
+ break
255
+ except memory_management.OOM_EXCEPTION as e:
256
+ if not first_op_done:
257
+ memory_management.soft_empty_cache(True)
258
+ if not cleared_cache:
259
+ cleared_cache = True
260
+ print("out of memory error, emptying cache and trying again")
261
+ continue
262
+ steps *= 2
263
+ if steps > 64:
264
+ raise e
265
+ print("out of memory error, increasing steps and trying again {}".format(steps))
266
+ else:
267
+ raise e
268
+
269
+ del q, k, v
270
+
271
+ r1 = (
272
+ r1.unsqueeze(0)
273
+ .reshape(b, heads, -1, dim_head)
274
+ .permute(0, 2, 1, 3)
275
+ .reshape(b, -1, heads * dim_head)
276
+ )
277
+ return r1
278
+
279
+
280
+ def attention_xformers(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False):
281
+ if skip_reshape:
282
+ b, _, _, dim_head = q.shape
283
+ else:
284
+ b, _, dim_head = q.shape
285
+ dim_head //= heads
286
+
287
+ if BROKEN_XFORMERS and b * heads > 65535:
288
+ return attention_pytorch(q, k, v, heads, mask, skip_reshape=skip_reshape)
289
+
290
+ if skip_reshape:
291
+ q, k, v = map(
292
+ lambda t: t.reshape(b * heads, -1, dim_head),
293
+ (q, k, v),
294
+ )
295
+ else:
296
+ q, k, v = map(
297
+ lambda t: t.reshape(b, -1, heads, dim_head),
298
+ (q, k, v),
299
+ )
300
+
301
+ if mask is not None:
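+ # xformers expects the last dimension of the attention bias to be padded for alignment, so allocate a padded buffer and slice the original width back out.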
302
+ pad = 8 - q.shape[1] % 8
303
+ mask_out = torch.empty([q.shape[0], q.shape[1], q.shape[1] + pad], dtype=q.dtype, device=q.device)
304
+ mask_out[:, :, :mask.shape[-1]] = mask
305
+ mask = mask_out[:, :, :mask.shape[-1]]
306
+
307
+ out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=mask)
308
+
309
+ if skip_reshape:
310
+ out = (
311
+ out.unsqueeze(0)
312
+ .reshape(b, heads, -1, dim_head)
313
+ .permute(0, 2, 1, 3)
314
+ .reshape(b, -1, heads * dim_head)
315
+ )
316
+ else:
317
+ out = (
318
+ out.reshape(b, -1, heads * dim_head)
319
+ )
320
+
321
+ return out
322
+
323
+
324
+ def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False):
325
+ if skip_reshape:
326
+ b, _, _, dim_head = q.shape
327
+ else:
328
+ b, _, dim_head = q.shape
329
+ dim_head //= heads
330
+ q, k, v = map(
331
+ lambda t: t.view(b, -1, heads, dim_head).transpose(1, 2),
332
+ (q, k, v),
333
+ )
334
+
335
+ out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
336
+ out = (
337
+ out.transpose(1, 2).reshape(b, -1, heads * dim_head)
338
+ )
339
+ return out
340
+
341
+
342
+ def slice_attention_single_head_spatial(q, k, v):
343
+ r1 = torch.zeros_like(k, device=q.device)
344
+ scale = (int(q.shape[-1]) ** (-0.5))
345
+
346
+ mem_free_total = memory_management.get_free_memory(q.device)
347
+
348
+ gb = 1024 ** 3
349
+ tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * q.element_size()
350
+ modifier = 3 if q.element_size() == 2 else 2.5
351
+ mem_required = tensor_size * modifier
352
+ steps = 1
353
+
354
+ if mem_required > mem_free_total:
355
+ steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2)))
356
+
357
+ while True:
358
+ try:
359
+ slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
360
+ for i in range(0, q.shape[1], slice_size):
361
+ end = i + slice_size
362
+ s1 = torch.bmm(q[:, i:end], k) * scale
363
+
364
+ s2 = torch.nn.functional.softmax(s1, dim=2).permute(0, 2, 1)
365
+ del s1
366
+
367
+ r1[:, :, i:end] = torch.bmm(v, s2)
368
+ del s2
369
+ break
370
+ except memory_management.OOM_EXCEPTION as e:
371
+ memory_management.soft_empty_cache(True)
372
+ steps *= 2
373
+ if steps > 128:
374
+ raise e
375
+ print("out of memory error, increasing steps and trying again {}".format(steps))
376
+
377
+ return r1
378
+
379
+
380
+ def normal_attention_single_head_spatial(q, k, v):
381
+ # compute attention
382
+ b, c, h, w = q.shape
383
+
384
+ q = q.reshape(b, c, h * w)
385
+ q = q.permute(0, 2, 1) # b,hw,c
386
+ k = k.reshape(b, c, h * w) # b,c,hw
387
+ v = v.reshape(b, c, h * w)
388
+
389
+ r1 = slice_attention_single_head_spatial(q, k, v)
390
+ h_ = r1.reshape(b, c, h, w)
391
+ del r1
392
+ return h_
393
+
394
+
395
+ def xformers_attention_single_head_spatial(q, k, v):
396
+ # compute attention
397
+ B, C, H, W = q.shape
398
+ q, k, v = map(
399
+ lambda t: t.view(B, C, -1).transpose(1, 2).contiguous(),
400
+ (q, k, v),
401
+ )
402
+
403
+ try:
404
+ out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
405
+ out = out.transpose(1, 2).reshape(B, C, H, W)
406
+ except NotImplementedError as e:
407
+ out = slice_attention_single_head_spatial(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2),
408
+ v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W)
409
+ return out
410
+
411
+
412
+ def pytorch_attention_single_head_spatial(q, k, v):
413
+ # compute attention
414
+ B, C, H, W = q.shape
415
+ q, k, v = map(
416
+ lambda t: t.view(B, 1, C, -1).transpose(2, 3).contiguous(),
417
+ (q, k, v),
418
+ )
419
+
420
+ try:
421
+ out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
422
+ out = out.transpose(2, 3).reshape(B, C, H, W)
423
+ except memory_management.OOM_EXCEPTION as e:
424
+ print("scaled_dot_product_attention OOMed: switched to slice attention")
425
+ out = slice_attention_single_head_spatial(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2),
426
+ v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W)
427
+ return out
428
+
429
+
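+ # The cross-attention backend is chosen once at import time, in order of preference: xformers, PyTorch SDPA, split, then sub-quadratic.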
430
+ if memory_management.xformers_enabled():
431
+ print("Using xformers cross attention")
432
+ attention_function = attention_xformers
433
+ elif memory_management.pytorch_attention_enabled():
434
+ print("Using pytorch cross attention")
435
+ attention_function = attention_pytorch
436
+ elif args.attention_split:
437
+ print("Using split optimization for cross attention")
438
+ attention_function = attention_split
439
+ else:
440
+ print("Using sub quadratic optimization for cross attention")
441
+ attention_function = attention_sub_quad
442
+
443
+ if memory_management.xformers_enabled_vae():
444
+ print("Using xformers attention for VAE")
445
+ attention_function_single_head_spatial = xformers_attention_single_head_spatial
446
+ elif memory_management.pytorch_attention_enabled():
447
+ print("Using pytorch attention for VAE")
448
+ attention_function_single_head_spatial = pytorch_attention_single_head_spatial
449
+ else:
450
+ print("Using split attention for VAE")
451
+ attention_function_single_head_spatial = normal_attention_single_head_spatial
452
+
453
+
454
+ class AttentionProcessorForge:
455
+ def __call__(self, attn, hidden_states, encoder_hidden_states, attention_mask=None, temb=None, *args, **kwargs):
456
+ residual = hidden_states
457
+
458
+ if attn.spatial_norm is not None:
459
+ hidden_states = attn.spatial_norm(hidden_states, temb)
460
+
461
+ input_ndim = hidden_states.ndim
462
+
463
+ if input_ndim == 4:
464
+ batch_size, channel, height, width = hidden_states.shape
465
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
466
+
467
+ batch_size, sequence_length, _ = (
468
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
469
+ )
470
+
471
+ if attention_mask is not None:
472
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
473
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
474
+
475
+ if attn.group_norm is not None:
476
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
477
+
478
+ query = attn.to_q(hidden_states)
479
+
480
+ if encoder_hidden_states is None:
481
+ encoder_hidden_states = hidden_states
482
+ elif attn.norm_cross:
483
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
484
+
485
+ key = attn.to_k(encoder_hidden_states)
486
+ value = attn.to_v(encoder_hidden_states)
487
+
488
+ hidden_states = attention_function(query, key, value, heads=attn.heads, mask=attention_mask)
489
+
490
+ hidden_states = attn.to_out[0](hidden_states)
491
+ hidden_states = attn.to_out[1](hidden_states)
492
+
493
+ if input_ndim == 4:
494
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
495
+
496
+ if attn.residual_connection:
497
+ hidden_states = hidden_states + residual
498
+
499
+ hidden_states = hidden_states / attn.rescale_output_factor
500
+
501
+ return hidden_states
backend/diffusion_engine/base.py ADDED
@@ -0,0 +1,87 @@
1
+ import torch
2
+ import safetensors.torch as sf
3
+
4
+ from backend import utils
5
+
6
+
7
+ class ForgeObjects:
8
+ def __init__(self, unet, clip, vae, clipvision):
9
+ self.unet = unet
10
+ self.clip = clip
11
+ self.vae = vae
12
+ self.clipvision = clipvision
13
+
14
+ def shallow_copy(self):
15
+ return ForgeObjects(
16
+ self.unet,
17
+ self.clip,
18
+ self.vae,
19
+ self.clipvision
20
+ )
21
+
22
+
23
+ class ForgeDiffusionEngine:
24
+ matched_guesses = []
25
+
26
+ def __init__(self, estimated_config, huggingface_components):
27
+ self.model_config = estimated_config
28
+ self.is_inpaint = estimated_config.inpaint_model()
29
+
30
+ self.forge_objects = None
31
+ self.forge_objects_original = None
32
+ self.forge_objects_after_applying_lora = None
33
+
34
+ self.current_lora_hash = str([])
35
+
36
+ self.fix_for_webui_backward_compatibility()
37
+
38
+ def set_clip_skip(self, clip_skip):
39
+ pass
40
+
41
+ def get_first_stage_encoding(self, x):
42
+ return x # legacy code, do not change
43
+
44
+ def get_learned_conditioning(self, prompt: list[str]):
45
+ pass
46
+
47
+ def encode_first_stage(self, x):
48
+ pass
49
+
50
+ def decode_first_stage(self, x):
51
+ pass
52
+
53
+ def get_prompt_lengths_on_ui(self, prompt):
54
+ return 0, 75
55
+
56
+ def is_webui_legacy_model(self):
57
+ return self.is_sd1 or self.is_sd2 or self.is_sdxl or self.is_sd3
58
+
59
+ def fix_for_webui_backward_compatibility(self):
60
+ self.tiling_enabled = False
61
+ self.first_stage_model = None
62
+ self.cond_stage_model = None
63
+ self.use_distilled_cfg_scale = False
64
+ self.is_sd1 = False
65
+ self.is_sd2 = False
66
+ self.is_sdxl = False
67
+ self.is_sd3 = False
68
+ return
69
+
70
+ def save_unet(self, filename):
71
+ sd = utils.get_state_dict_after_quant(self.forge_objects.unet.model.diffusion_model)
72
+ sf.save_file(sd, filename)
73
+ return filename
74
+
75
+ def save_checkpoint(self, filename):
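+ # Merge UNet, text-encoder, and VAE weights into a single state dict under standard single-file checkpoint prefixes.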
76
+ sd = {}
77
+ sd.update(
78
+ utils.get_state_dict_after_quant(self.forge_objects.unet.model.diffusion_model, prefix='model.diffusion_model.')
79
+ )
80
+ sd.update(
81
+ utils.get_state_dict_after_quant(self.forge_objects.clip.cond_stage_model, prefix='text_encoders.')
82
+ )
83
+ sd.update(
84
+ utils.get_state_dict_after_quant(self.forge_objects.vae.first_stage_model, prefix='vae.')
85
+ )
86
+ sf.save_file(sd, filename)
87
+ return filename
backend/diffusion_engine/flux.py ADDED
@@ -0,0 +1,106 @@
1
+ import torch
2
+
3
+ from huggingface_guess import model_list
4
+ from backend.diffusion_engine.base import ForgeDiffusionEngine, ForgeObjects
5
+ from backend.patcher.clip import CLIP
6
+ from backend.patcher.vae import VAE
7
+ from backend.patcher.unet import UnetPatcher
8
+ from backend.text_processing.classic_engine import ClassicTextProcessingEngine
9
+ from backend.text_processing.t5_engine import T5TextProcessingEngine
10
+ from backend.args import dynamic_args
11
+ from backend.modules.k_prediction import PredictionFlux
12
+ from backend import memory_management
13
+
14
+
15
+ class Flux(ForgeDiffusionEngine):
16
+ matched_guesses = [model_list.Flux, model_list.FluxSchnell]
17
+
18
+ def __init__(self, estimated_config, huggingface_components):
19
+ super().__init__(estimated_config, huggingface_components)
20
+ self.is_inpaint = False
21
+
22
+ clip = CLIP(
23
+ model_dict={
24
+ 'clip_l': huggingface_components['text_encoder'],
25
+ 't5xxl': huggingface_components['text_encoder_2']
26
+ },
27
+ tokenizer_dict={
28
+ 'clip_l': huggingface_components['tokenizer'],
29
+ 't5xxl': huggingface_components['tokenizer_2']
30
+ }
31
+ )
32
+
33
+ vae = VAE(model=huggingface_components['vae'])
34
+
35
+ if 'schnell' in estimated_config.huggingface_repo.lower():
36
+ k_predictor = PredictionFlux(sigma_data=1.0, prediction_type='const', shift=1.0, timesteps=10000)
37
+ else:
38
+ k_predictor = PredictionFlux(sigma_data=1.0, prediction_type='const', shift=1.15, timesteps=10000)
39
+ self.use_distilled_cfg_scale = True
40
+
41
+ unet = UnetPatcher.from_model(
42
+ model=huggingface_components['transformer'],
43
+ diffusers_scheduler=None,
44
+ k_predictor=k_predictor,
45
+ config=estimated_config
46
+ )
47
+
48
+ self.text_processing_engine_l = ClassicTextProcessingEngine(
49
+ text_encoder=clip.cond_stage_model.clip_l,
50
+ tokenizer=clip.tokenizer.clip_l,
51
+ embedding_dir=dynamic_args['embedding_dir'],
52
+ embedding_key='clip_l',
53
+ embedding_expected_shape=768,
54
+ emphasis_name=dynamic_args['emphasis_name'],
55
+ text_projection=False,
56
+ minimal_clip_skip=1,
57
+ clip_skip=1,
58
+ return_pooled=True,
59
+ final_layer_norm=True,
60
+ )
61
+
62
+ self.text_processing_engine_t5 = T5TextProcessingEngine(
63
+ text_encoder=clip.cond_stage_model.t5xxl,
64
+ tokenizer=clip.tokenizer.t5xxl,
65
+ emphasis_name=dynamic_args['emphasis_name'],
66
+ )
67
+
68
+ self.forge_objects = ForgeObjects(unet=unet, clip=clip, vae=vae, clipvision=None)
69
+ self.forge_objects_original = self.forge_objects.shallow_copy()
70
+ self.forge_objects_after_applying_lora = self.forge_objects.shallow_copy()
71
+
72
+ def set_clip_skip(self, clip_skip):
73
+ self.text_processing_engine_l.clip_skip = clip_skip
74
+
75
+ @torch.inference_mode()
76
+ def get_learned_conditioning(self, prompt: list[str]):
77
+ memory_management.load_model_gpu(self.forge_objects.clip.patcher)
78
+ cond_l, pooled_l = self.text_processing_engine_l(prompt)
79
+ cond_t5 = self.text_processing_engine_t5(prompt)
80
+ cond = dict(crossattn=cond_t5, vector=pooled_l)
81
+
82
+ if self.use_distilled_cfg_scale:
83
+ distilled_cfg_scale = getattr(prompt, 'distilled_cfg_scale', 3.5) or 3.5
84
+ cond['guidance'] = torch.FloatTensor([distilled_cfg_scale] * len(prompt))
85
+ print(f'Distilled CFG Scale: {distilled_cfg_scale}')
86
+ else:
87
+ print('Distilled CFG Scale will be ignored for Schnell')
88
+
89
+ return cond
90
+
91
+ @torch.inference_mode()
92
+ def get_prompt_lengths_on_ui(self, prompt):
93
+ token_count = len(self.text_processing_engine_t5.tokenize([prompt])[0])
94
+ return token_count, max(255, token_count)
95
+
96
+ @torch.inference_mode()
97
+ def encode_first_stage(self, x):
98
+ sample = self.forge_objects.vae.encode(x.movedim(1, -1) * 0.5 + 0.5)
99
+ sample = self.forge_objects.vae.first_stage_model.process_in(sample)
100
+ return sample.to(x)
101
+
102
+ @torch.inference_mode()
103
+ def decode_first_stage(self, x):
104
+ sample = self.forge_objects.vae.first_stage_model.process_out(x)
105
+ sample = self.forge_objects.vae.decode(sample).movedim(-1, 1) * 2.0 - 1.0
106
+ return sample.to(x)
backend/diffusion_engine/sd15.py ADDED
@@ -0,0 +1,81 @@
1
+ import torch
2
+
3
+ from huggingface_guess import model_list
4
+ from backend.diffusion_engine.base import ForgeDiffusionEngine, ForgeObjects
5
+ from backend.patcher.clip import CLIP
6
+ from backend.patcher.vae import VAE
7
+ from backend.patcher.unet import UnetPatcher
8
+ from backend.text_processing.classic_engine import ClassicTextProcessingEngine
9
+ from backend.args import dynamic_args
10
+ from backend import memory_management
11
+
12
+
13
+ class StableDiffusion(ForgeDiffusionEngine):
14
+ matched_guesses = [model_list.SD15]
15
+
16
+ def __init__(self, estimated_config, huggingface_components):
17
+ super().__init__(estimated_config, huggingface_components)
18
+
19
+ clip = CLIP(
20
+ model_dict={
21
+ 'clip_l': huggingface_components['text_encoder']
22
+ },
23
+ tokenizer_dict={
24
+ 'clip_l': huggingface_components['tokenizer']
25
+ }
26
+ )
27
+
28
+ vae = VAE(model=huggingface_components['vae'])
29
+
30
+ unet = UnetPatcher.from_model(
31
+ model=huggingface_components['unet'],
32
+ diffusers_scheduler=huggingface_components['scheduler'],
33
+ config=estimated_config
34
+ )
35
+
36
+ self.text_processing_engine = ClassicTextProcessingEngine(
37
+ text_encoder=clip.cond_stage_model.clip_l,
38
+ tokenizer=clip.tokenizer.clip_l,
39
+ embedding_dir=dynamic_args['embedding_dir'],
40
+ embedding_key='clip_l',
41
+ embedding_expected_shape=768,
42
+ emphasis_name=dynamic_args['emphasis_name'],
43
+ text_projection=False,
44
+ minimal_clip_skip=1,
45
+ clip_skip=1,
46
+ return_pooled=False,
47
+ final_layer_norm=True,
48
+ )
49
+
50
+ self.forge_objects = ForgeObjects(unet=unet, clip=clip, vae=vae, clipvision=None)
51
+ self.forge_objects_original = self.forge_objects.shallow_copy()
52
+ self.forge_objects_after_applying_lora = self.forge_objects.shallow_copy()
53
+
54
+ # WebUI Legacy
55
+ self.is_sd1 = True
56
+
57
+ def set_clip_skip(self, clip_skip):
58
+ self.text_processing_engine.clip_skip = clip_skip
59
+
60
+ @torch.inference_mode()
61
+ def get_learned_conditioning(self, prompt: list[str]):
62
+ memory_management.load_model_gpu(self.forge_objects.clip.patcher)
63
+ cond = self.text_processing_engine(prompt)
64
+ return cond
65
+
66
+ @torch.inference_mode()
67
+ def get_prompt_lengths_on_ui(self, prompt):
68
+ _, token_count = self.text_processing_engine.process_texts([prompt])
69
+ return token_count, self.text_processing_engine.get_target_prompt_token_count(token_count)
70
+
71
+ @torch.inference_mode()
72
+ def encode_first_stage(self, x):
73
+ sample = self.forge_objects.vae.encode(x.movedim(1, -1) * 0.5 + 0.5)
74
+ sample = self.forge_objects.vae.first_stage_model.process_in(sample)
75
+ return sample.to(x)
76
+
77
+ @torch.inference_mode()
78
+ def decode_first_stage(self, x):
79
+ sample = self.forge_objects.vae.first_stage_model.process_out(x)
80
+ sample = self.forge_objects.vae.decode(sample).movedim(-1, 1) * 2.0 - 1.0
81
+ return sample.to(x)
backend/diffusion_engine/sd20.py ADDED
@@ -0,0 +1,81 @@
1
+ import torch
2
+
3
+ from huggingface_guess import model_list
4
+ from backend.diffusion_engine.base import ForgeDiffusionEngine, ForgeObjects
5
+ from backend.patcher.clip import CLIP
6
+ from backend.patcher.vae import VAE
7
+ from backend.patcher.unet import UnetPatcher
8
+ from backend.text_processing.classic_engine import ClassicTextProcessingEngine
9
+ from backend.args import dynamic_args
10
+ from backend import memory_management
11
+
12
+
13
+ class StableDiffusion2(ForgeDiffusionEngine):
14
+ matched_guesses = [model_list.SD20]
15
+
16
+ def __init__(self, estimated_config, huggingface_components):
17
+ super().__init__(estimated_config, huggingface_components)
18
+
19
+ clip = CLIP(
20
+ model_dict={
21
+ 'clip_h': huggingface_components['text_encoder']
22
+ },
23
+ tokenizer_dict={
24
+ 'clip_h': huggingface_components['tokenizer']
25
+ }
26
+ )
27
+
28
+ vae = VAE(model=huggingface_components['vae'])
29
+
30
+ unet = UnetPatcher.from_model(
31
+ model=huggingface_components['unet'],
32
+ diffusers_scheduler=huggingface_components['scheduler'],
33
+ config=estimated_config
34
+ )
35
+
36
+ self.text_processing_engine = ClassicTextProcessingEngine(
37
+ text_encoder=clip.cond_stage_model.clip_h,
38
+ tokenizer=clip.tokenizer.clip_h,
39
+ embedding_dir=dynamic_args['embedding_dir'],
40
+ embedding_key='clip_h',
41
+ embedding_expected_shape=1024,
42
+ emphasis_name=dynamic_args['emphasis_name'],
43
+ text_projection=False,
44
+ minimal_clip_skip=1,
45
+ clip_skip=1,
46
+ return_pooled=False,
47
+ final_layer_norm=True,
48
+ )
49
+
50
+ self.forge_objects = ForgeObjects(unet=unet, clip=clip, vae=vae, clipvision=None)
51
+ self.forge_objects_original = self.forge_objects.shallow_copy()
52
+ self.forge_objects_after_applying_lora = self.forge_objects.shallow_copy()
53
+
54
+ # WebUI Legacy
55
+ self.is_sd2 = True
56
+
57
+ def set_clip_skip(self, clip_skip):
58
+ self.text_processing_engine.clip_skip = clip_skip
59
+
60
+ @torch.inference_mode()
61
+ def get_learned_conditioning(self, prompt: list[str]):
62
+ memory_management.load_model_gpu(self.forge_objects.clip.patcher)
63
+ cond = self.text_processing_engine(prompt)
64
+ return cond
65
+
66
+ @torch.inference_mode()
67
+ def get_prompt_lengths_on_ui(self, prompt):
68
+ _, token_count = self.text_processing_engine.process_texts([prompt])
69
+ return token_count, self.text_processing_engine.get_target_prompt_token_count(token_count)
70
+
71
+ @torch.inference_mode()
72
+ def encode_first_stage(self, x):
73
+ sample = self.forge_objects.vae.encode(x.movedim(1, -1) * 0.5 + 0.5)
74
+ sample = self.forge_objects.vae.first_stage_model.process_in(sample)
75
+ return sample.to(x)
76
+
77
+ @torch.inference_mode()
78
+ def decode_first_stage(self, x):
79
+ sample = self.forge_objects.vae.first_stage_model.process_out(x)
80
+ sample = self.forge_objects.vae.decode(sample).movedim(-1, 1) * 2.0 - 1.0
81
+ return sample.to(x)
backend/diffusion_engine/sdxl.py ADDED
@@ -0,0 +1,133 @@
1
+ import torch
2
+
3
+ from huggingface_guess import model_list
4
+ from backend.diffusion_engine.base import ForgeDiffusionEngine, ForgeObjects
5
+ from backend.patcher.clip import CLIP
6
+ from backend.patcher.vae import VAE
7
+ from backend.patcher.unet import UnetPatcher
8
+ from backend.text_processing.classic_engine import ClassicTextProcessingEngine
9
+ from backend.args import dynamic_args
10
+ from backend import memory_management
11
+ from backend.nn.unet import Timestep
12
+
13
+
14
+ class StableDiffusionXL(ForgeDiffusionEngine):
15
+ matched_guesses = [model_list.SDXL]
16
+
17
+ def __init__(self, estimated_config, huggingface_components):
18
+ super().__init__(estimated_config, huggingface_components)
19
+
20
+ clip = CLIP(
21
+ model_dict={
22
+ 'clip_l': huggingface_components['text_encoder'],
23
+ 'clip_g': huggingface_components['text_encoder_2']
24
+ },
25
+ tokenizer_dict={
26
+ 'clip_l': huggingface_components['tokenizer'],
27
+ 'clip_g': huggingface_components['tokenizer_2']
28
+ }
29
+ )
30
+
31
+ vae = VAE(model=huggingface_components['vae'])
32
+
33
+ unet = UnetPatcher.from_model(
34
+ model=huggingface_components['unet'],
35
+ diffusers_scheduler=huggingface_components['scheduler'],
36
+ config=estimated_config
37
+ )
38
+
39
+ self.text_processing_engine_l = ClassicTextProcessingEngine(
40
+ text_encoder=clip.cond_stage_model.clip_l,
41
+ tokenizer=clip.tokenizer.clip_l,
42
+ embedding_dir=dynamic_args['embedding_dir'],
43
+ embedding_key='clip_l',
44
+ embedding_expected_shape=2048,
45
+ emphasis_name=dynamic_args['emphasis_name'],
46
+ text_projection=False,
47
+ minimal_clip_skip=2,
48
+ clip_skip=2,
49
+ return_pooled=False,
50
+ final_layer_norm=False,
51
+ )
52
+
53
+ self.text_processing_engine_g = ClassicTextProcessingEngine(
54
+ text_encoder=clip.cond_stage_model.clip_g,
55
+ tokenizer=clip.tokenizer.clip_g,
56
+ embedding_dir=dynamic_args['embedding_dir'],
57
+ embedding_key='clip_g',
58
+ embedding_expected_shape=2048,
59
+ emphasis_name=dynamic_args['emphasis_name'],
60
+ text_projection=True,
61
+ minimal_clip_skip=2,
62
+ clip_skip=2,
63
+ return_pooled=True,
64
+ final_layer_norm=False,
65
+ )
66
+
67
+ self.embedder = Timestep(256)
68
+
69
+ self.forge_objects = ForgeObjects(unet=unet, clip=clip, vae=vae, clipvision=None)
70
+ self.forge_objects_original = self.forge_objects.shallow_copy()
71
+ self.forge_objects_after_applying_lora = self.forge_objects.shallow_copy()
72
+
73
+ # WebUI Legacy
74
+ self.is_sdxl = True
75
+
76
+ def set_clip_skip(self, clip_skip):
77
+ self.text_processing_engine_l.clip_skip = clip_skip
78
+ self.text_processing_engine_g.clip_skip = clip_skip
79
+
80
+ @torch.inference_mode()
81
+ def get_learned_conditioning(self, prompt: list[str]):
82
+ memory_management.load_model_gpu(self.forge_objects.clip.patcher)
83
+
84
+ cond_l = self.text_processing_engine_l(prompt)
85
+ cond_g, clip_pooled = self.text_processing_engine_g(prompt)
86
+
87
+ width = getattr(prompt, 'width', 1024) or 1024
88
+ height = getattr(prompt, 'height', 1024) or 1024
89
+ is_negative_prompt = getattr(prompt, 'is_negative_prompt', False)
90
+
91
+ crop_w = 0
92
+ crop_h = 0
93
+ target_width = width
94
+ target_height = height
95
+
96
+ out = [
97
+ self.embedder(torch.Tensor([height])), self.embedder(torch.Tensor([width])),
98
+ self.embedder(torch.Tensor([crop_h])), self.embedder(torch.Tensor([crop_w])),
99
+ self.embedder(torch.Tensor([target_height])), self.embedder(torch.Tensor([target_width]))
100
+ ]
101
+
102
+ flat = torch.flatten(torch.cat(out)).unsqueeze(dim=0).repeat(clip_pooled.shape[0], 1).to(clip_pooled)
103
+
104
+ force_zero_negative_prompt = is_negative_prompt and all(x == '' for x in prompt)
105
+
106
+ if force_zero_negative_prompt:
107
+ clip_pooled = torch.zeros_like(clip_pooled)
108
+ cond_l = torch.zeros_like(cond_l)
109
+ cond_g = torch.zeros_like(cond_g)
110
+
111
+ cond = dict(
112
+ crossattn=torch.cat([cond_l, cond_g], dim=2),
113
+ vector=torch.cat([clip_pooled, flat], dim=1),
114
+ )
115
+
116
+ return cond
117
+
118
+ @torch.inference_mode()
119
+ def get_prompt_lengths_on_ui(self, prompt):
120
+ _, token_count = self.text_processing_engine_l.process_texts([prompt])
121
+ return token_count, self.text_processing_engine_l.get_target_prompt_token_count(token_count)
122
+
123
+ @torch.inference_mode()
124
+ def encode_first_stage(self, x):
125
+ sample = self.forge_objects.vae.encode(x.movedim(1, -1) * 0.5 + 0.5)
126
+ sample = self.forge_objects.vae.first_stage_model.process_in(sample)
127
+ return sample.to(x)
128
+
129
+ @torch.inference_mode()
130
+ def decode_first_stage(self, x):
131
+ sample = self.forge_objects.vae.first_stage_model.process_out(x)
132
+ sample = self.forge_objects.vae.decode(sample).movedim(-1, 1) * 2.0 - 1.0
133
+ return sample.to(x)
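The `vector` conditioning built in `get_learned_conditioning` above comes from `Timestep(256)`: each of the six micro-conditioning scalars (original height/width, crop offsets, target height/width) is turned into a 256-dim sinusoidal embedding and concatenated with the pooled CLIP-G output. A minimal sketch of that embedding, assuming the usual DDPM-style frequency layout (the exact cos/sin ordering inside `Timestep` may differ):

```python
import torch

def timestep_embedding(x: torch.Tensor, dim: int = 256) -> torch.Tensor:
    # DDPM-style sinusoidal embedding: log-spaced frequencies,
    # cos half followed by sin half.
    half = dim // 2
    freqs = torch.exp(-torch.log(torch.tensor(10000.0)) * torch.arange(half) / half)
    args = x[:, None].float() * freqs[None, :]
    return torch.cat([torch.cos(args), torch.sin(args)], dim=-1)

# Six scalars -> 6 * 256 = 1536 dims; concatenated with the 1280-dim
# pooled CLIP-G output this yields the 2816-dim `vector` input that the
# SDXL UNet's add_embedding expects.
scalars = [1024.0, 1024.0, 0.0, 0.0, 1024.0, 1024.0]  # H, W, crop_h, crop_w, target_H, target_W
flat = torch.cat([timestep_embedding(torch.tensor([s])) for s in scalars], dim=-1)
print(flat.shape)  # torch.Size([1, 1536])
```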
backend/huggingface/Kwai-Kolors/Kolors/model_index.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_class_name": "StableDiffusionXLPipeline",
+   "_diffusers_version": "0.18.0.dev0",
+   "force_zeros_for_empty_prompt": true,
+   "scheduler": [
+     "diffusers",
+     "EulerDiscreteScheduler"
+   ],
+   "text_encoder": [
+     "kolors",
+     "ChatGLMModel"
+   ],
+   "tokenizer": [
+     "kolors",
+     "ChatGLMTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "UNet2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
backend/huggingface/Kwai-Kolors/Kolors/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "_class_name": "EulerDiscreteScheduler",
+   "_diffusers_version": "0.18.0.dev0",
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "beta_end": 0.014,
+   "clip_sample": false,
+   "clip_sample_range": 1.0,
+   "dynamic_thresholding_ratio": 0.995,
+   "interpolation_type": "linear",
+   "num_train_timesteps": 1100,
+   "prediction_type": "epsilon",
+   "rescale_betas_zero_snr": false,
+   "sample_max_value": 1.0,
+   "set_alpha_to_one": false,
+   "skip_prk_steps": true,
+   "steps_offset": 1,
+   "thresholding": false,
+   "timestep_spacing": "leading",
+   "trained_betas": null,
+   "use_karras_sigmas": false
+ }
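Note the non-standard training schedule: Kolors uses 1100 timesteps with `beta_end` 0.014, rather than the 1000 / 0.012 common to SDXL checkpoints. A sketch of the sigma table this config implies (the same construction `EulerDiscreteScheduler` performs internally):

```python
import torch

# "scaled_linear" means the betas are linear in sqrt space.
num_train_timesteps, beta_start, beta_end = 1100, 0.00085, 0.014
betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps) ** 2
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
sigmas = ((1.0 - alphas_cumprod) / alphas_cumprod) ** 0.5
print(float(sigmas[0]), float(sigmas[-1]))  # noise range the Euler sampler traverses
```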
backend/huggingface/Kwai-Kolors/Kolors/text_encoder/config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "_name_or_path": "THUDM/chatglm3-6b-base",
+   "model_type": "chatglm",
+   "architectures": [
+     "ChatGLMModel"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_chatglm.ChatGLMConfig",
+     "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration",
+     "AutoModelForCausalLM": "modeling_chatglm.ChatGLMForConditionalGeneration",
+     "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration",
+     "AutoModelForSequenceClassification": "modeling_chatglm.ChatGLMForSequenceClassification"
+   },
+   "add_bias_linear": false,
+   "add_qkv_bias": true,
+   "apply_query_key_layer_scaling": true,
+   "apply_residual_connection_post_layernorm": false,
+   "attention_dropout": 0.0,
+   "attention_softmax_in_fp32": true,
+   "bias_dropout_fusion": true,
+   "ffn_hidden_size": 13696,
+   "fp32_residual_connection": false,
+   "hidden_dropout": 0.0,
+   "hidden_size": 4096,
+   "kv_channels": 128,
+   "layernorm_epsilon": 1e-05,
+   "multi_query_attention": true,
+   "multi_query_group_num": 2,
+   "num_attention_heads": 32,
+   "num_layers": 28,
+   "original_rope": true,
+   "padded_vocab_size": 65024,
+   "post_layer_norm": true,
+   "rmsnorm": true,
+   "seq_length": 32768,
+   "use_cache": true,
+   "torch_dtype": "float16",
+   "transformers_version": "4.30.2",
+   "tie_word_embeddings": false,
+   "eos_token_id": 2,
+   "pad_token_id": 0
+ }
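The attention shapes follow directly from this config: 32 query heads of 128 channels each, but only 2 shared key/value groups (multi-query attention), which is why the fused `query_key_value` projection in the weight map below is wider than `hidden_size`. A quick check of the arithmetic:

```python
# Projection widths implied by the config above.
hidden_size, kv_channels, n_heads, kv_groups = 4096, 128, 32, 2
q_width = n_heads * kv_channels             # 4096
kv_width = 2 * kv_groups * kv_channels      # 512 for K and V together
print(q_width + kv_width)                   # 4608 = width of query_key_value (with bias)
```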
backend/huggingface/Kwai-Kolors/Kolors/text_encoder/pytorch_model.bin.index.json ADDED
@@ -0,0 +1,207 @@
+ {
+   "metadata": {
+     "total_size": 12487168064
+   },
+   "weight_map": {
+     "transformer.embedding.word_embeddings.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.final_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "transformer.encoder.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.0.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.0.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.0.self_attention.dense.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.0.self_attention.query_key_value.bias": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.0.self_attention.query_key_value.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.1.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.1.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.1.self_attention.dense.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.1.self_attention.query_key_value.bias": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.1.self_attention.query_key_value.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.10.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.10.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.10.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.10.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.10.self_attention.dense.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.10.self_attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.10.self_attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.11.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.11.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.11.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.11.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.11.self_attention.dense.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.11.self_attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.11.self_attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.12.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.12.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.12.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.12.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.12.self_attention.dense.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.12.self_attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.12.self_attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.13.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.13.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.13.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.13.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.13.self_attention.dense.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.13.self_attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.13.self_attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.14.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.14.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.14.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.14.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.14.self_attention.dense.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.14.self_attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.14.self_attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.15.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.15.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.15.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.15.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.15.self_attention.dense.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.15.self_attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.15.self_attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.16.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.16.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.16.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.16.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.16.self_attention.dense.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.16.self_attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.16.self_attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.17.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.17.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.17.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.17.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.17.self_attention.dense.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.17.self_attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.17.self_attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
+     "transformer.encoder.layers.18.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.18.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.18.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.18.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.18.self_attention.dense.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.18.self_attention.query_key_value.bias": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.18.self_attention.query_key_value.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.19.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.19.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.19.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.19.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.19.self_attention.dense.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.19.self_attention.query_key_value.bias": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.19.self_attention.query_key_value.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.2.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.2.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.2.self_attention.dense.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.2.self_attention.query_key_value.bias": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.2.self_attention.query_key_value.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.20.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.20.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.20.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.20.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.20.self_attention.dense.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.20.self_attention.query_key_value.bias": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.20.self_attention.query_key_value.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.21.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.21.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.21.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.21.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.21.self_attention.dense.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.21.self_attention.query_key_value.bias": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.21.self_attention.query_key_value.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.22.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
+     "transformer.encoder.layers.22.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.22.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.22.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.22.self_attention.dense.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.22.self_attention.query_key_value.bias": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.22.self_attention.query_key_value.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.23.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.23.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.23.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.23.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.23.self_attention.dense.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.23.self_attention.query_key_value.bias": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.23.self_attention.query_key_value.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.24.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.24.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.24.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.24.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.24.self_attention.dense.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.24.self_attention.query_key_value.bias": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.24.self_attention.query_key_value.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.25.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.25.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.25.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.25.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.25.self_attention.dense.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.25.self_attention.query_key_value.bias": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.25.self_attention.query_key_value.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.26.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.26.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00007.bin",
+     "transformer.encoder.layers.26.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.26.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.26.self_attention.dense.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.26.self_attention.query_key_value.bias": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.26.self_attention.query_key_value.weight": "pytorch_model-00006-of-00007.bin",
+     "transformer.encoder.layers.27.input_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "transformer.encoder.layers.27.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00007.bin",
+     "transformer.encoder.layers.27.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00007.bin",
+     "transformer.encoder.layers.27.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin",
+     "transformer.encoder.layers.27.self_attention.dense.weight": "pytorch_model-00007-of-00007.bin",
+     "transformer.encoder.layers.27.self_attention.query_key_value.bias": "pytorch_model-00007-of-00007.bin",
+     "transformer.encoder.layers.27.self_attention.query_key_value.weight": "pytorch_model-00007-of-00007.bin",
+     "transformer.encoder.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.3.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.3.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.3.self_attention.dense.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.3.self_attention.query_key_value.bias": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.3.self_attention.query_key_value.weight": "pytorch_model-00001-of-00007.bin",
+     "transformer.encoder.layers.4.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.4.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.4.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.4.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.4.self_attention.dense.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.4.self_attention.query_key_value.bias": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.4.self_attention.query_key_value.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.5.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.5.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.5.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.5.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.5.self_attention.dense.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.5.self_attention.query_key_value.bias": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.5.self_attention.query_key_value.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.6.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.6.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.6.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.6.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.6.self_attention.dense.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.6.self_attention.query_key_value.bias": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.6.self_attention.query_key_value.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.7.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.7.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.7.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.7.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.7.self_attention.dense.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.7.self_attention.query_key_value.bias": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.7.self_attention.query_key_value.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.8.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
+     "transformer.encoder.layers.8.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.8.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.8.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.8.self_attention.dense.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.8.self_attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.8.self_attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.9.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.9.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.9.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.9.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.9.self_attention.dense.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.9.self_attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
+     "transformer.encoder.layers.9.self_attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
+     "transformer.output_layer.weight": "pytorch_model-00007-of-00007.bin",
+     "transformer.rotary_pos_emb.inv_freq": "pytorch_model-00001-of-00007.bin"
+   }
+ }
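This index is how a ~12.5 GB checkpoint split across seven `.bin` shards is resolved: `weight_map` names the shard that holds each tensor. A naive sketch of the lookup (real loaders cache shards instead of reloading one per tensor; the file paths are assumed relative to the `text_encoder` folder):

```python
import json
import torch

with open('pytorch_model.bin.index.json') as f:
    index = json.load(f)

def load_tensor(name: str) -> torch.Tensor:
    # The index maps each weight name to its shard file.
    shard = index['weight_map'][name]  # e.g. 'pytorch_model-00001-of-00007.bin'
    return torch.load(shard, map_location='cpu')[name]

w = load_tensor('transformer.embedding.word_embeddings.weight')
```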
backend/huggingface/Kwai-Kolors/Kolors/text_encoder/tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "name_or_path": "THUDM/chatglm3-6b-base",
+   "remove_space": false,
+   "do_lower_case": false,
+   "tokenizer_class": "ChatGLMTokenizer",
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_chatglm.ChatGLMTokenizer",
+       null
+     ]
+   }
+ }
backend/huggingface/Kwai-Kolors/Kolors/text_encoder/vocab.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2
+ size 1018370
backend/huggingface/Kwai-Kolors/Kolors/tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "name_or_path": "THUDM/chatglm3-6b-base",
+   "remove_space": false,
+   "do_lower_case": false,
+   "tokenizer_class": "ChatGLMTokenizer",
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_chatglm.ChatGLMTokenizer",
+       null
+     ]
+   }
+ }
backend/huggingface/Kwai-Kolors/Kolors/tokenizer/vocab.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2
+ size 1018370
backend/huggingface/Kwai-Kolors/Kolors/unet/config.json ADDED
@@ -0,0 +1,72 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.27.0.dev0",
+   "act_fn": "silu",
+   "addition_embed_type": "text_time",
+   "addition_embed_type_num_heads": 64,
+   "addition_time_embed_dim": 256,
+   "attention_head_dim": [
+     5,
+     10,
+     20
+   ],
+   "attention_type": "default",
+   "block_out_channels": [
+     320,
+     640,
+     1280
+   ],
+   "center_input_sample": false,
+   "class_embed_type": null,
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 2048,
+   "cross_attention_norm": null,
+   "down_block_types": [
+     "DownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dropout": 0.0,
+   "dual_cross_attention": false,
+   "encoder_hid_dim": 4096,
+   "encoder_hid_dim_type": "text_proj",
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_only_cross_attention": null,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlock2DCrossAttn",
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_attention_heads": null,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "projection_class_embeddings_input_dim": 5632,
+   "resnet_out_scale_factor": 1.0,
+   "resnet_skip_time_act": false,
+   "resnet_time_scale_shift": "default",
+   "reverse_transformer_layers_per_block": null,
+   "sample_size": 128,
+   "time_cond_proj_dim": null,
+   "time_embedding_act_fn": null,
+   "time_embedding_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "transformer_layers_per_block": [
+     1,
+     2,
+     10
+   ],
+   "up_block_types": [
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "UpBlock2D"
+   ],
+   "upcast_attention": false,
+   "use_linear_projection": true
+ }
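This is essentially the SDXL UNet with the text side rewired for ChatGLM: `encoder_hid_dim` 4096 with a `text_proj` adapter maps the 4096-dim ChatGLM hidden states down to the 2048-dim cross-attention width. The unusual `projection_class_embeddings_input_dim` also follows from that swap (assuming, as in SDXL, six micro-conditioning scalars):

```python
# Where projection_class_embeddings_input_dim = 5632 comes from: the
# "text_time" add-embedding concatenates the pooled text embedding with
# sinusoidal embeddings of the size scalars.
addition_time_embed_dim = 256
num_size_scalars = 6        # assumed: the same six micro-conditioning scalars as SDXL
pooled_text_dim = 4096      # ChatGLM hidden size (cf. encoder_hid_dim above)
print(pooled_text_dim + num_size_scalars * addition_time_embed_dim)  # 5632
```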
backend/huggingface/Kwai-Kolors/Kolors/vae/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_diffusers_version": "0.18.0.dev0",
+   "_name_or_path": "./vae",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 1024,
+   "scaling_factor": 0.13025,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
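The `scaling_factor` of 0.13025 is the SDXL VAE value, which normalizes latents to roughly unit variance before they reach the UNet. A sketch of how it is applied (the repo path and the random stand-in image are illustrative assumptions, not a prescribed loading recipe):

```python
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained('Kwai-Kolors/Kolors', subfolder='vae')
image = torch.randn(1, 3, 1024, 1024)  # stand-in for a [-1, 1] image batch
# Scale latents after encoding; unscale before decoding.
latents = vae.encode(image).latent_dist.sample() * vae.config.scaling_factor
decoded = vae.decode(latents / vae.config.scaling_factor).sample
```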
backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/model_index.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "_class_name": "HunyuanDiTPipeline",
+   "_diffusers_version": "0.29.0.dev0",
+   "feature_extractor": [
+     null,
+     null
+   ],
+   "requires_safety_checker": true,
+   "safety_checker": [
+     null,
+     null
+   ],
+   "scheduler": [
+     "diffusers",
+     "DDPMScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "BertModel"
+   ],
+   "text_encoder_2": [
+     "transformers",
+     "T5EncoderModel"
+   ],
+   "tokenizer": [
+     "transformers",
+     "BertTokenizer"
+   ],
+   "tokenizer_2": [
+     "transformers",
+     "T5Tokenizer"
+   ],
+   "transformer": [
+     "diffusers",
+     "HunyuanDiT2DModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "_class_name": "DDPMScheduler",
+   "_diffusers_version": "0.29.0.dev0",
+   "beta_end": 0.03,
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "clip_sample": false,
+   "clip_sample_range": 1.0,
+   "dynamic_thresholding_ratio": 0.995,
+   "num_train_timesteps": 1000,
+   "prediction_type": "v_prediction",
+   "rescale_betas_zero_snr": false,
+   "sample_max_value": 1.0,
+   "set_alpha_to_one": false,
+   "skip_prk_steps": true,
+   "steps_offset": 1,
+   "thresholding": false,
+   "timestep_spacing": "leading",
+   "trained_betas": null,
+   "variance_type": "fixed_small"
+ }
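Unlike the epsilon-prediction Kolors scheduler above, HunyuanDiT uses `v_prediction`, which changes what the network regresses. A one-line sketch of the v-parameterization target:

```python
import torch

# With prediction_type "v_prediction" the network predicts
# v = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x0
# instead of the noise eps itself.
def v_target(x0: torch.Tensor, eps: torch.Tensor, alpha_bar_t: torch.Tensor) -> torch.Tensor:
    return alpha_bar_t.sqrt() * eps - (1.0 - alpha_bar_t).sqrt() * x0
```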
backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/text_encoder/config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "architectures": [
+     "BertModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 47020
+ }
backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/text_encoder_2/config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "architectures": [
+     "T5EncoderModel"
+   ],
+   "classifier_dropout": 0.0,
+   "d_ff": 5120,
+   "d_kv": 64,
+   "d_model": 2048,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "num_decoder_layers": 24,
+   "num_heads": 32,
+   "num_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "tokenizer_class": "T5Tokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.1",
+   "use_cache": true,
+   "vocab_size": 250112
+ }
backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/text_encoder_2/model.safetensors.index.json ADDED
@@ -0,0 +1,226 @@
+ {
+   "metadata": {
+     "total_size": 6679834624
+   },
+   "weight_map": {
+     "encoder.block.0.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.0.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.0.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.0.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.0.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.0.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.0.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.0.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.0.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.1.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.1.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.1.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.1.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.1.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.1.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.1.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.1.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.1.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.10.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.10.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.10.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.10.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.10.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.10.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.10.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.10.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.10.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.11.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.11.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.11.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.11.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.11.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.11.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.11.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.11.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.11.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.12.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.12.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.12.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.12.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.12.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.12.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.12.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.12.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.12.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.13.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.13.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.13.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.13.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.13.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.13.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.13.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.13.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.13.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.14.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.14.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.14.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.14.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.14.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.14.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.14.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.14.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.14.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.15.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.15.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.15.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.15.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.15.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.15.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.15.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.15.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.15.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.16.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.16.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.16.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.16.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.16.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.16.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.16.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.16.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.16.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.17.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.17.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.17.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.17.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.17.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.17.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.17.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.17.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.17.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.18.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.18.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.18.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.18.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.18.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.18.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.18.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.18.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.18.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.19.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.19.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.19.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.19.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.19.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.19.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.19.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.19.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.19.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.2.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.2.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.2.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.2.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.2.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.2.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.2.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.2.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.2.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.20.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.20.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.20.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.20.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.20.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.20.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.20.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.20.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.20.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.21.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.21.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.21.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.21.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.21.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.21.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.21.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.21.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.21.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.22.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.22.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.22.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.22.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.22.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.22.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.22.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.22.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.22.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.23.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.23.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.23.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.23.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.23.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.23.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.23.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.23.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.23.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
+     "encoder.block.3.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.3.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.3.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.3.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.3.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.3.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.3.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.3.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.3.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.4.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.4.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.4.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.4.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.4.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.4.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.4.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.4.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.4.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.5.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.5.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.5.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.5.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.5.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.5.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.5.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.5.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.5.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.6.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.6.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.6.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.6.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.6.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.6.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.6.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.6.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.6.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.7.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.7.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.7.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.7.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.7.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.7.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.7.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.7.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.7.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.8.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.8.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.8.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.8.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.8.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.8.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.8.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.8.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.8.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.9.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.9.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.9.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.9.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.9.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.9.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.9.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.9.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
+     "encoder.block.9.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
+     "encoder.final_layer_norm.weight": "model-00002-of-00002.safetensors",
+     "shared.weight": "model-00001-of-00002.safetensors"
+   }
+ }
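Same index scheme as the Kolors text encoder above, but with safetensors shards, which can be read lazily: a sketch of resolving one weight without deserializing the whole 6.7 GB shard set (file paths assumed relative to the `text_encoder_2` folder):

```python
import json
from safetensors import safe_open

with open('model.safetensors.index.json') as f:
    weight_map = json.load(f)['weight_map']

# safe_open memory-maps the shard and extracts single tensors on demand.
name = 'encoder.final_layer_norm.weight'
with safe_open(weight_map[name], framework='pt', device='cpu') as f:
    tensor = f.get_tensor(name)
```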
backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "101": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "102": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "103": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": true,
+ "mask_token": "[MASK]",
+ "model_max_length": 77,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
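This is a plain `BertTokenizer` configured for HunyuanDiT's first text branch: lower-cased, Chinese-character aware (`tokenize_chinese_chars`), and capped at 77 tokens, matching `text_len` in the transformer config further down. A minimal sketch, assuming a local checkout (the path is illustrative; `vocab.txt` ships in this same folder):

```python
from transformers import BertTokenizer

# Illustrative local path; vocab.txt is uploaded alongside this config.
tok = BertTokenizer.from_pretrained(
    "backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/tokenizer"
)
ids = tok("一只戴着墨镜的猫", padding="max_length",
          truncation=True, max_length=77).input_ids
print(len(ids))  # 77 -- the model_max_length above
```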
backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/tokenizer/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
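The second text branch uses T5-style sentencepiece tokens; as the `added_tokens_decoder` in the next file shows, they sit at ids 0 (`<pad>`), 1 (`</s>`), and 2 (`<unk>`). A quick check, assuming a full local copy (note `T5Tokenizer` also needs the sentencepiece model file, which is not among the JSON files in this diff):

```python
from transformers import T5Tokenizer

# Illustrative path; requires the sentencepiece model (spiece.model)
# from the full repo in addition to these JSON configs.
tok2 = T5Tokenizer.from_pretrained(
    "backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/tokenizer_2"
)
print(tok2.pad_token_id, tok2.eos_token_id, tok2.unk_token_id)  # 0 1 2
```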
backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/tokenizer_2/tokenizer_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "</s>",
+ "extra_ids": 0,
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<pad>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "T5Tokenizer",
+ "unk_token": "<unk>"
+ }
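One detail worth flagging: the huge `model_max_length` is transformers' `int(1e30)` sentinel for "effectively unbounded", so truncation is opt-in for this tokenizer; HunyuanDiT truncates explicitly at 256 tokens (`text_len_t5` in the transformer config below). A self-contained sketch (illustrative path, as above):

```python
from transformers import T5Tokenizer

tok2 = T5Tokenizer.from_pretrained(
    "backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/tokenizer_2"  # illustrative
)
# model_max_length is the "no limit" sentinel (int(1e30)), so the caller
# must pass max_length; HunyuanDiT caps the T5 prompt at 256 tokens.
ids = tok2("a very long prompt " * 100, truncation=True, max_length=256).input_ids
assert len(ids) <= 256
```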
backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/transformer/config.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "_class_name": "HunyuanDiT2DModel",
+ "_diffusers_version": "0.29.0.dev0",
+ "activation_fn": "gelu-approximate",
+ "attention_head_dim": 88,
+ "cross_attention_dim": 1024,
+ "cross_attention_dim_t5": 2048,
+ "hidden_size": 1408,
+ "in_channels": 4,
+ "learn_sigma": true,
+ "mlp_ratio": 4.3637,
+ "norm_type": "layer_norm",
+ "num_attention_heads": 16,
+ "num_layers": 40,
+ "patch_size": 2,
+ "pooled_projection_dim": 1024,
+ "sample_size": 128,
+ "text_len": 77,
+ "text_len_t5": 256
+ }
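A few consistency checks fall out of this config: `hidden_size` (1408) equals `num_attention_heads` × `attention_head_dim` (16 × 88), `in_channels` (4) matches the VAE's `latent_channels` below, and the two text lengths (77 and 256) match the tokenizer limits above. A sketch of instantiating the architecture from the config alone, assuming diffusers >= 0.29 (random weights, since no transformer checkpoint is in this commit):

```python
from diffusers import HunyuanDiT2DModel

# Illustrative local path; this builds the architecture only -- no weights here.
cfg = HunyuanDiT2DModel.load_config(
    "backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/transformer"
)
model = HunyuanDiT2DModel.from_config(cfg)
assert cfg["num_attention_heads"] * cfg["attention_head_dim"] == cfg["hidden_size"]  # 16 * 88 == 1408
```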
backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/vae/config.json ADDED
@@ -0,0 +1,33 @@
+ {
+ "_class_name": "AutoencoderKL",
+ "_diffusers_version": "0.29.0.dev0",
+ "act_fn": "silu",
+ "block_out_channels": [
+ 128,
+ 256,
+ 512,
+ 512
+ ],
+ "down_block_types": [
+ "DownEncoderBlock2D",
+ "DownEncoderBlock2D",
+ "DownEncoderBlock2D",
+ "DownEncoderBlock2D"
+ ],
+ "force_upcast": false,
+ "in_channels": 3,
+ "latent_channels": 4,
+ "latents_mean": null,
+ "latents_std": null,
+ "layers_per_block": 2,
+ "norm_num_groups": 32,
+ "out_channels": 3,
+ "sample_size": 512,
+ "scaling_factor": 0.13025,
+ "up_block_types": [
+ "UpDecoderBlock2D",
+ "UpDecoderBlock2D",
+ "UpDecoderBlock2D",
+ "UpDecoderBlock2D"
+ ]
+ }
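Four encoder blocks mean three downsampling steps, an 8x spatial reduction: a 1024x1024 RGB image encodes to 4x128x128 latents, which is exactly where the transformer's `sample_size` of 128 comes from. The `scaling_factor` of 0.13025 is the same value the SDXL VAE uses. A minimal sketch (illustrative path; the VAE checkpoint itself is not in this commit):

```python
from diffusers import AutoencoderKL

cfg = AutoencoderKL.load_config(
    "backend/huggingface/Tencent-Hunyuan/HunyuanDiT-Diffusers/vae"  # illustrative
)
vae = AutoencoderKL.from_config(cfg)  # architecture only, random weights
# len(block_out_channels) - 1 = 3 downsampling steps -> 8x reduction.
factor = 2 ** (len(cfg["block_out_channels"]) - 1)
print(factor)  # 8: 1024x1024 RGB -> 4 x 128 x 128 latents
```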
backend/huggingface/black-forest-labs/FLUX.1-dev/model_index.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "_class_name": "FluxPipeline",
+ "_diffusers_version": "0.30.0.dev0",
+ "scheduler": [
+ "diffusers",
+ "FlowMatchEulerDiscreteScheduler"
+ ],
+ "text_encoder": [
+ "transformers",
+ "CLIPTextModel"
+ ],
+ "text_encoder_2": [
+ "transformers",
+ "T5EncoderModel"
+ ],
+ "tokenizer": [
+ "transformers",
+ "CLIPTokenizer"
+ ],
+ "tokenizer_2": [
+ "transformers",
+ "T5TokenizerFast"
+ ],
+ "transformer": [
+ "diffusers",
+ "FluxTransformer2DModel"
+ ],
+ "vae": [
+ "diffusers",
+ "AutoencoderKL"
+ ]
+ }
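`model_index.json` is the pipeline manifest: each key names a submodule folder and the (library, class) pair diffusers loads from it. Standard usage for this layout, assuming diffusers >= 0.30 and access to the full weights on the Hub (only the configs are in this upload):

```python
import torch
from diffusers import FluxPipeline

# Requires access to the gated black-forest-labs/FLUX.1-dev repo and its
# weights, which are not part of this config-only commit.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
image = pipe("a photo of a forest at dawn",
             num_inference_steps=28, guidance_scale=3.5).images[0]
image.save("out.png")
```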
backend/huggingface/black-forest-labs/FLUX.1-dev/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+ "_class_name": "FlowMatchEulerDiscreteScheduler",
+ "_diffusers_version": "0.30.0.dev0",
+ "base_image_seq_len": 256,
+ "base_shift": 0.5,
+ "max_image_seq_len": 4096,
+ "max_shift": 1.15,
+ "num_train_timesteps": 1000,
+ "shift": 3.0,
+ "use_dynamic_shifting": true
+ }
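With `use_dynamic_shifting` enabled, the timestep shift is not the fixed `shift: 3.0`; instead, the shift parameter mu is interpolated linearly between `base_shift` (0.5) at 256 image tokens and `max_shift` (1.15) at 4096 tokens, so larger images get a stronger shift toward noisier timesteps. A sketch of that interpolation (this mirrors the linear-ramp helper diffusers applies for Flux):

```python
def calculate_shift(image_seq_len: int,
                    base_seq_len: int = 256, max_seq_len: int = 4096,
                    base_shift: float = 0.5, max_shift: float = 1.15) -> float:
    """Linearly interpolate the flow-matching shift (mu) from sequence length."""
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    return image_seq_len * m + b

# A 1024x1024 image is (1024 / 16) ** 2 = 4096 latent tokens
# (8x VAE reduction, then 2x2 patches) -> mu = 1.15, i.e. max_shift.
print(calculate_shift(4096))
```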