roshikhan301 committed
Commit 8a37e0a (verified)
1 Parent(s): 1cefbac

Upload 2113 files

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .dev_scripts/diff_images.py +32 -0
  2. .dev_scripts/images/v1_4_astronaut_rides_horse_plms_step50_seed42.png +0 -0
  3. .dev_scripts/sample_command.txt +1 -0
  4. .dev_scripts/test_regression_txt2img_dream_v1_4.sh +19 -0
  5. .dev_scripts/test_regression_txt2img_v1_4.sh +23 -0
  6. .dockerignore +9 -0
  7. .editorconfig +12 -0
  8. .git-blame-ignore-revs +2 -0
  9. .gitattributes +20 -35
  10. .github/CODEOWNERS +32 -0
  11. .github/ISSUE_TEMPLATE/BUG_REPORT.yml +146 -0
  12. .github/ISSUE_TEMPLATE/FEATURE_REQUEST.yml +53 -0
  13. .github/ISSUE_TEMPLATE/config.yml +14 -0
  14. .github/actions/install-frontend-deps/action.yml +33 -0
  15. .github/pr_labels.yml +59 -0
  16. .github/pull_request_template.md +22 -0
  17. .github/stale.yaml +19 -0
  18. .github/workflows/build-container.yml +109 -0
  19. .github/workflows/build-installer.yml +45 -0
  20. .github/workflows/clean-caches.yml +34 -0
  21. .github/workflows/close-inactive-issues.yml +28 -0
  22. .github/workflows/frontend-checks.yml +80 -0
  23. .github/workflows/frontend-tests.yml +60 -0
  24. .github/workflows/label-pr.yml +18 -0
  25. .github/workflows/mkdocs-material.yml +49 -0
  26. .github/workflows/python-checks.yml +76 -0
  27. .github/workflows/python-tests.yml +106 -0
  28. .github/workflows/release.yml +108 -0
  29. .gitignore +190 -0
  30. .gitmodules +0 -0
  31. .pre-commit-config.yaml +24 -0
  32. .prettierrc.yaml +13 -0
  33. InvokeAI_Statement_of_Values.md +84 -0
  34. LICENSE +176 -0
  35. LICENSE-SD1+SD2.txt +294 -0
  36. LICENSE-SDXL.txt +290 -0
  37. Makefile +82 -0
  38. README.md +157 -0
  39. Stable_Diffusion_v1_Model_Card.md +140 -0
  40. coverage/.gitignore +4 -0
  41. docker/.env.sample +27 -0
  42. docker/Dockerfile +124 -0
  43. docker/README.md +117 -0
  44. docker/docker-compose.yml +54 -0
  45. docker/docker-entrypoint.sh +41 -0
  46. docker/run.sh +36 -0
  47. docker/runpod-readme.md +60 -0
  48. docs/CODE_OF_CONDUCT.md +128 -0
  49. docs/RELEASE.md +173 -0
  50. docs/assets/Lincoln-and-Parrot-512-transparent.png +0 -0
.dev_scripts/diff_images.py ADDED
@@ -0,0 +1,32 @@
1
+ import argparse
2
+
3
+ import numpy as np
4
+ from PIL import Image
5
+
6
+
7
+ def read_image_int16(image_path):
8
+ image = Image.open(image_path)
9
+ return np.array(image).astype(np.int16)
10
+
11
+
12
+ def calc_images_mean_L1(image1_path, image2_path):
13
+ image1 = read_image_int16(image1_path)
14
+ image2 = read_image_int16(image2_path)
15
+ assert image1.shape == image2.shape
16
+
17
+ mean_L1 = np.abs(image1 - image2).mean()
18
+ return mean_L1
19
+
20
+
21
+ def parse_args():
22
+ parser = argparse.ArgumentParser()
23
+ parser.add_argument("image1_path")
24
+ parser.add_argument("image2_path")
25
+ args = parser.parse_args()
26
+ return args
27
+
28
+
29
+ if __name__ == "__main__":
30
+ args = parse_args()
31
+ mean_L1 = calc_images_mean_L1(args.image1_path, args.image2_path)
32
+ print(mean_L1)
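A note on the int16 cast in diff_images.py above: PIL loads 8-bit images as uint8 arrays, and uint8 subtraction wraps around instead of going negative, so casting to int16 first keeps the per-pixel difference signed before the absolute value is taken. Below is a minimal, self-contained sketch of that effect on synthetic arrays; the values are illustrative and the snippet is not part of the repository.

import numpy as np

# Two tiny 2x2 "images" as uint8, as PIL would return them.
a = np.array([[10, 200], [0, 255]], dtype=np.uint8)
b = np.array([[20, 100], [5, 250]], dtype=np.uint8)

# Naive uint8 arithmetic wraps: 10 - 20 becomes 246 instead of -10.
wrapped = np.abs(a - b).mean()  # 150.5 -- inflated by wraparound

# Casting to int16 first, as read_image_int16 does, keeps the signed difference.
signed = np.abs(a.astype(np.int16) - b.astype(np.int16)).mean()  # 30.0

print(wrapped, signed)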
.dev_scripts/images/v1_4_astronaut_rides_horse_plms_step50_seed42.png ADDED
.dev_scripts/sample_command.txt ADDED
@@ -0,0 +1 @@
1
+ "a photograph of an astronaut riding a horse" -s50 -S42
.dev_scripts/test_regression_txt2img_dream_v1_4.sh ADDED
@@ -0,0 +1,19 @@
1
+ # generate an image
2
+ PROMPT_FILE=".dev_scripts/sample_command.txt"
3
+ OUT_DIR="outputs/img-samples/test_regression_txt2img_v1_4"
4
+ SAMPLES_DIR=${OUT_DIR}
5
+ python scripts/dream.py \
6
+ --from_file ${PROMPT_FILE} \
7
+ --outdir ${OUT_DIR} \
8
+ --sampler plms
9
+
10
+ # original output by CompVis/stable-diffusion
11
+ IMAGE1=".dev_scripts/images/v1_4_astronaut_rides_horse_plms_step50_seed42.png"
12
+ # new output
13
+ IMAGE2=`ls -A ${SAMPLES_DIR}/*.png | sort | tail -n 1`
14
+
15
+ echo ""
16
+ echo "comparing the following two images"
17
+ echo "IMAGE1: ${IMAGE1}"
18
+ echo "IMAGE2: ${IMAGE2}"
19
+ python .dev_scripts/diff_images.py ${IMAGE1} ${IMAGE2}
.dev_scripts/test_regression_txt2img_v1_4.sh ADDED
@@ -0,0 +1,23 @@
1
+ # generate an image
2
+ PROMPT="a photograph of an astronaut riding a horse"
3
+ OUT_DIR="outputs/txt2img-samples/test_regression_txt2img_v1_4"
4
+ SAMPLES_DIR="outputs/txt2img-samples/test_regression_txt2img_v1_4/samples"
5
+ python scripts/orig_scripts/txt2img.py \
6
+ --prompt "${PROMPT}" \
7
+ --outdir ${OUT_DIR} \
8
+ --plms \
9
+ --ddim_steps 50 \
10
+ --n_samples 1 \
11
+ --n_iter 1 \
12
+ --seed 42
13
+
14
+ # original output by CompVis/stable-diffusion
15
+ IMAGE1=".dev_scripts/images/v1_4_astronaut_rides_horse_plms_step50_seed42.png"
16
+ # new output
17
+ IMAGE2=`ls -A ${SAMPLES_DIR}/*.png | sort | tail -n 1`
18
+
19
+ echo ""
20
+ echo "comparing the following two images"
21
+ echo "IMAGE1: ${IMAGE1}"
22
+ echo "IMAGE2: ${IMAGE2}"
23
+ python .dev_scripts/diff_images.py ${IMAGE1} ${IMAGE2}
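Both regression scripts above only print the mean L1 distance and leave the pass/fail judgement to whoever reads the output. If an automated gate were wanted, a small wrapper along the following lines could consume the script's stdout; the threshold value and the assumption of running from the repository root are illustrative, not part of this commit.

import subprocess
import sys

# Hypothetical tolerance for "visually identical" regression outputs.
THRESHOLD = 1.0

def images_match(image1: str, image2: str) -> bool:
    # diff_images.py prints a single number: the mean L1 distance.
    result = subprocess.run(
        [sys.executable, ".dev_scripts/diff_images.py", image1, image2],
        capture_output=True,
        text=True,
        check=True,
    )
    return float(result.stdout.strip()) <= THRESHOLD

if __name__ == "__main__":
    ok = images_match(sys.argv[1], sys.argv[2])
    print("PASS" if ok else "FAIL")
    sys.exit(0 if ok else 1)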
.dockerignore ADDED
@@ -0,0 +1,9 @@
1
+ *
2
+ !invokeai
3
+ !pyproject.toml
4
+ !docker/docker-entrypoint.sh
5
+ !LICENSE
6
+
7
+ **/node_modules
8
+ **/__pycache__
9
+ **/*.egg-info
.editorconfig ADDED
@@ -0,0 +1,12 @@
1
+ # All files
2
+ [*]
3
+ charset = utf-8
4
+ end_of_line = lf
5
+ indent_size = 2
6
+ indent_style = space
7
+ insert_final_newline = true
8
+ trim_trailing_whitespace = true
9
+
10
+ # Python
11
+ [*.py]
12
+ indent_size = 4
.git-blame-ignore-revs ADDED
@@ -0,0 +1,2 @@
1
+ b3dccfaeb636599c02effc377cdd8a87d658256c
2
+ 218b6d0546b990fc449c876fb99f44b50c4daa35
.gitattributes CHANGED
@@ -1,35 +1,20 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ # Auto normalizes line endings on commit so devs don't need to change local settings.
2
+ # Only affects text files and ignores other file types.
3
+ # For more info see: https://www.aleksandrhovhannisyan.com/blog/crlf-vs-lf-normalizing-line-endings-in-git/
4
+ * text=auto
5
+ docker/** text eol=lf
+ docs/assets/features/restoration-montage.png filter=lfs diff=lfs merge=lfs -text
6
+ docs/assets/features/upscaling-montage.png filter=lfs diff=lfs merge=lfs -text
7
+ docs/assets/invoke_ai_banner.png filter=lfs diff=lfs merge=lfs -text
8
+ docs/assets/invoke_web_server.png filter=lfs diff=lfs merge=lfs -text
9
+ docs/assets/invoke-web-server-1.png filter=lfs diff=lfs merge=lfs -text
10
+ docs/assets/invoke-web-server-9.png filter=lfs diff=lfs merge=lfs -text
11
+ docs/assets/stable-samples/txt2img/merged-0005.png filter=lfs diff=lfs merge=lfs -text
12
+ docs/assets/stable-samples/txt2img/merged-0006.png filter=lfs diff=lfs merge=lfs -text
13
+ docs/assets/stable-samples/txt2img/merged-0007.png filter=lfs diff=lfs merge=lfs -text
14
+ docs/assets/step7.png filter=lfs diff=lfs merge=lfs -text
15
+ docs/assets/truncation_comparison.jpg filter=lfs diff=lfs merge=lfs -text
16
+ invokeai/assets/data/imagenet_train_hr_indices.p filter=lfs diff=lfs merge=lfs -text
17
+ invokeai/assets/results.gif filter=lfs diff=lfs merge=lfs -text
18
+ invokeai/assets/stable-samples/img2img/upscaling-in.png filter=lfs diff=lfs merge=lfs -text
19
+ invokeai/assets/stable-samples/img2img/upscaling-out.png filter=lfs diff=lfs merge=lfs -text
20
+ invokeai/assets/txt2img-preview.png filter=lfs diff=lfs merge=lfs -text
.github/CODEOWNERS ADDED
@@ -0,0 +1,32 @@
1
+ # continuous integration
2
+ /.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr
3
+
4
+ # documentation
5
+ /docs/ @lstein @blessedcoolant @hipsterusername @Millu
6
+ /mkdocs.yml @lstein @blessedcoolant @hipsterusername @Millu
7
+
8
+ # nodes
9
+ /invokeai/app/ @Kyle0654 @blessedcoolant @psychedelicious @brandonrising @hipsterusername
10
+
11
+ # installation and configuration
12
+ /pyproject.toml @lstein @blessedcoolant @hipsterusername
13
+ /docker/ @lstein @blessedcoolant @hipsterusername @ebr
14
+ /scripts/ @ebr @lstein @hipsterusername
15
+ /installer/ @lstein @ebr @hipsterusername
16
+ /invokeai/assets @lstein @ebr @hipsterusername
17
+ /invokeai/configs @lstein @hipsterusername
18
+ /invokeai/version @lstein @blessedcoolant @hipsterusername
19
+
20
+ # web ui
21
+ /invokeai/frontend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername
22
+ /invokeai/backend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername
23
+
24
+ # generation, model management, postprocessing
25
+ /invokeai/backend @damian0815 @lstein @blessedcoolant @gregghelt2 @StAlKeR7779 @brandonrising @ryanjdick @hipsterusername
26
+
27
+ # front ends
28
+ /invokeai/frontend/CLI @lstein @hipsterusername
29
+ /invokeai/frontend/install @lstein @ebr @hipsterusername
30
+ /invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
31
+ /invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
32
+ /invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp @hipsterusername
.github/ISSUE_TEMPLATE/BUG_REPORT.yml ADDED
@@ -0,0 +1,146 @@
1
+ name: 🐞 Bug Report
2
+
3
+ description: File a bug report
4
+
5
+ title: '[bug]: '
6
+
7
+ labels: ['bug']
8
+
9
+ body:
10
+ - type: markdown
11
+ attributes:
12
+ value: |
13
+ Thanks for taking the time to fill out this Bug Report!
14
+
15
+ - type: checkboxes
16
+ attributes:
17
+ label: Is there an existing issue for this problem?
18
+ description: |
19
+ Please [search](https://github.com/invoke-ai/InvokeAI/issues) first to see if an issue already exists for the problem.
20
+ options:
21
+ - label: I have searched the existing issues
22
+ required: true
23
+
24
+ - type: markdown
25
+ attributes:
26
+ value: __Describe your environment__
27
+
28
+ - type: dropdown
29
+ id: os_dropdown
30
+ attributes:
31
+ label: Operating system
32
+ description: Your computer's operating system.
33
+ multiple: false
34
+ options:
35
+ - 'Linux'
36
+ - 'Windows'
37
+ - 'macOS'
38
+ - 'other'
39
+ validations:
40
+ required: true
41
+
42
+ - type: dropdown
43
+ id: gpu_dropdown
44
+ attributes:
45
+ label: GPU vendor
46
+ description: Your GPU's vendor.
47
+ multiple: false
48
+ options:
49
+ - 'Nvidia (CUDA)'
50
+ - 'AMD (ROCm)'
51
+ - 'Apple Silicon (MPS)'
52
+ - 'None (CPU)'
53
+ validations:
54
+ required: true
55
+
56
+ - type: input
57
+ id: gpu_model
58
+ attributes:
59
+ label: GPU model
60
+ description: Your GPU's model. If on Apple Silicon, this is your Mac's chip. Leave blank if on CPU.
61
+ placeholder: ex. RTX 2080 Ti, Mac M1 Pro
62
+ validations:
63
+ required: false
64
+
65
+ - type: input
66
+ id: vram
67
+ attributes:
68
+ label: GPU VRAM
69
+ description: Your GPU's VRAM. If on Apple Silicon, this is your Mac's unified memory. Leave blank if on CPU.
70
+ placeholder: 8GB
71
+ validations:
72
+ required: false
73
+
74
+ - type: input
75
+ id: version-number
76
+ attributes:
77
+ label: Version number
78
+ description: |
79
+ The version of Invoke you have installed. If it is not the latest version, please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
80
+ placeholder: ex. 3.6.1
81
+ validations:
82
+ required: true
83
+
84
+ - type: input
85
+ id: browser-version
86
+ attributes:
87
+ label: Browser
88
+ description: Your web browser and version.
89
+ placeholder: ex. Firefox 123.0b3
90
+ validations:
91
+ required: true
92
+
93
+ - type: textarea
94
+ id: python-deps
95
+ attributes:
96
+ label: Python dependencies
97
+ description: |
98
+ If the problem occurred during image generation, click the gear icon at the bottom left corner, click "About", click the copy button and then paste here.
99
+ validations:
100
+ required: false
101
+
102
+ - type: textarea
103
+ id: what-happened
104
+ attributes:
105
+ label: What happened
106
+ description: |
107
+ Describe what happened. Include any relevant error messages, stack traces and screenshots here.
108
+ placeholder: I clicked button X and then Y happened.
109
+ validations:
110
+ required: true
111
+
112
+ - type: textarea
113
+ id: what-you-expected
114
+ attributes:
115
+ label: What you expected to happen
116
+ description: Describe what you expected to happen.
117
+ placeholder: I expected Z to happen.
118
+ validations:
119
+ required: true
120
+
121
+ - type: textarea
122
+ id: how-to-repro
123
+ attributes:
124
+ label: How to reproduce the problem
125
+ description: List steps to reproduce the problem.
126
+ placeholder: Start the app, generate an image with these settings, then click button X.
127
+ validations:
128
+ required: false
129
+
130
+ - type: textarea
131
+ id: additional-context
132
+ attributes:
133
+ label: Additional context
134
+ description: Any other context that might help us to understand the problem.
135
+ placeholder: Only happens when there is full moon and Friday the 13th on Christmas Eve 🎅🏻
136
+ validations:
137
+ required: false
138
+
139
+ - type: input
140
+ id: discord-username
141
+ attributes:
142
+ label: Discord username
143
+ description: If you are on the Invoke discord and would prefer to be contacted there, please provide your username.
144
+ placeholder: supercoolusername123
145
+ validations:
146
+ required: false
.github/ISSUE_TEMPLATE/FEATURE_REQUEST.yml ADDED
@@ -0,0 +1,53 @@
1
+ name: Feature Request
2
+ description: Contribute an idea or request a new feature
3
+ title: '[enhancement]: '
4
+ labels: ['enhancement']
5
+ # assignees:
6
+ # - lstein
7
+ # - tildebyte
8
+ body:
9
+ - type: markdown
10
+ attributes:
11
+ value: |
12
+ Thanks for taking the time to fill out this feature request!
13
+
14
+ - type: checkboxes
15
+ attributes:
16
+ label: Is there an existing issue for this?
17
+ description: |
18
+ Please make use of the [search function](https://github.com/invoke-ai/InvokeAI/labels/enhancement)
19
+ to see if a similar issue already exists for the feature you want to request
20
+ options:
21
+ - label: I have searched the existing issues
22
+ required: true
23
+
24
+ - type: input
25
+ id: contact
26
+ attributes:
27
+ label: Contact Details
28
+ description: __OPTIONAL__ How could we get in touch with you if we need more info (besides this issue)?
29
+ placeholder: ex. [email protected], discordname, twitter, ...
30
+ validations:
31
+ required: false
32
+
33
+ - type: textarea
34
+ id: whatisexpected
35
+ attributes:
36
+ label: What should this feature add?
37
+ description: Explain the functionality this feature should add. Feature requests should be for single features. Please create multiple requests if you want to request multiple features.
38
+ placeholder: |
39
+ I'd like a button that creates an image of banana sushi every time I press it. Each image should be different. There should be a toggle next to the button that enables strawberry mode, in which the images are of strawberry sushi instead.
40
+ validations:
41
+ required: true
42
+
43
+ - type: textarea
44
+ attributes:
45
+ label: Alternatives
46
+ description: Describe alternatives you've considered
47
+ placeholder: A clear and concise description of any alternative solutions or features you've considered.
48
+
49
+ - type: textarea
50
+ attributes:
51
+ label: Additional Content
52
+ description: Add any other context or screenshots about the feature request here.
53
+ placeholder: This is a mockup of the design how I imagine it <screenshot>
.github/ISSUE_TEMPLATE/config.yml ADDED
@@ -0,0 +1,14 @@
1
+ blank_issues_enabled: false
2
+ contact_links:
3
+ - name: Project-Documentation
4
+ url: https://invoke-ai.github.io/InvokeAI/
5
+ about: Should be your first place to go when looking for manuals/FAQs regarding our InvokeAI Toolkit
6
+ - name: Discord
7
+ url: https://discord.gg/ZmtBAhwWhy
8
+ about: Our Discord Community could maybe help you out via live-chat
9
+ - name: GitHub Community Support
10
+ url: https://github.com/orgs/community/discussions
11
+ about: Please ask and answer questions regarding the GitHub Platform here.
12
+ - name: GitHub Security Bug Bounty
13
+ url: https://bounty.github.com/
14
+ about: Please report security vulnerabilities of the GitHub Platform here.
.github/actions/install-frontend-deps/action.yml ADDED
@@ -0,0 +1,33 @@
1
+ name: install frontend dependencies
2
+ description: Installs frontend dependencies with pnpm, with caching
3
+ runs:
4
+ using: 'composite'
5
+ steps:
6
+ - name: setup node 18
7
+ uses: actions/setup-node@v4
8
+ with:
9
+ node-version: '18'
10
+
11
+ - name: setup pnpm
12
+ uses: pnpm/action-setup@v4
13
+ with:
14
+ version: 8.15.6
15
+ run_install: false
16
+
17
+ - name: get pnpm store directory
18
+ shell: bash
19
+ run: |
20
+ echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
21
+
22
+ - name: setup cache
23
+ uses: actions/cache@v4
24
+ with:
25
+ path: ${{ env.STORE_PATH }}
26
+ key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
27
+ restore-keys: |
28
+ ${{ runner.os }}-pnpm-store-
29
+
30
+ - name: install frontend dependencies
31
+ run: pnpm install --prefer-frozen-lockfile
32
+ shell: bash
33
+ working-directory: invokeai/frontend/web
.github/pr_labels.yml ADDED
@@ -0,0 +1,59 @@
1
+ root:
2
+ - changed-files:
3
+ - any-glob-to-any-file: '*'
4
+
5
+ python-deps:
6
+ - changed-files:
7
+ - any-glob-to-any-file: 'pyproject.toml'
8
+
9
+ python:
10
+ - changed-files:
11
+ - all-globs-to-any-file:
12
+ - 'invokeai/**'
13
+ - '!invokeai/frontend/web/**'
14
+
15
+ python-tests:
16
+ - changed-files:
17
+ - any-glob-to-any-file: 'tests/**'
18
+
19
+ ci-cd:
20
+ - changed-files:
21
+ - any-glob-to-any-file: .github/**
22
+
23
+ docker:
24
+ - changed-files:
25
+ - any-glob-to-any-file: docker/**
26
+
27
+ installer:
28
+ - changed-files:
29
+ - any-glob-to-any-file: installer/**
30
+
31
+ docs:
32
+ - changed-files:
33
+ - any-glob-to-any-file: docs/**
34
+
35
+ invocations:
36
+ - changed-files:
37
+ - any-glob-to-any-file: 'invokeai/app/invocations/**'
38
+
39
+ backend:
40
+ - changed-files:
41
+ - any-glob-to-any-file: 'invokeai/backend/**'
42
+
43
+ api:
44
+ - changed-files:
45
+ - any-glob-to-any-file: 'invokeai/app/api/**'
46
+
47
+ services:
48
+ - changed-files:
49
+ - any-glob-to-any-file: 'invokeai/app/services/**'
50
+
51
+ frontend-deps:
52
+ - changed-files:
53
+ - any-glob-to-any-file:
54
+ - '**/*/package.json'
55
+ - '**/*/pnpm-lock.yaml'
56
+
57
+ frontend:
58
+ - changed-files:
59
+ - any-glob-to-any-file: 'invokeai/frontend/web/**'
.github/pull_request_template.md ADDED
@@ -0,0 +1,22 @@
1
+ ## Summary
2
+
3
+ <!--A description of the changes in this PR. Include the kind of change (fix, feature, docs, etc), the "why" and the "how". Screenshots or videos are useful for frontend changes.-->
4
+
5
+ ## Related Issues / Discussions
6
+
7
+ <!--WHEN APPLICABLE: List any related issues or discussions on github or discord. If this PR closes an issue, please use the "Closes #1234" format, so that the issue will be automatically closed when the PR merges.-->
8
+
9
+ ## QA Instructions
10
+
11
+ <!--WHEN APPLICABLE: Describe how you have tested the changes in this PR. Provide enough detail that a reviewer can reproduce your tests.-->
12
+
13
+ ## Merge Plan
14
+
15
+ <!--WHEN APPLICABLE: Large PRs, or PRs that touch sensitive things like DB schemas, may need some care when merging. For example, a careful rebase by the change author, timing to not interfere with a pending release, or a message to contributors on discord after merging.-->
16
+
17
+ ## Checklist
18
+
19
+ - [ ] _The PR has a short but descriptive title, suitable for a changelog_
20
+ - [ ] _Tests added / updated (if applicable)_
21
+ - [ ] _Documentation added / updated (if applicable)_
22
+ - [ ] _Updated `What's New` copy (if doing a release after this PR)_
.github/stale.yaml ADDED
@@ -0,0 +1,19 @@
1
+ # Number of days of inactivity before an issue becomes stale
2
+ daysUntilStale: 28
3
+ # Number of days of inactivity before a stale issue is closed
4
+ daysUntilClose: 14
5
+ # Issues with these labels will never be considered stale
6
+ exemptLabels:
7
+ - pinned
8
+ - security
9
+ # Label to use when marking an issue as stale
10
+ staleLabel: stale
11
+ # Comment to post when marking an issue as stale. Set to `false` to disable
12
+ markComment: >
13
+ This issue has been automatically marked as stale because it has not had
14
+ recent activity. It will be closed if no further activity occurs. Please
15
+ update the ticket if this is still a problem on the latest release.
16
+ # Comment to post when closing a stale issue. Set to `false` to disable
17
+ closeComment: >
18
+ Due to inactivity, this issue has been automatically closed. If this is
19
+ still a problem on the latest release, please recreate the issue.
.github/workflows/build-container.yml ADDED
@@ -0,0 +1,109 @@
1
+ name: build container image
2
+ on:
3
+ push:
4
+ branches:
5
+ - 'main'
6
+ paths:
7
+ - 'pyproject.toml'
8
+ - '.dockerignore'
9
+ - 'invokeai/**'
10
+ - 'docker/Dockerfile'
11
+ - 'docker/docker-entrypoint.sh'
12
+ - 'workflows/build-container.yml'
13
+ tags:
14
+ - 'v*.*.*'
15
+ workflow_dispatch:
16
+ inputs:
17
+ push-to-registry:
18
+ description: Push the built image to the container registry
19
+ required: false
20
+ type: boolean
21
+ default: false
22
+
23
+ permissions:
24
+ contents: write
25
+ packages: write
26
+
27
+ jobs:
28
+ docker:
29
+ if: github.event.pull_request.draft == false
30
+ strategy:
31
+ fail-fast: false
32
+ matrix:
33
+ gpu-driver:
34
+ - cuda
35
+ - cpu
36
+ - rocm
37
+ runs-on: ubuntu-latest
38
+ name: ${{ matrix.gpu-driver }}
39
+ env:
40
+ # torch/arm64 does not support GPU currently, so arm64 builds
41
+ # would not be GPU-accelerated.
42
+ # re-enable arm64 if there is sufficient demand.
43
+ # PLATFORMS: 'linux/amd64,linux/arm64'
44
+ PLATFORMS: 'linux/amd64'
45
+ steps:
46
+ - name: Free up more disk space on the runner
47
+ # https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
48
+ run: |
49
+ echo "----- Free space before cleanup"
50
+ df -h
51
+ sudo rm -rf /usr/share/dotnet
52
+ sudo rm -rf "$AGENT_TOOLSDIRECTORY"
53
+ sudo swapoff /mnt/swapfile
54
+ sudo rm -rf /mnt/swapfile
55
+ echo "----- Free space after cleanup"
56
+ df -h
57
+
58
+ - name: Checkout
59
+ uses: actions/checkout@v4
60
+
61
+ - name: Docker meta
62
+ id: meta
63
+ uses: docker/metadata-action@v5
64
+ with:
65
+ github-token: ${{ secrets.GITHUB_TOKEN }}
66
+ images: |
67
+ ghcr.io/${{ github.repository }}
68
+ tags: |
69
+ type=ref,event=branch
70
+ type=ref,event=tag
71
+ type=pep440,pattern={{version}}
72
+ type=pep440,pattern={{major}}.{{minor}}
73
+ type=pep440,pattern={{major}}
74
+ type=sha,enable=true,prefix=sha-,format=short
75
+ flavor: |
76
+ latest=${{ matrix.gpu-driver == 'cuda' && github.ref == 'refs/heads/main' }}
77
+ suffix=-${{ matrix.gpu-driver }},onlatest=false
78
+
79
+ - name: Set up QEMU
80
+ uses: docker/setup-qemu-action@v3
81
+
82
+ - name: Set up Docker Buildx
83
+ uses: docker/setup-buildx-action@v3
84
+ with:
85
+ platforms: ${{ env.PLATFORMS }}
86
+
87
+ - name: Login to GitHub Container Registry
88
+ if: github.event_name != 'pull_request'
89
+ uses: docker/login-action@v3
90
+ with:
91
+ registry: ghcr.io
92
+ username: ${{ github.repository_owner }}
93
+ password: ${{ secrets.GITHUB_TOKEN }}
94
+
95
+ - name: Build container
96
+ timeout-minutes: 40
97
+ id: docker_build
98
+ uses: docker/build-push-action@v6
99
+ with:
100
+ context: .
101
+ file: docker/Dockerfile
102
+ platforms: ${{ env.PLATFORMS }}
103
+ push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' || github.event.inputs.push-to-registry }}
104
+ tags: ${{ steps.meta.outputs.tags }}
105
+ labels: ${{ steps.meta.outputs.labels }}
106
+ cache-from: |
107
+ type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
108
+ type=gha,scope=main-${{ matrix.gpu-driver }}
109
+ cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
.github/workflows/build-installer.yml ADDED
@@ -0,0 +1,45 @@
1
+ # Builds and uploads the installer and python build artifacts.
2
+
3
+ name: build installer
4
+
5
+ on:
6
+ workflow_dispatch:
7
+ workflow_call:
8
+
9
+ jobs:
10
+ build-installer:
11
+ runs-on: ubuntu-latest
12
+ timeout-minutes: 5 # expected run time: <2 min
13
+ steps:
14
+ - name: checkout
15
+ uses: actions/checkout@v4
16
+
17
+ - name: setup python
18
+ uses: actions/setup-python@v5
19
+ with:
20
+ python-version: '3.10'
21
+ cache: pip
22
+ cache-dependency-path: pyproject.toml
23
+
24
+ - name: install pypa/build
25
+ run: pip install --upgrade build
26
+
27
+ - name: setup frontend
28
+ uses: ./.github/actions/install-frontend-deps
29
+
30
+ - name: create installer
31
+ id: create_installer
32
+ run: ./create_installer.sh
33
+ working-directory: installer
34
+
35
+ - name: upload python distribution artifact
36
+ uses: actions/upload-artifact@v4
37
+ with:
38
+ name: dist
39
+ path: ${{ steps.create_installer.outputs.DIST_PATH }}
40
+
41
+ - name: upload installer artifact
42
+ uses: actions/upload-artifact@v4
43
+ with:
44
+ name: installer
45
+ path: ${{ steps.create_installer.outputs.INSTALLER_PATH }}
.github/workflows/clean-caches.yml ADDED
@@ -0,0 +1,34 @@
1
+ name: cleanup caches by a branch
2
+ on:
3
+ pull_request:
4
+ types:
5
+ - closed
6
+ workflow_dispatch:
7
+
8
+ jobs:
9
+ cleanup:
10
+ runs-on: ubuntu-latest
11
+ steps:
12
+ - name: Check out code
13
+ uses: actions/checkout@v3
14
+
15
+ - name: Cleanup
16
+ run: |
17
+ gh extension install actions/gh-actions-cache
18
+
19
+ REPO=${{ github.repository }}
20
+ BRANCH=${{ github.ref }}
21
+
22
+ echo "Fetching list of cache keys"
23
+ cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH | cut -f 1 )
24
+
25
+ ## Setting this to not fail the workflow while deleting cache keys.
26
+ set +e
27
+ echo "Deleting caches..."
28
+ for cacheKey in $cacheKeysForPR
29
+ do
30
+ gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm
31
+ done
32
+ echo "Done"
33
+ env:
34
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/close-inactive-issues.yml ADDED
@@ -0,0 +1,28 @@
1
+ name: Close inactive issues
2
+ on:
3
+ schedule:
4
+ - cron: "00 4 * * *"
5
+
6
+ env:
7
+ DAYS_BEFORE_ISSUE_STALE: 30
8
+ DAYS_BEFORE_ISSUE_CLOSE: 14
9
+
10
+ jobs:
11
+ close-issues:
12
+ runs-on: ubuntu-latest
13
+ permissions:
14
+ issues: write
15
+ pull-requests: write
16
+ steps:
17
+ - uses: actions/stale@v8
18
+ with:
19
+ days-before-issue-stale: ${{ env.DAYS_BEFORE_ISSUE_STALE }}
20
+ days-before-issue-close: ${{ env.DAYS_BEFORE_ISSUE_CLOSE }}
21
+ stale-issue-label: "Inactive Issue"
22
+ stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. If this issue is still being experienced, please reply with an updated confirmation that the issue is still being experienced with the latest release."
23
+ close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please recreate the issue."
24
+ days-before-pr-stale: -1
25
+ days-before-pr-close: -1
26
+ exempt-issue-labels: "Active Issue"
27
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
28
+ operations-per-run: 500
.github/workflows/frontend-checks.yml ADDED
@@ -0,0 +1,80 @@
1
+ # Runs frontend code quality checks.
2
+ #
3
+ # Checks for changes to frontend files before running the checks.
4
+ # If always_run is true, always runs the checks.
5
+
6
+ name: 'frontend checks'
7
+
8
+ on:
9
+ push:
10
+ branches:
11
+ - 'main'
12
+ pull_request:
13
+ types:
14
+ - 'ready_for_review'
15
+ - 'opened'
16
+ - 'synchronize'
17
+ merge_group:
18
+ workflow_dispatch:
19
+ inputs:
20
+ always_run:
21
+ description: 'Always run the checks'
22
+ required: true
23
+ type: boolean
24
+ default: true
25
+ workflow_call:
26
+ inputs:
27
+ always_run:
28
+ description: 'Always run the checks'
29
+ required: true
30
+ type: boolean
31
+ default: true
32
+
33
+ defaults:
34
+ run:
35
+ working-directory: invokeai/frontend/web
36
+
37
+ jobs:
38
+ frontend-checks:
39
+ runs-on: ubuntu-latest
40
+ timeout-minutes: 10 # expected run time: <2 min
41
+ steps:
42
+ - uses: actions/checkout@v4
43
+
44
+ - name: check for changed frontend files
45
+ if: ${{ inputs.always_run != true }}
46
+ id: changed-files
47
+ uses: tj-actions/changed-files@v42
48
+ with:
49
+ files_yaml: |
50
+ frontend:
51
+ - 'invokeai/frontend/web/**'
52
+
53
+ - name: install dependencies
54
+ if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
55
+ uses: ./.github/actions/install-frontend-deps
56
+
57
+ - name: tsc
58
+ if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
59
+ run: 'pnpm lint:tsc'
60
+ shell: bash
61
+
62
+ - name: dpdm
63
+ if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
64
+ run: 'pnpm lint:dpdm'
65
+ shell: bash
66
+
67
+ - name: eslint
68
+ if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
69
+ run: 'pnpm lint:eslint'
70
+ shell: bash
71
+
72
+ - name: prettier
73
+ if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
74
+ run: 'pnpm lint:prettier'
75
+ shell: bash
76
+
77
+ - name: knip
78
+ if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
79
+ run: 'pnpm lint:knip'
80
+ shell: bash
.github/workflows/frontend-tests.yml ADDED
@@ -0,0 +1,60 @@
1
+ # Runs frontend tests.
2
+ #
3
+ # Checks for changes to frontend files before running the tests.
4
+ # If always_run is true, always runs the tests.
5
+
6
+ name: 'frontend tests'
7
+
8
+ on:
9
+ push:
10
+ branches:
11
+ - 'main'
12
+ pull_request:
13
+ types:
14
+ - 'ready_for_review'
15
+ - 'opened'
16
+ - 'synchronize'
17
+ merge_group:
18
+ workflow_dispatch:
19
+ inputs:
20
+ always_run:
21
+ description: 'Always run the tests'
22
+ required: true
23
+ type: boolean
24
+ default: true
25
+ workflow_call:
26
+ inputs:
27
+ always_run:
28
+ description: 'Always run the tests'
29
+ required: true
30
+ type: boolean
31
+ default: true
32
+
33
+ defaults:
34
+ run:
35
+ working-directory: invokeai/frontend/web
36
+
37
+ jobs:
38
+ frontend-tests:
39
+ runs-on: ubuntu-latest
40
+ timeout-minutes: 10 # expected run time: <2 min
41
+ steps:
42
+ - uses: actions/checkout@v4
43
+
44
+ - name: check for changed frontend files
45
+ if: ${{ inputs.always_run != true }}
46
+ id: changed-files
47
+ uses: tj-actions/changed-files@v42
48
+ with:
49
+ files_yaml: |
50
+ frontend:
51
+ - 'invokeai/frontend/web/**'
52
+
53
+ - name: install dependencies
54
+ if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
55
+ uses: ./.github/actions/install-frontend-deps
56
+
57
+ - name: vitest
58
+ if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
59
+ run: 'pnpm test:no-watch'
60
+ shell: bash
.github/workflows/label-pr.yml ADDED
@@ -0,0 +1,18 @@
1
+ name: 'label PRs'
2
+ on:
3
+ - pull_request_target
4
+
5
+ jobs:
6
+ labeler:
7
+ permissions:
8
+ contents: read
9
+ pull-requests: write
10
+ runs-on: ubuntu-latest
11
+ steps:
12
+ - name: checkout
13
+ uses: actions/checkout@v4
14
+
15
+ - name: label PRs
16
+ uses: actions/labeler@v5
17
+ with:
18
+ configuration-path: .github/pr_labels.yml
.github/workflows/mkdocs-material.yml ADDED
@@ -0,0 +1,49 @@
1
+ # This is mostly a copy-paste from https://github.com/squidfunk/mkdocs-material/blob/master/docs/publishing-your-site.md
2
+
3
+ name: mkdocs
4
+
5
+ on:
6
+ push:
7
+ branches:
8
+ - main
9
+ workflow_dispatch:
10
+
11
+ permissions:
12
+ contents: write
13
+
14
+ jobs:
15
+ deploy:
16
+ if: github.event.pull_request.draft == false
17
+ runs-on: ubuntu-latest
18
+ env:
19
+ REPO_URL: '${{ github.server_url }}/${{ github.repository }}'
20
+ REPO_NAME: '${{ github.repository }}'
21
+ SITE_URL: 'https://${{ github.repository_owner }}.github.io/InvokeAI'
22
+
23
+ steps:
24
+ - name: checkout
25
+ uses: actions/checkout@v4
26
+
27
+ - name: setup python
28
+ uses: actions/setup-python@v5
29
+ with:
30
+ python-version: '3.10'
31
+ cache: pip
32
+ cache-dependency-path: pyproject.toml
33
+
34
+ - name: set cache id
35
+ run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
36
+
37
+ - name: use cache
38
+ uses: actions/cache@v4
39
+ with:
40
+ key: mkdocs-material-${{ env.cache_id }}
41
+ path: .cache
42
+ restore-keys: |
43
+ mkdocs-material-
44
+
45
+ - name: install dependencies
46
+ run: python -m pip install ".[docs]"
47
+
48
+ - name: build & deploy
49
+ run: mkdocs gh-deploy --force
.github/workflows/python-checks.yml ADDED
@@ -0,0 +1,76 @@
1
+ # Runs python code quality checks.
2
+ #
3
+ # Checks for changes to python files before running the checks.
4
+ # If always_run is true, always runs the checks.
5
+ #
6
+ # TODO: Add mypy or pyright to the checks.
7
+
8
+ name: 'python checks'
9
+
10
+ on:
11
+ push:
12
+ branches:
13
+ - 'main'
14
+ pull_request:
15
+ types:
16
+ - 'ready_for_review'
17
+ - 'opened'
18
+ - 'synchronize'
19
+ merge_group:
20
+ workflow_dispatch:
21
+ inputs:
22
+ always_run:
23
+ description: 'Always run the checks'
24
+ required: true
25
+ type: boolean
26
+ default: true
27
+ workflow_call:
28
+ inputs:
29
+ always_run:
30
+ description: 'Always run the checks'
31
+ required: true
32
+ type: boolean
33
+ default: true
34
+
35
+ jobs:
36
+ python-checks:
37
+ runs-on: ubuntu-latest
38
+ timeout-minutes: 5 # expected run time: <1 min
39
+ steps:
40
+ - name: checkout
41
+ uses: actions/checkout@v4
42
+
43
+ - name: check for changed python files
44
+ if: ${{ inputs.always_run != true }}
45
+ id: changed-files
46
+ uses: tj-actions/changed-files@v42
47
+ with:
48
+ files_yaml: |
49
+ python:
50
+ - 'pyproject.toml'
51
+ - 'invokeai/**'
52
+ - '!invokeai/frontend/web/**'
53
+ - 'tests/**'
54
+
55
+ - name: setup python
56
+ if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
57
+ uses: actions/setup-python@v5
58
+ with:
59
+ python-version: '3.10'
60
+ cache: pip
61
+ cache-dependency-path: pyproject.toml
62
+
63
+ - name: install ruff
64
+ if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
65
+ run: pip install ruff==0.6.0
66
+ shell: bash
67
+
68
+ - name: ruff check
69
+ if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
70
+ run: ruff check --output-format=github .
71
+ shell: bash
72
+
73
+ - name: ruff format
74
+ if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
75
+ run: ruff format --check .
76
+ shell: bash
.github/workflows/python-tests.yml ADDED
@@ -0,0 +1,106 @@
1
+ # Runs python tests on a matrix of python versions and platforms.
2
+ #
3
+ # Checks for changes to python files before running the tests.
4
+ # If always_run is true, always runs the tests.
5
+
6
+ name: 'python tests'
7
+
8
+ on:
9
+ push:
10
+ branches:
11
+ - 'main'
12
+ pull_request:
13
+ types:
14
+ - 'ready_for_review'
15
+ - 'opened'
16
+ - 'synchronize'
17
+ merge_group:
18
+ workflow_dispatch:
19
+ inputs:
20
+ always_run:
21
+ description: 'Always run the tests'
22
+ required: true
23
+ type: boolean
24
+ default: true
25
+ workflow_call:
26
+ inputs:
27
+ always_run:
28
+ description: 'Always run the tests'
29
+ required: true
30
+ type: boolean
31
+ default: true
32
+
33
+ concurrency:
34
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
35
+ cancel-in-progress: true
36
+
37
+ jobs:
38
+ matrix:
39
+ strategy:
40
+ matrix:
41
+ python-version:
42
+ - '3.10'
43
+ - '3.11'
44
+ platform:
45
+ - linux-cuda-11_7
46
+ - linux-rocm-5_2
47
+ - linux-cpu
48
+ - macos-default
49
+ - windows-cpu
50
+ include:
51
+ - platform: linux-cuda-11_7
52
+ os: ubuntu-22.04
53
+ github-env: $GITHUB_ENV
54
+ - platform: linux-rocm-5_2
55
+ os: ubuntu-22.04
56
+ extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
57
+ github-env: $GITHUB_ENV
58
+ - platform: linux-cpu
59
+ os: ubuntu-22.04
60
+ extra-index-url: 'https://download.pytorch.org/whl/cpu'
61
+ github-env: $GITHUB_ENV
62
+ - platform: macos-default
63
+ os: macOS-14
64
+ github-env: $GITHUB_ENV
65
+ - platform: windows-cpu
66
+ os: windows-2022
67
+ github-env: $env:GITHUB_ENV
68
+ name: 'py${{ matrix.python-version }}: ${{ matrix.platform }}'
69
+ runs-on: ${{ matrix.os }}
70
+ timeout-minutes: 15 # expected run time: 2-6 min, depending on platform
71
+ env:
72
+ PIP_USE_PEP517: '1'
73
+ steps:
74
+ - name: checkout
75
+ uses: actions/checkout@v4
76
+
77
+ - name: check for changed python files
78
+ if: ${{ inputs.always_run != true }}
79
+ id: changed-files
80
+ uses: tj-actions/changed-files@v42
81
+ with:
82
+ files_yaml: |
83
+ python:
84
+ - 'pyproject.toml'
85
+ - 'invokeai/**'
86
+ - '!invokeai/frontend/web/**'
87
+ - 'tests/**'
88
+
89
+ - name: setup python
90
+ if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
91
+ uses: actions/setup-python@v5
92
+ with:
93
+ python-version: ${{ matrix.python-version }}
94
+ cache: pip
95
+ cache-dependency-path: pyproject.toml
96
+
97
+ - name: install dependencies
98
+ if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
99
+ env:
100
+ PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
101
+ run: >
102
+ pip3 install --editable=".[test]"
103
+
104
+ - name: run pytest
105
+ if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
106
+ run: pytest
.github/workflows/release.yml ADDED
@@ -0,0 +1,108 @@
1
+ # Main release workflow. Triggered on tag push or manual trigger.
2
+ #
3
+ # - Runs all code checks and tests
4
+ # - Verifies the app version matches the tag version.
5
+ # - Builds the installer and the python build, uploading both as artifacts.
6
+ # - Publishes to TestPyPI and PyPI. Both are conditional on the previous steps passing and require a manual approval.
7
+ #
8
+ # See docs/RELEASE.md for more information on the release process.
9
+
10
+ name: release
11
+
12
+ on:
13
+ push:
14
+ tags:
15
+ - 'v*'
16
+ workflow_dispatch:
17
+
18
+ jobs:
19
+ check-version:
20
+ runs-on: ubuntu-latest
21
+ steps:
22
+ - name: checkout
23
+ uses: actions/checkout@v4
24
+
25
+ - name: check python version
26
+ uses: samuelcolvin/check-python-version@v4
27
+ id: check-python-version
28
+ with:
29
+ version_file_path: invokeai/version/invokeai_version.py
30
+
31
+ frontend-checks:
32
+ uses: ./.github/workflows/frontend-checks.yml
33
+ with:
34
+ always_run: true
35
+
36
+ frontend-tests:
37
+ uses: ./.github/workflows/frontend-tests.yml
38
+ with:
39
+ always_run: true
40
+
41
+ python-checks:
42
+ uses: ./.github/workflows/python-checks.yml
43
+ with:
44
+ always_run: true
45
+
46
+ python-tests:
47
+ uses: ./.github/workflows/python-tests.yml
48
+ with:
49
+ always_run: true
50
+
51
+ build:
52
+ uses: ./.github/workflows/build-installer.yml
53
+
54
+ publish-testpypi:
55
+ runs-on: ubuntu-latest
56
+ timeout-minutes: 5 # expected run time: <1 min
57
+ needs:
58
+ [
59
+ check-version,
60
+ frontend-checks,
61
+ frontend-tests,
62
+ python-checks,
63
+ python-tests,
64
+ build,
65
+ ]
66
+ environment:
67
+ name: testpypi
68
+ url: https://test.pypi.org/p/invokeai
69
+ permissions:
70
+ id-token: write
71
+ steps:
72
+ - name: download distribution from build job
73
+ uses: actions/download-artifact@v4
74
+ with:
75
+ name: dist
76
+ path: dist/
77
+
78
+ - name: publish distribution to TestPyPI
79
+ uses: pypa/gh-action-pypi-publish@release/v1
80
+ with:
81
+ repository-url: https://test.pypi.org/legacy/
82
+
83
+ publish-pypi:
84
+ runs-on: ubuntu-latest
85
+ timeout-minutes: 5 # expected run time: <1 min
86
+ needs:
87
+ [
88
+ check-version,
89
+ frontend-checks,
90
+ frontend-tests,
91
+ python-checks,
92
+ python-tests,
93
+ build,
94
+ ]
95
+ environment:
96
+ name: pypi
97
+ url: https://pypi.org/p/invokeai
98
+ permissions:
99
+ id-token: write
100
+ steps:
101
+ - name: download distribution from build job
102
+ uses: actions/download-artifact@v4
103
+ with:
104
+ name: dist
105
+ path: dist/
106
+
107
+ - name: publish distribution to PyPI
108
+ uses: pypa/gh-action-pypi-publish@release/v1
.gitignore ADDED
@@ -0,0 +1,190 @@
1
+ .idea/
2
+
3
+ # Byte-compiled / optimized / DLL files
4
+ __pycache__/
5
+ *.py[cod]
6
+ *$py.class
7
+
8
+ # C extensions
9
+ *.so
10
+
11
+ # emacs autosave and recovery files
12
+ *~
13
+ .#*
14
+
15
+ # Distribution / packaging
16
+ .Python
17
+ build/
18
+ develop-eggs/
19
+ dist/
20
+ downloads/
21
+ eggs/
22
+ .eggs/
23
+ lib64/
24
+ parts/
25
+ sdist/
26
+ var/
27
+ wheels/
28
+ pip-wheel-metadata/
29
+ share/python-wheels/
30
+ *.egg-info/
31
+ .installed.cfg
32
+ *.egg
33
+ MANIFEST
34
+
35
+ # PyInstaller
36
+ # Usually these files are written by a python script from a template
37
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
38
+ *.manifest
39
+ *.spec
40
+
41
+ # Installer logs
42
+ pip-log.txt
43
+ pip-delete-this-directory.txt
44
+
45
+ # Unit test / coverage reports
46
+ htmlcov/
47
+ .tox/
48
+ .nox/
49
+ .coveragerc
50
+ .coverage
51
+ .coverage.*
52
+ .cache
53
+ nosetests.xml
54
+ coverage.xml
55
+ cov.xml
56
+ *.cover
57
+ *.py,cover
58
+ .hypothesis/
59
+ .pytest_cache/
60
+ .pytest.ini
61
+ cover/
62
+ junit/
63
+ notes/
64
+
65
+ # Translations
66
+ *.mo
67
+ *.pot
68
+
69
+ # Django stuff:
70
+ *.log
71
+ local_settings.py
72
+ db.sqlite3
73
+ db.sqlite3-journal
74
+
75
+ # Flask stuff:
76
+ instance/
77
+ .webassets-cache
78
+
79
+ # Scrapy stuff:
80
+ .scrapy
81
+
82
+ # Sphinx documentation
83
+ docs/_build/
84
+
85
+ # PyBuilder
86
+ .pybuilder/
87
+ target/
88
+
89
+ # Jupyter Notebook
90
+ .ipynb_checkpoints
91
+
92
+ # IPython
93
+ profile_default/
94
+ ipython_config.py
95
+
96
+ # pyenv
97
+ # For a library or package, you might want to ignore these files since the code is
98
+ # intended to run in multiple environments; otherwise, check them in:
99
+ # .python-version
100
+ .python-version
101
+
102
+ # pipenv
103
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
104
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
105
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
106
+ # install all needed dependencies.
107
+ #Pipfile.lock
108
+
109
+ # poetry
110
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
111
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
112
+ # commonly ignored for libraries.
113
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
114
+ #poetry.lock
115
+
116
+ # pdm
117
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
118
+ #pdm.lock
119
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
120
+ # in version control.
121
+ # https://pdm.fming.dev/#use-with-ide
122
+ .pdm.toml
123
+
124
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
125
+ __pypackages__/
126
+
127
+ # Celery stuff
128
+ celerybeat-schedule
129
+ celerybeat.pid
130
+
131
+ # SageMath parsed files
132
+ *.sage.py
133
+
134
+ # Environments
135
+ .env
136
+ .venv*
137
+ env/
138
+ venv/
139
+ ENV/
140
+
141
+ # Spyder project settings
142
+ .spyderproject
143
+ .spyproject
144
+
145
+ # Rope project settings
146
+ .ropeproject
147
+
148
+ # mkdocs documentation
149
+ /site
150
+
151
+ # mypy
152
+ .mypy_cache/
153
+ .dmypy.json
154
+ dmypy.json
155
+
156
+ # Pyre type checker
157
+ .pyre/
158
+
159
+ # pytype static type analyzer
160
+ .pytype/
161
+
162
+ # Cython debug symbols
163
+ cython_debug/
164
+
165
+ # PyCharm
166
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
167
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
168
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
169
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
170
+ #.idea/
171
+
172
+ **/__pycache__/
173
+
174
+ # If it's a Mac
175
+ .DS_Store
176
+
177
+ # Let the frontend manage its own gitignore
178
+ !invokeai/frontend/web/*
179
+
180
+ # Scratch folder
181
+ .scratch/
182
+ .vscode/
183
+
184
+ # source installer files
185
+ installer/*zip
186
+ installer/install.bat
187
+ installer/install.sh
188
+ installer/update.bat
189
+ installer/update.sh
190
+ installer/InvokeAI-Installer/
.gitmodules ADDED
File without changes
.pre-commit-config.yaml ADDED
@@ -0,0 +1,24 @@
1
+ # See https://pre-commit.com/ for usage and config
2
+ repos:
3
+ - repo: local
4
+ hooks:
5
+ - id: black
6
+ name: black
7
+ stages: [commit]
8
+ language: system
9
+ entry: black
10
+ types: [python]
11
+
12
+ - id: flake8
13
+ name: flake8
14
+ stages: [commit]
15
+ language: system
16
+ entry: flake8
17
+ types: [python]
18
+
19
+ - id: isort
20
+ name: isort
21
+ stages: [commit]
22
+ language: system
23
+ entry: isort
24
+ types: [python]
.prettierrc.yaml ADDED
@@ -0,0 +1,13 @@
1
+ endOfLine: lf
2
+ tabWidth: 2
3
+ useTabs: false
4
+ singleQuote: true
5
+ quoteProps: as-needed
6
+ embeddedLanguageFormatting: auto
7
+ overrides:
8
+ - files: '*.md'
9
+ options:
10
+ proseWrap: preserve
11
+ printWidth: 80
12
+ parser: markdown
13
+ cursorOffset: -1
InvokeAI_Statement_of_Values.md ADDED
@@ -0,0 +1,84 @@
1
+ <img src="docs/assets/invoke_ai_banner.png" align="center">
2
+
3
+ Invoke-AI is a community of software developers, researchers, and user
4
+ interface experts who have come together on a voluntary basis to build
5
+ software tools which support cutting edge AI text-to-image
6
+ applications. This community is open to anyone who wishes to
7
+ contribute to the effort and has the skill and time to do so.
8
+
9
+ # Our Values
10
+
11
+ The InvokeAI team is a diverse community which includes individuals
12
+ from various parts of the world and many walks of life. Despite our
13
+ differences, we share a number of core values which we ask prospective
14
+ contributors to understand and respect. We believe:
15
+
16
+ 1. That Open Source Software is a positive force in the world. We
17
+ create software that can be used, reused, and redistributed, without
18
+ restrictions, under a straightforward Open Source license (MIT). We
19
+ believe that Open Source benefits society as a whole by increasing the
20
+ availability of high quality software to all.
21
+
22
+ 2. That those who create software should receive proper attribution
23
+ for their creative work. While we support the exchange and reuse of
24
+ Open Source Software, we feel strongly that the original authors of a
25
+ piece of code should receive credit for their contribution, and we
26
+ endeavor to do so whenever possible.
27
+
28
+ 3. That there is moral ambiguity surrounding AI-assisted art. We are
29
+ aware of the moral and ethical issues surrounding the release of the
30
+ Stable Diffusion model and similar products. We are aware that, due to
31
+ the composition of their training sets, current AI-generated image
32
+ models are biased against certain ethnic groups, cultural concepts of
33
+ beauty, ethnic stereotypes, and gender roles.
34
+
35
+ 1. We recognize the potential for harm to these groups that these biases
36
+ represent and trust that future AI models will take steps towards
37
+ reducing or eliminating the biases noted above, respect and give due
38
+ credit to the artists whose work is sourced, and call on developers
39
+ and users to favor these models over the older ones as they become
40
+ available.
41
+
42
+ 4. We are deeply committed to ensuring that this technology benefits
43
+ everyone, including artists. We see AI art not as a replacement for
44
+ the artist, but rather as a tool to empower them. With that
45
+ in mind, we are constantly debating how to build systems that put
46
+ artists’ needs first: tools which can be readily integrated into an
47
+ artist’s existing workflows and practices, enhancing their work and
48
+ helping them to push it further. Every decision we take as a team,
49
+ which includes several artists, aims to build towards that goal.
50
+
51
+ 5. That artificial intelligence can be a force for good in the world,
52
+ but must be used responsibly. Artificial intelligence technologies
53
+ have the potential to improve society, in everything from cancer care,
54
+ to customer service, to creative writing.
55
+
56
+ 1. While we do not believe that software should arbitrarily limit what
57
+ users can do with it, we recognize that when used irresponsibly, AI
58
+ has the potential to do much harm. Our Discord server is actively
59
+ moderated in order to minimize the potential of harm from
60
+ user-contributed images. In addition, we ask users of our software to
61
+ refrain from using it in any way that would cause mental, emotional or
62
+ physical harm to individuals and vulnerable populations including (but
63
+ not limited to) women; minors; ethnic minorities; religious groups;
64
+ members of LGBTQIA communities; and people with disabilities or
65
+ impairments.
66
+
67
+ 2. Note that some of the image generation AI models which the Invoke-AI
68
+ toolkit supports carry licensing agreements which impose restrictions
69
+ on how the model is used. We ask that our users read and agree to
70
+ these terms if they wish to make use of these models. These agreements
71
+ are distinct from the MIT license which applies to the InvokeAI
72
+ software and source code.
73
+
74
+ 6. That mutual respect is key to a healthy software development
75
+ community. Members of the InvokeAI community are expected to treat
76
+ each other with respect, beneficence, and empathy. Each of us has a
77
+ different background and a unique set of skills. We strive to help
78
+ each other grow and gain new skills, and we apportion expectations in
79
+ a way that balances the members' time, skillset, and interest
80
+ area. Disputes are resolved by open and honest communication.
81
+
82
+ ## Signature
83
+
84
+ This document has been collectively crafted and approved by the current InvokeAI team members, as of 28 Nov 2022: **lstein** (Lincoln Stein), **blessedcoolant**, **hipsterusername** (Kent Keirsey), **Kyle0654** (Kyle Schouviller), **damian0815**, **mauwii** (Matthias Wild), **Netsvetaev** (Artur Netsvetaev), **psychedelicious**, **tildebyte**, **keturn**, and **ebr** (Eugene Brodsky). Although individuals within the group may hold differing views on particular details and/or their implications, we are all in agreement about its fundamental statements, as well as their significance and importance to this project moving forward.
LICENSE ADDED
@@ -0,0 +1,176 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+
LICENSE-SD1+SD2.txt ADDED
@@ -0,0 +1,294 @@
1
+ Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors
2
+
3
+ CreativeML Open RAIL-M
4
+ dated August 22, 2022
5
+
6
+ Section I: PREAMBLE
7
+
8
+ Multimodal generative models are being widely adopted and used, and
9
+ have the potential to transform the way artists, among other
10
+ individuals, conceive and benefit from AI or ML technologies as a tool
11
+ for content creation.
12
+
13
+ Notwithstanding the current and potential benefits that these
14
+ artifacts can bring to society at large, there are also concerns about
15
+ potential misuses of them, either due to their technical limitations
16
+ or ethical considerations.
17
+
18
+ In short, this license strives for both the open and responsible
19
+ downstream use of the accompanying model. When it comes to the open
20
+ character, we took inspiration from open source permissive licenses
21
+ regarding the grant of IP rights. Referring to the downstream
22
+ responsible use, we added use-based restrictions not permitting the
23
+ use of the Model in very specific scenarios, in order for the licensor
24
+ to be able to enforce the license in case potential misuses of the
25
+ Model may occur. At the same time, we strive to promote open and
26
+ responsible research on generative models for art and content
27
+ generation.
28
+
29
+ Even though downstream derivative versions of the model could be
30
+ released under different licensing terms, the latter will always have
31
+ to include - at minimum - the same use-based restrictions as the ones
32
+ in the original license (this license). We believe in the intersection
33
+ between open and responsible AI development; thus, this License aims
34
+ to strike a balance between both in order to enable responsible
35
+ open-science in the field of AI.
36
+
37
+ This License governs the use of the model (and its derivatives) and is
38
+ informed by the model card associated with the model.
39
+
40
+ NOW THEREFORE, You and Licensor agree as follows:
41
+
42
+ 1. Definitions
43
+
44
+ - "License" means the terms and conditions for use, reproduction, and
45
+ Distribution as defined in this document.
46
+
47
+ - "Data" means a collection of information and/or content extracted
48
+ from the dataset used with the Model, including to train, pretrain,
49
+ or otherwise evaluate the Model. The Data is not licensed under this
50
+ License.
51
+
52
+ - "Output" means the results of operating a Model as embodied in
53
+ informational content resulting therefrom.
54
+
55
+ - "Model" means any accompanying machine-learning based assemblies
56
+ (including checkpoints), consisting of learnt weights, parameters
57
+ (including optimizer states), corresponding to the model
58
+ architecture as embodied in the Complementary Material, that have
59
+ been trained or tuned, in whole or in part on the Data, using the
60
+ Complementary Material.
61
+
62
+ - "Derivatives of the Model" means all modifications to the Model,
63
+ works based on the Model, or any other model which is created or
64
+ initialized by transfer of patterns of the weights, parameters,
65
+ activations or output of the Model, to the other model, in order to
66
+ cause the other model to perform similarly to the Model, including -
67
+ but not limited to - distillation methods entailing the use of
68
+ intermediate data representations or methods based on the generation
69
+ of synthetic data by the Model for training the other model.
70
+
71
+ - "Complementary Material" means the accompanying source code and
72
+ scripts used to define, run, load, benchmark or evaluate the Model,
73
+ and used to prepare data for training or evaluation, if any. This
74
+ includes any accompanying documentation, tutorials, examples, etc,
75
+ if any.
76
+
77
+ - "Distribution" means any transmission, reproduction, publication or
78
+ other sharing of the Model or Derivatives of the Model to a third
79
+ party, including providing the Model as a hosted service made
80
+ available by electronic or other remote means - e.g. API-based or
81
+ web access.
82
+
83
+ - "Licensor" means the copyright owner or entity authorized by the
84
+ copyright owner that is granting the License, including the persons
85
+ or entities that may have rights in the Model and/or distributing
86
+ the Model.
87
+
88
+ - "You" (or "Your") means an individual or Legal Entity exercising
89
+ permissions granted by this License and/or making use of the Model
90
+ for whichever purpose and in any field of use, including usage of
91
+ the Model in an end-use application - e.g. chatbot, translator,
92
+ image generator.
93
+
94
+ - "Third Parties" means individuals or legal entities that are not
95
+ under common control with Licensor or You.
96
+
97
+ - "Contribution" means any work of authorship, including the original
98
+ version of the Model and any modifications or additions to that
99
+ Model or Derivatives of the Model thereof, that is intentionally
100
+ submitted to Licensor for inclusion in the Model by the copyright
101
+ owner or by an individual or Legal Entity authorized to submit on
102
+ behalf of the copyright owner. For the purposes of this definition,
103
+ "submitted" means any form of electronic, verbal, or written
104
+ communication sent to the Licensor or its representatives, including
105
+ but not limited to communication on electronic mailing lists, source
106
+ code control systems, and issue tracking systems that are managed
107
+ by, or on behalf of, the Licensor for the purpose of discussing and
108
+ improving the Model, but excluding communication that is
109
+ conspicuously marked or otherwise designated in writing by the
110
+ copyright owner as "Not a Contribution."
111
+
112
+ - "Contributor" means Licensor and any individual or Legal Entity on
113
+ behalf of whom a Contribution has been received by Licensor and
114
+ subsequently incorporated within the Model.
115
+
116
+ Section II: INTELLECTUAL PROPERTY RIGHTS
117
+
118
+ Both copyright and patent grants apply to the Model, Derivatives of
119
+ the Model and Complementary Material. The Model and Derivatives of the
120
+ Model are subject to additional terms as described in Section III.
121
+
122
+ 2. Grant of Copyright License. Subject to the terms and conditions of
123
+ this License, each Contributor hereby grants to You a perpetual,
124
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
125
+ copyright license to reproduce, prepare, publicly display, publicly
126
+ perform, sublicense, and distribute the Complementary Material, the
127
+ Model, and Derivatives of the Model.
128
+
129
+ 3. Grant of Patent License. Subject to the terms and conditions of
130
+ this License and where and as applicable, each Contributor hereby
131
+ grants to You a perpetual, worldwide, non-exclusive, no-charge,
132
+ royalty-free, irrevocable (except as stated in this paragraph) patent
133
+ license to make, have made, use, offer to sell, sell, import, and
134
+ otherwise transfer the Model and the Complementary Material, where
135
+ such license applies only to those patent claims licensable by such
136
+ Contributor that are necessarily infringed by their Contribution(s)
137
+ alone or by combination of their Contribution(s) with the Model to
138
+ which such Contribution(s) was submitted. If You institute patent
139
+ litigation against any entity (including a cross-claim or counterclaim
140
+ in a lawsuit) alleging that the Model and/or Complementary Material or
141
+ a Contribution incorporated within the Model and/or Complementary
142
+ Material constitutes direct or contributory patent infringement, then
143
+ any patent licenses granted to You under this License for the Model
144
+ and/or Work shall terminate as of the date such litigation is asserted
145
+ or filed.
146
+
147
+ Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
148
+
149
+ 4. Distribution and Redistribution. You may host for Third Party
150
+ remote access purposes (e.g. software-as-a-service), reproduce and
151
+ distribute copies of the Model or Derivatives of the Model thereof in
152
+ any medium, with or without modifications, provided that You meet the
153
+ following conditions: Use-based restrictions as referenced in
154
+ paragraph 5 MUST be included as an enforceable provision by You in any
155
+ type of legal agreement (e.g. a license) governing the use and/or
156
+ distribution of the Model or Derivatives of the Model, and You shall
157
+ give notice to subsequent users You Distribute to, that the Model or
158
+ Derivatives of the Model are subject to paragraph 5. This provision
159
+ does not apply to the use of Complementary Material. You must give
160
+ any Third Party recipients of the Model or Derivatives of the Model a
161
+ copy of this License; You must cause any modified files to carry
162
+ prominent notices stating that You changed the files; You must retain
163
+ all copyright, patent, trademark, and attribution notices excluding
164
+ those notices that do not pertain to any part of the Model,
165
+ Derivatives of the Model. You may add Your own copyright statement to
166
+ Your modifications and may provide additional or different license
167
+ terms and conditions - respecting paragraph 4.a. - for use,
168
+ reproduction, or Distribution of Your modifications, or for any such
169
+ Derivatives of the Model as a whole, provided Your use, reproduction,
170
+ and Distribution of the Model otherwise complies with the conditions
171
+ stated in this License.
172
+
173
+ 5. Use-based restrictions. The restrictions set forth in Attachment A
174
+ are considered Use-based restrictions. Therefore You cannot use the
175
+ Model and the Derivatives of the Model for the specified restricted
176
+ uses. You may use the Model subject to this License, including only
177
+ for lawful purposes and in accordance with the License. Use may
178
+ include creating any content with, finetuning, updating, running,
179
+ training, evaluating and/or reparametrizing the Model. You shall
180
+ require all of Your users who use the Model or a Derivative of the
181
+ Model to comply with the terms of this paragraph (paragraph 5).
182
+
183
+ 6. The Output You Generate. Except as set forth herein, Licensor
184
+ claims no rights in the Output You generate using the Model. You are
185
+ accountable for the Output you generate and its subsequent uses. No
186
+ use of the output can contravene any provision as stated in the
187
+ License.
188
+
189
+ Section IV: OTHER PROVISIONS
190
+
191
+ 7. Updates and Runtime Restrictions. To the maximum extent permitted
192
+ by law, Licensor reserves the right to restrict (remotely or
193
+ otherwise) usage of the Model in violation of this License, update the
194
+ Model through electronic means, or modify the Output of the Model
195
+ based on updates. You shall undertake reasonable efforts to use the
196
+ latest version of the Model.
197
+
198
+ 8. Trademarks and related. Nothing in this License permits You to make
199
+ use of Licensors’ trademarks, trade names, logos or to otherwise
200
+ suggest endorsement or misrepresent the relationship between the
201
+ parties; and any rights not expressly granted herein are reserved by
202
+ the Licensors.
203
+
204
+ 9. Disclaimer of Warranty. Unless required by applicable law or agreed
205
+ to in writing, Licensor provides the Model and the Complementary
206
+ Material (and each Contributor provides its Contributions) on an "AS
207
+ IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
208
+ express or implied, including, without limitation, any warranties or
209
+ conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR
210
+ A PARTICULAR PURPOSE. You are solely responsible for determining the
211
+ appropriateness of using or redistributing the Model, Derivatives of
212
+ the Model, and the Complementary Material and assume any risks
213
+ associated with Your exercise of permissions under this License.
214
+
215
+ 10. Limitation of Liability. In no event and under no legal theory,
216
+ whether in tort (including negligence), contract, or otherwise, unless
217
+ required by applicable law (such as deliberate and grossly negligent
218
+ acts) or agreed to in writing, shall any Contributor be liable to You
219
+ for damages, including any direct, indirect, special, incidental, or
220
+ consequential damages of any character arising as a result of this
221
+ License or out of the use or inability to use the Model and the
222
+ Complementary Material (including but not limited to damages for loss
223
+ of goodwill, work stoppage, computer failure or malfunction, or any
224
+ and all other commercial damages or losses), even if such Contributor
225
+ has been advised of the possibility of such damages.
226
+
227
+ 11. Accepting Warranty or Additional Liability. While redistributing
228
+ the Model, Derivatives of the Model and the Complementary Material
229
+ thereof, You may choose to offer, and charge a fee for, acceptance of
230
+ support, warranty, indemnity, or other liability obligations and/or
231
+ rights consistent with this License. However, in accepting such
232
+ obligations, You may act only on Your own behalf and on Your sole
233
+ responsibility, not on behalf of any other Contributor, and only if
234
+ You agree to indemnify, defend, and hold each Contributor harmless for
235
+ any liability incurred by, or claims asserted against, such
236
+ Contributor by reason of your accepting any such warranty or
237
+ additional liability.
238
+
239
+ 12. If any provision of this License is held to be invalid, illegal or
240
+ unenforceable, the remaining provisions shall be unaffected thereby
241
+ and remain valid as if such provision had not been set forth herein.
242
+
243
+ END OF TERMS AND CONDITIONS
244
+
245
+
246
+
247
+
248
+ Attachment A
249
+
250
+ Use Restrictions
251
+
252
+ You agree not to use the Model or Derivatives of the Model:
253
+
254
+ - In any way that violates any applicable national, federal, state,
255
+ local or international law or regulation;
256
+
257
+ - For the purpose of exploiting, harming or attempting to exploit or
258
+ harm minors in any way;
259
+
260
+ - To generate or disseminate verifiably false information and/or
261
+ content with the purpose of harming others;
262
+
263
+ - To generate or disseminate personal identifiable information that
264
+ can be used to harm an individual;
265
+
266
+ - To defame, disparage or otherwise harass others;
267
+
268
+ - For fully automated decision making that adversely impacts an
269
+ individual’s legal rights or otherwise creates or modifies a
270
+ binding, enforceable obligation;
271
+
272
+ - For any use intended to or which has the effect of discriminating
273
+ against or harming individuals or groups based on online or offline
274
+ social behavior or known or predicted personal or personality
275
+ characteristics;
276
+
277
+ - To exploit any of the vulnerabilities of a specific group of persons
278
+ based on their age, social, physical or mental characteristics, in
279
+ order to materially distort the behavior of a person pertaining to
280
+ that group in a manner that causes or is likely to cause that person
281
+ or another person physical or psychological harm;
282
+
283
+ - For any use intended to or which has the effect of discriminating
284
+ against individuals or groups based on legally protected
285
+ characteristics or categories;
286
+
287
+ - To provide medical advice and medical results interpretation;
288
+
289
+ - To generate or disseminate information for the purpose to be used
290
+ for administration of justice, law enforcement, immigration or
291
+ asylum processes, such as predicting an individual will commit
292
+ fraud/crime commitment (e.g. by text profiling, drawing causal
293
+ relationships between assertions made in documents, indiscriminate
294
+ and arbitrarily-targeted use).
LICENSE-SDXL.txt ADDED
@@ -0,0 +1,290 @@
1
+ Copyright (c) 2023 Stability AI
2
+ CreativeML Open RAIL++-M License dated July 26, 2023
3
+
4
+ Section I: PREAMBLE
5
+
6
+ Multimodal generative models are being widely adopted and used, and
7
+ have the potential to transform the way artists, among other
8
+ individuals, conceive and benefit from AI or ML technologies as a tool
9
+ for content creation.
10
+
11
+ Notwithstanding the current and potential benefits that these
12
+ artifacts can bring to society at large, there are also concerns about
13
+ potential misuses of them, either due to their technical limitations
14
+ or ethical considerations.
15
+
16
+ In short, this license strives for both the open and responsible
17
+ downstream use of the accompanying model. When it comes to the open
18
+ character, we took inspiration from open source permissive licenses
19
+ regarding the grant of IP rights. Referring to the downstream
20
+ responsible use, we added use-based restrictions not permitting the
21
+ use of the model in very specific scenarios, in order for the licensor
22
+ to be able to enforce the license in case potential misuses of the
23
+ Model may occur. At the same time, we strive to promote open and
24
+ responsible research on generative models for art and content
25
+ generation.
26
+
27
+ Even though downstream derivative versions of the model could be
28
+ released under different licensing terms, the latter will always have
29
+ to include - at minimum - the same use-based restrictions as the ones
30
+ in the original license (this license). We believe in the intersection
31
+ between open and responsible AI development; thus, this agreement aims
32
+ to strike a balance between both in order to enable responsible
33
+ open-science in the field of AI.
34
+
35
+ This CreativeML Open RAIL++-M License governs the use of the model
36
+ (and its derivatives) and is informed by the model card associated
37
+ with the model.
38
+
39
+ NOW THEREFORE, You and Licensor agree as follows:
40
+
41
+ Definitions
42
+
43
+ "License" means the terms and conditions for use, reproduction, and
44
+ Distribution as defined in this document.
45
+
46
+ "Data" means a collection of information and/or content extracted from
47
+ the dataset used with the Model, including to train, pretrain, or
48
+ otherwise evaluate the Model. The Data is not licensed under this
49
+ License.
50
+
51
+ "Output" means the results of operating a Model as embodied in
52
+ informational content resulting therefrom.
53
+
54
+ "Model" means any accompanying machine-learning based assemblies
55
+ (including checkpoints), consisting of learnt weights, parameters
56
+ (including optimizer states), corresponding to the model architecture
57
+ as embodied in the Complementary Material, that have been trained or
58
+ tuned, in whole or in part on the Data, using the Complementary
59
+ Material.
60
+
61
+ "Derivatives of the Model" means all modifications to the Model, works
62
+ based on the Model, or any other model which is created or initialized
63
+ by transfer of patterns of the weights, parameters, activations or
64
+ output of the Model, to the other model, in order to cause the other
65
+ model to perform similarly to the Model, including - but not limited
66
+ to - distillation methods entailing the use of intermediate data
67
+ representations or methods based on the generation of synthetic data
68
+ by the Model for training the other model.
69
+
70
+ "Complementary Material" means the accompanying source code and
71
+ scripts used to define, run, load, benchmark or evaluate the Model,
72
+ and used to prepare data for training or evaluation, if any. This
73
+ includes any accompanying documentation, tutorials, examples, etc, if
74
+ any.
75
+
76
+ "Distribution" means any transmission, reproduction, publication or
77
+ other sharing of the Model or Derivatives of the Model to a third
78
+ party, including providing the Model as a hosted service made
79
+ available by electronic or other remote means - e.g. API-based or web
80
+ access.
81
+
82
+ "Licensor" means the copyright owner or entity authorized by the
83
+ copyright owner that is granting the License, including the persons or
84
+ entities that may have rights in the Model and/or distributing the
85
+ Model.
86
+
87
+ "You" (or "Your") means an individual or Legal Entity exercising
88
+ permissions granted by this License and/or making use of the Model for
89
+ whichever purpose and in any field of use, including usage of the
90
+ Model in an end-use application - e.g. chatbot, translator, image
91
+ generator.
92
+
93
+ "Third Parties" means individuals or legal entities that are not under
94
+ common control with Licensor or You.
95
+
96
+ "Contribution" means any work of authorship, including the original
97
+ version of the Model and any modifications or additions to that Model
98
+ or Derivatives of the Model thereof, that is intentionally submitted
99
+ to Licensor for inclusion in the Model by the copyright owner or by an
100
+ individual or Legal Entity authorized to submit on behalf of the
101
+ copyright owner. For the purposes of this definition, "submitted"
102
+ means any form of electronic, verbal, or written communication sent to
103
+ the Licensor or its representatives, including but not limited to
104
+ communication on electronic mailing lists, source code control
105
+ systems, and issue tracking systems that are managed by, or on behalf
106
+ of, the Licensor for the purpose of discussing and improving the
107
+ Model, but excluding communication that is conspicuously marked or
108
+ otherwise designated in writing by the copyright owner as "Not a
109
+ Contribution."
110
+
111
+ "Contributor" means Licensor and any individual or Legal Entity on
112
+ behalf of whom a Contribution has been received by Licensor and
113
+ subsequently incorporated within the Model.
114
+
115
+ Section II: INTELLECTUAL PROPERTY RIGHTS
116
+
117
+ Both copyright and patent grants apply to the Model, Derivatives of
118
+ the Model and Complementary Material. The Model and Derivatives of the
119
+ Model are subject to additional terms as described in
120
+
121
+ Section III.
122
+
123
+ Grant of Copyright License. Subject to the terms and conditions of
124
+ this License, each Contributor hereby grants to You a perpetual,
125
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
126
+ copyright license to reproduce, prepare, publicly display, publicly
127
+ perform, sublicense, and distribute the Complementary Material, the
128
+ Model, and Derivatives of the Model.
129
+
130
+ Grant of Patent License. Subject to the terms and conditions of this
131
+ License and where and as applicable, each Contributor hereby grants to
132
+ You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
133
+ irrevocable (except as stated in this paragraph) patent license to
134
+ make, have made, use, offer to sell, sell, import, and otherwise
135
+ transfer the Model and the Complementary Material, where such license
136
+ applies only to those patent claims licensable by such Contributor
137
+ that are necessarily infringed by their Contribution(s) alone or by
138
+ combination of their Contribution(s) with the Model to which such
139
+ Contribution(s) was submitted. If You institute patent litigation
140
+ against any entity (including a cross-claim or counterclaim in a
141
+ lawsuit) alleging that the Model and/or Complementary Material or a
142
+ Contribution incorporated within the Model and/or Complementary
143
+ Material constitutes direct or contributory patent infringement, then
144
+ any patent licenses granted to You under this License for the Model
145
+ and/or Work shall terminate as of the date such litigation is asserted
146
+ or filed.
147
+
148
+ Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
149
+
150
+ Distribution and Redistribution. You may host for Third Party remote
151
+ access purposes (e.g. software-as-a-service), reproduce and distribute
152
+ copies of the Model or Derivatives of the Model thereof in any medium,
153
+ with or without modifications, provided that You meet the following
154
+ conditions: Use-based restrictions as referenced in paragraph 5 MUST
155
+ be included as an enforceable provision by You in any type of legal
156
+ agreement (e.g. a license) governing the use and/or distribution of
157
+ the Model or Derivatives of the Model, and You shall give notice to
158
+ subsequent users You Distribute to, that the Model or Derivatives of
159
+ the Model are subject to paragraph 5. This provision does not apply to
160
+ the use of Complementary Material. You must give any Third Party
161
+ recipients of the Model or Derivatives of the Model a copy of this
162
+ License; You must cause any modified files to carry prominent notices
163
+ stating that You changed the files; You must retain all copyright,
164
+ patent, trademark, and attribution notices excluding those notices
165
+ that do not pertain to any part of the Model, Derivatives of the
166
+ Model. You may add Your own copyright statement to Your modifications
167
+ and may provide additional or different license terms and conditions -
168
+ respecting paragraph 4.a. - for use, reproduction, or Distribution of
169
+ Your modifications, or for any such Derivatives of the Model as a
170
+ whole, provided Your use, reproduction, and Distribution of the Model
171
+ otherwise complies with the conditions stated in this License.
172
+
173
+ Use-based restrictions. The restrictions set forth in Attachment A are
174
+ considered Use-based restrictions. Therefore You cannot use the Model
175
+ and the Derivatives of the Model for the specified restricted
176
+ uses. You may use the Model subject to this License, including only
177
+ for lawful purposes and in accordance with the License. Use may
178
+ include creating any content with, finetuning, updating, running,
179
+ training, evaluating and/or reparametrizing the Model. You shall
180
+ require all of Your users who use the Model or a Derivative of the
181
+ Model to comply with the terms of this paragraph (paragraph 5).
182
+
183
+ The Output You Generate. Except as set forth herein, Licensor claims
184
+ no rights in the Output You generate using the Model. You are
185
+ accountable for the Output you generate and its subsequent uses. No
186
+ use of the output can contravene any provision as stated in the
187
+ License.
188
+
189
+ Section IV: OTHER PROVISIONS
190
+
191
+ Updates and Runtime Restrictions. To the maximum extent permitted by
192
+ law, Licensor reserves the right to restrict (remotely or otherwise)
193
+ usage of the Model in violation of this License.
194
+
195
+ Trademarks and related. Nothing in this License permits You to make
196
+ use of Licensors’ trademarks, trade names, logos or to otherwise
197
+ suggest endorsement or misrepresent the relationship between the
198
+ parties; and any rights not expressly granted herein are reserved by
199
+ the Licensors.
200
+
201
+ Disclaimer of Warranty. Unless required by applicable law or agreed to
202
+ in writing, Licensor provides the Model and the Complementary Material
203
+ (and each Contributor provides its Contributions) on an "AS IS" BASIS,
204
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
205
+ implied, including, without limitation, any warranties or conditions
206
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
207
+ PARTICULAR PURPOSE. You are solely responsible for determining the
208
+ appropriateness of using or redistributing the Model, Derivatives of
209
+ the Model, and the Complementary Material and assume any risks
210
+ associated with Your exercise of permissions under this License.
211
+
212
+ Limitation of Liability. In no event and under no legal theory,
213
+ whether in tort (including negligence), contract, or otherwise, unless
214
+ required by applicable law (such as deliberate and grossly negligent
215
+ acts) or agreed to in writing, shall any Contributor be liable to You
216
+ for damages, including any direct, indirect, special, incidental, or
217
+ consequential damages of any character arising as a result of this
218
+ License or out of the use or inability to use the Model and the
219
+ Complementary Material (including but not limited to damages for loss
220
+ of goodwill, work stoppage, computer failure or malfunction, or any
221
+ and all other commercial damages or losses), even if such Contributor
222
+ has been advised of the possibility of such damages.
223
+
224
+ Accepting Warranty or Additional Liability. While redistributing the
225
+ Model, Derivatives of the Model and the Complementary Material
226
+ thereof, You may choose to offer, and charge a fee for, acceptance of
227
+ support, warranty, indemnity, or other liability obligations and/or
228
+ rights consistent with this License. However, in accepting such
229
+ obligations, You may act only on Your own behalf and on Your sole
230
+ responsibility, not on behalf of any other Contributor, and only if
231
+ You agree to indemnify, defend, and hold each Contributor harmless for
232
+ any liability incurred by, or claims asserted against, such
233
+ Contributor by reason of your accepting any such warranty or
234
+ additional liability.
235
+
236
+ If any provision of this License is held to be invalid, illegal or
237
+ unenforceable, the remaining provisions shall be unaffected thereby
238
+ and remain valid as if such provision had not been set forth herein.
239
+
240
+
241
+ END OF TERMS AND CONDITIONS
242
+
243
+ Attachment A
244
+
245
+ Use Restrictions
246
+
247
+ You agree not to use the Model or Derivatives of the Model:
248
+
249
+ * In any way that violates any applicable national, federal, state,
250
+ local or international law or regulation;
251
+
252
+ * For the purpose of exploiting, harming or attempting to exploit or
253
+ harm minors in any way;
254
+
255
+ * To generate or disseminate verifiably false information and/or
256
+ content with the purpose of harming others;
257
+
258
+ * To generate or disseminate personal identifiable information that
259
+ can be used to harm an individual;
260
+
261
+ * To defame, disparage or otherwise harass others;
262
+
263
+ * For fully automated decision making that adversely impacts an
264
+ individual’s legal rights or otherwise creates or modifies a
265
+ binding, enforceable obligation;
266
+
267
+ * For any use intended to or which has the effect of discriminating
268
+ against or harming individuals or groups based on online or offline
269
+ social behavior or known or predicted personal or personality
270
+ characteristics;
271
+
272
+ * To exploit any of the vulnerabilities of a specific group of persons
273
+ based on their age, social, physical or mental characteristics, in
274
+ order to materially distort the behavior of a person pertaining to
275
+ that group in a manner that causes or is likely to cause that person
276
+ or another person physical or psychological harm;
277
+
278
+ * For any use intended to or which has the effect of discriminating
279
+ against individuals or groups based on legally protected
280
+ characteristics or categories;
281
+
282
+ * To provide medical advice and medical results interpretation;
283
+
284
+ * To generate or disseminate information for the purpose to be used
285
+ for administration of justice, law enforcement, immigration or
286
+ asylum processes, such as predicting an individual will commit
287
+ fraud/crime commitment (e.g. by text profiling, drawing causal
288
+ relationships between assertions made in documents, indiscriminate
289
+ and arbitrarily-targeted use).
290
+
Makefile ADDED
@@ -0,0 +1,82 @@
1
+ # simple Makefile with scripts that are otherwise hard to remember
2
+ # to use, run from the repo root `make <command>`
3
+
4
+ default: help
5
+
6
+ help:
7
+ @echo Developer commands:
8
+ @echo
9
+ @echo "ruff Run ruff, fixing any safely-fixable errors and formatting"
10
+ @echo "ruff-unsafe Run ruff, fixing all fixable errors and formatting"
11
+ @echo "mypy Run mypy using the config in pyproject.toml to identify type mismatches and other coding errors"
12
+ @echo "mypy-all Run mypy ignoring the config in pyproject.tom but still ignoring missing imports"
13
+ @echo "test Run the unit tests."
14
+ @echo "update-config-docstring Update the app's config docstring so mkdocs can autogenerate it correctly."
15
+ @echo "frontend-install Install the pnpm modules needed for the front end"
16
+ @echo "frontend-build Build the frontend in order to run on localhost:9090"
17
+ @echo "frontend-dev Run the frontend in developer mode on localhost:5173"
18
+ @echo "frontend-typegen Generate types for the frontend from the OpenAPI schema"
19
+ @echo "installer-zip Build the installer .zip file for the current version"
20
+ @echo "tag-release Tag the GitHub repository with the current version (use at release time only!)"
21
+ @echo "openapi Generate the OpenAPI schema for the app, outputting to stdout"
22
+ @echo "docs Serve the mkdocs site with live reload"
23
+
24
+ # Runs ruff, fixing any safely-fixable errors and formatting
25
+ ruff:
26
+ ruff check . --fix
27
+ ruff format .
28
+
29
+ # Runs ruff, fixing all errors it can fix and formatting
30
+ ruff-unsafe:
31
+ ruff check . --fix --unsafe-fixes
32
+ ruff format .
33
+
34
+ # Runs mypy, using the config in pyproject.toml
35
+ mypy:
36
+ mypy scripts/invokeai-web.py
37
+
38
+ # Runs mypy, ignoring the config in pyproject.toml but still ignoring missing (untyped) imports
39
+ # (many files are ignored by the config, so this is useful for checking all files)
40
+ mypy-all:
41
+ mypy scripts/invokeai-web.py --config-file= --ignore-missing-imports
42
+
43
+ # Run the unit tests
44
+ test:
45
+ pytest ./tests
46
+
47
+ # Update config docstring
48
+ update-config-docstring:
49
+ python scripts/update_config_docstring.py
50
+
51
+ # Install the pnpm modules needed for the front end
52
+ frontend-install:
53
+ rm -rf invokeai/frontend/web/node_modules
54
+ cd invokeai/frontend/web && pnpm install
55
+
56
+ # Build the frontend
57
+ frontend-build:
58
+ cd invokeai/frontend/web && pnpm build
59
+
60
+ # Run the frontend in dev mode
61
+ frontend-dev:
62
+ cd invokeai/frontend/web && pnpm dev
63
+
64
+ frontend-typegen:
65
+ cd invokeai/frontend/web && python ../../../scripts/generate_openapi_schema.py | pnpm typegen
66
+
67
+ # Installer zip file
68
+ installer-zip:
69
+ cd installer && ./create_installer.sh
70
+
71
+ # Tag the release
72
+ tag-release:
73
+ cd installer && ./tag_release.sh
74
+
75
+ # Generate the OpenAPI Schema for the app
76
+ openapi:
77
+ python scripts/generate_openapi_schema.py
78
+
79
+ # Serve the mkdocs site w/ live reload
80
+ .PHONY: docs
81
+ docs:
82
+ mkdocs serve
README.md ADDED
@@ -0,0 +1,157 @@
1
+ <div align="center">
2
+
3
+ ![project hero](https://github.com/invoke-ai/InvokeAI/assets/31807370/6e3728c7-e90e-4711-905c-3b55844ff5be)
4
+
5
+ # Invoke - Professional Creative AI Tools for Visual Media
6
+
7
+ #### To learn more about Invoke, or implement our Business solutions, visit [invoke.com]
8
+
9
+ [![discord badge]][discord link] [![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link] [![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link] [![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
10
+
11
+ </div>
12
+
13
+ Invoke is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. Invoke offers an industry-leading web-based UI and serves as the foundation for multiple commercial products.
14
+
15
+ Invoke is available in two editions:
16
+
17
+ | **Community Edition** | **Professional Edition** |
18
+ |----------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------|
19
+ | **For users looking for a locally installed, self-hosted and self-managed service** | **For users or teams looking for a cloud-hosted, fully managed service** |
20
+ | - Free to use under a commercially-friendly license | - Monthly subscription fee with three different plan levels |
21
+ | - Download and install on compatible hardware | - Offers additional benefits, including multi-user support, improved model training, and more |
22
+ | - Includes all core studio features: generate, refine, iterate on images, and build workflows | - Hosted in the cloud for easy, secure model access and scalability |
23
+ | Quick Start -> [Installation and Updates][installation docs] | More Information -> [www.invoke.com/pricing](https://www.invoke.com/pricing) |
24
+
25
+
26
+ ![Highlighted Features - Canvas and Workflows](https://github.com/invoke-ai/InvokeAI/assets/31807370/708f7a82-084f-4860-bfbe-e2588c53548d)
27
+
28
+ # Documentation
29
+ | **Quick Links** |
30
+ |----------------------------------------------------------------------------------------------------------------------------|
31
+ | [Installation and Updates][installation docs] - [Documentation and Tutorials][docs home] - [Bug Reports][github issues] - [Contributing][contributing docs] |
32
+
33
+ </div>
34
+
35
+ ## Quick Start
36
+
37
+ 1. Download and unzip the installer from the bottom of the [latest release][latest release link].
38
+ 2. Run the installer script.
39
+
40
+ - **Windows**: Double-click on the `install.bat` script.
41
+ - **macOS**: Open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press enter.
42
+ - **Linux**: Run `install.sh`.
43
+
44
+ 3. When prompted, enter a location for the install and select your GPU type.
45
+ 4. Once the install finishes, find the directory you selected during install. The default location is `C:\Users\Username\invokeai` for Windows or `~/invokeai` for Linux/macOS.
46
+ 5. Run the launcher script (`invoke.bat` for Windows, `invoke.sh` for macOS and Linux) the same way you ran the installer script in step 2.
47
+ 6. Select option 1 to start the application. Once it starts up, open your browser and go to <http://localhost:9090>.
48
+ 7. Open the model manager tab to install a starter model and then you'll be ready to generate.
49
+
50
+ More details, including hardware requirements and manual install instructions, are available in the [installation documentation][installation docs].
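+
+ For example, on Linux or macOS the steps above boil down to roughly the following (a sketch only; the paths assume the default install location):
+
+ ```bash
+ # Run the installer from the unzipped release, then start the launcher from the install directory
+ ./install.sh
+ cd ~/invokeai
+ ./invoke.sh   # choose option 1, then open http://localhost:9090 in your browser
+ ```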
51
+
52
+ ## Docker Container
53
+
54
+ We publish official container images in Github Container Registry: https://github.com/invoke-ai/InvokeAI/pkgs/container/invokeai. Both CUDA and ROCm images are available. Check the above link for relevant tags.
55
+
56
+ > [!IMPORTANT]
57
+ > Ensure that Docker is set up to use the GPU. Refer to [NVIDIA][nvidia docker docs] or [AMD][amd docker docs] documentation.
58
+
59
+ ### Generate!
60
+
61
+ Run the container, modifying the command as necessary:
62
+
63
+ ```bash
64
+ docker run --runtime=nvidia --gpus=all --publish 9090:9090 ghcr.io/invoke-ai/invokeai
65
+ ```
66
+
67
+ Then open `http://localhost:9090` and install some models using the Model Manager tab to begin generating.
68
+
69
+ For ROCm, add `--device /dev/kfd --device /dev/dri` to the `docker run` command.
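+
+ A minimal ROCm invocation might therefore look like this (the image tag here is an assumption; check the registry link above for the current ROCm tag):
+
+ ```bash
+ # AMD GPUs: pass the KFD and DRI devices through instead of using the NVIDIA runtime
+ docker run --device /dev/kfd --device /dev/dri --publish 9090:9090 ghcr.io/invoke-ai/invokeai:main-rocm
+ ```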
70
+
71
+ ### Persist your data
72
+
73
+ You will likely want to persist your workspace outside of the container. Use the `--volume /home/myuser/invokeai:/invokeai` flag to mount some local directory (using its **absolute** path) to the `/invokeai` path inside the container. Your generated images and models will reside there. You can use this directory with other InvokeAI installations, or switch between runtime directories as needed.
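+
+ Combined with the earlier command, a persistent setup might look like the following sketch (the host path is a placeholder; substitute your own absolute path):
+
+ ```bash
+ # Mount an absolute host path as the container's /invokeai runtime directory
+ docker run --runtime=nvidia --gpus=all \
+   --publish 9090:9090 \
+   --volume /home/myuser/invokeai:/invokeai \
+   ghcr.io/invoke-ai/invokeai
+ ```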
74
+
75
+ ### DIY
76
+
77
+ Build your own image and customize the environment to match your needs using our `docker-compose` stack. See [README.md](./docker/README.md) in the [docker](./docker) directory.
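+
+ The authoritative steps are in that README; as a rough, assumed outline, the compose workflow is typically along these lines (`.env.sample` ships in the `docker/` directory; copying it to `.env` is an assumption):
+
+ ```bash
+ # Assumed workflow: configure the environment, then build and start the stack
+ cd docker
+ cp .env.sample .env
+ docker compose up --build
+ ```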
78
+
79
+ ## Troubleshooting, FAQ and Support
80
+
81
+ Please review our [FAQ][faq] for solutions to common installation problems and other issues.
82
+
83
+ For more help, please join our [Discord][discord link].
84
+
85
+ ## Features
86
+
87
+ Full details on features can be found in [our documentation][features docs].
88
+
89
+ ### Web Server & UI
90
+
91
+ Invoke runs a locally hosted web server & React UI with an industry-leading user experience.
92
+
93
+ ### Unified Canvas
94
+
95
+ The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/out-painting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more.
96
+
97
+ ### Workflows & Nodes
98
+
99
+ Invoke offers a fully featured workflow management solution, enabling users to combine the power of node-based workflows with the ease of a UI. This allows for customizable generation pipelines to be developed and shared by users looking to create specific workflows to support their production use-cases.
100
+
101
+ ### Board & Gallery Management
102
+
103
+ Invoke features an organized gallery system for easily storing, accessing, and remixing your content in the Invoke workspace. Images can be dragged/dropped onto any image-based UI element in the application, and rich metadata within the image allows for easy recall of key prompts or settings used in your workflow.
104
+
105
+ ### Other features
106
+
107
+ - Support for both ckpt and diffusers models
108
+ - SD1.5, SD2.0, SDXL, and FLUX support
109
+ - Upscaling Tools
110
+ - Embedding Manager & Support
111
+ - Model Manager & Support
112
+ - Workflow creation & management
113
+ - Node-Based Architecture
114
+
115
+ ## Contributing
116
+
117
+ Anyone who wishes to contribute to this project - whether documentation, features, bug fixes, code cleanup, testing, or code reviews - is very much encouraged to do so.
118
+
119
+ Get started with contributing by reading our [contribution documentation][contributing docs], joining the [#dev-chat] or the GitHub discussion board.
120
+
121
+ We hope you enjoy using Invoke as much as we enjoy creating it, and we hope you will elect to become part of our community.
122
+
123
+ ## Thanks
124
+
125
+ Invoke is a combined effort of [passionate and talented people from across the world][contributors]. We thank them for their time, hard work and effort.
126
+
127
+ Original portions of the software are Copyright © 2024 by respective contributors.
128
+
129
+ [features docs]: https://invoke-ai.github.io/InvokeAI/features/database/
130
+ [faq]: https://invoke-ai.github.io/InvokeAI/faq/
131
+ [contributors]: https://invoke-ai.github.io/InvokeAI/contributing/contributors/
132
+ [invoke.com]: https://www.invoke.com/about
133
+ [github issues]: https://github.com/invoke-ai/InvokeAI/issues
134
+ [docs home]: https://invoke-ai.github.io/InvokeAI
135
+ [installation docs]: https://invoke-ai.github.io/InvokeAI/installation/
136
+ [#dev-chat]: https://discord.com/channels/1020123559063990373/1049495067846524939
137
+ [contributing docs]: https://invoke-ai.github.io/InvokeAI/contributing/
138
+ [CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
139
+ [CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
140
+ [discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
141
+ [discord link]: https://discord.gg/ZmtBAhwWhy
142
+ [github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
143
+ [github forks link]: https://useful-forks.github.io/?repo=invoke-ai%2FInvokeAI
144
+ [github open issues badge]: https://flat.badgen.net/github/open-issues/invoke-ai/InvokeAI?icon=github
145
+ [github open issues link]: https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen
146
+ [github open prs badge]: https://flat.badgen.net/github/open-prs/invoke-ai/InvokeAI?icon=github
147
+ [github open prs link]: https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
148
+ [github stars badge]: https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
149
+ [github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
150
+ [latest commit to main badge]: https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/main?icon=github&color=yellow&label=last%20dev%20commit&cache=900
151
+ [latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
152
+ [latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
153
+ [latest release link]: https://github.com/invoke-ai/InvokeAI/releases/latest
154
+ [translation status badge]: https://hosted.weblate.org/widgets/invokeai/-/svg-badge.svg
155
+ [translation status link]: https://hosted.weblate.org/engage/invokeai/
156
+ [nvidia docker docs]: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
157
+ [amd docker docs]: https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html
Stable_Diffusion_v1_Model_Card.md ADDED
@@ -0,0 +1,140 @@
1
+ # Stable Diffusion v1 Model Card
2
+ This model card focuses on the model associated with the Stable Diffusion model, available [here](https://github.com/CompVis/stable-diffusion).
3
+
4
+ ## Model Details
5
+ - **Developed by:** Robin Rombach, Patrick Esser
6
+ - **Model type:** Diffusion-based text-to-image generation model
7
+ - **Language(s):** English
8
+ - **License:** [Proprietary](LICENSE)
9
+ - **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses a fixed, pretrained text encoder ([CLIP ViT-L/14](https://arxiv.org/abs/2103.00020)) as suggested in the [Imagen paper](https://arxiv.org/abs/2205.11487).
10
+ - **Resources for more information:** [GitHub Repository](https://github.com/CompVis/stable-diffusion), [Paper](https://arxiv.org/abs/2112.10752).
11
+ - **Cite as:**
12
+
13
+ @InProceedings{Rombach_2022_CVPR,
14
+ author = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
15
+ title = {High-Resolution Image Synthesis With Latent Diffusion Models},
16
+ booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
17
+ month = {June},
18
+ year = {2022},
19
+ pages = {10684-10695}
20
+ }
21
+
22
+ # Uses
23
+
24
+ ## Direct Use
25
+ The model is intended for research purposes only. Possible research areas and
26
+ tasks include
27
+
28
+ - Safe deployment of models which have the potential to generate harmful content.
29
+ - Probing and understanding the limitations and biases of generative models.
30
+ - Generation of artworks and use in design and other artistic processes.
31
+ - Applications in educational or creative tools.
32
+ - Research on generative models.
33
+
34
+ Excluded uses are described below.
35
+
36
+ ### Misuse, Malicious Use, and Out-of-Scope Use
37
+ _Note: This section is taken from the [DALLE-MINI model card](https://huggingface.co/dalle-mini/dalle-mini), but applies in the same way to Stable Diffusion v1_.
38
+
39
+
40
+ The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.
41
+ #### Out-of-Scope Use
42
+ The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model.
43
+ #### Misuse and Malicious Use
44
+ Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to:
45
+
46
+ - Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc.
47
+ - Intentionally promoting or propagating discriminatory content or harmful stereotypes.
48
+ - Impersonating individuals without their consent.
49
+ - Sexual content without consent of the people who might see it.
50
+ - Mis- and disinformation
51
+ - Representations of egregious violence and gore
52
+ - Sharing of copyrighted or licensed material in violation of its terms of use.
53
+ - Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use.
54
+
55
+ ## Limitations and Bias
56
+
57
+ ### Limitations
58
+
59
+ - The model does not achieve perfect photorealism
60
+ - The model cannot render legible text
61
+ - The model does not perform well on more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere”
62
+ - Faces and people in general may not be generated properly.
63
+ - The model was trained mainly with English captions and will not work as well in other languages.
64
+ - The autoencoding part of the model is lossy
65
+ - The model was trained on a large-scale dataset
66
+ [LAION-5B](https://laion.ai/blog/laion-5b/) which contains adult material
67
+ and is not fit for product use without additional safety mechanisms and
68
+ considerations.
69
+
70
+ ### Bias
71
+ While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases.
72
+ Stable Diffusion v1 was trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/),
73
+ which consists of images that are primarily limited to English descriptions.
74
+ Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for.
75
+ This affects the overall output of the model, as white and western cultures are often set as the default. Further, the
76
+ ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts.
77
+
78
+
79
+ ## Training
80
+
81
+ **Training Data**
82
+ The model developers used the following dataset for training the model:
83
+
84
+ - LAION-2B (en) and subsets thereof (see next section)
85
+
86
+ **Training Procedure**
87
+ Stable Diffusion v1 is a latent diffusion model which combines an autoencoder with a diffusion model that is trained in the latent space of the autoencoder. During training:
88
+
89
+ - Images are encoded through an encoder, which turns images into latent representations. The autoencoder uses a relative downsampling factor of f = 8 and maps images of shape H x W x 3 to latents of shape H/f x W/f x 4 (for example, a 512 x 512 x 3 image becomes a 64 x 64 x 4 latent).
90
+ - Text prompts are encoded through a ViT-L/14 text-encoder.
91
+ - The non-pooled output of the text encoder is fed into the UNet backbone of the latent diffusion model via cross-attention.
92
+ - The loss is a reconstruction objective between the noise that was added to the latent and the prediction made by the UNet.
93
+
94
+ We currently provide three checkpoints, `sd-v1-1.ckpt`, `sd-v1-2.ckpt` and `sd-v1-3.ckpt`,
95
+ which were trained as follows:
96
+
97
+ - `sd-v1-1.ckpt`: 237k steps at resolution `256x256` on [laion2B-en](https://huggingface.co/datasets/laion/laion2B-en).
98
+ 194k steps at resolution `512x512` on [laion-high-resolution](https://huggingface.co/datasets/laion/laion-high-resolution) (170M examples from LAION-5B with resolution `>= 1024x1024`).
99
+ - `sd-v1-2.ckpt`: Resumed from `sd-v1-1.ckpt`.
100
+ 515k steps at resolution `512x512` on "laion-improved-aesthetics" (a subset of laion2B-en,
101
+ filtered to images with an original size `>= 512x512`, estimated aesthetics score `> 5.0`, and an estimated watermark probability `< 0.5`. The watermark estimate is from the LAION-5B metadata, the aesthetics score is estimated using an [improved aesthetics estimator](https://github.com/christophschuhmann/improved-aesthetic-predictor)).
102
+ - `sd-v1-3.ckpt`: Resumed from `sd-v1-2.ckpt`. 195k steps at resolution `512x512` on "laion-improved-aesthetics" and 10\% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598).
103
+
104
+
105
+ - **Hardware:** 32 x 8 x A100 GPUs
106
+ - **Optimizer:** AdamW
107
+ - **Gradient Accumulations**: 2
108
+ - **Batch:** 32 x 8 x 2 x 4 = 2048
109
+ - **Learning rate:** warmup to 0.0001 for 10,000 steps and then kept constant
110
+
111
+ ## Evaluation Results
112
+ Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0,
113
+ 5.0, 6.0, 7.0, 8.0) and 50 PLMS sampling
114
+ steps show the relative improvements of the checkpoints:
115
+
116
+ ![pareto](assets/v1-variants-scores.jpg)
117
+
118
+ Evaluated using 50 PLMS steps and 10000 random prompts from the COCO2017 validation set at 512x512 resolution. Not optimized for FID scores.
119
+ ## Environmental Impact
120
+
121
+ **Stable Diffusion v1** **Estimated Emissions**
122
+ We estimate the following CO2 emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700), based on the hardware, runtime, cloud provider, and compute region listed below.
123
+
124
+ - **Hardware Type:** A100 PCIe 40GB
125
+ - **Hours used:** 150000
126
+ - **Cloud Provider:** AWS
127
+ - **Compute Region:** US-east
128
+ - **Carbon Emitted (Power consumption x Time x Carbon produced based on location of power grid):** 11250 kg CO2 eq.
129
+ ## Citation
130
+ @InProceedings{Rombach_2022_CVPR,
131
+ author = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
132
+ title = {High-Resolution Image Synthesis With Latent Diffusion Models},
133
+ booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
134
+ month = {June},
135
+ year = {2022},
136
+ pages = {10684-10695}
137
+ }
138
+
139
+ *This model card was written by: Robin Rombach and Patrick Esser and is based on the [DALL-E Mini model card](https://huggingface.co/dalle-mini/dalle-mini).*
140
+
coverage/.gitignore ADDED
@@ -0,0 +1,4 @@
1
+ # Ignore everything in this directory
2
+ *
3
+ # Except this file
4
+ !.gitignore
docker/.env.sample ADDED
@@ -0,0 +1,27 @@
1
+ ## Make a copy of this file named `.env` and fill in the values below.
2
+ ## Any environment variables supported by InvokeAI can be specified here,
3
+ ## in addition to the examples below.
4
+
5
+ ## INVOKEAI_ROOT is the path *on the host system* where Invoke will store its data.
6
+ ## It is mounted into the container and allows both containerized and non-containerized usage of Invoke.
7
+ # Usually this is the only variable you need to set. It can be relative or absolute.
8
+ # INVOKEAI_ROOT=~/invokeai
9
+
10
+ ## HOST_INVOKEAI_ROOT and CONTAINER_INVOKEAI_ROOT can be used to control the on-host
11
+ ## and in-container paths separately, if needed.
12
+ ## HOST_INVOKEAI_ROOT is the path on the docker host's filesystem where Invoke will store data.
13
+ ## If relative, it will be relative to the docker directory in which the docker-compose.yml file is located
14
+ ## CONTAINER_INVOKEAI_ROOT is the path within the container where Invoke will expect to find the runtime directory.
15
+ ## It MUST be absolute. There is usually no need to change this.
16
+ # HOST_INVOKEAI_ROOT=../../invokeai-data
17
+ # CONTAINER_INVOKEAI_ROOT=/invokeai
18
+
19
+ ## INVOKEAI_PORT is the port on which the InvokeAI web interface will be available
20
+ # INVOKEAI_PORT=9090
21
+
22
+ ## GPU_DRIVER can be set to either `cuda` or `rocm` to enable GPU support in the container accordingly.
23
+ # GPU_DRIVER=cuda #| rocm
24
+
25
+ ## CONTAINER_UID can be set to the UID of the user on the host system that should own the files in the container.
26
+ ## It is usually not necessary to change this. Use `id -u` on the host system to find the UID.
27
+ # CONTAINER_UID=1000
docker/Dockerfile ADDED
@@ -0,0 +1,124 @@
1
+ # syntax=docker/dockerfile:1.4
2
+
3
+ ## Builder stage
4
+
5
+ FROM library/ubuntu:23.04 AS builder
6
+
7
+ ARG DEBIAN_FRONTEND=noninteractive
8
+ RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
9
+ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
10
+ --mount=type=cache,target=/var/lib/apt,sharing=locked \
11
+ apt update && apt-get install -y \
12
+ git \
13
+ python3-venv \
14
+ python3-pip \
15
+ build-essential
16
+
17
+ ENV INVOKEAI_SRC=/opt/invokeai
18
+ ENV VIRTUAL_ENV=/opt/venv/invokeai
19
+
20
+ ENV PATH="$VIRTUAL_ENV/bin:$PATH"
21
+ ARG GPU_DRIVER=cuda
22
+ ARG TARGETPLATFORM="linux/amd64"
23
+ # unused but available
24
+ ARG BUILDPLATFORM
25
+
26
+ WORKDIR ${INVOKEAI_SRC}
27
+
28
+ COPY invokeai ./invokeai
29
+ COPY pyproject.toml ./
30
+
31
+ # Editable mode helps use the same image for development:
32
+ # the local working copy can be bind-mounted into the image
33
+ # at path defined by ${INVOKEAI_SRC}
34
+ # NOTE: there are no pytorch builds for arm64 + cuda, only cpu
35
+ # x86_64/CUDA is default
36
+ RUN --mount=type=cache,target=/root/.cache/pip \
37
+ python3 -m venv ${VIRTUAL_ENV} &&\
38
+ if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
39
+ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
40
+ elif [ "$GPU_DRIVER" = "rocm" ]; then \
41
+ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm6.1"; \
42
+ else \
43
+ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu124"; \
44
+ fi &&\
45
+
46
+ # xformers + triton fails to install on arm64
47
+ if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
48
+ pip install $extra_index_url_arg -e ".[xformers]"; \
49
+ else \
50
+ pip install $extra_index_url_arg -e "."; \
51
+ fi
52
+
53
+ # #### Build the Web UI ------------------------------------
54
+
55
+ FROM node:20-slim AS web-builder
56
+ ENV PNPM_HOME="/pnpm"
57
+ ENV PATH="$PNPM_HOME:$PATH"
58
+ RUN corepack use [email protected]
59
+ RUN corepack enable
60
+
61
+ WORKDIR /build
62
+ COPY invokeai/frontend/web/ ./
63
+ RUN --mount=type=cache,target=/pnpm/store \
64
+ pnpm install --frozen-lockfile
65
+ RUN npx vite build
66
+
67
+ #### Runtime stage ---------------------------------------
68
+
69
+ FROM library/ubuntu:23.04 AS runtime
70
+
71
+ ARG DEBIAN_FRONTEND=noninteractive
72
+ ENV PYTHONUNBUFFERED=1
73
+ ENV PYTHONDONTWRITEBYTECODE=1
74
+
75
+ RUN apt update && apt install -y --no-install-recommends \
76
+ git \
77
+ curl \
78
+ vim \
79
+ tmux \
80
+ ncdu \
81
+ iotop \
82
+ bzip2 \
83
+ gosu \
84
+ magic-wormhole \
85
+ libglib2.0-0 \
86
+ libgl1-mesa-glx \
87
+ python3-venv \
88
+ python3-pip \
89
+ build-essential \
90
+ libopencv-dev \
91
+ libstdc++-10-dev &&\
92
+ apt-get clean && apt-get autoclean
93
+
94
+
95
+ ENV INVOKEAI_SRC=/opt/invokeai
96
+ ENV VIRTUAL_ENV=/opt/venv/invokeai
97
+ ENV INVOKEAI_ROOT=/invokeai
98
+ ENV INVOKEAI_HOST=0.0.0.0
99
+ ENV INVOKEAI_PORT=9090
100
+ ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
101
+ ENV CONTAINER_UID=${CONTAINER_UID:-1000}
102
+ ENV CONTAINER_GID=${CONTAINER_GID:-1000}
103
+
104
+ # --link requires buildkit w/ dockerfile syntax 1.4
105
+ COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}
106
+ COPY --link --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
107
+ COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist
108
+
109
+ # Link amdgpu.ids for ROCm builds
110
+ # contributed by https://github.com/Rubonnek
111
+ RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
112
+ ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
113
+
114
+ WORKDIR ${INVOKEAI_SRC}
115
+
116
+ # build patchmatch
117
+ RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
118
+ RUN python3 -c "from patchmatch import patch_match"
119
+
120
+ RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}
121
+
122
+ COPY docker/docker-entrypoint.sh ./
123
+ ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
124
+ CMD ["invokeai-web"]
docker/README.md ADDED
@@ -0,0 +1,117 @@
1
+ # Invoke in Docker
2
+
3
+ First things first:
4
+
5
+ - Ensure that Docker can use your [NVIDIA][nvidia docker docs] or [AMD][amd docker docs] GPU.
6
+ - This document assumes a Linux system, but should work similarly under Windows with WSL2.
7
+ - We don't recommend running Invoke in Docker on macOS at this time. It works, but very slowly.
8
+
9
+ ## Quickstart
10
+
11
+ No `docker compose`, no persistence, single command, using the official images:
12
+
13
+ **CUDA (NVIDIA GPU):**
14
+
15
+ ```bash
16
+ docker run --runtime=nvidia --gpus=all --publish 9090:9090 ghcr.io/invoke-ai/invokeai
17
+ ```
18
+
19
+ **ROCm (AMD GPU):**
20
+
21
+ ```bash
22
+ docker run --device /dev/kfd --device /dev/dri --publish 9090:9090 ghcr.io/invoke-ai/invokeai:main-rocm
23
+ ```
24
+
25
+ Open `http://localhost:9090` in your browser once the container finishes booting, install some models, and generate away!
26
+
27
+ ### Data persistence
28
+
29
+ To persist your generated images and downloaded models outside of the container, add a `--volume/-v` flag to the above command, e.g.:
30
+
31
+ ```bash
32
+ docker run --volume /some/local/path:/invokeai {...etc...}
33
+ ```
34
+
35
+ `/some/local/path` on the host will contain all your data.
36
+ It can *usually* be reused between different installs of Invoke. Tread with caution and read the release notes!
37
+
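+ A complete CUDA invocation with persistence might look like this (a sketch; the host path is a placeholder, substitute your own):
+
+ ```bash
+ # Persist models and outputs under ~/invokeai on the host (placeholder path)
+ docker run --runtime=nvidia --gpus=all \
+   --volume ~/invokeai:/invokeai \
+   --publish 9090:9090 \
+   ghcr.io/invoke-ai/invokeai
+ ```
+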
38
+ ## Customize the container
39
+
40
+ The included `run.sh` script is a convenience wrapper around `docker compose`. It can be helpful for passing additional build arguments to `docker compose`. Alternatively, the familiar `docker compose` commands work just as well.
41
+
42
+ ```bash
43
+ cd docker
44
+ cp .env.sample .env
45
+ # edit .env to your liking if you need to; it is well commented.
46
+ ./run.sh
47
+ ```
48
+
49
+ It will take a few minutes to build the image the first time. Once the application starts up, open `http://localhost:9090` in your browser to invoke!
50
+
51
+ > [!TIP]
52
+ > When using the `run.sh` script, the container will continue running after Ctrl+C. To shut it down, use the `docker compose down` command.
53
+
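+ If you prefer to call `docker compose` directly instead of `run.sh`, the ROCm variant, for example, can be built and started roughly like this (a sketch; the service and profile names come from `docker-compose.yml`):
+
+ ```bash
+ cd docker
+ # Build the image with the ROCm PyTorch wheels
+ docker compose build --build-arg GPU_DRIVER=rocm invokeai-rocm
+ # Start the service and follow the logs
+ docker compose --profile rocm up -d invokeai-rocm
+ docker compose logs -f
+ ```
+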
54
+ ## Docker setup in detail
55
+
56
+ #### Linux
57
+
58
+ 1. Ensure buildkit is enabled in the Docker daemon settings (`/etc/docker/daemon.json`), e.g. `{ "features": { "buildkit": true } }`
59
+ 2. Install the `docker compose` plugin using your package manager, or follow a [tutorial](https://docs.docker.com/compose/install/linux/#install-using-the-repository).
60
+ - The deprecated `docker-compose` (hyphenated) CLI probably won't work. Update to a recent version.
61
+ 3. Ensure docker daemon is able to access the GPU.
62
+ - [NVIDIA docs](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
63
+ - [AMD docs](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html)
64
+
65
+ #### macOS
66
+
67
+ > [!TIP]
68
+ > You'll be better off installing Invoke directly on your system, because Docker cannot use the GPU on macOS.
69
+
70
+ If you are still reading:
71
+
72
+ 1. Ensure Docker has at least 16GB RAM
73
+ 2. Enable VirtioFS for file sharing
74
+ 3. Enable `docker compose` V2 support
75
+
76
+ This is done via Docker Desktop preferences.
77
+
78
+ ### Configure the Invoke Environment
79
+
80
+ 1. Make a copy of `.env.sample` and name it `.env` (`cp .env.sample .env` (Mac/Linux) or `copy .env.sample .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to the desired location of the InvokeAI runtime directory. It may be an existing directory from a previous installation (post 4.0.0).
81
+ 1. Execute `run.sh`
82
+
83
+ The image will be built automatically if needed.
84
+
85
+ The runtime directory (holding models and outputs) will be created in the location specified by `INVOKEAI_ROOT`. The default location is `~/invokeai`. Navigate to the Model Manager tab and install some models before generating.
86
+
87
+ ### Use a GPU
88
+
89
+ - Linux is *recommended* for GPU support in Docker.
90
+ - WSL2 is *required* for Windows.
91
+ - Only the `x86_64` architecture is supported.
92
+
93
+ The Docker daemon on the system must be already set up to use the GPU. In case of Linux, this involves installing `nvidia-docker-runtime` and configuring the `nvidia` runtime as default. Steps will be different for AMD. Please see Docker/NVIDIA/AMD documentation for the most up-to-date instructions for using your GPU with Docker.
94
+
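+ On Linux with the NVIDIA Container Toolkit installed, registering the runtime usually comes down to something like this (a sketch; see the NVIDIA docs above for the authoritative steps):
+
+ ```bash
+ # Register the nvidia runtime with Docker and restart the daemon
+ sudo nvidia-ctk runtime configure --runtime=docker
+ sudo systemctl restart docker
+ # Sanity check: nvidia-smi should work from inside a container
+ docker run --rm --runtime=nvidia --gpus all ubuntu nvidia-smi
+ ```
+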
95
+ To use an AMD GPU, set `GPU_DRIVER=rocm` in your `.env` file before running `./run.sh`.
96
+
97
+ ## Customize
98
+
99
+ Check the `.env.sample` file. It contains some environment variables for running in Docker. Copy it, name it `.env`, and fill it in with your own values. Next time you run `run.sh`, your custom values will be used.
100
+
101
+ You can also set these values in `docker-compose.yml` directly, but `.env` will help avoid conflicts when code is updated.
102
+
103
+ Values are optional, but setting `INVOKEAI_ROOT` is highly recommended. The default is `~/invokeai`. Example:
104
+
105
+ ```bash
106
+ INVOKEAI_ROOT=/Volumes/WorkDrive/invokeai
107
+ HUGGINGFACE_TOKEN=the_actual_token
108
+ CONTAINER_UID=1000
109
+ GPU_DRIVER=cuda
110
+ ```
111
+
112
+ Any environment variables supported by InvokeAI can be set here. See the [Configuration docs](https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/) for further detail.
113
+
114
+ ---
115
+
116
+ [nvidia docker docs]: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
117
+ [amd docker docs]: https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html
docker/docker-compose.yml ADDED
@@ -0,0 +1,54 @@
1
+ # Copyright (c) 2023 Eugene Brodsky https://github.com/ebr
2
+
3
+ x-invokeai: &invokeai
4
+ image: "ghcr.io/invoke-ai/invokeai:latest"
5
+ build:
6
+ context: ..
7
+ dockerfile: docker/Dockerfile
8
+
9
+ # Create a .env file in the same directory as this docker-compose.yml file
10
+ # and populate it with environment variables. See .env.sample
11
+ env_file:
12
+ - .env
13
+
14
+ # variables without a default will automatically inherit from the host environment
15
+ environment:
16
+ # if set, CONTAINER_INVOKEAI_ROOT will override the Invoke runtime directory location *inside* the container
17
+ - INVOKEAI_ROOT=${CONTAINER_INVOKEAI_ROOT:-/invokeai}
18
+ - HF_HOME
19
+ ports:
20
+ - "${INVOKEAI_PORT:-9090}:${INVOKEAI_PORT:-9090}"
21
+ volumes:
22
+ - type: bind
23
+ source: ${HOST_INVOKEAI_ROOT:-${INVOKEAI_ROOT:-~/invokeai}}
24
+ target: ${CONTAINER_INVOKEAI_ROOT:-/invokeai}
25
+ bind:
26
+ create_host_path: true
27
+ - ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
28
+ tty: true
29
+ stdin_open: true
30
+
31
+
32
+ services:
33
+ invokeai-cuda:
34
+ <<: *invokeai
35
+ deploy:
36
+ resources:
37
+ reservations:
38
+ devices:
39
+ - driver: nvidia
40
+ count: 1
41
+ capabilities: [gpu]
42
+
43
+ invokeai-cpu:
44
+ <<: *invokeai
45
+ profiles:
46
+ - cpu
47
+
48
+ invokeai-rocm:
49
+ <<: *invokeai
50
+ devices:
51
+ - /dev/kfd:/dev/kfd
52
+ - /dev/dri:/dev/dri
53
+ profiles:
54
+ - rocm
docker/docker-entrypoint.sh ADDED
@@ -0,0 +1,41 @@
1
+ #!/bin/bash
2
+ set -e -o pipefail
3
+
4
+ ### Container entrypoint
5
+ # Runs the CMD as defined by the Dockerfile or passed to `docker run`
6
+ # Can be used to configure the runtime dir
7
+ # Bypass by using ENTRYPOINT or `--entrypoint`
8
+
9
+ ### Set INVOKEAI_ROOT pointing to a valid runtime directory
10
+ # Otherwise configure the runtime dir first.
11
+
12
+ ### Set the CONTAINER_UID envvar to match your user.
13
+ # Ensures files created in the container are owned by you:
14
+ # docker run --rm -it -v /some/path:/invokeai -e CONTAINER_UID=$(id -u) <this image>
15
+ # Default UID: 1000 chosen due to popularity on Linux systems. Possibly 501 on MacOS.
16
+
17
+ USER_ID=${CONTAINER_UID:-1000}
18
+ USER=ubuntu
19
+ usermod -u ${USER_ID} ${USER} 1>/dev/null
20
+
21
+ ### Set the $PUBLIC_KEY env var to enable SSH access.
22
+ # We do not install openssh-server in the image by default to avoid bloat.
23
+ # but it is useful to have the full SSH server e.g. on Runpod.
24
+ # (use SCP to copy files to/from the image, etc)
25
+ if [[ -v "PUBLIC_KEY" ]] && [[ ! -d "${HOME}/.ssh" ]]; then
26
+ apt-get update
27
+ apt-get install -y openssh-server
28
+ pushd "$HOME"
29
+ mkdir -p .ssh
30
+ echo "${PUBLIC_KEY}" >.ssh/authorized_keys
31
+ chmod -R 700 .ssh
32
+ popd
33
+ service ssh start
34
+ fi
35
+
36
+ mkdir -p "${INVOKEAI_ROOT}"
37
+ chown --recursive ${USER} "${INVOKEAI_ROOT}" || true
38
+ cd "${INVOKEAI_ROOT}"
39
+
40
+ # Run the CMD as the Container User (not root).
41
+ exec gosu ${USER} "$@"
docker/run.sh ADDED
@@ -0,0 +1,36 @@
1
+ #!/usr/bin/env bash
2
+ set -e -o pipefail
3
+
4
+ run() {
5
+ local scriptdir=$(dirname "${BASH_SOURCE[0]}")
6
+ cd "$scriptdir" || exit 1
7
+
8
+ local build_args=""
9
+ local profile=""
10
+
11
+ # create .env file if it doesn't exist, otherwise docker compose will fail
12
+ touch .env
13
+
14
+ # parse .env file for build args
15
+ build_args=$(awk '$1 ~ /=[^$]/ && $0 !~ /^#/ {print "--build-arg " $0 " "}' .env) &&
16
+ profile="$(awk -F '=' '/GPU_DRIVER/ {print $2}' .env)"
17
+
18
+ # default to 'cuda' profile
19
+ [[ -z "$profile" ]] && profile="cuda"
20
+
21
+ local service_name="invokeai-$profile"
22
+
23
+ if [[ ! -z "$build_args" ]]; then
24
+ printf "%s\n" "docker compose build args:"
25
+ printf "%s\n" "$build_args"
26
+ fi
27
+
28
+ docker compose build $build_args $service_name
29
+ unset build_args
30
+
31
+ printf "%s\n" "starting service $service_name"
32
+ docker compose --profile "$profile" up -d "$service_name"
33
+ docker compose logs -f
34
+ }
35
+
36
+ run
docker/runpod-readme.md ADDED
@@ -0,0 +1,60 @@
1
+ # InvokeAI - A Stable Diffusion Toolkit
2
+
3
+ Stable Diffusion distribution by InvokeAI: https://github.com/invoke-ai
4
+
5
+ The Docker image tracks the `main` branch of the InvokeAI project, which means it includes the latest features, but may contain some bugs.
6
+
7
+ Your working directory is mounted under the `/workspace` path inside the pod. The models are in `/workspace/invokeai/models`, and outputs are in `/workspace/invokeai/outputs`.
8
+
9
+ > **Only the /workspace directory will persist between pod restarts!**
10
+
11
+ > **If you _terminate_ (not just _stop_) the pod, the /workspace will be lost.**
12
+
13
+ ## Quickstart
14
+
15
+ 1. Launch a pod from this template. **It will take about 5-10 minutes to run through the initial setup**. Be patient.
16
+ 1. Wait for the application to load.
17
+ - TIP: you know it's ready when the CPU usage goes idle
18
+ - You can also check the logs for a line that says "_Point your browser at..._"
19
+ 1. Open the Invoke AI web UI: click the `Connect` => `connect over HTTP` button.
20
+ 1. Generate some art!
21
+
22
+ ## Other things you can do
23
+
24
+ At any point you may edit the pod configuration and set an arbitrary Docker command. For example, you could run a command to download some models using `curl`, or fetch some images and place them into your outputs to continue a working session.
25
+
26
+ If you need to run *multiple commands*, define them in the Docker Command field like this:
27
+
28
+ `bash -c "cd ${INVOKEAI_ROOT}/outputs; wormhole receive 2-foo-bar; invoke.py --web --host 0.0.0.0"`
29
+
30
+ ### Copying your data in and out of the pod
31
+
32
+ This image includes a couple of handy tools to help you get data into the pod (such as your custom models or embeddings) and out of the pod (such as downloading your outputs). Here are your options:
33
+
34
+ - **SSH server**:
35
+ 1. Make sure to create and set your Public Key in the RunPod settings (follow the official instructions)
36
+ 1. Add an exposed port 22 (TCP) in the pod settings!
37
+ 1. When your pod restarts, you will see a new entry in the `Connect` dialog. Use this SSH server to `scp` or `sftp` your files as necessary, or SSH into the pod using the fully fledged SSH server.
38
+
39
+ - [**Magic Wormhole**](https://magic-wormhole.readthedocs.io/en/latest/welcome.html):
40
+ 1. On your computer, `pip install magic-wormhole` (see above instructions for details)
41
+ 1. Connect to the command line **using the "light" SSH client** or the browser-based console. _Currently there's a bug where `wormhole` isn't available when connected to "full" SSH server, as described above_.
42
+ 1. `wormhole send /workspace/invokeai/outputs` will send the entire `outputs` directory. You can also send individual files.
43
+ 1. Once packaged, you will see a `wormhole receive <123-some-words>` command. Copy it.
44
+ 1. Paste this command into the terminal on your local machine to securely download the payload.
45
+ 1. It works the same in reverse: you can `wormhole send` some models from your computer to the pod. Again, save your files somewhere in `/workspace` or they will be lost when the pod is stopped.
46
+
47
+ - **RunPod's Cloud Sync feature** may be used to sync the persistent volume to cloud storage. You could, for example, copy the entire `/workspace` to S3, add some custom models to it, and copy it back from S3 when launching new pod configurations. Follow the Cloud Sync instructions.
48
+
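+ As a concrete illustration of the SSH option above, copying your outputs back to your local machine might look like this (a sketch; the user, host, and port are placeholders taken from RunPod's `Connect` dialog):
+
+ ```bash
+ # <port> and <pod-host> are placeholders - copy the real values from the Connect dialog
+ scp -P <port> -r root@<pod-host>:/workspace/invokeai/outputs ./invokeai-outputs
+ ```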
49
+
50
+ ### Disable the NSFW checker
51
+
52
+ The NSFW checker is enabled by default. To disable it, edit the pod configuration and set the following command:
53
+
54
+ ```
55
+ invoke --web --host 0.0.0.0 --no-nsfw_checker
56
+ ```
57
+
58
+ ---
59
+
60
+ Template ©2023 Eugene Brodsky [ebr](https://github.com/ebr)
docs/CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,128 @@
1
+ # Contributor Covenant Code of Conduct
2
+
3
+ ## Our Pledge
4
+
5
+ We as members, contributors, and leaders pledge to make participation in our
6
+ community a harassment-free experience for everyone, regardless of age, body
7
+ size, visible or invisible disability, ethnicity, sex characteristics, gender
8
+ identity and expression, level of experience, education, socio-economic status,
9
+ nationality, personal appearance, race, religion, or sexual identity
10
+ and orientation.
11
+
12
+ We pledge to act and interact in ways that contribute to an open, welcoming,
13
+ diverse, inclusive, and healthy community.
14
+
15
+ ## Our Standards
16
+
17
+ Examples of behavior that contributes to a positive environment for our
18
+ community include:
19
+
20
+ * Demonstrating empathy and kindness toward other people
21
+ * Being respectful of differing opinions, viewpoints, and experiences
22
+ * Giving and gracefully accepting constructive feedback
23
+ * Accepting responsibility and apologizing to those affected by our mistakes,
24
+ and learning from the experience
25
+ * Focusing on what is best not just for us as individuals, but for the
26
+ overall community
27
+
28
+ Examples of unacceptable behavior include:
29
+
30
+ * The use of sexualized language or imagery, and sexual attention or
31
+ advances of any kind
32
+ * Trolling, insulting or derogatory comments, and personal or political attacks
33
+ * Public or private harassment
34
+ * Publishing others' private information, such as a physical or email
35
+ address, without their explicit permission
36
+ * Other conduct which could reasonably be considered inappropriate in a
37
+ professional setting
38
+
39
+ ## Enforcement Responsibilities
40
+
41
+ Community leaders are responsible for clarifying and enforcing our standards of
42
+ acceptable behavior and will take appropriate and fair corrective action in
43
+ response to any behavior that they deem inappropriate, threatening, offensive,
44
+ or harmful.
45
+
46
+ Community leaders have the right and responsibility to remove, edit, or reject
47
+ comments, commits, code, wiki edits, issues, and other contributions that are
48
+ not aligned to this Code of Conduct, and will communicate reasons for moderation
49
+ decisions when appropriate.
50
+
51
+ ## Scope
52
+
53
+ This Code of Conduct applies within all community spaces, and also applies when
54
+ an individual is officially representing the community in public spaces.
55
+ Examples of representing our community include using an official e-mail address,
56
+ posting via an official social media account, or acting as an appointed
57
+ representative at an online or offline event.
58
+
59
+ ## Enforcement
60
+
61
+ Instances of abusive, harassing, or otherwise unacceptable behavior
62
+ may be reported to the community leaders responsible for enforcement
63
+ at https://github.com/invoke-ai/InvokeAI/issues. All complaints will
64
+ be reviewed and investigated promptly and fairly.
65
+
66
+ All community leaders are obligated to respect the privacy and security of the
67
+ reporter of any incident.
68
+
69
+ ## Enforcement Guidelines
70
+
71
+ Community leaders will follow these Community Impact Guidelines in determining
72
+ the consequences for any action they deem in violation of this Code of Conduct:
73
+
74
+ ### 1. Correction
75
+
76
+ **Community Impact**: Use of inappropriate language or other behavior deemed
77
+ unprofessional or unwelcome in the community.
78
+
79
+ **Consequence**: A private, written warning from community leaders, providing
80
+ clarity around the nature of the violation and an explanation of why the
81
+ behavior was inappropriate. A public apology may be requested.
82
+
83
+ ### 2. Warning
84
+
85
+ **Community Impact**: A violation through a single incident or series
86
+ of actions.
87
+
88
+ **Consequence**: A warning with consequences for continued behavior. No
89
+ interaction with the people involved, including unsolicited interaction with
90
+ those enforcing the Code of Conduct, for a specified period of time. This
91
+ includes avoiding interactions in community spaces as well as external channels
92
+ like social media. Violating these terms may lead to a temporary or
93
+ permanent ban.
94
+
95
+ ### 3. Temporary Ban
96
+
97
+ **Community Impact**: A serious violation of community standards, including
98
+ sustained inappropriate behavior.
99
+
100
+ **Consequence**: A temporary ban from any sort of interaction or public
101
+ communication with the community for a specified period of time. No public or
102
+ private interaction with the people involved, including unsolicited interaction
103
+ with those enforcing the Code of Conduct, is allowed during this period.
104
+ Violating these terms may lead to a permanent ban.
105
+
106
+ ### 4. Permanent Ban
107
+
108
+ **Community Impact**: Demonstrating a pattern of violation of community
109
+ standards, including sustained inappropriate behavior, harassment of an
110
+ individual, or aggression toward or disparagement of classes of individuals.
111
+
112
+ **Consequence**: A permanent ban from any sort of public interaction within
113
+ the community.
114
+
115
+ ## Attribution
116
+
117
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118
+ version 2.0, available at
119
+ https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120
+
121
+ Community Impact Guidelines were inspired by [Mozilla's code of conduct
122
+ enforcement ladder](https://github.com/mozilla/diversity).
123
+
124
+ [homepage]: https://www.contributor-covenant.org
125
+
126
+ For answers to common questions about this code of conduct, see the FAQ at
127
+ https://www.contributor-covenant.org/faq. Translations are available at
128
+ https://www.contributor-covenant.org/translations.
docs/RELEASE.md ADDED
@@ -0,0 +1,173 @@
1
+ # Release Process
2
+
3
+ The app is published twice, in two different build formats.
4
+
5
+ - A [PyPI] distribution. This includes both a source distribution and a built distribution (a wheel). Users install with `pip install invokeai`. The updater uses this build.
6
+ - An installer on the [InvokeAI Releases Page]. This is a zip file with install scripts and a wheel. This is only used for new installs.
7
+
8
+ ## General Prep
9
+
10
+ Make a developer call-out for PRs to merge. Merge and test things out.
11
+
12
+ While the release workflow does not include end-to-end tests, it does pause before publishing so you can download and test the final build.
13
+
14
+ ## Release Workflow
15
+
16
+ The `release.yml` workflow runs a number of jobs to handle code checks, tests, build and publish on PyPI.
17
+
18
+ It is triggered on **tag push**, when the tag matches `v*`. It doesn't matter if you've prepped a release branch like `release/v3.5.0` or are releasing from `main` - it works the same.
19
+
20
+ > Because commits are reference-counted, it is safe to create a release branch, tag it, let the workflow run, then delete the branch. So long as the tag exists, that commit will exist.
21
+
22
+ ### Triggering the Workflow
23
+
24
+ Run `make tag-release` to tag the current commit and kick off the workflow.
25
+
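+ Under the hood this amounts to tagging the current commit with the app version and pushing the tag; done by hand it would look roughly like this (a sketch; `v4.2.0` is a placeholder version that must match `invokeai/version/invokeai_version.py`):
+
+ ```sh
+ # Placeholder version - use the real app version
+ git tag v4.2.0
+ git push origin v4.2.0
+ # If you are re-tagging after a fix, you may need -f to move an existing tag
+ ```
+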
26
+ The release may also be dispatched [manually].
27
+
28
+ ### Workflow Jobs and Process
29
+
30
+ The workflow consists of a number of concurrently-run jobs, and two final publish jobs.
31
+
32
+ The publish jobs require manual approval and are only run if the other jobs succeed.
33
+
34
+ #### `check-version` Job
35
+
36
+ This job checks that the git ref matches the app version. It matches the ref against the `__version__` variable in `invokeai/version/invokeai_version.py`.
37
+
38
+ When the workflow is triggered by tag push, the ref is the tag. If the workflow is run manually, the ref is the target selected from the **Use workflow from** dropdown.
39
+
40
+ This job uses [samuelcolvin/check-python-version].
41
+
42
+ > Any valid [version specifier] works, so long as the tag matches the version. The release workflow works exactly the same for `RC`, `post`, `dev`, etc.
43
+
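+ A quick local sanity check before pushing the tag might look like this (a sketch; it reads the version file named above):
+
+ ```sh
+ # Print the app version defined in invokeai/version/invokeai_version.py
+ python -c "exec(open('invokeai/version/invokeai_version.py').read()); print(__version__)"
+ # Print the tag pointing at HEAD (errors if HEAD is not tagged)
+ git describe --tags --exact-match
+ ```
+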
44
+ #### Check and Test Jobs
45
+
46
+ - **`python-tests`**: runs `pytest` on matrix of platforms
47
+ - **`python-checks`**: runs `ruff` (format and lint)
48
+ - **`frontend-tests`**: runs `vitest`
49
+ - **`frontend-checks`**: runs `prettier` (format), `eslint` (lint), `dpdm` (circular refs), `tsc` (static type check) and `knip` (unused imports)
50
+
51
+ > **TODO** We should add `mypy` or `pyright` to the **`check-python`** job.
52
+
53
+ > **TODO** We should add an end-to-end test job that generates an image.
54
+
55
+ #### `build-installer` Job
56
+
57
+ This sets up both python and frontend dependencies and builds the python package. Internally, this runs `installer/create_installer.sh` and uploads two artifacts:
58
+
59
+ - **`dist`**: the python distribution, to be published on PyPI
60
+ - **`InvokeAI-installer-${VERSION}.zip`**: the installer to be included in the GitHub release
61
+
62
+ #### Sanity Check & Smoke Test
63
+
64
+ At this point, the release workflow pauses as the remaining publish jobs require approval. Time to test the installer.
65
+
66
+ Because the installer pulls from PyPI, and we haven't published to PyPI yet, you will need to install from the wheel:
67
+
68
+ - Download and unzip `dist.zip` and the installer from the **Summary** tab of the workflow
69
+ - Run the installer script using the `--wheel` CLI arg, pointing at the wheel:
70
+
71
+ ```sh
72
+ ./install.sh --wheel ../InvokeAI-4.0.0rc6-py3-none-any.whl
73
+ ```
74
+
75
+ - Install to a temporary directory so you get the new user experience
76
+ - Download a model and generate
77
+
78
+ > The same wheel file is bundled in the installer and in the `dist` artifact, which is uploaded to PyPI. You should end up with exactly the same installation as if the installer got the wheel from PyPI.
79
+
80
+ ##### Something isn't right
81
+
82
+ If testing reveals any issues, no worries. Cancel the workflow, which will cancel the pending publish jobs (you didn't approve them prematurely, right?).
83
+
84
+ Now you can start from the top:
85
+
86
+ - Fix the issues and PR the fixes per usual
87
+ - Get the PR approved and merged per usual
88
+ - Switch to `main` and pull in the fixes
89
+ - Run `make tag-release` to move the tag to `HEAD` (which has the fixes) and kick off the release workflow again
90
+ - Re-do the sanity check
91
+
92
+ #### PyPI Publish Jobs
93
+
94
+ The publish jobs will not run if any of the previous jobs fail.
95
+
96
+ They use [GitHub environments], which are configured as [trusted publishers] on PyPI.
97
+
98
+ Both jobs require a maintainer to approve them from the workflow's **Summary** tab.
99
+
100
+ - Click the **Review deployments** button
101
+ - Select the environment (either `testpypi` or `pypi`)
102
+ - Click **Approve and deploy**
103
+
104
+ > **If the version already exists on PyPI, the publish jobs will fail.** PyPI only allows a given version to be published once - you cannot change it. If a version published on PyPI has a problem, you'll need to "fail forward" by bumping the app version and publishing a followup release.
105
+
106
+ ##### Failing PyPI Publish
107
+
108
+ Check the [python infrastructure status page] for incidents.
109
+
110
+ If there are no incidents, contact @hipsterusername or @lstein, who have owner access to GH and PyPI, to see if access has expired or something like that.
111
+
112
+ #### `publish-testpypi` Job
113
+
114
+ Publishes the distribution on the [Test PyPI] index, using the `testpypi` GitHub environment.
115
+
116
+ This job is not required for the production PyPI publish, but included just in case you want to test the PyPI release.
117
+
118
+ If approved and successful, you could try out the test release like this:
119
+
120
+ ```sh
121
+ # Create a new virtual environment
122
+ python -m venv ~/.test-invokeai-dist --prompt test-invokeai-dist
+ # Activate it so the install below goes into this environment
+ source ~/.test-invokeai-dist/bin/activate
123
+ # Install the distribution from Test PyPI
124
+ pip install --index-url https://test.pypi.org/simple/ invokeai
125
+ # Run and test the app
126
+ invokeai-web
127
+ # Cleanup
128
+ deactivate
129
+ rm -rf ~/.test-invokeai-dist
130
+ ```
131
+
132
+ #### `publish-pypi` Job
133
+
134
+ Publishes the distribution on the production PyPI index, using the `pypi` GitHub environment.
135
+
136
+ ## Publish the GitHub Release with installer
137
+
138
+ Once the release is published to PyPI, it's time to publish the GitHub release.
139
+
140
+ 1. [Draft a new release] on GitHub, choosing the tag that triggered the release.
141
+ 1. Write the release notes, describing important changes. The **Generate release notes** button automatically inserts the changelog and new contributors, and you can copy/paste the intro from previous releases.
142
+ 1. Use `scripts/get_external_contributions.py` to get a list of external contributions to shout out in the release notes.
143
+ 1. Upload the zip file created in **`build`** job into the Assets section of the release notes.
144
+ 1. Check **Set as a pre-release** if it's a pre-release.
145
+ 1. Check **Create a discussion for this release**.
146
+ 1. Publish the release.
147
+ 1. Announce the release in Discord.
148
+
149
+ > **TODO** Workflows can create a GitHub release from a template and upload release assets. One popular action to handle this is [ncipollo/release-action]. A future enhancement to the release process could set this up.
150
+
151
+ ## Manual Build
152
+
153
+ The `build installer` workflow can be dispatched manually. This is useful to test the installer for a given branch or tag.
154
+
155
+ No checks are run, it just builds.
156
+
157
+ ## Manual Release
158
+
159
+ The `release` workflow can be dispatched manually. You must dispatch the workflow from the right tag, else it will fail the version check.
160
+
161
+ This functionality is available as a fallback in case something goes wonky. Typically, releases should be triggered via tag push as described above.
162
+
163
+ [InvokeAI Releases Page]: https://github.com/invoke-ai/InvokeAI/releases
164
+ [PyPI]: https://pypi.org/
165
+ [Draft a new release]: https://github.com/invoke-ai/InvokeAI/releases/new
166
+ [Test PyPI]: https://test.pypi.org/
167
+ [version specifier]: https://packaging.python.org/en/latest/specifications/version-specifiers/
168
+ [ncipollo/release-action]: https://github.com/ncipollo/release-action
169
+ [GitHub environments]: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment
170
+ [trusted publishers]: https://docs.pypi.org/trusted-publishers/
171
+ [samuelcolvin/check-python-version]: https://github.com/samuelcolvin/check-python-version
172
+ [manually]: #manual-release
173
+ [python infrastructure status page]: https://status.python.org/
docs/assets/Lincoln-and-Parrot-512-transparent.png ADDED