magicfixeseverything committed
Commit 9add88b · Parent(s): 9f745a7
Update app.py

app.py CHANGED
@@ -1,65 +1,3034 @@
[The left ("old") side of the diff was not captured cleanly by the extraction;
only the following lines of the 65-line old version survive. "-" marks a line
removed or changed by this commit.]

      import gradio as gr
      import torch
  -   import numpy as np
      import modin.pandas as pd
      from PIL import Image
  -   from diffusers import DiffusionPipeline

      if torch.cuda.is_available():
          ...
          torch.cuda.empty_cache()
          ...
          torch.cuda.empty_cache()
          ...
      else:
          ... (old lines 46-65 were removed; their bodies were not captured)
import gradio as gr
import torch
#import numpy as np
import modin.pandas as pd
from PIL import Image
from diffusers import DiffusionPipeline

##########

#
# Original script by:
# https://huggingface.co/Manjushri
#
# This version has been adapted from that person's versions.
#

##########

# Tested with gradio version 4.8.0
# https://www.gradio.app/main/docs/interface

##########

# To launch this script, use the following in the command prompt, taking off
# the # at the start. (You will need to adjust the start of the path if you
# have changed the location.)

#cd C:\Diffusers && .venv\Scripts\activate.bat && py .venv\sdxl_and_photoreal\sdxl_and_photoreal.py

##########

# IMPORTANT NOTES:
#
# You must have an NVIDIA graphics card with CUDA installed to use this
# script. It will not work on just a CPU on Windows.
#
# If you are not using "enable_model_cpu_offload" or
# "enable_sequential_cpu_offload", memory usage will remain high until the
# command prompt is closed, whether an image is being created or not.

###############################################################################
###############################################################################
#
#
#
# Begin Configurations
#
#
#
###############################################################################
###############################################################################
#
# Main Directory
#
# This is where everything goes. Your Python virtual environment should
# be here. Model data will be stored here (unless you change the next
# configuration). If configured, imagery will also automatically be
# saved here.
#

main_dir = "C:/Diffusers"

####################

#
# Use Custom HuggingFace Cache Directory
#
# The folder where model data is stored can get huge. I choose to put it
# in a place where I am more likely to notice it. If you use other
# HuggingFace tools, however, and will use these models in them, you
# might want to skip this, as it would duplicate the model data.
#
# If set to 1, the data will be here:
#   C:\Diffusers\model_data
#
# If set to 0, the data will be here:
#   %USERPROFILE%/.cache/huggingface/hub
# Which looks like this, where {Username} is the username of your
# Windows account:
#   C:\Users\{Username}\.cache\huggingface\hub
#
# You need to clean out the folder occasionally, as it will get extremely
# large. Eventually, it would take up all the space on your computer.
#

use_custom_hugging_face_cache_dir = 1

#####

#
# Name of Model Data Folder
#
# This is where all the model data will go (unless you changed it in the
# previous configuration). This folder will get very large. You need to
# clean it out manually occasionally.
#

cache_directory_folder_name = "model_data"

####################

#
# Default Base Model
#
# This will automatically be SDXL Turbo if you are running this on a CPU.
#

default_base_model = "sdxl"

####################

#
# Auto Save Imagery
#
# You can automatically save the image file, and a text file with the
# prompt details.
#

auto_save_imagery = 1

#####

#
# Name of Saved Images Folder
#
# You can change the name of this folder if you want. Imagery will be
# saved in a folder called "saved_images" in the directory configured
# in "main_dir". (The saved images folder will be created automatically.)
# A directory for each day will be created in this folder. Imagery will
# then be placed in each folder.
#

saved_images_folder_name = "saved_images"

####################

#
# Auto Open Browser From Command Prompt
#

auto_open_browser = 1

####################

#
# Make Seed Selection A Textbox
#
# If you use a slider or number field for the seed, some seeds can't be
# duplicated using those fields. If you enter a number greater than
# 9007199254740992, the seed won't reliably be used. Check the text
# details to see if it was used. This is a technical limitation as of
# this writing. See the bug report here:
#   https://github.com/gradio-app/gradio/issues/5354
#
# Using the slider, and not entering a number, might be the way to get
# reliable numbers above that value. Just don't then use the up and down
# arrows in the field to go up or down a number.
#
# The way to use seeds higher than that reliably is to set this variable
# to 1.

make_seed_selection_a_textbox = 0
168 |
+
|
169 |
+
####################
|
170 |
+
|
171 |
+
#
|
172 |
+
# Include Close Command Prompt / Cancel Button
|
173 |
+
#
|
174 |
+
# This doesn't work well at all. It just closes the command prompt.
|
175 |
+
#
|
176 |
+
|
177 |
+
enable_close_command_prompt_button = 0
|
178 |
+
|
179 |
+
####################
|
180 |
+
|
181 |
+
#
|
182 |
+
# Use Denoising Start In Base Model When Using Refiner
|
183 |
+
#
|
184 |
+
# If set to "1", refining will end at the percent (expressed as decimal)
|
185 |
+
# defined in the denoising start for the refiner. If the steps set are
|
186 |
+
# 100, and the denoising start value is 0.75, the base model will run for
|
187 |
+
# 75 steps. The refiner will then run for 25 steps.
|
188 |
+
#
|
189 |
+
|
190 |
+
default_use_denoising_start_in_base_model_when_using_refiner = 0
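
# A minimal sketch of how that split is expressed with diffusers (the
# documented base + refiner "ensemble of experts" pattern, shown here only
# as an illustration; "pipe" and "refiner" are the pipelines loaded later
# in this script, and high_noise_frac is a hypothetical name for the 0.75
# value):
#
#   high_noise_frac = 0.75
#   base_output = pipe(
#       prompt = prompt_text,
#       num_inference_steps = 100,
#       denoising_end = high_noise_frac,    # base handles the first 75%
#       output_type = "latent"
#   ).images
#   refined_image = refiner(
#       prompt = prompt_text,
#       num_inference_steps = 100,
#       denoising_start = high_noise_frac,  # refiner handles the last 25%
#       image = base_output
#   ).images[0]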

####################

#
# Base Model Output To Refiner Is In Latent Space
#
# If set to "1", base model output is in latent space instead of a PIL
# image when sent to the refiner.
#

default_base_model_output_to_refiner_is_in_latent_space = 1

####################

#
# Log Generation Times
#
# Log generation times to the saved text output. The initial time it takes
# to load a model is not included in the generation time.
#

log_generation_times = 1

####################

#
# Use Image Gallery
#
# There is a bug in downloading images:
#   https://github.com/gradio-app/gradio/issues/6486
# Saves as HTML rather than image.
#

use_image_gallery = 1

####################

#
# Up Next Is Various Configuration Arrays and Objects
#

####################

base_model_array = [
    "sdxl",
    "photoreal",
    "sdxl_turbo",
    "sd_1_5_runwayml"
]

base_model_names_object = {
    "sdxl": "Stable Diffusion XL 1.0",
    "photoreal": "PhotoReal",
    "sdxl_turbo": "Stable Diffusion XL Turbo",
    "sd_1_5_runwayml": "Stable Diffusion 1.5"
}

####################

#
# "sdxl_default"
#
#   - My customized configurations. (Subject to change.)
#
# "sdxl_2023-11-12"
#
#   - Valid from November 12th to present.
#     Number of steps in upscaler changed from 5 to 15.
#
# "sdxl_2023-09-05"
#
#   - Valid from September 5th to November 12th.
#     There were changes on this date.
#
# "photoreal_default"
#
#   - My customized configurations. (Subject to change.)
#     "circulus/canvers-real-v3.7.5"
#
#     Seeds do not match the online PhotoReal version.
#
# "photoreal_2023-11-12"
#
#   - Valid from November 12th to present.
#     New base model: "circulus/canvers-real-v3.7.5"
#
# "photoreal_2023-09-01"
#
#   - Valid from September 1st to November 12th.
#     "circulus/canvers-realistic-v3.6" was already in effect,
#     but there were changes on this date.
#
# "sdxl_turbo_default"
#
#   - My customized configurations. (Subject to change.)
#
# "sd_1_5_runwayml_default"
#
#   - My customized configurations. (Subject to change.)
#

base_model_object_of_model_configuration_arrays = {
    "sdxl": [
        "sdxl_default",
        "sdxl_2023-11-12",
        "sdxl_2023-09-05"
    ],
    "photoreal": [
        "photoreal_default",
        "photoreal_2023-11-12",
        "photoreal_2023-09-01"
    ],
    "sdxl_turbo": [
        "sdxl_turbo_default"
    ],
    "sd_1_5_runwayml": [
        "sd_1_5_runwayml_default"
    ]
}

####################

model_configuration_names_object = {
    "sdxl_default": "1.0 - Default (subject to change)",
    "sdxl_2023-11-12": "1.0 (2023-11-12 online config)",
    "sdxl_2023-09-05": "1.0 (2023-09-05 online config)",
    "photoreal_default": "3.7.5 - Default (subject to change)",
    "photoreal_2023-11-12": "3.7.5 (2023-11-12 online config)",
    "photoreal_2023-09-01": "3.6 (2023-09-01 online config)",
    "sdxl_turbo_default": "Default (subject to change)",
    "sd_1_5_runwayml_default": "1.5 - Default (subject to change)"
}

model_configuration_links_object = {
    "sdxl_default": "stabilityai/stable-diffusion-xl-base-1.0",
    "sdxl_2023-11-12": "stabilityai/stable-diffusion-xl-base-1.0",
    "sdxl_2023-09-05": "stabilityai/stable-diffusion-xl-base-1.0",
    "photoreal_default": "circulus/canvers-real-v3.7.5",
    "photoreal_2023-11-12": "circulus/canvers-real-v3.7.5",
    "photoreal_2023-09-01": "circulus/canvers-realistic-v3.6",
    "sdxl_turbo_default": "stabilityai/sdxl-turbo",
    "sd_1_5_runwayml_default": "runwayml/stable-diffusion-v1-5"
}

model_configuration_force_refiner_object = {
    "sdxl_2023-11-12": 1,
    "sdxl_2023-09-05": 1
}

####################

base_model_model_configuration_defaults_object = {
    "sdxl": "sdxl_default",
    "photoreal": "photoreal_default",
    "sdxl_turbo": "sdxl_turbo_default",
    "sd_1_5_runwayml": "sd_1_5_runwayml_default"
}
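
# To see how these structures fit together, the default lookups above
# resolve like this (values taken directly from the dictionaries in this
# script):
#
#   base_model_model_configuration_defaults_object["sdxl"]
#       -> "sdxl_default"
#   model_configuration_names_object["sdxl_default"]
#       -> "1.0 - Default (subject to change)"
#   model_configuration_links_object["sdxl_default"]
#       -> "stabilityai/stable-diffusion-xl-base-1.0"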

####################

#
# Links:
#
# SD-XL 1.0-base Model Card
#   https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0
#
# SD-XL 1.0-refiner Model Card
#   https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0
#
# Stable Diffusion x2 latent upscaler model card
#   https://huggingface.co/stabilityai/sd-x2-latent-upscaler
#
# PhotoReal
#   3.7.5: https://huggingface.co/circulus/canvers-real-v3.7.5
#   3.6: https://huggingface.co/circulus/canvers-realistic-v3.6
#
# SDXL Turbo
#   https://huggingface.co/stabilityai/sdxl-turbo
#
# Stable Diffusion v1-5 (runwayml)
#   https://huggingface.co/runwayml/stable-diffusion-v1-5
#

####################

#
# Automatically determine whether we are on CPU or GPU
#
# CPU will not work on Windows.
#

device = "cpu"

if torch.cuda.is_available():

    device = "cuda"

    PYTORCH_CUDA_ALLOC_CONF = {
        "max_split_size_mb": 8000
    }
    torch.cuda.max_memory_allocated(
        device = device
    )
    torch.cuda.empty_cache()
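    # Note on the two calls above: PYTORCH_CUDA_ALLOC_CONF as written only
    # creates an ordinary Python dict; PyTorch's allocator is configured
    # through the PYTORCH_CUDA_ALLOC_CONF *environment variable* (a string
    # such as "max_split_size_mb:8000") set before CUDA is initialized.
    # Likewise, torch.cuda.max_memory_allocated() only queries a memory
    # statistic; its return value is discarded here.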

if device == "cpu":

    default_base_model = "sdxl_turbo"

####################

default_prompt = ""
default_negative_prompt = ""

default_width = 768
default_height = 768

default_guidance_scale_value = 7

default_base_model_base_model_num_inference_steps = 50
default_base_model_base_model_num_inference_steps_for_sdxl_turbo = 2

default_seed_maximum = 999999999999999999
default_seed_value = 876678173805928800

# If you turn off the refiner, it will not be available in the display unless
# you select an online configuration option that requires it.

enable_refiner = 1
enable_upscaler = 1

# Selected on the form as a default?

default_refiner_selected = 0
default_upscaler_selected = 0

# xFormers:
#
#   https://huggingface.co/docs/diffusers/optimization/xformers

use_xformers = 1

# Scaled dot product attention (SDPA) is used by default for PyTorch 2.0. To
# use the default attention processor instead, set this to 1.
#
#   https://huggingface.co/docs/diffusers/optimization/torch2.0#scaled-dot-product-attention

use_default_attn_processor = 0

display_xformers_usage_in_prompt_info = 1
include_transformers_version_in_prompt_info = 1
display_default_attn_processor_usage_in_prompt_info = 1

# You can't select both sequential and model CPU offloading. If you select
# both, model CPU offloading will be used.

use_sequential_cpu_offload_for_base_model = 1
use_sequential_cpu_offload_for_refiner = 1
use_sequential_cpu_offload_for_upscaler = 1

use_model_cpu_offload_for_base_model = 0
use_model_cpu_offload_for_refiner = 0
use_model_cpu_offload_for_upscaler = 0



if default_base_model == "photoreal":

    # PhotoReal

    default_seed_value = 3648905360627576

elif default_base_model == "sdxl_turbo":

    # SDXL Turbo

    default_seed_value = 2725116121543

#elif default_base_model == "sd_1_5_runwayml":

    # SD 1.5

else:

    # SDXL

    default_width = 1024
    default_height = 1024
    default_guidance_scale_value = 10



# Must be a multiple of 8

width_and_height_input_slider_steps = 8



show_messages_in_command_prompt = 1
show_messages_in_modal_on_page = 1



opening_html = ""

if device == "cpu":

    opening_html = "<span style=\"font-weight: bold; color: red;\">THIS APP IS EXCEPTIONALLY SLOW!</span><br/>This app is not running on a GPU. The first time it loads after the space is rebuilt it might take 10 minutes to generate an SDXL Turbo image. It may take two minutes after that point. For other models, it may take hours to create a single image!"



ending_html = """This app allows you to try to match images that can be generated using several tools online. (<a href=\"https://huggingface.co/spaces/Manjushri/SDXL-1.0\" target=\"_blank\">Stable Diffusion XL</a>, <a href=\"https://huggingface.co/spaces/Manjushri/PhotoReal-V3.7.5\" target=\"_blank\">PhotoReal with SDXL 1.0 Refiner</a> and <a href=\"https://huggingface.co/spaces/diffusers/unofficial-SDXL-Turbo-i2i-t2i\" target=\"_blank\">SDXL Turbo Unofficial Demo</a>) You can select the base model you want to use in the first dropdown option. The second configuration option involves choosing which version and/or configuration to use. Certain configurations try to match the version online, taking into account changes that were made over time. Another configuration involves a default configuration I chose, and it is subject to change while I am still designing this app.

Tokens are not individual characters. If the prompt length is too long, the display will notify you what part of the prompt wasn't used. Changing just the image dimensions alone will change the image generated. For some models, trying to make a large image, such as 1024x1024, may add extra people and come out worse than using smaller dimensions. If you have a seed greater than 9007199254740992, it may not be processed correctly. Make sure the prompt matches the seed you entered. (Shown in the \"Prompt Information\" section once you create an image.) If it doesn't, set \"make_seed_selection_a_textbox\" to 1 in the script. This bug is described <a href=\"https://github.com/gradio-app/gradio/issues/5354\" target=\"_blank\">here</a>.

The original script for this app was written by <a href=\"https://huggingface.co/Manjushri\" target=\"_blank\">Manjushri</a>."""



refiner_and_upscaler_status_opening_html = "<div style=\"text-align: center;\">"

refiner_and_upscaler_status_closing_html = "</div>"

refiner_on_text = "Refiner is on. "
refiner_off_text = "Refiner is off. "

upscaler_on_text = "Upscaler is on. "
upscaler_off_text = "Upscaler is off. "

number_of_reserved_tokens = 2



# This will eventually be a configuration option...
#
#   "pil"     image
#   "latent"  latent space

which_output_type_before_refiner_and_upscaler = "latent"



###############################################################################
###############################################################################
#
#
#
# End Configurations
#
#
#
###############################################################################
###############################################################################


import os

# Initialize the flag before the check below so it is defined on every path.
# (As committed, nothing ever sets this to 1, so the Hugging Face branch
# below is inert unless this value is changed by hand; str() is needed
# because os.uname() returns a uname_result object, not a string, and the
# original os.uname().find(...) would always raise.)

script_being_run_on_hugging_face = 0

try:
    if (str(os.uname()).find("magicfixeseverything") >= 0):
        script_being_run_on_hugging_face = 0
except Exception:
    script_being_run_on_hugging_face = 0



if device == "cuda":

    PYTORCH_CUDA_ALLOC_CONF = {
        "max_split_size_mb": 8000
    }
    torch.cuda.max_memory_allocated(
        device = device
    )
    torch.cuda.empty_cache()



if script_being_run_on_hugging_face == 1:

    use_custom_hugging_face_cache_dir = 0
    show_messages_in_modal_on_page = 0



saved_images_dir = main_dir + "/" + saved_images_folder_name

hugging_face_cache_dir = main_dir + "/" + cache_directory_folder_name

if not os.path.exists(hugging_face_cache_dir):
    os.makedirs(hugging_face_cache_dir)



os.environ["HF_HUB_OFFLINE"] = "1"
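
# Setting HF_HUB_OFFLINE to "1" tells huggingface_hub (and therefore the
# diffusers from_pretrained calls below) to use only files already in the
# local cache instead of contacting the Hub, so every model must have been
# downloaded at least once before this takes effect.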

if auto_save_imagery == 1:

    from datetime import datetime
    import time



if log_generation_times == 1:

    import time



if device == "cpu":

    use_sequential_cpu_offload_for_base_model = 0
    use_sequential_cpu_offload_for_refiner = 0
    use_sequential_cpu_offload_for_upscaler = 0

    use_model_cpu_offload_for_base_model = 0
    use_model_cpu_offload_for_refiner = 0
    use_model_cpu_offload_for_upscaler = 0

    use_xformers = 0



if (
    (use_sequential_cpu_offload_for_base_model == 1) and
    (use_model_cpu_offload_for_base_model == 1)
):

    use_sequential_cpu_offload_for_base_model = 0

if (
    (use_sequential_cpu_offload_for_refiner == 1) and
    (use_model_cpu_offload_for_refiner == 1)
):

    use_sequential_cpu_offload_for_refiner = 0

if (
    (use_sequential_cpu_offload_for_upscaler == 1) and
    (use_model_cpu_offload_for_upscaler == 1)
):

    use_sequential_cpu_offload_for_upscaler = 0
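
# Background on the two offload modes being reconciled above (from the
# diffusers documentation): enable_sequential_cpu_offload() moves individual
# submodules to the GPU only while they run, giving the lowest VRAM use but
# the slowest generation; enable_model_cpu_offload() moves whole models at a
# time, using more VRAM but running considerably faster.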

def error_function(
    text_message
):

    print (text_message)

    gr.Error(text_message)

    exit(1)



# Don't change this one

default_model_configuration_object = {
    "sdxl_default": 1,
    "photoreal_default": 1,
    "sdxl_turbo_default": 1,
    "sd_1_5_runwayml_default": 1
}



additional_prompt_info_html = ""

if auto_save_imagery == 1:

    additional_prompt_info_html = " The image, and a text file with generation information, will be saved automatically."



if use_xformers == 1:

    from xformers.ops import MemoryEfficientAttentionFlashAttentionOp

if use_default_attn_processor == 1:

    from diffusers.models.attention_processor import AttnProcessor



if (
    default_base_model and
    (default_base_model in base_model_object_of_model_configuration_arrays) and
    (default_base_model in base_model_model_configuration_defaults_object)
):

    default_model_configuration = base_model_model_configuration_defaults_object[default_base_model]

    if default_model_configuration in model_configuration_names_object:

        default_model_configuration_choices_array = []

        for this_model_configuration in base_model_object_of_model_configuration_arrays[default_base_model]:

            if model_configuration_names_object[this_model_configuration]:

                default_model_configuration_choices_array.append(
                    model_configuration_names_object[this_model_configuration]
                )

            else:

                error_function("A default configuration must be properly named in the code.")

    else:

        error_function("A default configuration must be properly configured in the code.")

else:

    error_function("A default base model must be properly configured in the code.")



default_base_model_nicely_named_value = base_model_names_object[default_base_model]

default_model_configuration_nicely_named_value = model_configuration_names_object[default_model_configuration]



if enable_refiner != 1:

    default_refiner_selected = 0

if enable_upscaler != 1:

    default_upscaler_selected = 0



model_configuration_requires_refiner = 0

if default_model_configuration in model_configuration_force_refiner_object:

    model_configuration_requires_refiner = model_configuration_force_refiner_object[default_model_configuration]

    if model_configuration_requires_refiner == 1:

        enable_refiner = 1
        default_refiner_selected = 1

default_refine_option = "No"

if default_refiner_selected == 1:

    default_refine_option = "Yes"

default_upscale_option = "No"

if default_upscaler_selected == 1:

    default_upscale_option = "Yes"

is_default_config = 0

if default_model_configuration in default_model_configuration_object:

    is_default_config = 1

default_refiner_and_upscaler_status_text = refiner_and_upscaler_status_opening_html



refiner_default_config_accordion_visible = True

if (
    (enable_refiner != 1) or
    (is_default_config != 1)
):

    refiner_default_config_accordion_visible = False

refiner_default_config_accordion_open = False

if (
    (is_default_config == 1) and
    (default_refiner_selected == 1)
):

    refiner_default_config_accordion_open = True



refiner_online_config_accordion_visible = True

if (
    (enable_refiner != 1) or
    (is_default_config == 1)
):

    refiner_online_config_accordion_visible = False

refiner_online_config_accordion_open = False

if (
    (is_default_config != 1) and
    (default_refiner_selected == 1)
):

    refiner_online_config_accordion_open = True

refiner_group_visible = False

if enable_refiner == 1:

    refiner_group_visible = True

if default_refiner_selected == 1:

    default_refiner_and_upscaler_status_text += refiner_on_text

else:

    default_refiner_and_upscaler_status_text += refiner_off_text



upscaler_accordion_open = False

if default_upscaler_selected == 1:

    upscaler_accordion_open = True

upscaler_group_visible = False

if enable_upscaler == 1:

    upscaler_group_visible = True

if default_upscaler_selected == 1:

    default_refiner_and_upscaler_status_text += upscaler_on_text

else:

    default_refiner_and_upscaler_status_text += upscaler_off_text



default_refiner_and_upscaler_status_text += refiner_and_upscaler_status_closing_html



image_gallery_array = []
prompt_information_array = []



default_negative_prompt_field_visibility = True
default_negative_prompt_for_sdxl_turbo_field_visibility = False
default_base_model_num_inference_steps_field_visibility = True
default_base_model_num_inference_steps_field_for_sdxl_turbo_field_visibility = False
default_guidance_scale_field_visibility = True
default_guidance_scale_for_sdxl_turbo_field_visibility = False

if default_base_model == "sdxl_turbo":

    default_negative_prompt_field_visibility = False
    default_negative_prompt_for_sdxl_turbo_field_visibility = True
    default_base_model_num_inference_steps_field_visibility = False
    default_base_model_num_inference_steps_field_for_sdxl_turbo_field_visibility = True
    default_guidance_scale_field_visibility = False
    default_guidance_scale_for_sdxl_turbo_field_visibility = True



global pipe
global refiner
global upscaler

last_model_configuration_name_value = ""
last_refiner_selected = ""
last_upscaler_selected = ""



default_base_model_choices_array = []

stored_model_configuration_names_object = {}

for this_base_model in base_model_array:

    default_base_model_choices_array.append(
        base_model_names_object[this_base_model]
    )

    stored_model_configuration = base_model_model_configuration_defaults_object[this_base_model]

    stored_model_configuration_names_object[this_base_model] = model_configuration_names_object[stored_model_configuration]



###############################################################################
###############################################################################
#
#
#
#
#
#
# Functions
#
#
#
#
#
#
###############################################################################
###############################################################################



#####################
#
# Show Message
#
# Display a message to the user in a modal on the web form and/or in the
# command prompt.
#
#####################

def show_message(
    message_to_display
):

    if show_messages_in_command_prompt == 1:

        print (message_to_display)

    if show_messages_in_modal_on_page == 1:

        gr.Info(message_to_display)



#####################
#
# Convert Seconds
#
# Convert raw seconds to the number of hours, minutes and seconds.
#
#####################

def convert_seconds(
    seconds
):

    # Google AI Code

    hours = seconds // 3600
    minutes = (seconds % 3600) // 60
    seconds = seconds % 60
    return hours, minutes, seconds
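
# A quick illustration of convert_seconds (plain integer arithmetic, not
# captured program output):
#
#   convert_seconds(3725)  ->  (1, 2, 5)    # 1 hour, 2 minutes, 5 seconds
#   convert_seconds(59)    ->  (0, 0, 59)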

#####################
#
# Seed Not Valid
#
# Return True if the seed is not valid.
#
#####################

def seed_not_valid(seed_num_str):
    try:
        seed_num = int(seed_num_str)
        if (seed_num > 0) and (seed_num < default_seed_maximum):
            return False
        else:
            return True
    except ValueError:
        return True
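
# For example, with default_seed_maximum = 999999999999999999 as configured
# above:
#
#   seed_not_valid("12345")        ->  False  (usable seed)
#   seed_not_valid("0")            ->  True   (must be greater than zero)
#   seed_not_valid("not a seed")   ->  True   (int() raises ValueError)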

#####################
#
# Numerical Bool
#
# Return 1 for anything that is True/Yes/1. Everything else is 0.
#
#####################

def numerical_bool(
    original_value
):

    new_value = 0

    if (
        (original_value == 1) or
        (original_value == "Yes") or
        (original_value == "True") or
        (original_value == True)
    ):

        new_value = 1

    return new_value
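
# Example results (note that in Python, True == 1, so the first test in the
# function already covers booleans):
#
#   numerical_bool("Yes")   ->  1
#   numerical_bool(True)    ->  1
#   numerical_bool("No")    ->  0
#   numerical_bool(None)    ->  0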

#####################
#
# Truncate Prompt
#
# Truncate a prompt. Get the actual prompt that will be used and save the
# part of the prompt that will not be used.
#
#####################

def truncate_prompt (
    existing_prompt_text
):

    # Only 77 tokens are allowed in the prompt. 2 are reserved, meaning it
    # is truncated to 75. This happens automatically, but we want to tell
    # people when it does.

    tokenizer = pipe.tokenizer

    max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens

    prompt_text_words_array = existing_prompt_text.split(" ")

    prompt_text_words_array_length = len(prompt_text_words_array)

    prompt_text_words_index = 0

    prompt_text_substring = ""
    prompt_text_not_used_substring = ""

    for prompt_text_word in prompt_text_words_array:

        prompt_text_words_index += 1

        substring_to_test = prompt_text_substring

        if prompt_text_words_index > 1:

            substring_to_test += " "

        substring_to_test += prompt_text_word

        token_length_of_substring_to_test = len(tokenizer.tokenize(substring_to_test))

        if token_length_of_substring_to_test > max_token_length_of_model:

            prompt_text_not_used_substring += prompt_text_word + " "

        else:

            prompt_text_substring = substring_to_test

    return (
        prompt_text_substring,
        prompt_text_not_used_substring
    )
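
# Sketch of how the function behaves: it rebuilds the prompt word by word,
# re-tokenizing the candidate string on each pass, and diverts any word that
# would push the token count past the budget into the "not used" string. So
# for an overlong prompt (very_long_prompt_text is a hypothetical variable
# here, not one defined in this script):
#
#   kept, dropped = truncate_prompt(very_long_prompt_text)
#   # "kept" tokenizes to at most model_max_length - 2 tokens;
#   # "dropped" holds the space-separated words that did not fit.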

#####################
#
# Update Prompt Info From Gallery
#
# If you select an image in the image gallery, display the prompt
# information for that image.
#
#####################

def update_prompt_info_from_gallery (
    gallery_data: gr.SelectData
):

    gallery_data_index = gallery_data.index

    output_image_field_update = gr.Gallery(
        selected_index = gallery_data_index
    )

    output_text_field_update = prompt_information_array[gallery_data_index]

    return {
        output_image_field: output_image_field_update,
        output_text_field: output_text_field_update
    }



#####################
#
# Create Image Function
#
# This is the main image creation function.
#
#####################

def create_image_function (
    base_model_field_index,
    model_configuration_field_index,
    prompt_text,
    negative_prompt_text,
    image_width,
    image_height,
    guidance_scale,
    base_model_num_inference_steps,
    base_model_num_inference_steps_field_for_sdxl_turbo,
    actual_seed,

    refining_selection_online_config_normal_field_value,
    refining_selection_online_config_automatically_selected_field_value,

    refining_use_denoising_start_in_base_model_when_using_refiner_field_value,
    refining_base_model_output_to_refiner_is_in_latent_space_field_value,

    refining_denoise_start_for_online_config_field_value,
    refining_number_of_iterations_for_online_config_field_value,

    upscaling_selection_field_value,
    upscaling_num_inference_steps
):

    refining_selection_online_config_normal_field_value = numerical_bool(refining_selection_online_config_normal_field_value)
    refining_selection_online_config_automatically_selected_field_value = numerical_bool(refining_selection_online_config_automatically_selected_field_value)

    refining_use_denoising_start_in_base_model_when_using_refiner_field_value = numerical_bool(refining_use_denoising_start_in_base_model_when_using_refiner_field_value)
    refining_base_model_output_to_refiner_is_in_latent_space_field_value = numerical_bool(refining_base_model_output_to_refiner_is_in_latent_space_field_value)

    use_upscaler = numerical_bool(upscaling_selection_field_value)



    base_model_name_value = base_model_array[base_model_field_index]
    model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_name_value][model_configuration_field_index]



    is_config_state = 0

    if model_configuration_name_value in default_model_configuration_object:

        is_config_state = 1

    use_refiner = 0



    if (
        (
            (is_config_state == 1) and
            refining_selection_online_config_normal_field_value
        ) or (
            (is_config_state != 1) and
            refining_selection_online_config_automatically_selected_field_value
        )
    ):

        use_refiner = 1



    if base_model_name_value == "sdxl_turbo":

        negative_prompt_text = ""
        base_model_num_inference_steps = base_model_num_inference_steps_field_for_sdxl_turbo
        guidance_scale = 0
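
        # Zeroing these out matches how SDXL Turbo is meant to be run: per
        # its model card it was trained to generate in very few steps (the
        # default of 2 set above) and without classifier-free guidance, so
        # guidance_scale is set to 0 and the negative prompt is ignored.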

    global last_model_configuration_name_value

    global pipe
    global refiner
    global upscaler

    global image_gallery_array
    global prompt_information_array

    if (
        (last_model_configuration_name_value == "") or
        (model_configuration_name_value != last_model_configuration_name_value)
    ):

        show_message("Loading base model...")

        if (last_model_configuration_name_value != ""):

            del pipe

            if 'refiner' in globals():
                del refiner

            if 'upscaler' in globals():
                del upscaler

            import gc

            gc.collect()

            if device == "cuda":
                torch.cuda.empty_cache()

        if base_model_name_value == "photoreal":

            base_model_kwargs = {
                "safety_checker": None,
                "requires_safety_checker": False
            }

        elif base_model_name_value == "sdxl_turbo":

            base_model_kwargs = {
                "use_safetensors": True,
                "safety_checker": None
            }

            if device == "cuda":

                base_model_kwargs["variant"] = "fp16"

        else:

            base_model_kwargs = {
                "use_safetensors": True
            }

            if device == "cuda":

                base_model_kwargs["variant"] = "fp16"

        if device == "cuda":

            base_model_kwargs["torch_dtype"] = torch.float16

        if use_custom_hugging_face_cache_dir == 1:

            base_model_kwargs["cache_dir"] = hugging_face_cache_dir

        pipe = DiffusionPipeline.from_pretrained(
            model_configuration_links_object[model_configuration_name_value],
            **base_model_kwargs
        )
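
        # For the default SDXL configuration on a CUDA machine, the kwargs
        # built above therefore resolve to:
        #
        #   DiffusionPipeline.from_pretrained(
        #       "stabilityai/stable-diffusion-xl-base-1.0",
        #       use_safetensors = True,
        #       variant = "fp16",
        #       torch_dtype = torch.float16,
        #       cache_dir = hugging_face_cache_dir  # only when the custom
        #   )                                       # cache dir is enabled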
|
1292 |
+
|
1293 |
+
if use_model_cpu_offload_for_base_model == 1:
|
1294 |
+
pipe.enable_model_cpu_offload()
|
1295 |
+
|
1296 |
+
if use_xformers == 1:
|
1297 |
+
pipe.enable_xformers_memory_efficient_attention()
|
1298 |
+
|
1299 |
+
pipe = pipe.to(device)
|
1300 |
+
|
1301 |
+
if use_sequential_cpu_offload_for_base_model == 1:
|
1302 |
+
pipe.enable_sequential_cpu_offload()
|
1303 |
+
|
1304 |
+
if use_default_attn_processor == 1:
|
1305 |
+
pipe.unet.set_default_attn_processor()
|
1306 |
+
|
1307 |
+
if device == "cuda":
|
1308 |
+
torch.cuda.empty_cache()
|
1309 |
+
else:
|
1310 |
+
pipe.unet = torch.compile(
|
1311 |
+
pipe.unet,
|
1312 |
+
mode = "reduce-overhead",
|
1313 |
+
fullgraph = True
|
1314 |
+
)
|
1315 |
+
|
1316 |
+
last_model_configuration_name_value = model_configuration_name_value
|
1317 |
+
|
1318 |
+
|
1319 |
+
|
1320 |
+
if use_refiner == 1:
|
1321 |
+
|
1322 |
+
show_message("Loading refiner...")
|
1323 |
+
|
1324 |
+
refiner_kwargs = {
|
1325 |
+
"use_safetensors": True
|
1326 |
+
}
|
1327 |
+
|
1328 |
+
if device == "cuda":
|
1329 |
+
|
1330 |
+
refiner_kwargs["variant"] = "fp16"
|
1331 |
+
refiner_kwargs["torch_dtype"] = torch.float16
|
1332 |
+
|
1333 |
+
if use_custom_hugging_face_cache_dir == 1:
|
1334 |
+
|
1335 |
+
refiner_kwargs["cache_dir"] = hugging_face_cache_dir
|
1336 |
+
|
1337 |
+
refiner = DiffusionPipeline.from_pretrained(
|
1338 |
+
"stabilityai/stable-diffusion-xl-refiner-1.0",
|
1339 |
+
**refiner_kwargs
|
1340 |
+
)
|
1341 |
+
|
1342 |
+
if use_model_cpu_offload_for_refiner == 1:
|
1343 |
+
|
1344 |
+
refiner.enable_model_cpu_offload()
|
1345 |
+
|
1346 |
+
if use_xformers == 1:
|
1347 |
+
|
1348 |
+
refiner.enable_xformers_memory_efficient_attention()
|
1349 |
+
|
1350 |
+
refiner = refiner.to(device)
|
1351 |
+
|
1352 |
+
if use_sequential_cpu_offload_for_refiner == 1:
|
1353 |
+
|
1354 |
+
refiner.enable_sequential_cpu_offload()
|
1355 |
+
|
1356 |
+
if use_default_attn_processor == 1:
|
1357 |
+
|
1358 |
+
refiner.unet.set_default_attn_processor()
|
1359 |
+
|
1360 |
+
if device == "cuda":
|
1361 |
+
torch.cuda.empty_cache()
|
1362 |
+
else:
|
1363 |
+
refiner.unet = torch.compile(
|
1364 |
+
refiner.unet,
|
1365 |
+
mode = "reduce-overhead",
|
1366 |
+
fullgraph = True
|
1367 |
+
)
|
1368 |
+
|
1369 |
+
|
1370 |
+
|
1371 |
+
if use_upscaler == 1:
|
1372 |
+
|
1373 |
+
show_message("Loading upscaler...")
|
1374 |
+
|
1375 |
+
upscaler_kwargs = {
|
1376 |
+
"use_safetensors": True
|
1377 |
+
}
|
1378 |
+
|
1379 |
+
if device == "cuda":
|
1380 |
+
|
1381 |
+
upscaler_kwargs["variant"] = "fp16"
|
1382 |
+
upscaler_kwargs["torch_dtype"] = torch.float16
|
1383 |
+
|
1384 |
+
if use_custom_hugging_face_cache_dir == 1:
|
1385 |
+
|
1386 |
+
upscaler_kwargs["cache_dir"] = hugging_face_cache_dir
|
1387 |
+
|
1388 |
+
upscaler = DiffusionPipeline.from_pretrained(
|
1389 |
+
"stabilityai/sd-x2-latent-upscaler",
|
1390 |
+
**upscaler_kwargs
|
1391 |
+
)
|
1392 |
+
|
1393 |
+
if use_model_cpu_offload_for_upscaler == 1:
|
1394 |
+
|
1395 |
+
upscaler.enable_model_cpu_offload()
|
1396 |
+
|
1397 |
+
if use_xformers == 1:
|
1398 |
+
|
1399 |
+
upscaler.enable_xformers_memory_efficient_attention()
|
1400 |
+
|
1401 |
+
upscaler = upscaler.to(device)
|
1402 |
+
|
1403 |
+
if use_sequential_cpu_offload_for_upscaler == 1:
|
1404 |
+
|
1405 |
+
upscaler.enable_sequential_cpu_offload()
|
1406 |
+
|
1407 |
+
if use_default_attn_processor == 1:
|
1408 |
+
|
1409 |
+
upscaler.unet.set_default_attn_processor()
|
1410 |
+
|
1411 |
+
if device == "cuda":
|
1412 |
+
torch.cuda.empty_cache()
|
1413 |
+
else:
|
1414 |
+
upscaler.unet = torch.compile(
|
1415 |
+
upscaler.unet,
|
1416 |
+
mode = "reduce-overhead",
|
1417 |
+
fullgraph = True
|
1418 |
+
)
|
1419 |
+
|
1420 |
+
|
1421 |
+
|
1422 |
+
if log_generation_times == 1:
|
1423 |
+
|
1424 |
+
start_time = time.time()
|
1425 |
+
|
1426 |
+
|
1427 |
+
|
1428 |
+
# Only 77 tokens are allowed in the prompt. 2 are reserved, leaving an
|
1429 |
+
# effective limit of 75. Anything longer is truncated automatically, but
|
1430 |
+
# we want to tell people when that happens.
|
1431 |
+
|
1432 |
+
tokenizer = pipe.tokenizer
|
1433 |
+
|
1434 |
+
max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens
|
1435 |
+
|
1436 |
+
token_length_of_prompt_text = len(tokenizer.tokenize(prompt_text))
|
1437 |
+
token_length_of_negative_prompt_text = len(tokenizer.tokenize(negative_prompt_text))
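# tokenizer.tokenize() returns the subword tokens without the special
# begin/end tokens, so its length compares directly against
# model_max_length minus the reserved tokens. Illustrative example:
#
#   len(tokenizer.tokenize("a photo of an astronaut riding a horse"))
#
# gives the number of CLIP subword tokens that prompt consumes.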
|
1438 |
+
|
1439 |
+
prompt_text_not_used_substring = ""
|
1440 |
+
|
1441 |
+
message_about_prompt_truncation = ""
|
1442 |
+
|
1443 |
+
if token_length_of_prompt_text > max_token_length_of_model:
|
1444 |
+
|
1445 |
+
(
|
1446 |
+
prompt_text,
|
1447 |
+
prompt_text_not_used_substring
|
1448 |
+
) = truncate_prompt(
|
1449 |
+
prompt_text
|
1450 |
+
)
|
1451 |
+
|
1452 |
+
message_about_prompt_truncation += "Your prompt has been truncated because it is too long. This part has been truncated:<br/><br/><span style=\"font-style: italic;\">" + prompt_text_not_used_substring + "</span>"
|
1453 |
+
|
1454 |
+
negative_prompt_text_not_used_substring = ""
|
1455 |
+
|
1456 |
+
if token_length_of_negative_prompt_text > max_token_length_of_model:
|
1457 |
+
|
1458 |
+
(
|
1459 |
+
negative_prompt_text,
|
1460 |
+
negative_prompt_text_not_used_substring
|
1461 |
+
) = truncate_prompt(
|
1462 |
+
negative_prompt_text
|
1463 |
+
)
|
1464 |
+
|
1465 |
+
if len(message_about_prompt_truncation) > 0:
|
1466 |
+
|
1467 |
+
message_about_prompt_truncation += "<br/><br/>"
|
1468 |
+
|
1469 |
+
message_about_prompt_truncation += "Your negative prompt has been truncated because it is too long. This part has been truncated:<br/><br/><span style=\"font-style: italic;\">" + negative_prompt_text_not_used_substring + "</span>"
|
1470 |
+
|
1471 |
+
prompt_truncated_field_update = gr.HTML(
|
1472 |
+
value = "",
|
1473 |
+
visible = False
|
1474 |
+
)
|
1475 |
+
|
1476 |
+
if len(message_about_prompt_truncation) > 0:
|
1477 |
+
|
1478 |
+
prompt_truncated_field_update = gr.HTML(
|
1479 |
+
value = "<div style=\"padding: 10px; background: #fff;\"><span style=\"font-weight: bold;\">Note</span>: " + message_about_prompt_truncation + "</div>",
|
1480 |
+
visible = True
|
1481 |
+
)
|
1482 |
+
|
1483 |
+
show_message("Note: Part of your prompt has been truncated automatically because it was too long.")
|
1484 |
+
|
1485 |
+
|
1486 |
+
|
1487 |
+
actual_seed = int(actual_seed)
|
1488 |
+
|
1489 |
+
if actual_seed == 0:
|
1490 |
+
|
1491 |
+
import random
|
1492 |
+
|
1493 |
+
default_seed_maximum_for_random = default_seed_maximum
|
1494 |
+
|
1495 |
+
if default_seed_maximum_for_random > 9007199254740992:
|
1496 |
+
|
1497 |
+
# If above this number, seeds may not be able to be entered into slider properly.
|
1498 |
+
|
1499 |
+
default_seed_maximum_for_random = 9007199254740992
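# 9007199254740992 is 2**53, the largest integer a JavaScript double can
# represent exactly; the seed slider runs in the browser, so larger
# values could be rounded before they ever reach Python.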
|
1500 |
+
|
1501 |
+
actual_seed = int(random.randint(1, default_seed_maximum_for_random))
|
1502 |
+
|
1503 |
+
if seed_not_valid(actual_seed):
|
1504 |
+
|
1505 |
+
raise Exception("Seed is not valid.")
|
1506 |
+
|
1507 |
+
generator = torch.manual_seed(actual_seed)
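# torch.manual_seed() seeds and returns the default (CPU) torch.Generator,
# and passing it to the pipeline makes the initial latents reproducible.
# A device-pinned alternative, if ever needed, would be:
#
#   generator = torch.Generator(device).manual_seed(actual_seed)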
|
1508 |
+
|
1509 |
+
|
1510 |
+
|
1511 |
+
if model_configuration_name_value.find("default") < 0:
|
1512 |
+
|
1513 |
+
|
1514 |
+
|
1515 |
+
#
|
1516 |
+
#
|
1517 |
+
#
|
1518 |
+
# Attempt To Match Online Configurations
|
1519 |
+
#
|
1520 |
+
#
|
1521 |
+
#
|
1522 |
+
|
1523 |
+
|
1524 |
+
|
1525 |
+
prompt = prompt_text
|
1526 |
+
negative_prompt = negative_prompt_text
|
1527 |
+
width = image_width
|
1528 |
+
height = image_height
|
1529 |
+
scale = guidance_scale
|
1530 |
+
steps = base_model_num_inference_steps
|
1531 |
+
refining = use_refiner
|
1532 |
+
if refining == 1:
|
1533 |
+
refining = "Yes"
|
1534 |
+
upscaling = use_upscaler
|
1535 |
+
if upscaling == 1:
|
1536 |
+
upscaling = "Yes"
|
1537 |
+
|
1538 |
+
prompt_2 = ""
|
1539 |
+
negative_prompt_2 = ""
|
1540 |
+
|
1541 |
+
high_noise_frac = refining_denoise_start_for_online_config_field_value
|
1542 |
+
|
1543 |
+
if (
|
1544 |
+
model_configuration_name_value == "sdxl_2023-11-12" or
|
1545 |
+
model_configuration_name_value == "sdxl_2023-09-05"
|
1546 |
+
):
|
1547 |
+
|
1548 |
+
n_steps = refining_number_of_iterations_for_online_config_field_value
|
1549 |
+
|
1550 |
+
upscaling_num_inference_steps = 15
|
1551 |
+
|
1552 |
+
if model_configuration_name_value == "sdxl_2023-09-05":
|
1553 |
+
|
1554 |
+
upscaling_num_inference_steps = 5
|
1555 |
+
|
1556 |
+
|
1557 |
+
|
1558 |
+
show_message("Initial image creation has begun.");
|
1559 |
+
int_image = pipe(prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_inference_steps=steps, height=height, width=width, guidance_scale=scale, num_images_per_prompt=1, generator=generator, output_type="latent").images
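# output_type="latent" skips the VAE decode, so the base model hands raw
# latents to the refiner, which finishes denoising from denoising_start
# onward and decodes to pixels itself.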
|
1560 |
+
if upscaling == "Yes":
|
1561 |
+
show_message("Refining has begun.");
|
1562 |
+
image = refiner(prompt=prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, image=int_image, num_inference_steps=n_steps, denoising_start=high_noise_frac).images[0]
|
1563 |
+
show_message("Upscaling has begun.");
|
1564 |
+
|
1565 |
+
# Changed
|
1566 |
+
#
|
1567 |
+
# num_inference_steps=15
|
1568 |
+
#
|
1569 |
+
|
1570 |
+
upscaled = upscaler(prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=upscaling_num_inference_steps, guidance_scale=0).images[0]
|
1571 |
+
# torch.cuda.empty_cache()
|
1572 |
+
if device == "cuda":
|
1573 |
+
torch.cuda.empty_cache()
|
1574 |
+
|
1575 |
+
# Changed
|
1576 |
+
#
|
1577 |
+
# return (image, upscaled)
|
1578 |
+
#
|
1579 |
+
|
1580 |
+
image_to_return = upscaled
|
1581 |
+
|
1582 |
+
else:
|
1583 |
+
show_message("Refining has begun.");
|
1584 |
+
image = refiner(prompt=prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, image=int_image, num_inference_steps=n_steps, denoising_start=high_noise_frac).images[0]
|
1585 |
+
# torch.cuda.empty_cache()
|
1586 |
+
if device == "cuda":
|
1587 |
+
torch.cuda.empty_cache()
|
1588 |
+
|
1589 |
+
# Changed
|
1590 |
+
#
|
1591 |
+
# return (image, image)
|
1592 |
+
#
|
1593 |
+
image_to_return = image
|
1594 |
+
|
1595 |
+
|
1596 |
+
|
1597 |
+
elif (
|
1598 |
+
model_configuration_name_value == "photoreal_2023-11-12" or
|
1599 |
+
model_configuration_name_value == "photoreal_2023-09-01"
|
1600 |
+
):
|
1601 |
+
|
1602 |
+
Prompt = prompt
|
1603 |
+
upscale = refining # Intentional: the original online code uses the refining flag to decide upscaling; that (incorrect) behavior is reproduced here.
|
1604 |
+
|
1605 |
+
|
1606 |
+
|
1607 |
+
if upscale == "Yes":
|
1608 |
+
show_message("Initial image creation has begun.");
|
1609 |
+
int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
|
1610 |
+
show_message("Refining has begun.");
|
1611 |
+
image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
|
1612 |
+
else:
|
1613 |
+
show_message("Image creation has begun.");
|
1614 |
+
image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
|
1615 |
+
|
1616 |
+
|
1617 |
+
|
1618 |
+
image_to_return = image
|
1619 |
+
|
1620 |
+
else:
|
1621 |
+
|
1622 |
+
|
1623 |
+
|
1624 |
+
#
|
1625 |
+
#
|
1626 |
+
#
|
1627 |
+
# My Configurations
|
1628 |
+
#
|
1629 |
+
#
|
1630 |
+
#
|
1631 |
+
|
1632 |
+
|
1633 |
+
|
1634 |
+
if use_refiner == 1:
|
1635 |
+
|
1636 |
+
if use_upscaler == 1:
|
1637 |
+
|
1638 |
+
show_message("Will create initial image, then refine and then upscale");
|
1639 |
+
|
1640 |
+
if show_messages_in_command_prompt == 1:
|
1641 |
+
|
1642 |
+
print ("Initial image steps...");
|
1643 |
+
|
1644 |
+
initial_image = pipe(
|
1645 |
+
prompt = prompt_text,
|
1646 |
+
negative_prompt = negative_prompt_text,
|
1647 |
+
width = image_width,
|
1648 |
+
height = image_height,
|
1649 |
+
num_inference_steps = base_model_num_inference_steps,
|
1650 |
+
guidance_scale = guidance_scale,
|
1651 |
+
num_images_per_prompt = 1,
|
1652 |
+
generator = generator,
|
1653 |
+
#denoising_end = refining_denoise_start_for_default_config,
|
1654 |
+
output_type = which_output_type_before_refiner_and_upscaler
|
1655 |
+
).images
|
1656 |
+
|
1657 |
+
if show_messages_in_command_prompt == 1:
|
1658 |
+
|
1659 |
+
print ("Refiner steps...");
|
1660 |
+
|
1661 |
+
refined_image = refiner(
|
1662 |
+
prompt = prompt_text,
|
1663 |
+
negative_prompt = negative_prompt_text,
|
1664 |
+
image = initial_image,
|
1665 |
+
num_inference_steps = base_model_num_inference_steps,
|
1666 |
+
denoising_start = refining_denoise_start_for_default_config,
|
1667 |
+
output_type = "pil"
|
1668 |
+
).images
|
1669 |
+
|
1670 |
+
if show_messages_in_command_prompt == 1:
|
1671 |
+
|
1672 |
+
print ("Upscaler steps...");
|
1673 |
+
|
1674 |
+
upscaled_image = upscaler(
|
1675 |
+
prompt = prompt_text,
|
1676 |
+
negative_prompt = negative_prompt_text,
|
1677 |
+
image = refined_image,
|
1678 |
+
num_inference_steps = upscaling_num_inference_steps,
|
1679 |
+
guidance_scale = 0
|
1680 |
+
).images[0]
|
1681 |
+
|
1682 |
+
if device == "cuda":
|
1683 |
+
torch.cuda.empty_cache()
|
1684 |
+
|
1685 |
+
image_to_return = upscaled_image
|
1686 |
+
|
1687 |
+
else:
|
1688 |
+
|
1689 |
+
show_message("Will create initial image and then refine");
|
1690 |
+
|
1691 |
+
if show_messages_in_command_prompt == 1:
|
1692 |
+
|
1693 |
+
print ("Initial image steps...");
|
1694 |
+
|
1695 |
+
initial_image = pipe(
|
1696 |
+
prompt = prompt_text,
|
1697 |
+
negative_prompt = negative_prompt_text,
|
1698 |
+
width = image_width,
|
1699 |
+
height = image_height,
|
1700 |
+
|
1701 |
+
|
1702 |
+
|
1703 |
+
num_inference_steps = base_model_num_inference_steps,
|
1704 |
+
|
1705 |
+
|
1706 |
+
|
1707 |
+
#testing
|
1708 |
+
# num_inference_steps = 100,
|
1709 |
+
# denoising_end = 0.75,
|
1710 |
+
|
1711 |
+
|
1712 |
+
|
1713 |
+
guidance_scale = guidance_scale,
|
1714 |
+
num_images_per_prompt = 1,
|
1715 |
+
generator = generator,
|
1716 |
+
output_type = which_output_type_before_refiner_and_upscaler
|
1717 |
+
).images
|
1718 |
+
|
1719 |
+
if show_messages_in_command_prompt == 1:
|
1720 |
+
|
1721 |
+
print ("Refiner steps...");
|
1722 |
+
|
1723 |
+
refined_image = refiner(
|
1724 |
+
prompt = prompt_text,
|
1725 |
+
negative_prompt = negative_prompt_text,
|
1726 |
+
image = initial_image,
|
1727 |
+
|
1728 |
+
|
1729 |
+
|
1730 |
+
#testing
|
1731 |
+
# num_inference_steps = base_model_num_inference_steps,
|
1732 |
+
# denoising_start = refining_denoise_start_for_default_config
|
1733 |
+
|
1734 |
+
|
1735 |
+
|
1736 |
+
num_inference_steps = 60,
|
1737 |
+
denoising_start = 0.25
|
1738 |
+
|
1739 |
+
|
1740 |
+
|
1741 |
+
).images[0]
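# With these hard-coded test values the refiner uses a 60-step schedule
# but skips the first 25% of it (denoising_start = 0.25), so roughly 45
# denoising steps actually run on the base model's output.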
|
1742 |
+
|
1743 |
+
if device == "cuda":
|
1744 |
+
torch.cuda.empty_cache()
|
1745 |
+
|
1746 |
+
image_to_return = refined_image
|
1747 |
+
|
1748 |
+
else:
|
1749 |
+
|
1750 |
+
if use_upscaler == 1:
|
1751 |
+
|
1752 |
+
show_message("Will create initial image and then upscale");
|
1753 |
+
|
1754 |
+
if show_messages_in_command_prompt == 1:
|
1755 |
+
|
1756 |
+
print ("Initial image steps...");
|
1757 |
+
|
1758 |
+
initial_image = pipe(
|
1759 |
+
prompt = prompt_text,
|
1760 |
+
negative_prompt = negative_prompt_text,
|
1761 |
+
width = image_width,
|
1762 |
+
height = image_height,
|
1763 |
+
num_inference_steps = base_model_num_inference_steps,
|
1764 |
+
guidance_scale = guidance_scale,
|
1765 |
+
num_images_per_prompt = 1,
|
1766 |
+
generator = generator,
|
1767 |
+
# output_type = which_output_type_before_refiner_and_upscaler
|
1768 |
+
output_type = "pil"
|
1769 |
+
).images
|
1770 |
+
|
1771 |
+
if show_messages_in_command_prompt == 1:
|
1772 |
+
|
1773 |
+
print ("Upscaler steps...");
|
1774 |
+
|
1775 |
+
upscaled_image = upscaler(
|
1776 |
+
prompt = prompt_text,
|
1777 |
+
negative_prompt = negative_prompt_text,
|
1778 |
+
image = initial_image,
|
1779 |
+
num_inference_steps = upscaling_num_inference_steps,
|
1780 |
+
guidance_scale = 0
|
1781 |
+
).images[0]
|
1782 |
+
|
1783 |
+
if device == "cuda":
|
1784 |
+
torch.cuda.empty_cache()
|
1785 |
+
|
1786 |
+
image_to_return = upscaled_image
|
1787 |
+
|
1788 |
+
else:
|
1789 |
+
|
1790 |
+
show_message("Will create image (no refining or upscaling)");
|
1791 |
+
|
1792 |
+
if show_messages_in_command_prompt == 1:
|
1793 |
+
|
1794 |
+
print ("Image steps...");
|
1795 |
+
|
1796 |
+
image = pipe(
|
1797 |
+
prompt = prompt_text,
|
1798 |
+
negative_prompt = negative_prompt_text,
|
1799 |
+
width = image_width,
|
1800 |
+
height = image_height,
|
1801 |
+
num_inference_steps = base_model_num_inference_steps,
|
1802 |
+
guidance_scale = guidance_scale,
|
1803 |
+
num_images_per_prompt = 1,
|
1804 |
+
generator = generator
|
1805 |
+
).images[0]
|
1806 |
+
|
1807 |
+
if device == "cuda":
|
1808 |
+
torch.cuda.empty_cache()
|
1809 |
+
|
1810 |
+
image_to_return = image
|
1811 |
+
|
1812 |
+
|
1813 |
+
|
1814 |
+
#
|
1815 |
+
#
|
1816 |
+
#
|
1817 |
+
# Prompt Information
|
1818 |
+
#
|
1819 |
+
#
|
1820 |
+
#
|
1821 |
+
|
1822 |
+
nice_model_name = base_model_names_object[base_model_name_value] + " (" + model_configuration_links_object[model_configuration_name_value] + ")"
|
1823 |
+
|
1824 |
+
info_about_prompt_lines_array = [
|
1825 |
+
"Prompt:\n" + prompt_text
|
1826 |
+
]
|
1827 |
+
|
1828 |
+
if len(negative_prompt_text) > 0:
|
1829 |
+
|
1830 |
+
info_about_prompt_lines_array.extend([
|
1831 |
+
"Negative Prompt:\n" + negative_prompt_text
|
1832 |
+
])
|
1833 |
+
|
1834 |
+
dimensions_title = "Dimensions"
|
1835 |
+
|
1836 |
+
if use_upscaler == 1:
|
1837 |
+
|
1838 |
+
dimensions_title = "Original Dimensions"
|
1839 |
+
|
1840 |
+
info_about_prompt_lines_array.extend([
|
1841 |
+
dimensions_title + ": " + str(image_width) + "x" + str(image_height) + " px"
|
1842 |
+
])
|
1843 |
+
|
1844 |
+
if use_upscaler == 1:
|
1845 |
+
|
1846 |
+
upscaled_image_width = int(image_width * 2)
|
1847 |
+
upscaled_image_height = int(image_height * 2)
|
1848 |
+
|
1849 |
+
info_about_prompt_lines_array.extend([
|
1850 |
+
"Upscaled Dimensions: " + str(upscaled_image_width) + "x" + str(upscaled_image_height) + " px"
|
1851 |
+
])
|
1852 |
+
|
1853 |
+
info_about_prompt_lines_array.extend([
|
1854 |
+
"Seed: " + str(actual_seed)
|
1855 |
+
])
|
1856 |
+
|
1857 |
+
if int(guidance_scale) > 0:
|
1858 |
+
|
1859 |
+
info_about_prompt_lines_array.extend([
|
1860 |
+
"Guidance Scale: " + str(guidance_scale)
|
1861 |
+
])
|
1862 |
+
|
1863 |
+
info_about_prompt_lines_array.extend([
|
1864 |
+
"Base Model Steps: " + str(base_model_num_inference_steps),
|
1865 |
+
"Model: " + nice_model_name
|
1866 |
+
])
|
1867 |
+
|
1868 |
+
if use_refiner == 1:
|
1869 |
+
|
1870 |
+
# Default Configuration
|
1871 |
+
|
1872 |
+
|
1873 |
+
|
1874 |
+
|
1875 |
+
|
1876 |
+
|
1877 |
+
|
1878 |
+
|
1879 |
+
|
1880 |
+
# not done yet
|
1881 |
+
|
1882 |
+
|
1883 |
+
|
1884 |
+
|
1885 |
+
|
1886 |
+
|
1887 |
+
|
1888 |
+
|
1889 |
+
|
1890 |
+
|
1891 |
+
# Online Configuration
|
1892 |
+
|
1893 |
+
if refining_denoise_start_for_online_config_field_value != 0:
|
1894 |
+
|
1895 |
+
nice_refiner_denoise_start = str(round(refining_denoise_start_for_online_config_field_value * 100)) + "%"
|
1896 |
+
|
1897 |
+
info_about_prompt_lines_array.extend([
|
1898 |
+
"Refiner?: Yes",
|
1899 |
+
"Refiner denoise start %: " + nice_refiner_denoise_start
|
1900 |
+
])
|
1901 |
+
|
1902 |
+
if int(refining_number_of_iterations_for_online_config_field_value) != 0:
|
1903 |
+
|
1904 |
+
nice_refiner_number_of_iterations = str(refining_number_of_iterations_for_online_config_field_value)
|
1905 |
+
|
1906 |
+
info_about_prompt_lines_array.extend([
|
1907 |
+
"Refiner number of iterations: " + nice_refiner_number_of_iterations
|
1908 |
+
])
|
1909 |
+
|
1910 |
+
if use_upscaler == 1:
|
1911 |
+
|
1912 |
+
info_about_prompt_lines_array.extend([
|
1913 |
+
"Upscaled (2x)? Yes",
|
1914 |
+
"Refiner Steps: " + str(upscaling_num_inference_steps)
|
1915 |
+
])
|
1916 |
+
|
1917 |
+
if log_generation_times == 1:
|
1918 |
+
|
1919 |
+
end_time = time.time()
|
1920 |
+
|
1921 |
+
generation_time_in_seconds = (end_time - start_time)
|
1922 |
+
|
1923 |
+
(
|
1924 |
+
generation_partial_hours,
|
1925 |
+
generation_partial_minutes,
|
1926 |
+
generation_partial_seconds
|
1927 |
+
) = convert_seconds(generation_time_in_seconds)
|
1928 |
+
|
1929 |
+
if generation_partial_hours > 0:
|
1930 |
+
|
1931 |
+
hours_text = "hr"
|
1932 |
+
|
1933 |
+
if generation_partial_hours > 1:
|
1934 |
+
|
1935 |
+
hours_text = "hrs"
|
1936 |
+
|
1937 |
+
nice_generation_time = str(int(generation_partial_hours)) + " " + hours_text + ". " + str(int(generation_partial_minutes)) + " min. " + str(round(generation_partial_seconds, 1)) + " sec."
|
1938 |
+
|
1939 |
+
elif generation_partial_minutes > 0:
|
1940 |
+
|
1941 |
+
nice_generation_time = str(int(generation_partial_minutes)) + " min. " + str(round(generation_partial_seconds, 1)) + " sec."
|
1942 |
+
|
1943 |
+
else:
|
1944 |
+
|
1945 |
+
nice_generation_time = str(round(generation_time_in_seconds, 2)) + " sec."
|
1946 |
+
|
1947 |
+
info_about_prompt_lines_array.extend([
|
1948 |
+
"Time: " + nice_generation_time
|
1949 |
+
])
|
1950 |
+
|
1951 |
+
|
1952 |
+
|
1953 |
+
if len(prompt_text_not_used_substring) > 0:
|
1954 |
+
|
1955 |
+
info_about_prompt_lines_array.extend([
|
1956 |
+
"End of Prompt Truncated: " + prompt_text_not_used_substring
|
1957 |
+
])
|
1958 |
+
|
1959 |
+
if len(negative_prompt_text_not_used_substring) > 0:
|
1960 |
+
|
1961 |
+
info_about_prompt_lines_array.extend([
|
1962 |
+
"End of Negative Prompt Truncated: " + negative_prompt_text_not_used_substring
|
1963 |
+
])
|
1964 |
+
|
1965 |
+
|
1966 |
+
|
1967 |
+
if display_xformers_usage_in_prompt_info > 0:
|
1968 |
+
|
1969 |
+
nice_xformers_usage = "No"
|
1970 |
+
|
1971 |
+
if use_xformers == 1:
|
1972 |
+
|
1973 |
+
nice_xformers_usage = "Yes"
|
1974 |
+
|
1975 |
+
if include_transformers_version_in_prompt_info == 1:
|
1976 |
+
|
1977 |
+
import transformers
|
1978 |
+
|
1979 |
+
nice_xformers_usage += " (version " + str(transformers.__version__) + ")"
|
1980 |
+
|
1981 |
+
info_about_prompt_lines_array.extend([
|
1982 |
+
"xFormers Used?: " + nice_xformers_usage
|
1983 |
+
])
|
1984 |
+
|
1985 |
+
if display_default_attn_processor_usage_in_prompt_info > 0:
|
1986 |
+
|
1987 |
+
nice_default_attn_processor_usage = "No"
|
1988 |
+
|
1989 |
+
if use_default_attn_processor == 1:
|
1990 |
+
|
1991 |
+
nice_default_attn_processor_usage = "Yes"
|
1992 |
+
|
1993 |
+
info_about_prompt_lines_array.extend([
|
1994 |
+
"Default AttnProcessor Used?: " + nice_default_attn_processor_usage
|
1995 |
+
])
|
1996 |
+
|
1997 |
+
|
1998 |
+
|
1999 |
+
info_about_prompt = '\n'.join(info_about_prompt_lines_array)
|
2000 |
+
|
2001 |
+
|
2002 |
+
|
2003 |
+
if auto_save_imagery == 1:
|
2004 |
+
|
2005 |
+
|
2006 |
+
|
2007 |
+
if not os.path.exists(saved_images_dir):
|
2008 |
+
os.makedirs(saved_images_dir)
|
2009 |
+
|
2010 |
+
yy_mm_dd_date_stamp = datetime.today().strftime('%Y-%m-%d')
|
2011 |
+
|
2012 |
+
saved_images_date_dir = saved_images_dir + "/" + yy_mm_dd_date_stamp + "/"
|
2013 |
+
|
2014 |
+
if not os.path.exists(saved_images_date_dir):
|
2015 |
+
os.makedirs(saved_images_date_dir)
|
2016 |
+
|
2017 |
+
image_count = 1
|
2018 |
+
|
2019 |
+
file_name_without_extension = yy_mm_dd_date_stamp + "-" + ('%04d' % image_count)
|
2020 |
+
|
2021 |
+
saved_image_path_and_file = saved_images_date_dir + file_name_without_extension + ".png"
|
2022 |
+
|
2023 |
+
while os.path.exists(saved_image_path_and_file):
|
2024 |
+
|
2025 |
+
file_name_without_extension = yy_mm_dd_date_stamp + "-" + ('%04d' % image_count)
|
2026 |
+
|
2027 |
+
saved_image_path_and_file = saved_images_date_dir + file_name_without_extension + ".png"
|
2028 |
+
|
2029 |
+
image_count += 1
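# The loop above probes count-suffixed names until it finds a free one,
# so files land at paths like (illustrative):
#
#   saved_images_dir/2023-11-25/2023-11-25-0001.png
#
# with a matching .txt file holding the prompt information.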
|
2030 |
+
|
2031 |
+
image_to_return.save(saved_image_path_and_file)
|
2032 |
+
|
2033 |
+
saved_text_file_path_and_file = saved_images_date_dir + file_name_without_extension + ".txt"
|
2034 |
+
|
2035 |
+
prompt_info_file_handle = open(saved_text_file_path_and_file, "w")
|
2036 |
+
prompt_info_file_handle.write(info_about_prompt)
|
2037 |
+
prompt_info_file_handle.close()
|
2038 |
+
|
2039 |
+
|
2040 |
+
|
2041 |
+
if use_image_gallery == 1:
|
2042 |
+
|
2043 |
+
image_gallery_array.insert(0, image_to_return)
|
2044 |
+
prompt_information_array.insert(0, info_about_prompt)
|
2045 |
+
|
2046 |
+
output_image_field_update = gr.Gallery(
|
2047 |
+
value = image_gallery_array,
|
2048 |
+
selected_index = 0
|
2049 |
+
)
|
2050 |
+
|
2051 |
+
else:
|
2052 |
+
|
2053 |
+
output_image_field_update = gr.Image(
|
2054 |
+
value = image_to_return
|
2055 |
+
)
|
2056 |
+
|
2057 |
+
|
2058 |
+
|
2059 |
+
if show_messages_in_command_prompt == 1:
|
2060 |
+
|
2061 |
+
print ("Image created.")
|
2062 |
+
|
2063 |
+
|
2064 |
+
|
2065 |
+
return {
|
2066 |
+
output_image_field: output_image_field_update,
|
2067 |
+
output_text_field: info_about_prompt,
|
2068 |
+
prompt_truncated_field: prompt_truncated_field_update
|
2069 |
+
}
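# Returning a dict keyed by output components is Gradio's way of updating
# a subset of outputs; each value is a component instance constructed only
# to carry the updated properties (value, visibility).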
|
2070 |
+
|
2071 |
+
|
2072 |
+
|
2073 |
+
|
2074 |
+
|
2075 |
+
|
2076 |
+
|
2077 |
+
#####################
|
2078 |
+
#
|
2079 |
+
# Cancel Image Processing
|
2080 |
+
#
|
2081 |
+
# When running on Windows, this is an attempt at closing the command
|
2082 |
+
# prompt from the web interface. It's really not worth having this. You
|
2083 |
+
# can just close the prompt. I would like a nice way to cancel image
|
2084 |
+
# creation, but couldn't figure that out.
|
2085 |
+
#
|
2086 |
+
#####################
|
2087 |
+
|
2088 |
+
def cancel_image_processing():
|
2089 |
+
|
2090 |
+
# I simply don't know how to stop the image generation without closing
|
2091 |
+
# the command prompt. Doing that requires the code below twice for some
|
2092 |
+
# reason.
|
2093 |
+
#
|
2094 |
+
# Method:
|
2095 |
+
# https://stackoverflow.com/questions/67146623/how-to-close-the-command-prompt-from-python-script-directly
|
2096 |
+
|
2097 |
+
gr.Warning("The command prompt window has been closed. Any image generation in progress has been stopped. To generate any other images, you will need to launch the command prompt again.")
|
2098 |
+
|
2099 |
+
os.system('title kill_window')
|
2100 |
+
|
2101 |
+
os.system('taskkill /f /fi "WINDOWTITLE eq kill_window"')
|
2102 |
+
os.system('taskkill /f /fi "WINDOWTITLE eq kill_window"')
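# The two os.system() calls above first retitle the console window
# ("title kill_window") and then kill any window whose title matches,
# taking the whole Python process, and this script, down with it. This
# only works when the script was launched from a Windows command prompt.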
|
2103 |
+
|
2104 |
+
|
2105 |
+
|
2106 |
+
|
2107 |
+
|
2108 |
+
|
2109 |
+
|
2110 |
+
#####################
|
2111 |
+
#
|
2112 |
+
# Base Model Field Update Function
|
2113 |
+
#
|
2114 |
+
# When the base model dropdown changes, this function is run.
|
2115 |
+
#
|
2116 |
+
#####################
|
2117 |
+
|
2118 |
+
def base_model_field_update_function(
|
2119 |
+
base_model_field_index
|
2120 |
+
):
|
2121 |
+
|
2122 |
+
base_model_field_value = base_model_array[base_model_field_index]
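# The dropdowns are declared with type = "index", so event handlers
# receive a position rather than a label; base_model_array maps that
# position back to the internal base model key.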
|
2123 |
+
|
2124 |
+
if base_model_field_value in base_model_array:
|
2125 |
+
|
2126 |
+
if base_model_field_value in base_model_object_of_model_configuration_arrays:
|
2127 |
+
|
2128 |
+
model_configuration_choices_array_update = []
|
2129 |
+
|
2130 |
+
for this_model_configuration in base_model_object_of_model_configuration_arrays[base_model_field_value]:
|
2131 |
+
|
2132 |
+
model_configuration_choices_array_update.append(
|
2133 |
+
model_configuration_names_object[this_model_configuration]
|
2134 |
+
)
|
2135 |
+
|
2136 |
+
if base_model_field_value in base_model_model_configuration_defaults_object:
|
2137 |
+
|
2138 |
+
model_configuration_field_selected_value = stored_model_configuration_names_object[base_model_field_value]
|
2139 |
+
|
2140 |
+
model_configuration_field_update = gr.Dropdown(
|
2141 |
+
choices = model_configuration_choices_array_update,
|
2142 |
+
value = model_configuration_field_selected_value
|
2143 |
+
)
|
2144 |
+
|
2145 |
+
negative_prompt_field_visibility = True
|
2146 |
+
negative_prompt_for_sdxl_turbo_field_visibility = False
|
2147 |
+
base_model_num_inference_steps_field_visibility = True
|
2148 |
+
base_model_num_inference_steps_field_for_sdxl_turbo_visibility = False
|
2149 |
+
guidance_scale_field_visibility = True
|
2150 |
+
guidance_scale_for_sdxl_turbo_field_visibility = False
|
2151 |
+
|
2152 |
+
if base_model_field_value == "sdxl_turbo":
|
2153 |
+
|
2154 |
+
negative_prompt_field_visibility = False
|
2155 |
+
negative_prompt_for_sdxl_turbo_field_visibility = True
|
2156 |
+
base_model_num_inference_steps_field_visibility = False
|
2157 |
+
base_model_num_inference_steps_field_for_sdxl_turbo_visibility = True
|
2158 |
+
guidance_scale_field_visibility = False
|
2159 |
+
guidance_scale_for_sdxl_turbo_field_visibility = True
|
2160 |
+
|
2161 |
+
negative_prompt_field_update = gr.Textbox(
|
2162 |
+
visible = negative_prompt_field_visibility
|
2163 |
+
)
|
2164 |
+
|
2165 |
+
negative_prompt_for_sdxl_turbo_field_update = gr.HTML(
|
2166 |
+
visible = negative_prompt_for_sdxl_turbo_field_visibility
|
2167 |
+
)
|
2168 |
+
|
2169 |
+
base_model_num_inference_steps_field_update = gr.Slider(
|
2170 |
+
visible = base_model_num_inference_steps_field_visibility
|
2171 |
+
)
|
2172 |
+
|
2173 |
+
base_model_num_inference_steps_field_for_sdxl_turbo_update = gr.Slider(
|
2174 |
+
visible = base_model_num_inference_steps_field_for_sdxl_turbo_visibility
|
2175 |
+
)
|
2176 |
+
|
2177 |
+
guidance_scale_field_update = gr.Slider(
|
2178 |
+
visible = guidance_scale_field_visibility
|
2179 |
+
)
|
2180 |
+
|
2181 |
+
guidance_scale_for_sdxl_turbo_field_update = gr.HTML(
|
2182 |
+
visible = guidance_scale_for_sdxl_turbo_field_visibility
|
2183 |
+
)
|
2184 |
+
|
2185 |
+
return {
|
2186 |
+
model_configuration_field: model_configuration_field_update,
|
2187 |
+
negative_prompt_field: negative_prompt_field_update,
|
2188 |
+
negative_prompt_for_sdxl_turbo_field: negative_prompt_for_sdxl_turbo_field_update,
|
2189 |
+
base_model_num_inference_steps_field: base_model_num_inference_steps_field_update,
|
2190 |
+
base_model_num_inference_steps_field_for_sdxl_turbo_field: base_model_num_inference_steps_field_for_sdxl_turbo_update,
|
2191 |
+
guidance_scale_field: guidance_scale_field_update,
|
2192 |
+
guidance_scale_for_sdxl_turbo_field: guidance_scale_for_sdxl_turbo_field_update
|
2193 |
+
|
2194 |
+
}
|
2195 |
+
|
2196 |
+
error_function("Error")
|
2197 |
+
|
2198 |
+
|
2199 |
+
|
2200 |
+
|
2201 |
+
|
2202 |
+
|
2203 |
+
|
2204 |
+
#####################
|
2205 |
+
#
|
2206 |
+
# Model Configuration Field Update Function
|
2207 |
+
#
|
2208 |
+
# When the model configuration dropdown changes, this function is run.
|
2209 |
+
#
|
2210 |
+
#####################
|
2211 |
+
|
2212 |
+
def model_configuration_field_update_function(
|
2213 |
+
base_model_field_index,
|
2214 |
+
model_configuration_field_index
|
2215 |
+
):
|
2216 |
+
|
2217 |
+
base_model_field_value = base_model_array[base_model_field_index]
|
2218 |
+
|
2219 |
+
if model_configuration_field_index < len(base_model_object_of_model_configuration_arrays[base_model_field_value]):
|
2220 |
+
|
2221 |
+
model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_field_value][model_configuration_field_index]
|
2222 |
+
|
2223 |
+
|
2224 |
+
|
2225 |
+
stored_model_configuration_names_object[base_model_field_value] = model_configuration_names_object[model_configuration_name_value]
|
2226 |
+
|
2227 |
+
|
2228 |
+
|
2229 |
+
is_config_state = 0
|
2230 |
+
|
2231 |
+
if model_configuration_name_value in default_model_configuration_object:
|
2232 |
+
|
2233 |
+
is_config_state = 1
|
2234 |
+
|
2235 |
+
negative_prompt_field_visibility = True
|
2236 |
+
negative_prompt_for_sdxl_turbo_field_visibility = False
|
2237 |
+
base_model_num_inference_steps_field_visibility = True
|
2238 |
+
base_model_num_inference_steps_field_for_sdxl_turbo_visibility = False
|
2239 |
+
guidance_scale_field_visibility = True
|
2240 |
+
guidance_scale_for_sdxl_turbo_field_visibility = False
|
2241 |
+
|
2242 |
+
if base_model_field_value == "sdxl_turbo":
|
2243 |
+
|
2244 |
+
negative_prompt_field_visibility = False
|
2245 |
+
negative_prompt_for_sdxl_turbo_field_visibility = True
|
2246 |
+
base_model_num_inference_steps_field_visibility = False
|
2247 |
+
base_model_num_inference_steps_field_for_sdxl_turbo_visibility = True
|
2248 |
+
guidance_scale_field_visibility = False
|
2249 |
+
guidance_scale_for_sdxl_turbo_field_visibility = True
|
2250 |
+
|
2251 |
+
negative_prompt_field_update = gr.Textbox(
|
2252 |
+
visible = negative_prompt_field_visibility
|
2253 |
+
)
|
2254 |
+
|
2255 |
+
negative_prompt_for_sdxl_turbo_field_update = gr.HTML(
|
2256 |
+
visible = negative_prompt_for_sdxl_turbo_field_visibility
|
2257 |
+
)
|
2258 |
+
|
2259 |
+
base_model_num_inference_steps_field_update = gr.Slider(
|
2260 |
+
visible = base_model_num_inference_steps_field_visibility
|
2261 |
+
)
|
2262 |
+
|
2263 |
+
base_model_num_inference_steps_field_for_sdxl_turbo_update = gr.Slider(
|
2264 |
+
visible = base_model_num_inference_steps_field_for_sdxl_turbo_visibility
|
2265 |
+
)
|
2266 |
+
|
2267 |
+
guidance_scale_field_update = gr.Slider(
|
2268 |
+
visible = guidance_scale_field_visibility
|
2269 |
+
)
|
2270 |
+
|
2271 |
+
guidance_scale_for_sdxl_turbo_field_update = gr.HTML(
|
2272 |
+
visible = guidance_scale_for_sdxl_turbo_field_visibility
|
2273 |
+
)
|
2274 |
+
|
2275 |
+
|
2276 |
+
|
2277 |
+
refiner_default_config_accordion_visibility = False
|
2278 |
+
refiner_online_config_accordion_visibility = True
|
2279 |
+
|
2280 |
+
if is_config_state == 1:
|
2281 |
+
|
2282 |
+
refiner_default_config_accordion_visibility = True
|
2283 |
+
refiner_online_config_accordion_visibility = False
|
2284 |
+
|
2285 |
+
|
2286 |
+
|
2287 |
+
refining_selection_automatically_selected_message_field_visibility = False
|
2288 |
+
|
2289 |
+
refining_selection_online_config_normal_field_visibility = True
|
2290 |
+
refining_selection_online_config_automatically_selected_field_visibility = False
|
2291 |
+
|
2292 |
+
if model_configuration_name_value in model_configuration_force_refiner_object:
|
2293 |
+
|
2294 |
+
refining_selection_automatically_selected_message_field_visibility = True
|
2295 |
+
|
2296 |
+
refining_selection_online_config_normal_field_visibility = False
|
2297 |
+
refining_selection_online_config_automatically_selected_field_visibility = True
|
2298 |
+
|
2299 |
+
|
2300 |
+
|
2301 |
+
refiner_default_config_accordion_update = gr.Accordion(
|
2302 |
+
visible = refiner_default_config_accordion_visibility
|
2303 |
+
)
|
2304 |
+
|
2305 |
+
refiner_online_config_accordion_update = gr.Accordion(
|
2306 |
+
visible = refiner_online_config_accordion_visibility
|
2307 |
+
)
|
2308 |
+
|
2309 |
+
refining_selection_automatically_selected_message_field_update = gr.Markdown(
|
2310 |
+
visible = refining_selection_automatically_selected_message_field_visibility
|
2311 |
+
)
|
2312 |
+
|
2313 |
+
refining_selection_online_config_normal_field_update = gr.Radio(
|
2314 |
+
visible = refining_selection_online_config_normal_field_visibility
|
2315 |
+
)
|
2316 |
+
|
2317 |
+
refining_selection_online_config_automatically_selected_field_update = gr.Radio(
|
2318 |
+
visible = refining_selection_online_config_automatically_selected_field_visibility
|
2319 |
+
)
|
2320 |
+
|
2321 |
+
|
2322 |
+
|
2323 |
+
return {
|
2324 |
+
negative_prompt_field: negative_prompt_field_update,
|
2325 |
+
negative_prompt_for_sdxl_turbo_field: negative_prompt_for_sdxl_turbo_field_update,
|
2326 |
+
base_model_num_inference_steps_field: base_model_num_inference_steps_field_update,
|
2327 |
+
base_model_num_inference_steps_field_for_sdxl_turbo_field: base_model_num_inference_steps_field_for_sdxl_turbo_update,
|
2328 |
+
guidance_scale_field: guidance_scale_field_update,
|
2329 |
+
guidance_scale_for_sdxl_turbo_field: guidance_scale_for_sdxl_turbo_field_update,
|
2330 |
+
|
2331 |
+
refiner_default_config_accordion: refiner_default_config_accordion_update,
|
2332 |
+
refiner_online_config_accordion: refiner_online_config_accordion_update,
|
2333 |
+
refining_selection_automatically_selected_message_field: refining_selection_automatically_selected_message_field_update,
|
2334 |
+
refining_selection_online_config_normal_field: refining_selection_online_config_normal_field_update,
|
2335 |
+
refining_selection_online_config_automatically_selected_field: refining_selection_online_config_automatically_selected_field_update
|
2336 |
+
|
2337 |
+
}
|
2338 |
+
|
2339 |
+
error_function("Error")
|
2340 |
+
|
2341 |
+
|
2342 |
+
|
2343 |
+
|
2344 |
+
|
2345 |
+
|
2346 |
+
|
2347 |
+
#####################
|
2348 |
+
#
|
2349 |
+
# Update Refiner and Upscaler Status Function
|
2350 |
+
#
|
2351 |
+
# When the refiner or upscaler is turned on or off, a text message is
|
2352 |
+
# printed on the page. That needs to be updated.
|
2353 |
+
#
|
2354 |
+
#####################
|
2355 |
+
|
2356 |
+
def update_refiner_and_upscaler_status_function(
|
2357 |
+
base_model_field_index,
|
2358 |
+
model_configuration_field_index,
|
2359 |
+
refining_selection_default_config_field_value,
|
2360 |
+
refining_selection_online_config_normal_field_value,
|
2361 |
+
refining_selection_online_config_automatically_selected_field_value,
|
2362 |
+
upscaling_selection_field_value
|
2363 |
+
):
|
2364 |
+
|
2365 |
+
base_model_field_value = base_model_array[base_model_field_index]
|
2366 |
+
|
2367 |
+
if model_configuration_field_index < len(base_model_object_of_model_configuration_arrays[base_model_field_value]):
|
2368 |
+
|
2369 |
+
model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_field_value][model_configuration_field_index]
|
2370 |
+
|
2371 |
+
is_config_state = 0
|
2372 |
+
|
2373 |
+
if model_configuration_name_value in default_model_configuration_object:
|
2374 |
+
|
2375 |
+
is_config_state = 1
|
2376 |
+
|
2377 |
+
refining_selection_default_config_field_value = numerical_bool(refining_selection_default_config_field_value)
|
2378 |
+
refining_selection_online_config_normal_field_value = numerical_bool(refining_selection_online_config_normal_field_value)
|
2379 |
+
refining_selection_online_config_automatically_selected_field_value = numerical_bool(refining_selection_online_config_automatically_selected_field_value)
|
2380 |
+
upscaling_selection_field_value = numerical_bool(upscaling_selection_field_value)
|
2381 |
+
|
2382 |
+
refiner_and_upscaler_status_text = refiner_and_upscaler_status_opening_html
|
2383 |
+
|
2384 |
+
if (
|
2385 |
+
(
|
2386 |
+
(is_config_state == 1) and
|
2387 |
+
refining_selection_online_config_normal_field_value
|
2388 |
+
) or (
|
2389 |
+
(is_config_state != 1) and
|
2390 |
+
refining_selection_online_config_automatically_selected_field_value
|
2391 |
+
)
|
2392 |
+
):
|
2393 |
+
|
2394 |
+
refiner_and_upscaler_status_text += refiner_on_text
|
2395 |
+
|
2396 |
+
else:
|
2397 |
+
|
2398 |
+
refiner_and_upscaler_status_text += refiner_off_text
|
2399 |
+
|
2400 |
+
if upscaling_selection_field_value == 1:
|
2401 |
+
|
2402 |
+
refiner_and_upscaler_status_text += upscaler_on_text
|
2403 |
+
|
2404 |
+
else:
|
2405 |
+
|
2406 |
+
refiner_and_upscaler_status_text += upscaler_off_text
|
2407 |
+
|
2408 |
+
refiner_and_upscaler_status_text += refiner_and_upscaler_status_closing_html
|
2409 |
+
|
2410 |
+
refiner_and_upscaler_text_field_update = gr.HTML(
|
2411 |
+
value = refiner_and_upscaler_status_text
|
2412 |
+
)
|
2413 |
+
|
2414 |
+
return {
|
2415 |
+
refiner_and_upscaler_text_field: refiner_and_upscaler_text_field_update
|
2416 |
+
}
|
2417 |
+
|
2418 |
+
error_function("Error")
|
2419 |
+
|
2420 |
+
|
2421 |
+
|
2422 |
+
|
2423 |
+
|
2424 |
+
|
2425 |
+
|
2426 |
+
###############################################################################
|
2427 |
+
###############################################################################
|
2428 |
+
#
|
2429 |
+
#
|
2430 |
+
#
|
2431 |
+
#
|
2432 |
+
#
|
2433 |
+
#
|
2434 |
+
# Create Web Display
|
2435 |
+
#
|
2436 |
+
#
|
2437 |
+
#
|
2438 |
+
#
|
2439 |
+
#
|
2440 |
+
#
|
2441 |
+
###############################################################################
|
2442 |
+
###############################################################################
|
2443 |
+
|
2444 |
+
|
2445 |
+
|
2446 |
+
# Hide border when yield is used:
|
2447 |
+
# https://github.com/gradio-app/gradio/issues/5479
|
2448 |
+
# .generating {border: none !important;}
|
2449 |
+
|
2450 |
+
with gr.Blocks(
|
2451 |
+
title = "AI Image Creation",
|
2452 |
+
css = "footer{display:none !important}",
|
2453 |
+
theme = gr.themes.Default(
|
2454 |
+
spacing_size = gr.themes.sizes.spacing_md,
|
2455 |
+
# spacing_size = gr.themes.sizes.spacing_sm,
|
2456 |
+
radius_size = gr.themes.sizes.radius_none
|
2457 |
+
)
|
2458 |
+
) as sd_interface:
|
2459 |
+
|
2460 |
+
gr.Markdown(opening_html)
|
2461 |
+
|
2462 |
+
with gr.Row():
|
2463 |
+
|
2464 |
+
with gr.Column(scale = 1):
|
2465 |
+
|
2466 |
+
generate_image_btn = gr.Button(
|
2467 |
+
value = "Generate",
|
2468 |
+
variant = "primary"
|
2469 |
+
)
|
2470 |
+
|
2471 |
+
with gr.Group():
|
2472 |
+
|
2473 |
+
with gr.Row():
|
2474 |
+
|
2475 |
+
prompt_field = gr.Textbox(
|
2476 |
+
label = "Prompt (77 token limit):",
|
2477 |
+
value = default_prompt
|
2478 |
+
)
|
2479 |
+
|
2480 |
+
with gr.Row():
|
2481 |
+
|
2482 |
+
negative_prompt_field = gr.Textbox(
|
2483 |
+
label = "Negative Prompt (77 token limit):",
|
2484 |
+
value = default_negative_prompt,
|
2485 |
+
visible = default_negative_prompt_field_visibility
|
2486 |
+
)
|
2487 |
+
|
2488 |
+
with gr.Row():
|
2489 |
+
|
2490 |
+
negative_prompt_for_sdxl_turbo_field = gr.HTML(
|
2491 |
+
value = "<div style=\"padding: 10px; text-align: center; background: #fff;\">Negative prompt is not used for SDXL Turbo.</div>",
|
2492 |
+
visible = default_negative_prompt_for_sdxl_turbo_field_visibility
|
2493 |
+
)
|
2494 |
+
|
2495 |
+
with gr.Group(
|
2496 |
+
visible = refiner_group_visible
|
2497 |
+
):
|
2498 |
+
|
2499 |
+
with gr.Accordion(
|
2500 |
+
label = "Refiner (Default Config)",
|
2501 |
+
elem_id = "refiner_default_config_accordion_id",
|
2502 |
+
open = refiner_default_config_accordion_open,
|
2503 |
+
visible = refiner_default_config_accordion_visible
|
2504 |
+
) as refiner_default_config_accordion:
|
2505 |
+
|
2506 |
+
#
|
2507 |
+
#
|
2508 |
+
#
|
2509 |
+
# Refiner (Default Config)
|
2510 |
+
#
|
2511 |
+
#
|
2512 |
+
#
|
2513 |
+
|
2514 |
+
with gr.Row():
|
2515 |
+
|
2516 |
+
gr.Markdown("This can be used if the image has too much noise.")
|
2517 |
+
|
2518 |
+
with gr.Row():
|
2519 |
+
|
2520 |
+
refining_selection_default_config_field = gr.Radio(
|
2521 |
+
choices = ["Yes", "No"],
|
2522 |
+
value = default_refine_option,
|
2523 |
+
show_label = False,
|
2524 |
+
container = False
|
2525 |
+
)
|
2526 |
+
|
2527 |
+
with gr.Row():
|
2528 |
+
|
2529 |
+
refining_use_denoising_start_in_base_model_when_using_refiner_field = gr.Checkbox(
|
2530 |
+
label = "Use \"denoising_start\" value as \"denoising_end\" value in base model generation when using refiner (doesn't work yet)",
|
2531 |
+
value = default_use_denoising_start_in_base_model_when_using_refiner,
|
2532 |
+
# interactive = True,
|
2533 |
+
container = True
|
2534 |
+
)
|
2535 |
+
|
2536 |
+
with gr.Row():
|
2537 |
+
|
2538 |
+
refining_base_model_output_to_refiner_is_in_latent_space_field = gr.Checkbox(
|
2539 |
+
label = "Base model output in latent space instead of PIL image when using refiner (doesn't work yet)",
|
2540 |
+
value = default_base_model_output_to_refiner_is_in_latent_space,
|
2541 |
+
# interactive = True,
|
2542 |
+
container = True
|
2543 |
+
)
|
2544 |
+
|
2545 |
+
with gr.Row():
|
2546 |
+
|
2547 |
+
refining_denoise_start_for_default_config_field = gr.Slider(
|
2548 |
+
label = "Refiner denoise start %",
|
2549 |
+
minimum = 0.7,
|
2550 |
+
maximum = 0.99,
|
2551 |
+
value = 0.95,
|
2552 |
+
step = 0.01
|
2553 |
+
)
|
2554 |
+
|
2555 |
+
# with gr.Row():
|
2556 |
+
|
2557 |
+
# refining_number_of_iterations_for_default_config_field = gr.Slider(
|
2558 |
+
# label = "Refiner number of iterations",
|
2559 |
+
# minimum = 1,
|
2560 |
+
# maximum = 100,
|
2561 |
+
# value = 100,
|
2562 |
+
# step = 1
|
2563 |
+
# )
|
2564 |
+
|
2565 |
+
with gr.Accordion(
|
2566 |
+
label = "Refiner (Online Config)",
|
2567 |
+
elem_id = "refiner_online_config_accordion_id",
|
2568 |
+
open = refiner_online_config_accordion_open,
|
2569 |
+
visible = refiner_online_config_accordion_visible
|
2570 |
+
) as refiner_online_config_accordion:
|
2571 |
+
|
2572 |
+
#
|
2573 |
+
#
|
2574 |
+
#
|
2575 |
+
# Refiner (Online Config)
|
2576 |
+
#
|
2577 |
+
#
|
2578 |
+
#
|
2579 |
+
|
2580 |
+
refining_selection_automatically_selected_message_field_visible = False
|
2581 |
+
|
2582 |
+
refining_selection_online_config_normal_field_visible = True
|
2583 |
+
refining_selection_online_config_automatically_selected_field_visible = False
|
2584 |
+
|
2585 |
+
if model_configuration_requires_refiner == 1:
|
2586 |
+
|
2587 |
+
refining_selection_automatically_selected_message_field_visible = True
|
2588 |
+
|
2589 |
+
refining_selection_online_config_normal_field_visible = False
|
2590 |
+
refining_selection_online_config_automatically_selected_field_visible = True
|
2591 |
+
|
2592 |
+
with gr.Row():
|
2593 |
+
|
2594 |
+
refining_selection_automatically_selected_message_field = gr.Markdown(
|
2595 |
+
value = "The online configuration you selected automatically uses the refiner.",
|
2596 |
+
visible = refining_selection_automatically_selected_message_field_visible
|
2597 |
+
)
|
2598 |
+
|
2599 |
+
with gr.Row():
|
2600 |
+
|
2601 |
+
refining_selection_online_config_normal_field = gr.Radio(
|
2602 |
+
choices = ["Yes", "No"],
|
2603 |
+
value = default_refine_option,
|
2604 |
+
show_label = False,
|
2605 |
+
container = False,
|
2606 |
+
visible = refining_selection_online_config_normal_field_visible
|
2607 |
+
)
|
2608 |
+
|
2609 |
+
with gr.Row():
|
2610 |
+
|
2611 |
+
refining_selection_online_config_automatically_selected_field = gr.Radio(
|
2612 |
+
choices = ["Yes"],
|
2613 |
+
value = "Yes",
|
2614 |
+
show_label = False,
|
2615 |
+
container = False,
|
2616 |
+
visible = refining_selection_online_config_automatically_selected_field_visible
|
2617 |
+
)
|
2618 |
+
|
2619 |
+
with gr.Row():
|
2620 |
+
|
2621 |
+
refining_denoise_start_for_online_config_field = gr.Slider(
|
2622 |
+
label = "Refiner denoise start %",
|
2623 |
+
minimum = 0.7,
|
2624 |
+
maximum = 0.99,
|
2625 |
+
value = 0.95,
|
2626 |
+
step = 0.01
|
2627 |
+
)
|
2628 |
+
|
2629 |
+
with gr.Row():
|
2630 |
+
|
2631 |
+
refining_number_of_iterations_for_online_config_field = gr.Slider(
|
2632 |
+
label = "Refiner number of iterations",
|
2633 |
+
minimum = 1,
|
2634 |
+
maximum = 100,
|
2635 |
+
value = 100,
|
2636 |
+
step = 1
|
2637 |
+
)
|
2638 |
+
|
2639 |
+
with gr.Group(
|
2640 |
+
visible = upscaler_group_visible
|
2641 |
+
):
|
2642 |
+
|
2643 |
+
with gr.Accordion(
|
2644 |
+
label = "Upscaler",
|
2645 |
+
elem_id = "upscaler_accordion_id",
|
2646 |
+
open = upscaler_accordion_open,
|
2647 |
+
visible = upscaler_group_visible
|
2648 |
+
):
|
2649 |
+
|
2650 |
+
#
|
2651 |
+
#
|
2652 |
+
#
|
2653 |
+
# Upscaler
|
2654 |
+
#
|
2655 |
+
#
|
2656 |
+
#
|
2657 |
+
|
2658 |
+
with gr.Row():
|
2659 |
+
|
2660 |
+
gr.Markdown("Upscale by 2x?")
|
2661 |
+
|
2662 |
+
with gr.Row():
|
2663 |
+
|
2664 |
+
upscaling_selection_field = gr.Radio(
|
2665 |
+
choices = ["Yes", "No"],
|
2666 |
+
value = default_upscale_option,
|
2667 |
+
show_label = False,
|
2668 |
+
container = False
|
2669 |
+
)
|
2670 |
+
|
2671 |
+
with gr.Row():
|
2672 |
+
|
2673 |
+
upscaling_num_inference_steps_field = gr.Slider(
|
2674 |
+
label = "Upscaler number of iterations",
|
2675 |
+
minimum = 1,
|
2676 |
+
maximum = 100,
|
2677 |
+
value = 100,
|
2678 |
+
step = 1
|
2679 |
+
)
|
2680 |
+
|
2681 |
+
if (
|
2682 |
+
(enable_refiner == 1) or
|
2683 |
+
(enable_upscaler == 1)
|
2684 |
+
):
|
2685 |
+
|
2686 |
+
refiner_and_upscaler_text_field = gr.HTML(
|
2687 |
+
value = default_refiner_and_upscaler_status_text
|
2688 |
+
)
|
2689 |
+
|
2690 |
+
with gr.Column(scale = 1):
|
2691 |
+
|
2692 |
+
with gr.Group():
|
2693 |
+
|
2694 |
+
with gr.Row():
|
2695 |
+
|
2696 |
+
base_model_field = gr.Dropdown(
|
2697 |
+
label = "Base Model:",
|
2698 |
+
choices = default_base_model_choices_array,
|
2699 |
+
value = default_base_model_nicely_named_value,
|
2700 |
+
type = "index",
|
2701 |
+
#info = "Main model type",
|
2702 |
+
filterable = False,
|
2703 |
+
min_width = 240,
|
2704 |
+
interactive = True
|
2705 |
+
)
|
2706 |
+
|
2707 |
+
model_configuration_field = gr.Dropdown(
|
2708 |
+
label = "Configuration Type:",
|
2709 |
+
choices = default_model_configuration_choices_array,
|
2710 |
+
value = default_model_configuration_nicely_named_value,
|
2711 |
+
type = "index",
|
2712 |
+
#info = "See end of page for info.",
|
2713 |
+
filterable = False,
|
2714 |
+
min_width = 240,
|
2715 |
+
interactive = True
|
2716 |
+
)
|
2717 |
+
|
2718 |
+
with gr.Row():
|
2719 |
+
|
2720 |
+
image_width_field = gr.Slider(
|
2721 |
+
label = "Width:",
|
2722 |
+
minimum = 256,
|
2723 |
+
maximum = 1024,
|
2724 |
+
value = default_width,
|
2725 |
+
step = width_and_height_input_slider_steps,
|
2726 |
+
interactive = True
|
2727 |
+
)
|
2728 |
+
|
2729 |
+
image_height_field = gr.Slider(
|
2730 |
+
label = "Height:",
|
2731 |
+
minimum = 256,
|
2732 |
+
maximum = 1024,
|
2733 |
+
value = default_height,
|
2734 |
+
step = width_and_height_input_slider_steps,
|
2735 |
+
interactive = True
|
2736 |
+
)
|
2737 |
+
|
2738 |
+
with gr.Row():
|
2739 |
+
|
2740 |
+
base_model_num_inference_steps_field = gr.Slider(
|
2741 |
+
label = "Steps:",
|
2742 |
+
minimum = 1,
|
2743 |
+
maximum = 100,
|
2744 |
+
value = default_base_model_base_model_num_inference_steps,
|
2745 |
+
step = 1,
|
2746 |
+
visible = default_base_model_num_inference_steps_field_visibility,
|
2747 |
+
interactive = True
|
2748 |
+
)
|
2749 |
+
|
2750 |
+
with gr.Row():
|
2751 |
+
|
2752 |
+
base_model_num_inference_steps_field_for_sdxl_turbo_field = gr.Slider(
|
2753 |
+
label = "Steps:",
|
2754 |
+
info = "Try using only 1 or a couple of steps.",
|
2755 |
+
minimum = 1,
|
2756 |
+
maximum = 25,
|
2757 |
+
value = default_base_model_base_model_num_inference_steps_for_sdxl_turbo,
|
2758 |
+
step = 1,
|
2759 |
+
visible = default_base_model_num_inference_steps_field_for_sdxl_turbo_field_visibility,
|
2760 |
+
interactive = True
|
2761 |
+
)
|
2762 |
+
|
2763 |
+
with gr.Row():
|
2764 |
+
|
2765 |
+
guidance_scale_field = gr.Slider(
|
2766 |
+
label = "Guidance Scale:",
|
2767 |
+
minimum = 1,
|
2768 |
+
maximum = 15,
|
2769 |
+
value = default_guidance_scale_value,
|
2770 |
+
step = 0.25,
|
2771 |
+
visible = default_guidance_scale_field_visibility,
|
2772 |
+
interactive = True
|
2773 |
+
)
|
2774 |
+
|
2775 |
+
with gr.Row():
|
2776 |
+
|
2777 |
+
guidance_scale_for_sdxl_turbo_field = gr.HTML(
|
2778 |
+
value = "<div style=\"padding: 10px; text-align: center; background: #fff;\">Guidance scale is not used for SDXL Turbo.</div>",
|
2779 |
+
visible = default_guidance_scale_for_sdxl_turbo_field_visibility
|
2780 |
+
)
|
2781 |
+
|
2782 |
+
with gr.Row():
|
2783 |
+
|
2784 |
+
seed_selection_option = gr.Slider(
|
2785 |
+
label = "Seed (0 is random):",
|
2786 |
+
minimum = 0,
|
2787 |
+
maximum = default_seed_maximum,
|
2788 |
+
value = default_seed_value,
|
2789 |
+
step = 1,
|
2790 |
+
interactive = True
|
2791 |
+
)
|
2792 |
+
|
2793 |
+
if make_seed_selection_a_textbox == 1:
|
2794 |
+
|
2795 |
+
seed_selection_option = gr.Textbox(
|
2796 |
+
label = "Seed (0 is random; " + str(default_seed_maximum) + " max):",
|
2797 |
+
value = "0",
|
2798 |
+
interactive = True
|
2799 |
+
)
|
2800 |
+
|
2801 |
+
seed_field = seed_selection_option
|
2802 |
+
|
2803 |
+
|
2804 |
+
|
2805 |
+
with gr.Column(scale = 1):
|
2806 |
+
|
2807 |
+
# with gr.Row():
|
2808 |
+
|
2809 |
+
# generate_image_btn = gr.Button(
|
2810 |
+
# value = "Generate",
|
2811 |
+
# variant = "primary"
|
2812 |
+
# )
|
2813 |
+
|
2814 |
+
with gr.Row():
|
2815 |
+
|
2816 |
+
if use_image_gallery == 1:
|
2817 |
+
|
2818 |
+
output_image_field = gr.Gallery(
|
2819 |
+
label = "Generated Images",
|
2820 |
+
value = [],
|
2821 |
+
# columns = 1,
|
2822 |
+
# rows = 1,
|
2823 |
+
selected_index = 0,
|
2824 |
+
elem_id = "image_gallery",
|
2825 |
+
allow_preview = "True",
|
2826 |
+
preview = True
|
2827 |
+
)
|
2828 |
+
|
2829 |
+
else:
|
2830 |
+
|
2831 |
+
output_image_field = gr.Image(
|
2832 |
+
label = "Generated Image",
|
2833 |
+
type = "pil"
|
2834 |
+
)
|
2835 |
+
|
2836 |
+
with gr.Row():
|
2837 |
+
|
2838 |
+
output_text_field = gr.Text(
|
2839 |
+
label = "Prompt Information:",
|
2840 |
+
value = "After an image is generated, its generation information will appear here." + additional_prompt_info_html,
|
2841 |
+
show_copy_button = True,
|
2842 |
+
lines = 4
|
2843 |
+
)
|
2844 |
+
|
2845 |
+
with gr.Row():
|
2846 |
+
|
2847 |
+
prompt_truncated_field = gr.HTML(
|
2848 |
+
value = "",
|
2849 |
+
visible = False
|
2850 |
+
)
|
2851 |
+
|
2852 |
+
global cancel_image_btn
|
2853 |
+
|
2854 |
+
if enable_close_command_prompt_button == 1:
|
2855 |
+
|
2856 |
+
cancel_image_btn = gr.Button(
|
2857 |
+
value = "Close Command Prompt / Cancel",
|
2858 |
+
variant = "stop"
|
2859 |
+
)
|
2860 |
+
|
2861 |
+
gr.Markdown("Closing the command prompt will cancel any images in the process of being created. You will need to launch it again to create more images.")
|
2862 |
+
|
2863 |
+
if len(ending_html) > 0:
|
2864 |
+
|
2865 |
+
with gr.Accordion(
|
2866 |
+
label = "Information",
|
2867 |
+
elem_id = "information_section_id",
|
2868 |
+
open = True
|
2869 |
+
):
|
2870 |
+
|
2871 |
+
gr.Markdown(ending_html)
|
2872 |
+
|
2873 |
+
base_model_field.change(
|
2874 |
+
fn = base_model_field_update_function,
|
2875 |
+
inputs = [
|
2876 |
+
base_model_field
|
2877 |
+
],
|
2878 |
+
outputs = [
|
2879 |
+
model_configuration_field,
|
2880 |
+
negative_prompt_field,
|
2881 |
+
negative_prompt_for_sdxl_turbo_field,
|
2882 |
+
base_model_num_inference_steps_field,
|
2883 |
+
base_model_num_inference_steps_field_for_sdxl_turbo_field,
|
2884 |
+
guidance_scale_field,
|
2885 |
+
guidance_scale_for_sdxl_turbo_field
|
2886 |
+
],
|
2887 |
+
queue = None,
|
2888 |
+
show_progress = "hidden"
|
2889 |
+
)
|
2890 |
+
|
2891 |
+
model_configuration_field.change(
|
2892 |
+
fn = model_configuration_field_update_function,
|
2893 |
+
inputs = [
|
2894 |
+
base_model_field,
|
2895 |
+
model_configuration_field
|
2896 |
+
],
|
2897 |
+
outputs = [
|
2898 |
+
negative_prompt_field,
|
2899 |
+
negative_prompt_for_sdxl_turbo_field,
|
2900 |
+
base_model_num_inference_steps_field,
|
2901 |
+
base_model_num_inference_steps_field_for_sdxl_turbo_field,
|
2902 |
+
guidance_scale_field,
|
2903 |
+
guidance_scale_for_sdxl_turbo_field,
|
2904 |
+
refiner_default_config_accordion,
|
2905 |
+
refiner_online_config_accordion,
|
2906 |
+
refining_selection_automatically_selected_message_field,
|
2907 |
+
refining_selection_online_config_normal_field,
|
2908 |
+
refining_selection_online_config_automatically_selected_field
|
2909 |
+
],
|
2910 |
+
queue = None,
|
2911 |
+
show_progress = "hidden"
|
2912 |
+
)
|
2913 |
+
|
2914 |
+
if use_image_gallery == 1:
|
2915 |
+
|
2916 |
+
output_image_field.select(
|
2917 |
+
fn = update_prompt_info_from_gallery,
|
2918 |
+
inputs = None,
|
2919 |
+
outputs = [
|
2920 |
+
output_image_field,
|
2921 |
+
output_text_field
|
2922 |
+
]
|
2923 |
+
)
|
2924 |
+
|
2925 |
+
if (
|
2926 |
+
(enable_refiner == 1) or
|
2927 |
+
(enable_upscaler == 1)
|
2928 |
+
):
|
2929 |
+
|
2930 |
+
triggers_array = []
|
2931 |
+
|
2932 |
+
if enable_refiner == 1:
|
2933 |
+
|
2934 |
+
triggers_array.extend([
|
2935 |
+
refining_selection_default_config_field.change,
|
2936 |
+
refining_selection_online_config_normal_field.change,
|
2937 |
+
refining_selection_online_config_automatically_selected_field.change
|
2938 |
+
])
|
2939 |
+
|
2940 |
+
if enable_upscaler == 1:
|
2941 |
+
|
2942 |
+
triggers_array.extend([
|
2943 |
+
upscaling_selection_field.change
|
2944 |
+
])
|
2945 |
+
|
2946 |
+
gr.on(
|
2947 |
+
triggers = triggers_array,
|
2948 |
+
fn = update_refiner_and_upscaler_status_function,
|
2949 |
+
inputs = [
|
2950 |
+
base_model_field,
|
2951 |
+
model_configuration_field,
|
2952 |
+
refining_selection_default_config_field,
|
2953 |
+
refining_selection_online_config_normal_field,
|
2954 |
+
refining_selection_online_config_automatically_selected_field,
|
2955 |
+
upscaling_selection_field
|
2956 |
+
],
|
2957 |
+
outputs = [
|
2958 |
+
refiner_and_upscaler_text_field
|
2959 |
+
],
|
2960 |
+
queue = None,
|
2961 |
+
show_progress = "hidden"
|
2962 |
+
)
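# gr.on() binds one handler to several triggers at once, so a change to
# any refiner or upscaler control refreshes the shared status text
# through this single function.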
|
2963 |
+
|
2964 |
+
generate_image_btn_click_event = generate_image_btn.click(
|
2965 |
+
fn = create_image_function,
|
2966 |
+
inputs = [
|
2967 |
+
base_model_field,
|
2968 |
+
model_configuration_field,
|
2969 |
+
prompt_field,
|
2970 |
+
negative_prompt_field,
|
2971 |
+
image_width_field,
|
2972 |
+
image_height_field,
|
2973 |
+
guidance_scale_field,
|
2974 |
+
base_model_num_inference_steps_field,
|
2975 |
+
base_model_num_inference_steps_field_for_sdxl_turbo_field,
|
2976 |
+
seed_field,
|
2977 |
+
|
2978 |
+
refining_selection_online_config_normal_field,
|
2979 |
+
refining_selection_online_config_automatically_selected_field,
|
2980 |
+
|
2981 |
+
refining_use_denoising_start_in_base_model_when_using_refiner_field,
|
2982 |
+
refining_base_model_output_to_refiner_is_in_latent_space_field,
|
2983 |
+
|
2984 |
+
refining_denoise_start_for_online_config_field,
|
2985 |
+
refining_number_of_iterations_for_online_config_field,
|
2986 |
+
|
2987 |
+
upscaling_selection_field,
|
2988 |
+
upscaling_num_inference_steps_field
|
2989 |
+
],
|
2990 |
+
outputs = [
|
2991 |
+
output_image_field,
|
2992 |
+
output_text_field,
|
2993 |
+
prompt_truncated_field
|
2994 |
+
]
|
2995 |
+
)
|
2996 |
+
|
2997 |
+
if enable_close_command_prompt_button == 1:
|
2998 |
+
|
2999 |
+
# https://github.com/gradio-app/gradio/pull/2433/files
|
3000 |
+
|
3001 |
+
cancel_image_btn.click(
|
3002 |
+
fn = cancel_image_processing,
|
3003 |
+
inputs = None,
|
3004 |
+
outputs = None,
|
3005 |
+
cancels = [generate_image_btn_click_event]
|
3006 |
+
)
|
3007 |
+
|
3008 |
+
|
3009 |
+
|
3010 |
+
sd_interface.queue(
|
3011 |
+
# concurrency_limit = 1,
|
3012 |
+
max_size = 20
|
3013 |
+
)
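# queue(max_size = 20) caps how many requests may wait for a worker;
# further submissions are rejected until the queue drains, which keeps a
# single machine from accumulating unbounded jobs.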
|
3014 |
+
|
3015 |
+
inbrowser = False
|
3016 |
+
|
3017 |
+
if auto_open_browser == 1:
|
3018 |
+
|
3019 |
+
inbrowser = True
|
3020 |
+
|
3021 |
+
sd_interface.launch(
|
3022 |
+
inbrowser = inbrowser,
|
3023 |
+
# debug = True,
|
3024 |
+
share = None,
|
3025 |
+
show_api = False,
|
3026 |
+
quiet = True,
|
3027 |
+
show_error = True,
|
3028 |
+
max_threads = 1
|
3029 |
+
)
|
3030 |
+
|
3031 |
+
sd_interface.load(
|
3032 |
+
scroll_to_output = False,
|
3033 |
+
show_progress = "full"
|
3034 |
+
)
|