prithivMLmods committed
Commit e989373 · verified · 1 Parent(s): 59df0e8

Update app.py

Files changed (1): app.py (+232 -1)

app.py CHANGED
@@ -1,3 +1,234 @@
+#!/usr/bin/env python
+
 import os
+import random
+import uuid
+import json
+
+import gradio as gr
+import numpy as np
+from PIL import Image
+import spaces
+import torch
+from diffusers import DiffusionPipeline
+
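+# Word filters and the default negative prompt are read from environment variables.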
+bad_words = json.loads(os.getenv('BAD_WORDS', "[]"))
+bad_words_negative = json.loads(os.getenv('BAD_WORDS_NEGATIVE', "[]"))
+default_negative = os.getenv("default_negative","")
+
+def check_text(prompt, negative=""):
+    for i in bad_words:
+        if i in prompt:
+            return True
+    for i in bad_words_negative:
+        if i in negative:
+            return True
+    return False
+
+DESCRIPTION = """# RealVisXL_V3.0"""
+if not torch.cuda.is_available():
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
+
+MAX_SEED = np.iinfo(np.int32).max
+CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "0") == "1"
+MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
+USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
+ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
+
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+NUM_IMAGES_PER_PROMPT = 1
+
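+# Load the RealVisXL V3.0 and V4.0 diffusion pipelines in fp16 when a GPU is available.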
+if torch.cuda.is_available():
+    pipe = DiffusionPipeline.from_pretrained(
+        "SG161222/RealVisXL_V3.0",
+        torch_dtype=torch.float16,
+        use_safetensors=True,
+        add_watermarker=False,
+        variant="fp16"
+    )
+    pipe2 = DiffusionPipeline.from_pretrained(
+        "SG161222/RealVisXL_V4.0",
+        torch_dtype=torch.float16,
+        use_safetensors=True,
+        add_watermarker=False,
+        variant="fp16"
+    )
+    if ENABLE_CPU_OFFLOAD:
+        pipe.enable_model_cpu_offload()
+        pipe2.enable_model_cpu_offload()
+    else:
+        pipe.to(device)
+        pipe2.to(device)
+        print("Loaded on Device!")
+
+    if USE_TORCH_COMPILE:
+        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+        pipe2.unet = torch.compile(pipe2.unet, mode="reduce-overhead", fullgraph=True)
+        print("Model Compiled!")
+
+
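+# Helpers: save each generated image under a unique PNG filename and optionally randomize the seed.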
+def save_image(img):
+    unique_name = str(uuid.uuid4()) + ".png"
+    img.save(unique_name)
+    return unique_name
+
+
+def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+    return seed
+
+
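+# Run the same prompt through both pipelines and return the saved image paths plus the seed used.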
+@spaces.GPU(enable_queue=True)
+def generate(
+    prompt: str,
+    negative_prompt: str = "",
+    use_negative_prompt: bool = False,
+    seed: int = 0,
+    width: int = 1024,
+    height: int = 1024,
+    guidance_scale: float = 3,
+    randomize_seed: bool = False,
+    use_resolution_binning: bool = True,
+    progress=gr.Progress(track_tqdm=True),
+):
+    pipe.to(device)
+    seed = int(randomize_seed_fn(seed, randomize_seed))
+    generator = torch.Generator().manual_seed(seed)
+
+    if not use_negative_prompt:
+        negative_prompt = ""  # type: ignore
+    negative_prompt += default_negative
+
+    options = {
+        "prompt": prompt,
+        "negative_prompt": negative_prompt,
+        "width": width,
+        "height": height,
+        "guidance_scale": guidance_scale,
+        "num_inference_steps": 25,
+        "generator": generator,
+        "num_images_per_prompt": NUM_IMAGES_PER_PROMPT,
+        "use_resolution_binning": use_resolution_binning,
+        "output_type": "pil",
+    }
+
+    images = pipe(**options).images + pipe2(**options).images
+
+    image_paths = [save_image(img) for img in images]
+    return image_paths, seed
+
+
+examples = [
+    "neon holography crystal cat",
+    "a cat eating a piece of cheese",
+    "an astronaut riding a horse in space",
+    "a cartoon of a boy playing with a tiger",
+    "a cute robot artist painting on an easel, concept art",
+    #"a close up of a woman wearing a transparent, prismatic, elaborate nemeses headdress, over the should pose, brown skin-tone"
+]
+
+css = '''
+.gradio-container{max-width: 560px !important}
+h1{text-align:center}
+'''
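+# Gradio UI: prompt box with a Run button, a result gallery, and an advanced-options accordion.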
+with gr.Blocks(css=css, theme="xiaobaiyuan/theme_brief") as demo:
+    gr.Markdown(DESCRIPTION)
+    gr.DuplicateButton(
+        value="Duplicate Space for private use",
+        elem_id="duplicate-button",
+        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
+    )
+    with gr.Group():
+        with gr.Row():
+            prompt = gr.Text(
+                label="Prompt",
+                show_label=False,
+                max_lines=1,
+                placeholder="Enter your prompt",
+                container=False,
+            )
+            run_button = gr.Button("Run", scale=0)
+        result = gr.Gallery(label="Result", columns=NUM_IMAGES_PER_PROMPT, show_label=False)
+    with gr.Accordion("Advanced options", open=False):
+        with gr.Row():
+            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
+            negative_prompt = gr.Text(
+                label="Negative prompt",
+                max_lines=1,
+                placeholder="Enter a negative prompt",
+                visible=True,
+            )
+        seed = gr.Slider(
+            label="Seed",
+            minimum=0,
+            maximum=MAX_SEED,
+            step=1,
+            value=0,
+        )
+        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+        with gr.Row(visible=True):
+            width = gr.Slider(
+                label="Width",
+                minimum=256,
+                maximum=MAX_IMAGE_SIZE,
+                step=32,
+                value=1024,
+            )
+            height = gr.Slider(
+                label="Height",
+                minimum=256,
+                maximum=MAX_IMAGE_SIZE,
+                step=32,
+                value=1024,
+            )
+        with gr.Row():
+            guidance_scale = gr.Slider(
+                label="Guidance Scale",
+                minimum=0.1,
+                maximum=20,
+                step=0.1,
+                value=3.0,
+            )
+
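+    # Hook up the example prompts, the negative-prompt visibility toggle, and the run triggers.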
+    gr.Examples(
+        examples=examples,
+        inputs=prompt,
+        outputs=[result, seed],
+        fn=generate,
+        cache_examples=CACHE_EXAMPLES,
+    )
+
+    use_negative_prompt.change(
+        fn=lambda x: gr.update(visible=x),
+        inputs=use_negative_prompt,
+        outputs=negative_prompt,
+        api_name=False,
+    )
+
+    gr.on(
+        triggers=[
+            prompt.submit,
+            negative_prompt.submit,
+            run_button.click,
+        ],
+        fn=generate,
+        inputs=[
+            prompt,
+            negative_prompt,
+            use_negative_prompt,
+            seed,
+            width,
+            height,
+            guidance_scale,
+            randomize_seed,
+        ],
+        outputs=[result, seed],
+        api_name="run",
+    )
 
-exec(os.environ.get('CASCADE1'))
+if __name__ == "__main__":
+    demo.queue(max_size=20).launch()