Fabrice-TIERCELIN
committed on
Adapt information
app.py
CHANGED
@@ -16,8 +16,12 @@ from PIL import Image, ImageFilter
 
 max_64_bit_int = 2**63 - 1
 
-
-
+if torch.cuda.is_available():
+    device = "cuda"
+    floatType = torch.float16
+else:
+    device = "cpu"
+    floatType = torch.float32
 
 controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11e_sd15_ip2p", torch_dtype = floatType)
 pipe = StableDiffusionControlNetPipeline.from_pretrained(
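The hunk above makes the device and precision a runtime decision. As a minimal standalone sketch (an illustration, not code from this commit): float16 halves memory on a CUDA GPU, while CPU inference falls back to float32, where half-precision kernels are poorly supported.

```python
# Minimal sketch of the device/precision selection pattern introduced above.
import torch

if torch.cuda.is_available():
    device = "cuda"
    floatType = torch.float16   # half precision: less VRAM, faster on GPU
else:
    device = "cpu"
    floatType = torch.float32   # CPU kernels generally expect full precision

print(f"Inference will run on {device} with dtype {floatType}")
```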
@@ -38,6 +42,7 @@ def check(
     num_inference_steps,
     guidance_scale,
     image_guidance_scale,
+    is_randomize_seed,
     seed,
     progress = gr.Progress()):
     if input_image is None:
@@ -54,6 +59,7 @@ def pix2pix(
     num_inference_steps,
     guidance_scale,
     image_guidance_scale,
+    is_randomize_seed,
     seed,
     progress = gr.Progress()):
     check(
@@ -64,6 +70,7 @@ def pix2pix(
         num_inference_steps,
         guidance_scale,
         image_guidance_scale,
+        is_randomize_seed,
         seed
     )
     start = time.time()
@@ -136,7 +143,7 @@ def pix2pix(
     minutes = minutes - (hours * 60)
     return [
         output_image,
-        "Start again to get a different result. The new image is " + str(output_width) + " pixels large and " + str(output_height) + " pixels high, so an image of " + f'{output_width * output_height:,}' + " pixels. The image have been generated in " + str(hours) + " h, " + str(minutes) + " min, " + str(secondes) + " sec." + limitation
+        ("Start again to get a different result. " if is_randomize_seed else "") + "The new image is " + str(output_width) + " pixels large and " + str(output_height) + " pixels high, so an image of " + f'{output_width * output_height:,}' + " pixels. The image have been generated in " + str(hours) + " h, " + str(minutes) + " min, " + str(secondes) + " sec." + limitation
     ]
 
 with gr.Blocks() as interface:
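The changed return line now shows the "Start again" hint only when the seed is randomized, since a fixed seed reproduces the same image. Here is the same composition as a standalone sketch with hypothetical placeholder values, rewritten with f-strings for readability (which also corrects the "image have been" slip in the concatenated string):

```python
# Placeholder values, only to show how the conditional prefix composes.
is_randomize_seed = True
output_width, output_height = 512, 512
hours, minutes, secondes = 0, 1, 23   # variable names follow the app's own
limitation = ""

information = (
    ("Start again to get a different result. " if is_randomize_seed else "")
    + f"The new image is {output_width} pixels large and {output_height} pixels high, "
    + f"so an image of {output_width * output_height:,} pixels. "
    + f"The image has been generated in {hours} h, {minutes} min, {secondes} sec."
    + limitation
)
print(information)
```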
@@ -146,7 +153,7 @@ with gr.Blocks() as interface:
         <p style="text-align: center;">Modifies your image using a textual instruction, freely, without account, without watermark, without installation, which can be downloaded</p>
         <br/>
         <br/>
-
+        ✨ Powered by <i>SD 1.5</i> and <i>ControlNet</i>. The result quality extremely varies depending on what we ask.
         <br/>
         <ul>
             <li>To change the <b>view angle</b> of your image, I recommend to use <i>Zero123</i>,</li>
@@ -166,9 +173,9 @@ with gr.Blocks() as interface:
     )
     with gr.Column():
         input_image = gr.Image(label = "Your image", sources = ["upload", "webcam", "clipboard"], type = "pil")
-        prompt = gr.Textbox(label =
+        prompt = gr.Textbox(label = "Prompt", info = "Instruct what to change in the image", placeholder = "Order the AI what to change in the image", lines = 2)
         with gr.Accordion("Advanced options", open = False):
-            negative_prompt = gr.Textbox(label =
+            negative_prompt = gr.Textbox(label = "Negative prompt", placeholder = "Describe what you do NOT want to see in the image", value = "Watermark")
             denoising_steps = gr.Slider(minimum = 0, maximum = 1000, value = 0, step = 1, label = "Denoising", info = "lower=irrelevant result, higher=relevant result")
             num_inference_steps = gr.Slider(minimum = 10, maximum = 500, value = 20, step = 1, label = "Number of inference steps", info = "lower=faster, higher=image quality")
             guidance_scale = gr.Slider(minimum = 1, maximum = 13, value = 5, step = 0.1, label = "Classifier-Free Guidance Scale", info = "lower=image quality, higher=follow the prompt")
@@ -176,7 +183,7 @@ with gr.Blocks() as interface:
             randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed", value = True, info = "If checked, result is always different")
             seed = gr.Slider(minimum = 0, maximum = max_64_bit_int, step = 1, randomize = True, label = "Seed")
 
-        submit = gr.Button("Modify", variant = "primary")
+        submit = gr.Button("🚀 Modify", variant = "primary")
 
         modified_image = gr.Image(label = "Modified image")
         information = gr.Label(label = "Information")
@@ -194,6 +201,7 @@ with gr.Blocks() as interface:
         num_inference_steps,
         guidance_scale,
         image_guidance_scale,
+        randomize_seed,
         seed
     ], outputs = [], queue = False, show_progress = False).success(pix2pix, inputs = [
         input_image,
@@ -203,6 +211,7 @@ with gr.Blocks() as interface:
         num_inference_steps,
         guidance_scale,
         image_guidance_scale,
+        randomize_seed,
         seed
     ], outputs = [
         modified_image,
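The wiring hunks above add `randomize_seed` to the input lists of both chained handlers: `.click(check, ...)` runs the validation first, and `.success(pix2pix, ...)` only fires if `check` did not raise. A self-contained sketch of that pattern, with hypothetical `validate` and `run` handlers standing in for `check` and `pix2pix`:

```python
import gradio as gr

def validate(text, randomize, seed):
    # Runs first; raising gr.Error aborts the chain before the heavy step.
    if not text:
        raise gr.Error("Please enter a prompt")

def run(text, randomize, seed):
    return f"prompt={text!r}, randomize={randomize}, seed={seed}"

with gr.Blocks() as demo:
    text = gr.Textbox(label = "Prompt")
    randomize = gr.Checkbox(label = "Randomize seed", value = True)
    seed = gr.Slider(minimum = 0, maximum = 100, step = 1, label = "Seed")
    out = gr.Textbox(label = "Result")
    gr.Button("Run").click(
        validate, inputs = [text, randomize, seed], outputs = [], queue = False
    ).success(run, inputs = [text, randomize, seed], outputs = [out])

# demo.launch()  # uncomment to serve locally
```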
@@ -219,6 +228,7 @@ with gr.Blocks() as interface:
             num_inference_steps,
             guidance_scale,
             image_guidance_scale,
+            randomize_seed,
             seed
         ],
         outputs = [
@@ -234,6 +244,7 @@ with gr.Blocks() as interface:
                 20,
                 5,
                 1.5,
+                False,
                 42
             ],
             [
@@ -244,6 +255,7 @@ with gr.Blocks() as interface:
                 20,
                 5,
                 1.5,
+                False,
                 42
             ],
             [
@@ -254,6 +266,7 @@ with gr.Blocks() as interface:
                 20,
                 5,
                 1.5,
+                False,
                 42
             ],
             [
@@ -264,6 +277,7 @@ with gr.Blocks() as interface:
                 20,
                 5,
                 1.5,
+                False,
                 42
             ],
             [
@@ -274,6 +288,7 @@ with gr.Blocks() as interface:
                 20,
                 5,
                 25,
+                False,
                 42
             ],
         ],
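Every example row gains a `False` in the matching position because rows passed to `gr.Examples` must supply one value per component in the `inputs` list, in order; with randomization off, the examples stay reproducible at seed 42. A minimal sketch of that constraint, with hypothetical components:

```python
import gradio as gr

with gr.Blocks() as demo:
    prompt = gr.Textbox(label = "Prompt")
    randomize = gr.Checkbox(label = "Randomize seed")
    seed = gr.Slider(minimum = 0, maximum = 100, step = 1, label = "Seed")
    gr.Examples(
        examples = [
            ["Make it snow", False, 42],   # one value per input component, in order
        ],
        inputs = [prompt, randomize, seed],
    )

# demo.launch()
```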