Fabrice-TIERCELIN committed
Commit c70fac3
Parent(s): 3cc05f9
Update seed
app.py CHANGED
@@ -1,8 +1,3 @@
-from diffusers import (
-    ControlNetModel,
-    DiffusionPipeline,
-    StableDiffusionControlNetPipeline,
-)
 import gradio as gr
 import numpy as np
 import os
@@ -10,19 +5,31 @@ import time
 import math
 import random
 import imageio
-from PIL import Image, ImageFilter
 import torch
 
+from diffusers import (
+    ControlNetModel,
+    DiffusionPipeline,
+    StableDiffusionControlNetPipeline,
+)
+from PIL import Image, ImageFilter
+
 max_64_bit_int = 2**63 - 1
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 floatType = torch.float16 if torch.cuda.is_available() else torch.float32
+
 controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11e_sd15_ip2p", torch_dtype = floatType)
 pipe = StableDiffusionControlNetPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5", safety_checker = None, controlnet = controlnet, torch_dtype = floatType
 )
 pipe = pipe.to(device)
 
+def update_seed(is_randomize_seed, seed):
+    if is_randomize_seed:
+        return random.randint(0, max_64_bit_int)
+    return seed
+
 def check(
     input_image,
     prompt,
@@ -31,7 +38,6 @@ def check(
     num_inference_steps,
     guidance_scale,
     image_guidance_scale,
-    randomize_seed,
     seed,
     progress = gr.Progress()):
     if input_image is None:
@@ -48,7 +54,6 @@ def pix2pix(
     num_inference_steps,
     guidance_scale,
     image_guidance_scale,
-    randomize_seed,
     seed,
     progress = gr.Progress()):
     check(
@@ -59,7 +64,6 @@ def pix2pix(
         num_inference_steps,
         guidance_scale,
         image_guidance_scale,
-        randomize_seed,
         seed
     )
     start = time.time()
@@ -80,11 +84,11 @@ def pix2pix(
     if image_guidance_scale is None:
         image_guidance_scale = 1.5
 
-    if randomize_seed:
+    if seed is None:
         seed = random.randint(0, max_64_bit_int)
 
     random.seed(seed)
-
+    torch.manual_seed(seed)
 
     original_height, original_width, dummy_channel = np.array(input_image).shape
     output_width = original_width
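With randomize_seed dropped from the pix2pix signature, the seed that reaches the pipeline is already final, and pix2pix now seeds both Python's random module and torch. Seeding torch is what actually makes the diffusion output repeatable, since the initial latents come from torch's RNG. A minimal sketch of the behavior this enables (update_seed is the helper added above; the tensor draw merely stands in for the pipeline's noise sampling):

import random
import torch

max_64_bit_int = 2**63 - 1

def update_seed(is_randomize_seed, seed):
    # Helper added in this commit: draw a fresh seed when the
    # checkbox is ticked, otherwise keep the slider value.
    if is_randomize_seed:
        return random.randint(0, max_64_bit_int)
    return seed

seed = update_seed(False, 42)

# pix2pix now seeds both RNGs before sampling...
random.seed(seed)
torch.manual_seed(seed)
noise_a = torch.randn(4)

# ...so re-running with the same seed reproduces the same latents.
torch.manual_seed(seed)
noise_b = torch.randn(4)
assert torch.equal(noise_a, noise_b)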
@@ -161,7 +165,7 @@ with gr.Blocks() as interface:
         """
     )
     with gr.Column():
-        input_image = gr.Image(label = "Your image", sources = ["upload"], type = "pil")
+        input_image = gr.Image(label = "Your image", sources = ["upload", "webcam", "clipboard"], type = "pil")
         prompt = gr.Textbox(label = 'Prompt', info = "Instruct what to change in the image", placeholder = 'Order the AI what to change in the image')
         with gr.Accordion("Advanced options", open = False):
             negative_prompt = gr.Textbox(label = 'Negative prompt', placeholder = 'Describe what you do NOT want to see in the image', value = 'Watermark')
@@ -169,15 +173,20 @@ with gr.Blocks() as interface:
             num_inference_steps = gr.Slider(minimum = 10, maximum = 500, value = 20, step = 1, label = "Number of inference steps", info = "lower=faster, higher=image quality")
             guidance_scale = gr.Slider(minimum = 1, maximum = 13, value = 5, step = 0.1, label = "Classifier-Free Guidance Scale", info = "lower=image quality, higher=follow the prompt")
             image_guidance_scale = gr.Slider(minimum = 1, value = 1.5, step = 0.1, label = "Image Guidance Scale", info = "lower=image quality, higher=follow the image")
-            randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed
-            seed = gr.Slider(minimum = 0, maximum = max_64_bit_int, step = 1, randomize = True, label = "Seed
+            randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed", value = True, info = "If checked, result is always different")
+            seed = gr.Slider(minimum = 0, maximum = max_64_bit_int, step = 1, randomize = True, label = "Seed")
 
         submit = gr.Button("Modify", variant = "primary")
 
         modified_image = gr.Image(label = "Modified image")
         information = gr.Label(label = "Information")
 
-        submit.click(
+        submit.click(fn = update_seed, inputs = [
+            randomize_seed,
+            seed
+        ], outputs = [
+            seed
+        ], queue = False, show_progress = False).then(check, inputs = [
             input_image,
             prompt,
             negative_prompt,
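The submit handler becomes a three-step chain: update_seed runs first and writes the resolved seed back into the Seed slider, .then() runs the check validation, and .success() runs pix2pix only if check raised no error. A self-contained sketch of the same Gradio pattern, with stub functions in place of the real validation and pipeline:

import random
import gradio as gr

max_64_bit_int = 2**63 - 1

def update_seed(is_randomize_seed, seed):
    return random.randint(0, max_64_bit_int) if is_randomize_seed else seed

def check(prompt):
    # Raising gr.Error aborts the chain, so .success() never fires.
    if prompt is None or prompt == "":
        raise gr.Error("Please provide a prompt")

def run(prompt, seed):
    return f"would run pix2pix({prompt!r}) with seed {seed}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label = "Prompt")
    randomize_seed = gr.Checkbox(label = "Randomize seed", value = True)
    seed = gr.Slider(minimum = 0, maximum = max_64_bit_int, step = 1, label = "Seed")
    submit = gr.Button("Modify")
    information = gr.Label(label = "Information")

    # Same wiring as the diff: resolve the seed (writing it back into
    # the slider so the user sees it), then validate, then do the work.
    submit.click(update_seed, inputs = [randomize_seed, seed], outputs = [seed],
        queue = False, show_progress = False
    ).then(check, inputs = [prompt], outputs = [],
        queue = False, show_progress = False
    ).success(run, inputs = [prompt, seed], outputs = [information])

demo.launch()

Writing the seed back through outputs = [seed] is what lets a user copy the exact seed of a result they like and reuse it later with the checkbox off.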
@@ -185,7 +194,6 @@ with gr.Blocks() as interface:
             num_inference_steps,
             guidance_scale,
             image_guidance_scale,
-            randomize_seed,
             seed
         ], outputs = [], queue = False, show_progress = False).success(pix2pix, inputs = [
             input_image,
@@ -195,7 +203,6 @@ with gr.Blocks() as interface:
             num_inference_steps,
             guidance_scale,
             image_guidance_scale,
-            randomize_seed,
             seed
         ], outputs = [
             modified_image,
@@ -203,6 +210,7 @@ with gr.Blocks() as interface:
         ], scroll_to_output = True)
 
         gr.Examples(
+            fn = pix2pix,
             inputs = [
                 input_image,
                 prompt,
@@ -211,7 +219,6 @@ with gr.Blocks() as interface:
                 num_inference_steps,
                 guidance_scale,
                 image_guidance_scale,
-                randomize_seed,
                 seed
             ],
             outputs = [
@@ -220,58 +227,53 @@ with gr.Blocks() as interface:
             ],
             examples = [
                 [
-                    "Example1.webp",
+                    "./Examples/Example1.webp",
                     "What if it's snowing?",
                     "Watermark",
                     1,
                     20,
                     5,
                     1.5,
-                    True,
                     42
                 ],
                 [
-                    "Example2.png",
+                    "./Examples/Example2.png",
                     "What if this woman had brown hair?",
                     "Watermark",
                     1,
                     20,
                     5,
                     1.5,
-                    True,
                     42
                 ],
                 [
-                    "Example3.jpeg",
+                    "./Examples/Example3.jpeg",
                     "Replace the house by a windmill",
                     "Watermark",
                     1,
                     20,
                     5,
                     1.5,
-                    True,
                     42
                 ],
                 [
-                    "Example4.gif",
+                    "./Examples/Example4.gif",
                     "What if the camera was in opposite side?",
                     "Watermark",
                     1,
                     20,
                     5,
                     1.5,
-                    True,
                     42
                 ],
                 [
-                    "Example5.bmp",
+                    "./Examples/Example5.bmp",
                     "Turn him into cyborg",
                     "Watermark",
                     1,
                     20,
                     5,
                     25,
-                    True,
                     42
                 ],
             ],
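gr.Examples now receives fn = pix2pix, the example images move into an Examples/ folder, and each row loses the True that used to fill the removed randomize_seed column, keeping rows aligned with the inputs list. A stripped-down sketch of that wiring (the stub function and text components are placeholders that keep it runnable without the actual example files):

import gradio as gr

def run(image_path, prompt, seed):
    return f"would modify {image_path} with {prompt!r} and seed {seed}"

with gr.Blocks() as demo:
    image_path = gr.Textbox(label = "Image path")  # stand-in for gr.Image
    prompt = gr.Textbox(label = "Prompt")
    seed = gr.Slider(minimum = 0, maximum = 100, step = 1, label = "Seed")
    output = gr.Textbox(label = "Output")

    # Each example row must match inputs column-for-column, which is why
    # the rows above drop the value for the removed randomize_seed input.
    # With fn and outputs set, Gradio can also run or cache the function
    # for each row (see the run_on_click and cache_examples options).
    gr.Examples(
        fn = run,
        inputs = [image_path, prompt, seed],
        outputs = [output],
        examples = [
            ["./Examples/Example1.webp", "What if it's snowing?", 42],
        ],
    )

demo.launch()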