prithivMLmods committed on
Commit d9cf60c
1 Parent(s): 7e3176c

Upload 8 files

Files changed (8)
  1. app.py +196 -0
  2. assets/1.png +0 -0
  3. assets/2.png +0 -0
  4. assets/3.png +0 -0
  5. assets/4.png +0 -0
  6. assets/demo.txt +0 -0
  7. requirements.txt +8 -0
  8. std.txt +379 -0
app.py ADDED
@@ -0,0 +1,196 @@
+ #!/usr/bin/env python
+ # patch 0.01
+ import os
+ import random
+ import uuid
+ import gradio as gr
+ import numpy as np
+ from PIL import Image
+ import spaces
+ import torch
+ from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
+
+ huggingface_token = os.getenv("HUGGINGFACE_TOKEN")  # read but not used below
+
+ # DESCRIPTIONx = """## STABLE INSTRUCT 📦
+
+ # """
+
+ examples = [
+     ["assets/4.png", "Change the color of the jacket to white."],
+     ["assets/1.png", "Change the picture to black and white."],
+     ["assets/2.png", "Add the chocolate topping to the ice cream."],
+     ["assets/3.png", "Make the burger look spicy."],
+ ]
+
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+ model_id = "timbrooks/instruct-pix2pix"
+ # fp16 weights need a GPU; fall back to fp32 so the pipeline can still load on CPU
+ pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
+     model_id,
+     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+     safety_checker=None,
+ )
+ pipe.to(device)
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+
+ DESCRIPTION = """
+ """
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>⚠️ Running on CPU; this demo may be very slow or fail without a GPU.</p>"
+
+ MAX_SEED = np.iinfo(np.int32).max
+ CACHE_EXAMPLES = False
+ # Optional knobs; not all of them are wired up in this demo
+ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
+ USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
+ ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
+
+ def save_image(img):  # currently unused helper
+     unique_name = str(uuid.uuid4()) + ".png"
+     img.save(unique_name)
+     return unique_name
+
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     return seed
+
+ @spaces.GPU
+ def img2img_generate(
+     prompt: str,
+     init_image: Image.Image,
+     negative_prompt: str = "",
+     use_negative_prompt: bool = False,
+     seed: int = 0,
+     guidance_scale: float = 7,
+     randomize_seed: bool = False,
+     num_inference_steps: int = 30,
+     strength: float = 0.8,
+     NUM_IMAGES_PER_PROMPT: int = 1,
+     use_resolution_binning: bool = True,  # accepted but unused
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     pipe.to(device)
+     seed = int(randomize_seed_fn(seed, randomize_seed))
+     generator = torch.Generator().manual_seed(seed)
+
+     if not use_negative_prompt:
+         negative_prompt = None  # type: ignore
+
+     # Fixed square resize; the input aspect ratio is not preserved
+     init_image = init_image.resize((768, 768))
+
+     output = pipe(
+         prompt=prompt,
+         image=init_image,
+         negative_prompt=negative_prompt,
+         guidance_scale=guidance_scale,
+         num_inference_steps=num_inference_steps,
+         generator=generator,
+         # NOTE: StableDiffusionInstructPix2PixPipeline does not define a
+         # `strength` argument, so this value may be silently ignored
+         strength=strength,
+         num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
+         output_type="pil",
+     ).images
+
+     return output
+
+ css = '''
+ .gradio-container{max-width: 800px !important}
+ h1{text-align:center}
+ '''
+ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
+     # gr.Markdown(DESCRIPTIONx)
+     with gr.Group():
+         with gr.Row(equal_height=True):
+             with gr.Column(scale=1):
+                 img2img_prompt = gr.Text(
+                     label="Instruct",
+                     show_label=False,
+                     max_lines=1,
+                     placeholder="Enter your instruction",
+                     container=False,
+                 )
+                 init_image = gr.Image(label="Image", type="pil")
+                 with gr.Row():
+                     img2img_run_button = gr.Button("Generate", variant="primary")
+             with gr.Column(scale=1):
+                 img2img_output = gr.Gallery(label="Result", elem_id="gallery")
+     with gr.Accordion("Advanced options", open=False, visible=False):
+         with gr.Row():
+             img2img_use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
+             img2img_negative_prompt = gr.Text(
+                 label="Negative prompt",
+                 max_lines=1,
+                 value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
+                 visible=True,
+             )
+         img2img_seed = gr.Slider(
+             label="Seed",
+             minimum=0,
+             maximum=MAX_SEED,
+             step=1,
+             value=0,
+         )
+         img2img_steps = gr.Slider(
+             label="Steps",
+             minimum=1,
+             maximum=60,
+             step=1,
+             value=25,
+         )
+         img2img_number_image = gr.Slider(
+             label="Number of Images",
+             minimum=1,
+             maximum=4,
+             step=1,
+             value=1,
+         )
+         img2img_randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+         with gr.Row():
+             img2img_guidance_scale = gr.Slider(
+                 label="Guidance Scale",
+                 minimum=0.1,
+                 maximum=10,
+                 step=0.1,
+                 value=5.0,
+             )
+             strength = gr.Slider(label="Strength", minimum=0.0, maximum=1.0, step=0.01, value=0.8)
+
+     gr.Examples(
+         examples=examples,
+         inputs=[init_image, img2img_prompt],
+         outputs=img2img_output,
+         fn=img2img_generate,
+         cache_examples=CACHE_EXAMPLES,
+     )
+
+     img2img_use_negative_prompt.change(
+         fn=lambda x: gr.update(visible=x),
+         inputs=img2img_use_negative_prompt,
+         outputs=img2img_negative_prompt,
+         api_name=False,
+     )
+
+     gr.on(
+         triggers=[
+             img2img_prompt.submit,
+             img2img_negative_prompt.submit,
+             img2img_run_button.click,
+         ],
+         fn=img2img_generate,
+         inputs=[
+             img2img_prompt,
+             init_image,
+             img2img_negative_prompt,
+             img2img_use_negative_prompt,
+             img2img_seed,
+             img2img_guidance_scale,
+             img2img_randomize_seed,
+             img2img_steps,
+             strength,
+             img2img_number_image,
+         ],
+         outputs=[img2img_output],
+         api_name="image-to-image",
+     )
+
+     # gr.Markdown("⚠️ Users are accountable for the content they generate and for ensuring it meets appropriate ethical standards")
+
+ if __name__ == "__main__":
+     demo.queue().launch(show_api=False, debug=False)
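
For reference, the core edit that app.py wraps can be reproduced outside Gradio. The following is a minimal sketch (not part of this commit), assuming a CUDA GPU and the bundled assets/1.png; the values mirror the app's defaults, and image_guidance_scale is the pipeline's own parameter (default 1.5) for how closely the output should follow the input image:

    # Standalone InstructPix2Pix edit; paths and values are illustrative.
    import torch
    from PIL import Image
    from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler

    pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
        "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
    ).to("cuda")
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

    image = Image.open("assets/1.png").convert("RGB").resize((768, 768))
    edited = pipe(
        prompt="Change the picture to black and white.",
        image=image,
        num_inference_steps=25,       # the app's default Steps value
        guidance_scale=5.0,           # the app's default text guidance
        image_guidance_scale=1.5,     # pipeline default: fidelity to the input image
        generator=torch.Generator().manual_seed(0),
    ).images[0]
    edited.save("edited.png")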
assets/1.png ADDED
assets/2.png ADDED
assets/3.png ADDED
assets/4.png ADDED
assets/demo.txt ADDED
File without changes
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ torchvision==0.18.1
+ diffusers==0.28.2
+ transformers==4.41.2
+ sentencepiece
+ peft
+ accelerate
+ spaces
+ pillow
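
Note that gradio is not pinned here (on Hugging Face Spaces it is supplied via the sdk_version in the Space's README) and torch is installed transitively through the torchvision==0.18.1 pin, which pulls in the matching torch build (2.3.1). A quick sanity-check sketch (not part of this commit) to confirm the stack resolved as pinned:

    # Verify the pinned versions before launching the app.
    import diffusers, torch, torchvision, transformers

    assert diffusers.__version__ == "0.28.2"
    assert transformers.__version__ == "4.41.2"
    assert torchvision.__version__.startswith("0.18.1")
    print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())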
std.txt ADDED
@@ -0,0 +1,379 @@
+ #!/usr/bin/env python
+ # patch 0.01
+ import os
+ import random
+ import uuid
+ import gradio as gr
+ import numpy as np
+ from PIL import Image
+ import spaces
+ import torch
+ from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
+
+ huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
+
+ examples = [
+     ["assets/1.png", "Change the picture to black and white."],
+     ["assets/2.png", "Add the chocolate topping to the ice cream."],
+     ["assets/3.png", "Make the burger look spicy."],
+     ["assets/4.png", "Change the color of the jacket to white."],
+ ]
+
+ model_id = "timbrooks/instruct-pix2pix"
+ pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None)
+ pipe.to("cuda")
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+
+ DESCRIPTION = """
+ """
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>⚠️ Running on CPU; this demo may be very slow or fail without a GPU.</p>"
+
+ MAX_SEED = np.iinfo(np.int32).max
+ CACHE_EXAMPLES = False
+ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
+ USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
+ ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
+
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+ def save_image(img):
+     unique_name = str(uuid.uuid4()) + ".png"
+     img.save(unique_name)
+     return unique_name
+
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     return seed
+
+ @spaces.GPU
+ def img2img_generate(
+     prompt: str,
+     init_image: Image.Image,
+     negative_prompt: str = "",
+     use_negative_prompt: bool = False,
+     seed: int = 0,
+     guidance_scale: float = 7,
+     randomize_seed: bool = False,
+     num_inference_steps=30,
+     strength: float = 0.8,
+     NUM_IMAGES_PER_PROMPT=1,
+     use_resolution_binning: bool = True,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     pipe.to(device)
+     seed = int(randomize_seed_fn(seed, randomize_seed))
+     generator = torch.Generator().manual_seed(seed)
+
+     if not use_negative_prompt:
+         negative_prompt = None  # type: ignore
+
+     init_image = init_image.resize((768, 768))
+
+     output = pipe(
+         prompt=prompt,
+         image=init_image,
+         negative_prompt=negative_prompt,
+         guidance_scale=guidance_scale,
+         num_inference_steps=num_inference_steps,
+         generator=generator,
+         strength=strength,
+         num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
+         output_type="pil",
+     ).images
+
+     return output
+
+ css = '''
+ .gradio-container{max-width: 800px !important}
+ h1{text-align:center}
+ '''
+ with gr.Blocks(css=css, theme="xiaobaiyuan/theme_brief") as demo:
+     gr.Markdown(DESCRIPTION)
+     with gr.Group():
+         with gr.Row(equal_height=True):
+             with gr.Column(scale=1):
+                 img2img_prompt = gr.Text(
+                     label="Instruct",
+                     show_label=False,
+                     max_lines=1,
+                     placeholder="Enter your prompt",
+                     container=False,
+                 )
+                 init_image = gr.Image(label="Image", type="pil")
+                 with gr.Row():
+                     img2img_run_button = gr.Button("Generate", variant="primary")
+             with gr.Column(scale=1):
+                 img2img_output = gr.Gallery(label="Result", elem_id="gallery")
+     with gr.Accordion("Advanced options", open=False, visible=False):
+         with gr.Row():
+             img2img_use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
+             img2img_negative_prompt = gr.Text(
+                 label="Negative prompt",
+                 max_lines=1,
+                 value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
+                 visible=True,
+             )
+         img2img_seed = gr.Slider(
+             label="Seed",
+             minimum=0,
+             maximum=MAX_SEED,
+             step=1,
+             value=0,
+         )
+         img2img_steps = gr.Slider(
+             label="Steps",
+             minimum=0,
+             maximum=60,
+             step=1,
+             value=25,
+         )
+         img2img_number_image = gr.Slider(
+             label="No.of.Images",
+             minimum=1,
+             maximum=4,
+             step=1,
+             value=1,
+         )
+         img2img_randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+         with gr.Row():
+             img2img_guidance_scale = gr.Slider(
+                 label="Guidance Scale",
+                 minimum=0.1,
+                 maximum=10,
+                 step=0.1,
+                 value=5.0,
+             )
+             strength = gr.Slider(label="Confidence", minimum=0.0, maximum=1.0, step=0.01, value=0.8)
+
+     gr.Examples(
+         examples=examples,
+         inputs=[init_image, img2img_prompt],
+         outputs=img2img_output,
+         fn=img2img_generate,
+         cache_examples=CACHE_EXAMPLES,
+     )
+
+     img2img_use_negative_prompt.change(
+         fn=lambda x: gr.update(visible=x),
+         inputs=img2img_use_negative_prompt,
+         outputs=img2img_negative_prompt,
+         api_name=False,
+     )
+
+     gr.on(
+         triggers=[
+             img2img_prompt.submit,
+             img2img_negative_prompt.submit,
+             img2img_run_button.click,
+         ],
+         fn=img2img_generate,
+         inputs=[
+             img2img_prompt,
+             init_image,
+             img2img_negative_prompt,
+             img2img_use_negative_prompt,
+             img2img_seed,
+             img2img_guidance_scale,
+             img2img_randomize_seed,
+             img2img_steps,
+             strength,
+             img2img_number_image,
+         ],
+         outputs=[img2img_output],
+         api_name="img-to-img",
+     )
+
+ if __name__ == "__main__":
+     demo.queue().launch(show_api=False, debug=False)