jayparmr committed
Commit f70725b
1 Parent(s): a3f5c82

Upload folder using huggingface_hub
inference.py CHANGED
@@ -93,29 +93,32 @@ def canny(task: Task):
     )
     lora_patcher.patch()
 
-    images, has_nsfw = controlnet.process(
-        prompt=prompt,
-        imageUrl=task.get_imageUrl(),
-        seed=task.get_seed(),
-        steps=task.get_steps(),
-        width=width,
-        height=height,
-        guidance_scale=task.get_cy_guidance_scale(),
-        negative_prompt=[
+    kwargs = {
+        "prompt": prompt,
+        "imageUrl": task.get_imageUrl(),
+        "seed": task.get_seed(),
+        "num_inference_steps": task.get_steps(),
+        "width": width,
+        "height": height,
+        "negative_prompt": [
             f"monochrome, neon, x-ray, negative image, oversaturated, {task.get_negative_prompt()}"
         ]
         * num_return_sequences,
+        **task.cnc_kwargs(),
         **lora_patcher.kwargs(),
-    )
+    }
+    images, has_nsfw = controlnet.process(**kwargs)
     if task.get_high_res_fix():
-        images, _ = high_res.apply(
-            prompt=prompt,
-            negative_prompt=[task.get_negative_prompt()] * num_return_sequences,
-            images=images,
-            width=task.get_width(),
-            height=task.get_height(),
-            steps=task.get_steps(),
-        )
+        kwargs = {
+            "prompt": prompt,
+            "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+            "images": images,
+            "width": task.get_width(),
+            "height": task.get_height(),
+            "num_inference_steps": task.get_steps(),
+            **task.high_res_kwargs(),
+        }
+        images, _ = high_res.apply(**kwargs)
 
     generated_image_urls = upload_images(images, "_canny", task.get_taskId())
 
@@ -142,17 +145,18 @@ def tile_upscale(task: Task):
     lora_patcher = lora_style.get_patcher(controlnet.pipe, task.get_style())
     lora_patcher.patch()
 
-    images, has_nsfw = controlnet.process(
-        imageUrl=task.get_imageUrl(),
-        seed=task.get_seed(),
-        steps=task.get_steps(),
-        width=task.get_width(),
-        height=task.get_height(),
-        prompt=prompt,
-        resize_dimension=task.get_resize_dimension(),
-        negative_prompt=task.get_negative_prompt(),
-        guidance_scale=task.get_ti_guidance_scale(),
-    )
+    kwargs = {
+        "imageUrl": task.get_imageUrl(),
+        "seed": task.get_seed(),
+        "num_inference_steps": task.get_steps(),
+        "negative_prompt": task.get_negative_prompt(),
+        "width": task.get_width(),
+        "height": task.get_height(),
+        "prompt": prompt,
+        "resize_dimension": task.get_resize_dimension(),
+        **task.cnt_kwargs(),
+    }
+    images, has_nsfw = controlnet.process(**kwargs)
 
     generated_image_url = upload_image(images[0], output_key)
 
@@ -181,24 +185,29 @@ def scribble(task: Task):
     )
     lora_patcher.patch()
 
-    images, has_nsfw = controlnet.process(
-        imageUrl=task.get_imageUrl(),
-        seed=task.get_seed(),
-        steps=task.get_steps(),
-        width=width,
-        height=height,
-        prompt=prompt,
-        negative_prompt=[task.get_negative_prompt()] * num_return_sequences,
-    )
+    kwargs = {
+        "imageUrl": task.get_imageUrl(),
+        "seed": task.get_seed(),
+        "num_inference_steps": task.get_steps(),
+        "width": width,
+        "height": height,
+        "prompt": prompt,
+        "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+        **task.cns_kwargs(),
+    }
+    images, has_nsfw = controlnet.process(**kwargs)
+
     if task.get_high_res_fix():
-        images, _ = high_res.apply(
-            prompt=prompt,
-            negative_prompt=[task.get_negative_prompt()] * num_return_sequences,
-            images=images,
-            width=task.get_width(),
-            height=task.get_height(),
-            steps=task.get_steps(),
-        )
+        kwargs = {
+            "prompt": prompt,
+            "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+            "images": images,
+            "width": task.get_width(),
+            "height": task.get_height(),
+            "num_inference_steps": task.get_steps(),
+            **task.high_res_kwargs(),
+        }
+        images, _ = high_res.apply(**kwargs)
 
     generated_image_urls = upload_images(images, "_scribble", task.get_taskId())
 
@@ -227,24 +236,29 @@ def linearart(task: Task):
     )
     lora_patcher.patch()
 
-    images, has_nsfw = controlnet.process(
-        imageUrl=task.get_imageUrl(),
-        seed=task.get_seed(),
-        steps=task.get_steps(),
-        width=width,
-        height=height,
-        prompt=prompt,
-        negative_prompt=[task.get_negative_prompt()] * num_return_sequences,
-    )
+    kwargs = {
+        "imageUrl": task.get_imageUrl(),
+        "seed": task.get_seed(),
+        "num_inference_steps": task.get_steps(),
+        "width": width,
+        "height": height,
+        "prompt": prompt,
+        "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+        **task.cnl_kwargs(),
+    }
+    images, has_nsfw = controlnet.process(**kwargs)
+
     if task.get_high_res_fix():
-        images, _ = high_res.apply(
-            prompt=prompt,
-            negative_prompt=[task.get_negative_prompt()] * num_return_sequences,
-            images=images,
-            width=task.get_width(),
-            height=task.get_height(),
-            steps=task.get_steps(),
-        )
+        kwargs = {
+            "prompt": prompt,
+            "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+            "images": images,
+            "width": task.get_width(),
+            "height": task.get_height(),
+            "num_inference_steps": task.get_steps(),
+            **task.high_res_kwargs(),
+        }
+        images, _ = high_res.apply(**kwargs)
 
     generated_image_urls = upload_images(images, "_linearart", task.get_taskId())
 
@@ -291,35 +305,32 @@ def pose(task: Task, s3_outkey: str = "_pose", poses: Optional[list] = None):
     else:
        poses = [controlnet.detect_pose(task.get_imageUrl())] * num_return_sequences
 
-    src_image = download_image(task.get_auxilary_imageUrl()).resize(
-        (task.get_width(), task.get_height())
-    )
-    condition_image = ControlNet.linearart_condition_image(src_image)
-
-    images, has_nsfw = controlnet.process(
-        prompt=prompt,
-        image=poses,
-        condition_image=[condition_image] * num_return_sequences,
-        seed=task.get_seed(),
-        steps=task.get_steps(),
-        negative_prompt=[task.get_negative_prompt()] * num_return_sequences,
-        width=width,
-        height=height,
-        guidance_scale=task.get_po_guidance_scale(),
+    kwargs = {
+        "prompt": prompt,
+        "image": poses,
+        "seed": task.get_seed(),
+        "num_inference_steps": task.get_steps(),
+        "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+        "width": width,
+        "height": height,
+        **task.cnp_kwargs(),
         **lora_patcher.kwargs(),
-    )
+    }
+    images, has_nsfw = controlnet.process(**kwargs)
+
     if task.get_high_res_fix():
-        images, _ = high_res.apply(
-            prompt=prompt,
-            negative_prompt=[task.get_negative_prompt()] * num_return_sequences,
-            images=images,
-            width=task.get_width(),
-            height=task.get_height(),
-            steps=task.get_steps(),
-        )
+        kwargs = {
+            "prompt": prompt,
+            "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+            "images": images,
+            "width": task.get_width(),
+            "height": task.get_height(),
+            "num_inference_steps": task.get_steps(),
+            **task.high_res_kwargs(),
+        }
+        images, _ = high_res.apply(**kwargs)
 
     upload_image(poses[0], "crecoAI/{}_pose.png".format(task.get_taskId()))
-    upload_image(condition_image, "crecoAI/{}_condition.png".format(task.get_taskId()))
 
     generated_image_urls = upload_images(images, s3_outkey, task.get_taskId())
 
@@ -348,25 +359,28 @@ def text2img(task: Task):
 
     torch.manual_seed(task.get_seed())
 
-    images, has_nsfw = text2img_pipe.process(
-        params=params,
-        num_inference_steps=task.get_steps(),
-        guidance_scale=7.5,
-        height=height,
-        width=width,
-        negative_prompt=task.get_negative_prompt(),
-        iteration=task.get_iteration(),
+    kwargs = {
+        "params": params,
+        "num_inference_steps": task.get_steps(),
+        "height": height,
+        "width": width,
+        "negative_prompt": task.get_negative_prompt(),
+        **task.t2i_kwargs(),
        **lora_patcher.kwargs(),
-    )
+    }
+    images, has_nsfw = text2img_pipe.process(**kwargs)
+
     if task.get_high_res_fix():
-        images, _ = high_res.apply(
-            prompt=params.prompt if params.prompt else [""] * num_return_sequences,
-            negative_prompt=[task.get_negative_prompt()] * num_return_sequences,
-            images=images,
-            width=task.get_width(),
-            height=task.get_height(),
-            steps=task.get_steps(),
-        )
+        kwargs = {
+            "prompt": params.prompt if params.prompt else [""] * num_return_sequences,
+            "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+            "images": images,
+            "width": task.get_width(),
+            "height": task.get_height(),
+            "num_inference_steps": task.get_steps(),
+            **task.high_res_kwargs(),
+        }
+        images, _ = high_res.apply(**kwargs)
 
     generated_image_urls = upload_images(images, "", task.get_taskId())
 
@@ -394,26 +408,29 @@ def img2img(task: Task):
 
     torch.manual_seed(task.get_seed())
 
-    images, has_nsfw = img2img_pipe.process(
-        prompt=prompt,
-        imageUrl=task.get_imageUrl(),
-        negative_prompt=[task.get_negative_prompt()] * num_return_sequences,
-        steps=task.get_steps(),
-        width=width,
-        height=height,
-        strength=task.get_i2i_strength(),
-        guidance_scale=task.get_i2i_guidance_scale(),
+    kwargs = {
+        "prompt": prompt,
+        "imageUrl": task.get_imageUrl(),
+        "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+        "num_inference_steps": task.get_steps(),
+        "width": width,
+        "height": height,
+        **task.i2i_kwargs(),
        **lora_patcher.kwargs(),
-    )
+    }
+    images, has_nsfw = img2img_pipe.process(**kwargs)
+
     if task.get_high_res_fix():
-        images, _ = high_res.apply(
-            prompt=prompt,
-            negative_prompt=[task.get_negative_prompt()] * num_return_sequences,
-            images=images,
-            width=task.get_width(),
-            height=task.get_height(),
-            steps=task.get_steps(),
-        )
+        kwargs = {
+            "prompt": prompt,
+            "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+            "images": images,
+            "width": task.get_width(),
+            "height": task.get_height(),
+            "num_inference_steps": task.get_steps(),
+            **task.high_res_kwargs(),
+        }
+        images, _ = high_res.apply(**kwargs)
 
     generated_image_urls = upload_images(images, "_imgtoimg", task.get_taskId())
 
@@ -433,15 +450,18 @@ def inpaint(task: Task):
 
     print({"prompts": prompt})
 
-    images = inpainter.process(
-        prompt=prompt,
-        image_url=task.get_imageUrl(),
-        mask_image_url=task.get_maskImageUrl(),
-        width=task.get_width(),
-        height=task.get_height(),
-        seed=task.get_seed(),
-        negative_prompt=[task.get_negative_prompt()] * num_return_sequences,
-    )
+    kwargs = {
+        "prompt": prompt,
+        "image_url": task.get_imageUrl(),
+        "mask_image_url": task.get_maskImageUrl(),
+        "width": task.get_width(),
+        "height": task.get_height(),
+        "seed": task.get_seed(),
+        "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+        "num_inference_steps": task.get_steps(),
+        **task.ip_kwargs(),
+    }
+    images = inpainter.process(**kwargs)
 
     generated_image_urls = upload_images(images, "_inpaint", task.get_taskId())
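
Note: every handler in this file now follows the same shape: build a plain kwargs dict, splice in the task's prefix-scoped overrides (cnc_kwargs, cnt_kwargs, ...), and unpack it into the pipeline call. Because later entries in a dict literal win, a payload override replaces any default written earlier in the dict. A minimal sketch of that merge rule, with hypothetical values:

    defaults = {"num_inference_steps": 30, "guidance_scale": 9}
    overrides = {"guidance_scale": 7.0}  # e.g. what task.cnc_kwargs() might return

    kwargs = {**defaults, **overrides}   # later keys win
    assert kwargs == {"num_inference_steps": 30, "guidance_scale": 7.0}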

inference2.py CHANGED
@@ -72,17 +72,18 @@ def tile_upscale(task: Task):
     lora_patcher = lora_style.get_patcher(controlnet.pipe, task.get_style())
     lora_patcher.patch()
 
-    images, has_nsfw = controlnet.process_tile_upscaler(
-        imageUrl=task.get_imageUrl(),
-        seed=task.get_seed(),
-        steps=task.get_steps(),
-        width=task.get_width(),
-        height=task.get_height(),
-        prompt=prompt,
-        resize_dimension=task.get_resize_dimension(),
-        negative_prompt=task.get_negative_prompt(),
-        guidance_scale=task.get_ti_guidance_scale(),
-    )
+    kwargs = {
+        "imageUrl": task.get_imageUrl(),
+        "seed": task.get_seed(),
+        "num_inference_steps": task.get_steps(),
+        "negative_prompt": task.get_negative_prompt(),
+        "width": task.get_width(),
+        "height": task.get_height(),
+        "prompt": prompt,
+        "resize_dimension": task.get_resize_dimension(),
+        **task.cnt_kwargs(),
+    }
+    images, has_nsfw = controlnet.process(**kwargs)
 
     generated_image_url = upload_image(images[0], output_key)
 
@@ -99,7 +100,6 @@ def tile_upscale(task: Task):
 @update_db
 @slack.auto_send_alert
 def remove_bg(task: Task):
-    # remove_background = RemoveBackground()
     output_image = remove_background_v2.remove(
         task.get_imageUrl(), model_type=task.get_modelType()
     )
@@ -121,15 +121,18 @@ def inpaint(task: Task):
 
     print({"prompts": prompt})
 
-    images = inpainter.process(
-        prompt=prompt,
-        image_url=task.get_imageUrl(),
-        mask_image_url=task.get_maskImageUrl(),
-        width=task.get_width(),
-        height=task.get_height(),
-        seed=task.get_seed(),
-        negative_prompt=[task.get_negative_prompt()] * num_return_sequences,
-    )
+    kwargs = {
+        "prompt": prompt,
+        "image_url": task.get_imageUrl(),
+        "mask_image_url": task.get_maskImageUrl(),
+        "width": task.get_width(),
+        "height": task.get_height(),
+        "seed": task.get_seed(),
+        "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
+        "num_inference_steps": task.get_steps(),
+        **task.ip_kwargs(),
+    }
+    images = inpainter.process(**kwargs)
 
     generated_image_urls = upload_images(images, "_inpaint", task.get_taskId())

internals/data/task.py CHANGED
@@ -92,7 +92,7 @@ class Task:
         return int(self.__data.get("seed", -1))
 
     def get_steps(self) -> int:
-        return int(self.__data.get("steps", "75"))
+        return int(self.__data.get("steps", 30))
 
     def get_type(self) -> Union[TaskType, None]:
         try:
@@ -127,21 +127,6 @@ class Task:
     def get_face_enhance(self) -> bool:
         return self.__data.get("up_face_enhance", False)
 
-    def get_ti_guidance_scale(self) -> float:
-        return self.__data.get("ti_guidance_scale", 7.5)
-
-    def get_i2i_guidance_scale(self) -> float:
-        return self.__data.get("i2i_guidance_scale", 7.5)
-
-    def get_i2i_strength(self) -> float:
-        return self.__data.get("i2i_strength", 0.75)
-
-    def get_cy_guidance_scale(self) -> float:
-        return self.__data.get("cy_guidance_scale", 9)
-
-    def get_po_guidance_scale(self) -> float:
-        return self.__data.get("po_guidance_scale", 7.5)
-
     def rbg_controlnet_conditioning_scale(self) -> float:
         return self.__data.get("rbg_conditioning_scale", 0.5)
 
@@ -166,6 +151,38 @@ class Task:
     def get_raw(self) -> dict:
         return self.__data.copy()
 
+    def t2i_kwargs(self) -> dict:
+        return dict(self.__get_kwargs("t2i_"))
+
+    def i2i_kwargs(self) -> dict:
+        return dict(self.__get_kwargs("i2i_"))
+
+    def ip_kwargs(self) -> dict:
+        return dict(self.__get_kwargs("ip_"))
+
+    def cnc_kwargs(self) -> dict:
+        return dict(self.__get_kwargs("cnc_"))
+
+    def cnp_kwargs(self) -> dict:
+        return dict(self.__get_kwargs("cnp_"))
+
+    def cns_kwargs(self) -> dict:
+        return dict(self.__get_kwargs("cns_"))
+
+    def cnl_kwargs(self) -> dict:
+        return dict(self.__get_kwargs("cnl_"))
+
+    def cnt_kwargs(self) -> dict:
+        return dict(self.__get_kwargs("cnt_"))
+
+    def high_res_kwargs(self) -> dict:
+        return dict(self.__get_kwargs("hrf_"))
+
+    def __get_kwargs(self, prefix: str):
+        for k, v in self.__data.items():
+            if k.startswith(prefix):
+                yield k[len(prefix) :], v
+
     @property
     @lru_cache(1)
     def PROMPT(self):
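
Note: the deleted per-task getters are replaced by one convention: any payload key starting with a known prefix (t2i_, i2i_, ip_, cnc_, cnp_, cns_, cnl_, cnt_, hrf_) has its prefix stripped and is forwarded verbatim to the matching pipeline. A standalone sketch of the routing, assuming a hypothetical payload:

    data = {"steps": 30, "hrf_strength": 0.4, "cnc_guidance_scale": 7.0}

    def get_kwargs(prefix: str) -> dict:
        # mirrors Task.__get_kwargs: strip the prefix, keep the value
        return {k[len(prefix):]: v for k, v in data.items() if k.startswith(prefix)}

    assert get_kwargs("hrf_") == {"strength": 0.4}        # forwarded to high_res.apply
    assert get_kwargs("cnc_") == {"guidance_scale": 7.0}  # forwarded to the canny ControlNet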

internals/pipelines/commons.py CHANGED
@@ -66,46 +66,29 @@ class Text2Img(AbstractPipeline):
     def process(
         self,
         params: Params,
-        height: Optional[int] = None,
-        width: Optional[int] = None,
-        num_inference_steps: int = 50,
-        guidance_scale: float = 7.5,
-        negative_prompt: Optional[str] = None,
-        num_images_per_prompt: int = 1,
-        eta: float = 0.0,
-        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        latents: Optional[torch.FloatTensor] = None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-        output_type: Optional[str] = "pil",
-        return_dict: bool = True,
-        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-        callback_steps: int = 1,
-        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+        num_inference_steps: int,
+        height: int,
+        width: int,
+        negative_prompt: str,
         iteration: float = 3.0,
+        **kwargs,
     ):
         prompt = params.prompt
 
         if params.prompt_left and params.prompt_right:
             # multi-character pipelines
             prompt = [params.prompt[0], params.prompt_left[0], params.prompt_right[0]]
-            result = self.pipe.multi_character_diffusion(
-                prompt=prompt,
-                pos=["1:1-0:0", "1:2-0:0", "1:2-0:1"],
-                mix_val=[0.2, 0.8, 0.8],
-                height=height,
-                width=width,
-                num_inference_steps=num_inference_steps,
-                guidance_scale=guidance_scale,
-                negative_prompt=[negative_prompt or ""] * len(prompt),
-                num_images_per_prompt=num_return_sequences,
-                eta=eta,
-                # generator=generator,
-                output_type=output_type,
-                return_dict=return_dict,
-                callback=callback,
-                callback_steps=callback_steps,
-            )
+            kwargs = {
+                "prompt": prompt,
+                "pos": ["1:1-0:0", "1:2-0:0", "1:2-0:1"],
+                "mix_val": [0.2, 0.8, 0.8],
+                "height": height,
+                "width": width,
+                "num_inference_steps": num_inference_steps,
+                "negative_prompt": [negative_prompt or ""] * len(prompt),
+                **kwargs,
+            }
+            result = self.pipe.multi_character_diffusion(**kwargs)
         else:
             # two step pipeline
             modified_prompt = params.modified_prompt
@@ -122,25 +105,14 @@ class Text2Img(AbstractPipeline):
             "iteration": iteration,
         }
 
-        result = self.pipe.__call__(
-            height=height,
-            width=width,
-            num_inference_steps=num_inference_steps,
-            guidance_scale=guidance_scale,
-            negative_prompt=[negative_prompt or ""] * num_return_sequences,
-            num_images_per_prompt=num_images_per_prompt,
-            eta=eta,
-            generator=generator,
-            latents=latents,
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            output_type=output_type,
-            return_dict=return_dict,
-            callback=callback,
-            callback_steps=callback_steps,
-            cross_attention_kwargs=cross_attention_kwargs,
-            **kwargs
-        )
+        kwargs = {
+            "height": height,
+            "width": width,
+            "negative_prompt": [negative_prompt or ""] * num_return_sequences,
+            "num_inference_steps": num_inference_steps,
+            **kwargs,
+        }
+        result = self.pipe.__call__(**kwargs)
 
         return Result.from_result(result)
 
@@ -192,21 +164,24 @@ class Img2Img(AbstractPipeline):
         prompt: List[str],
         imageUrl: str,
         negative_prompt: List[str],
-        strength: float,
-        guidance_scale: float,
-        steps: int,
+        num_inference_steps: int,
         width: int,
         height: int,
+        strength: float = 0.75,
+        guidance_scale: float = 7.5,
+        **kwargs,
     ):
         image = download_image(imageUrl).resize((width, height))
 
-        result = self.pipe.__call__(
-            prompt=prompt,
-            image=image,
-            strength=strength,
-            negative_prompt=negative_prompt,
-            guidance_scale=guidance_scale,
-            num_images_per_prompt=1,
-            num_inference_steps=steps,
-        )
+        kwargs = {
+            "prompt": prompt,
+            "image": image,
+            "strength": strength,
+            "negative_prompt": negative_prompt,
+            "guidance_scale": guidance_scale,
+            "num_images_per_prompt": 1,
+            "num_inference_steps": num_inference_steps,
+            **kwargs,
+        }
+        result = self.pipe.__call__(**kwargs)
         return Result.from_result(result)
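
Note: Img2Img.process now defaults strength to 0.75 and guidance_scale to 7.5, the same values the removed Task getters used to supply, so behavior is unchanged unless a payload overrides them. A hypothetical call site, assuming the task payload carried "i2i_strength": 0.6:

    images, has_nsfw = img2img_pipe.process(
        prompt=prompt,
        imageUrl=task.get_imageUrl(),
        negative_prompt=[task.get_negative_prompt()] * num_return_sequences,
        num_inference_steps=task.get_steps(),
        width=width,
        height=height,
        **task.i2i_kwargs(),  # yields strength=0.6, overriding the 0.75 default
    )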

internals/pipelines/controlnets.py CHANGED
@@ -57,7 +57,7 @@ class ControlNet(AbstractPipeline):
         if not model:
             raise Exception(f"ControlNet is not supported for {task_name}")
         while model in list(config.keys()):
-            task_name = config[model]  # pyright: ignore
+            task_name = model  # pyright: ignore
             model = config[task_name]
 
         controlnet = ControlNetModel.from_pretrained(
@@ -152,11 +152,12 @@ class ControlNet(AbstractPipeline):
         prompt: List[str],
         imageUrl: str,
         seed: int,
-        steps: int,
+        num_inference_steps: int,
         negative_prompt: List[str],
-        guidance_scale: float,
         height: int,
         width: int,
+        guidance_scale: float = 9,
+        **kwargs,
     ):
         if self.__current_task_name != "canny":
             raise Exception("ControlNet is not loaded with canny model")
@@ -166,16 +167,19 @@ class ControlNet(AbstractPipeline):
         init_image = download_image(imageUrl).resize((width, height))
         init_image = self.__canny_detect_edge(init_image)
 
-        result = self.pipe2.__call__(
-            prompt=prompt,
-            image=init_image,
-            guidance_scale=guidance_scale,
-            num_images_per_prompt=1,
-            negative_prompt=negative_prompt,
-            num_inference_steps=steps,
-            height=height,
-            width=width,
-        )
+        kwargs = {
+            "prompt": prompt,
+            "image": init_image,
+            "guidance_scale": guidance_scale,
+            "num_images_per_prompt": 1,
+            "negative_prompt": negative_prompt,
+            "num_inference_steps": num_inference_steps,
+            "height": height,
+            "width": width,
+            **kwargs,
+        }
+
+        result = self.pipe2.__call__(**kwargs)
         return Result.from_result(result)
 
     @torch.inference_mode()
@@ -183,29 +187,31 @@ class ControlNet(AbstractPipeline):
         self,
         prompt: List[str],
         image: List[Image.Image],
-        condition_image: List[Image.Image],
         seed: int,
-        steps: int,
-        guidance_scale: float,
+        num_inference_steps: int,
         negative_prompt: List[str],
         height: int,
         width: int,
+        guidance_scale: float = 7.5,
+        **kwargs,
     ):
         if self.__current_task_name != "pose":
             raise Exception("ControlNet is not loaded with pose model")
 
         torch.manual_seed(seed)
 
-        result = self.pipe2.__call__(
-            prompt=prompt[0],
-            image=[image[0]],
-            num_images_per_prompt=4,
-            num_inference_steps=steps,
-            negative_prompt=negative_prompt[0],
-            guidance_scale=guidance_scale,
-            height=height,
-            width=width,
-        )
+        kwargs = {
+            "prompt": prompt[0],
+            "image": [image[0]],
+            "num_images_per_prompt": 4,
+            "num_inference_steps": num_inference_steps,
+            "negative_prompt": negative_prompt[0],
+            "guidance_scale": guidance_scale,
+            "height": height,
+            "width": width,
+            **kwargs,
+        }
+        result = self.pipe2.__call__(**kwargs)
         return Result.from_result(result)
 
     @torch.inference_mode()
@@ -214,12 +220,13 @@ class ControlNet(AbstractPipeline):
         imageUrl: str,
         prompt: str,
         negative_prompt: str,
-        steps: int,
+        num_inference_steps: int,
         seed: int,
         height: int,
         width: int,
         resize_dimension: int,
-        guidance_scale: float,
+        guidance_scale: float = 7.5,
+        **kwargs,
     ):
         if self.__current_task_name != "tile_upscaler":
             raise Exception("ControlNet is not loaded with tile_upscaler model")
@@ -231,16 +238,18 @@ class ControlNet(AbstractPipeline):
             init_image, resize_dimension
         )
 
-        result = self.pipe.__call__(
-            image=condition_image,
-            prompt=prompt,
-            controlnet_conditioning_image=condition_image,
-            num_inference_steps=steps,
-            negative_prompt=negative_prompt,
-            height=condition_image.size[1],
-            width=condition_image.size[0],
-            guidance_scale=guidance_scale,
-        )
+        kwargs = {
+            "image": condition_image,
+            "prompt": prompt,
+            "controlnet_conditioning_image": condition_image,
+            "num_inference_steps": num_inference_steps,
+            "negative_prompt": negative_prompt,
+            "height": condition_image.size[1],
+            "width": condition_image.size[0],
+            "guidance_scale": guidance_scale,
+            **kwargs,
+        }
+        result = self.pipe.__call__(**kwargs)
         return Result.from_result(result)
 
     @torch.inference_mode()
@@ -249,11 +258,12 @@ class ControlNet(AbstractPipeline):
         imageUrl: Union[str, Image.Image],
         prompt: Union[str, List[str]],
         negative_prompt: Union[str, List[str]],
-        steps: int,
+        num_inference_steps: int,
         seed: int,
         height: int,
         width: int,
         guidance_scale: float = 7.5,
+        **kwargs,
     ):
         if self.__current_task_name != "scribble":
             raise Exception("ControlNet is not loaded with scribble model")
@@ -267,15 +277,17 @@ class ControlNet(AbstractPipeline):
 
         condition_image = self.__scribble_condition_image(init_image)
 
-        result = self.pipe2.__call__(
-            image=condition_image,
-            prompt=prompt,
-            num_inference_steps=steps,
-            negative_prompt=negative_prompt,
-            height=height,
-            width=width,
-            guidance_scale=guidance_scale,
-        )
+        kwargs = {
+            "image": condition_image,
+            "prompt": prompt,
+            "num_inference_steps": num_inference_steps,
+            "negative_prompt": negative_prompt,
+            "height": height,
+            "width": width,
+            "guidance_scale": guidance_scale,
+            **kwargs,
+        }
+        result = self.pipe2.__call__(**kwargs)
         return Result.from_result(result)
 
     @torch.inference_mode()
@@ -284,11 +296,12 @@ class ControlNet(AbstractPipeline):
         imageUrl: str,
         prompt: Union[str, List[str]],
         negative_prompt: Union[str, List[str]],
-        steps: int,
+        num_inference_steps: int,
         seed: int,
         height: int,
         width: int,
         guidance_scale: float = 7.5,
+        **kwargs,
     ):
         if self.__current_task_name != "linearart":
             raise Exception("ControlNet is not loaded with linearart model")
@@ -298,15 +311,17 @@ class ControlNet(AbstractPipeline):
         init_image = download_image(imageUrl).resize((width, height))
         condition_image = ControlNet.linearart_condition_image(init_image)
 
-        result = self.pipe2.__call__(
-            image=condition_image,
-            prompt=prompt,
-            num_inference_steps=steps,
-            negative_prompt=negative_prompt,
-            height=height,
-            width=width,
-            guidance_scale=guidance_scale,
-        )
+        kwargs = {
+            "image": condition_image,
+            "prompt": prompt,
+            "num_inference_steps": num_inference_steps,
+            "negative_prompt": negative_prompt,
+            "height": height,
+            "width": width,
+            "guidance_scale": guidance_scale,
+            **kwargs,
+        }
+        result = self.pipe2.__call__(**kwargs)
         return Result.from_result(result)
 
     def cleanup(self):
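
Note on the first hunk: the while loop follows alias chains in the config, where a value may itself be another config key. The old body assigned task_name one lookup too far ahead, so the next lookup indexed config with a value that may not be a key. A self-contained sketch of the fixed walk, with a hypothetical config:

    config = {
        "canny_2x": "canny",                            # alias -> task name
        "canny": "lllyasviel/control_v11p_sd15_canny",  # task name -> model id (illustrative)
    }

    task_name = "canny_2x"
    model = config[task_name]   # "canny" is still a key, i.e. an alias
    while model in config:
        task_name = model       # fixed line: step onto the alias first
        model = config[task_name]
    assert (task_name, model) == ("canny", "lllyasviel/control_v11p_sd15_canny")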

internals/pipelines/high_res.py CHANGED
@@ -27,17 +27,22 @@ class HighRes(AbstractPipeline):
         images,
         width: int,
         height: int,
-        steps: int,
+        num_inference_steps: int,
+        strength: float = 0.5,
+        guidance_scale: int = 9,
+        **kwargs,
     ):
         images = [image.resize((width, height)) for image in images]
-        result = self.pipe.__call__(
-            prompt=prompt,
-            image=images,
-            strength=0.5,
-            negative_prompt=negative_prompt,
-            guidance_scale=9,
-            num_inference_steps=steps,
-        )
+        kwargs = {
+            "prompt": prompt,
+            "image": images,
+            "strength": strength,
+            "negative_prompt": negative_prompt,
+            "guidance_scale": guidance_scale,
+            "num_inference_steps": num_inference_steps,
+            **kwargs,
+        }
+        result = self.pipe.__call__(**kwargs)
         return Result.from_result(result)
 
     @staticmethod
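
Note: with strength and guidance_scale promoted from hard-coded values (0.5 and 9) to defaulted parameters, and **kwargs spliced in last, a payload key such as hrf_strength (surfaced by task.high_res_kwargs() at the call sites) can now tune the high-res pass per task without another signature change.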

internals/pipelines/inpainter.py CHANGED
@@ -79,19 +79,22 @@ class InPainter(AbstractPipeline):
         seed: int,
         prompt: Union[str, List[str]],
         negative_prompt: Union[str, List[str]],
-        steps: int = 50,
+        num_inference_steps: int,
+        **kwargs,
     ):
         torch.manual_seed(seed)
 
         input_img = download_image(image_url).resize((width, height))
         mask_img = download_image(mask_image_url).resize((width, height))
 
-        return self.pipe.__call__(
-            prompt=prompt,
-            image=input_img,
-            mask_image=mask_img,
-            height=height,
-            width=width,
-            negative_prompt=negative_prompt,
-            num_inference_steps=steps,
-        ).images
+        kwargs = {
+            "prompt": prompt,
+            "image": input_img,
+            "mask_image": mask_img,
+            "height": height,
+            "width": width,
+            "negative_prompt": negative_prompt,
+            "num_inference_steps": num_inference_steps,
+            **kwargs,
+        }
+        return self.pipe.__call__(**kwargs).images

internals/pipelines/upscaler.py CHANGED
@@ -148,7 +148,7 @@ class Upscaler:
             model=model,
             half=False,
             gpu_id="0",
-            tile=0,
+            tile=320,
             tile_pad=10,
             pre_pad=0,
         )
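
Note: assuming this wraps Real-ESRGAN's RealESRGANer, as the surrounding arguments suggest, tile=0 upscales the whole frame in a single pass, while tile=320 processes it as 320 px patches (overlapped by tile_pad=10 to hide seams). That bounds peak GPU memory on large inputs at the cost of some speed.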