Upload folder using huggingface_hub
- inference.py +8 -2
- internals/pipelines/controlnets.py +26 -11
- internals/pipelines/replace_background.py +5 -4
- internals/util/slack.py +2 -0
inference.py
CHANGED

@@ -1,7 +1,7 @@
 import os
+import traceback
 from typing import List, Optional
 
-import traceback
 import torch
 
 import internals.util.prompt as prompt_util
@@ -305,9 +305,14 @@ def pose(task: Task, s3_outkey: str = "_pose", poses: Optional[list] = None):
     else:
         poses = [controlnet.detect_pose(task.get_imageUrl())] * num_return_sequences
 
+    depth = download_image(task.get_auxilary_imageUrl()).resize(
+        (task.get_width(), task.get_height())
+    )
+    depth = ControlNet.depth_image(depth)
+
     kwargs = {
         "prompt": prompt,
-        "image": poses,
+        "image": [depth, poses[0]],
         "seed": task.get_seed(),
         "num_inference_steps": task.get_steps(),
         "negative_prompt": [task.get_negative_prompt()] * num_return_sequences,
@@ -331,6 +336,7 @@ def pose(task: Task, s3_outkey: str = "_pose", poses: Optional[list] = None):
     images, _ = high_res.apply(**kwargs)
 
     upload_image(poses[0], "crecoAI/{}_pose.png".format(task.get_taskId()))
+    upload_image(depth, "crecoAI/{}_depth.png".format(task.get_taskId()))
 
     generated_image_urls = upload_images(images, s3_outkey, task.get_taskId())
 
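With this change, the pose task conditions generation on two control images at once: a depth map derived from the task's auxiliary image and the detected (or supplied) pose. For context, here is a minimal sketch of an equivalent two-ControlNet call in plain diffusers, assuming SD 1.5 and the depth-then-pose model order used in controlnets.py below; the base model name and the placeholder images are illustrative, not the repo's API:

```python
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from PIL import Image

# Placeholder condition images; in the service these come from
# ControlNet.depth_image() and controlnet.detect_pose().
depth_image = Image.new("RGB", (512, 512))
pose_image = Image.new("RGB", (512, 512))

# Load both ControlNets in the same order their condition images are passed.
controlnets = [
    ControlNetModel.from_pretrained(
        "lllyasviel/control_v11f1p_sd15_depth", torch_dtype=torch.float16
    ),
    ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16
    ),
]
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # illustrative base model
    controlnet=controlnets,
    torch_dtype=torch.float16,
).to("cuda")

# image[i] conditions controlnets[i]: depth first, then pose.
result = pipe(
    prompt="a person dancing in a park",
    image=[depth_image, pose_image],
    num_inference_steps=30,
)
```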
internals/pipelines/controlnets.py
CHANGED

@@ -2,20 +2,20 @@ from typing import List, Literal, Union
 
 import cv2
 import numpy as np
-from pydash import has
 import torch
 from controlnet_aux import HEDdetector, LineartDetector, OpenposeDetector
 from diffusers import (
     ControlNetModel,
     DiffusionPipeline,
     StableDiffusionControlNetPipeline,
-    UniPCMultistepScheduler,
     StableDiffusionXLControlNetPipeline,
+    UniPCMultistepScheduler,
 )
 from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import (
     MultiControlNetModel,
 )
 from PIL import Image
+from pydash import has
 from torch.nn import Linear
 from tqdm import gui
 from transformers import pipeline
@@ -32,11 +32,10 @@ from internals.util.commons import download_image
 from internals.util.config import (
     get_hf_cache_dir,
     get_hf_token,
-    get_model_dir,
     get_is_sdxl,
+    get_model_dir,
 )
 
-
 CONTROLNET_TYPES = Literal["pose", "canny", "scribble", "linearart", "tile_upscaler"]
 
 
@@ -60,11 +59,25 @@ class ControlNet(AbstractPipeline):
         task_name = model  # pyright: ignore
         model = config[task_name]
 
-        controlnet = ControlNetModel.from_pretrained(
-            model,
-            torch_dtype=torch.float16,
-            cache_dir=get_hf_cache_dir(),
-        ).to("cuda")
+        # Multi controlnet
+        if "," in model:
+            model_names = [m.strip() for m in model.split(",")]
+            controlnets = []
+            for name in model_names:
+                cn = ControlNetModel.from_pretrained(
+                    name,
+                    torch_dtype=torch.float16,
+                    cache_dir=get_hf_cache_dir(),
+                ).to("cuda")
+                controlnets.append(cn)
+            controlnet = MultiControlNetModel(controlnets).to("cuda")
+        # Single controlnet
+        else:
+            controlnet = ControlNetModel.from_pretrained(
+                model,
+                torch_dtype=torch.float16,
+                cache_dir=get_hf_cache_dir(),
+            ).to("cuda")
         self.__current_task_name = task_name
         self.controlnet = controlnet
 
@@ -202,15 +215,17 @@ class ControlNet(AbstractPipeline):
 
         kwargs = {
             "prompt": prompt[0],
-            "image":
+            "image": image,
             "num_images_per_prompt": 4,
             "num_inference_steps": num_inference_steps,
             "negative_prompt": negative_prompt[0],
             "guidance_scale": guidance_scale,
+            "control_guidance_end": [0.5, 1.0],
             "height": height,
             "width": width,
             **kwargs,
         }
+        print(kwargs)
         result = self.pipe2.__call__(**kwargs)
         return Result.from_result(result)
 
@@ -386,7 +401,7 @@ class ControlNet(AbstractPipeline):
         return img
 
     __model_normal = {
-        "pose": "lllyasviel/control_v11p_sd15_openpose",
+        "pose": "lllyasviel/control_v11f1p_sd15_depth, lllyasviel/control_v11p_sd15_openpose",
         "canny": "lllyasviel/control_v11p_sd15_canny",
         "linearart": "lllyasviel/control_v11p_sd15_lineart",
         "scribble": "lllyasviel/control_v11p_sd15_scribble",
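Two details of the new call path are worth spelling out. The comma-separated value for `"pose"` is what routes `load_model` into the multi-ControlNet branch, and `control_guidance_end=[0.5, 1.0]` sets a per-ControlNet stopping point as a fraction of the denoising schedule: the first ControlNet (depth) stops steering after 50% of the steps, while the second (pose) stays active to the end. A sketch of those semantics using diffusers' `control_guidance_start`/`control_guidance_end` parameters, reusing the assumed `pipe` and placeholder images from the sketch above:

```python
result = pipe(
    prompt="a person dancing in a park",
    image=[depth_image, pose_image],    # one condition image per ControlNet
    control_guidance_start=[0.0, 0.0],  # both active from the first step (the default)
    control_guidance_end=[0.5, 1.0],    # depth drops out halfway; pose runs to the end
    num_inference_steps=30,
)
```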
internals/pipelines/replace_background.py
CHANGED

@@ -102,18 +102,19 @@ class ReplaceBackground(AbstractPipeline):
         torch.manual_seed(seed)
         torch.cuda.manual_seed(seed)
 
+        image = image.convert("RGB")
+        if max(image.size) > 1024:
+            image = ImageUtil.resize_image(image, dimension=1024)
+        image = self.remove_background.remove(image, model_type=model_type)
+
         width = int(width)
         height = int(height)
 
         resolution = max(width, height)
 
-        image = image.convert("RGB")
-
         image = ImageUtil.resize_image(image, resolution)
         image = ImageUtil.padd_image(image, width, height)
 
-        image = self.remove_background.remove(image, model_type=model_type)
-
         mask = image.copy()
         pixdata = mask.load()
 
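The reordering here matters: background removal now runs on an RGB copy capped at 1024px on its long edge, before the image is scaled and padded to the requested canvas, so the matting model's input stays bounded regardless of output size. A rough sketch of the resulting flow, with stand-ins for the repo's `ImageUtil` helpers and background remover:

```python
from PIL import Image

def remove_background(img: Image.Image) -> Image.Image:
    """Stand-in for self.remove_background.remove(); real matting goes here."""
    return img.convert("RGBA")

def preprocess(image: Image.Image, width: int, height: int) -> Image.Image:
    image = image.convert("RGB")
    # Bound the matting input to 1024px on the long edge (thumbnail only shrinks).
    if max(image.size) > 1024:
        image.thumbnail((1024, 1024))
    # Remove the background while the image is still small...
    image = remove_background(image)
    # ...then scale and pad to the requested output canvas, as the diff does
    # with ImageUtil.resize_image and ImageUtil.padd_image.
    resolution = max(width, height)
    image.thumbnail((resolution, resolution))
    canvas = Image.new("RGBA", (width, height))
    canvas.paste(image, ((width - image.width) // 2, (height - image.height) // 2))
    return canvas
```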
internals/util/slack.py
CHANGED

@@ -31,6 +31,8 @@ class Slack:
         for key, value in raw.items():
             if value:
                 if type(value) == list:
+                    if type(value[0]) == float or type(value[0]) == int:
+                        value = str(value)
                     message += f"*{key}*: {', '.join(value)}\n"
                 else:
                     message += f"*{key}*: {value}\n"
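One caveat in the added branch: after `value = str(value)`, the subsequent `', '.join(value)` iterates over the characters of the stringified list rather than its elements, so a list like `[1, 2]` renders as `[, 1, ,,  , 2, ]`. A safer variant converts each element instead (a sketch, not the committed code):

```python
if type(value) == list:
    # Stringify each element so join() handles numbers and strings alike.
    message += f"*{key}*: {', '.join(str(v) for v in value)}\n"
else:
    message += f"*{key}*: {value}\n"
```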