Upload folder using huggingface_hub
- inference.py +1 -3
- inference2.py +0 -2
- internals/pipelines/controlnets.py +3 -2
- internals/pipelines/replace_background.py +14 -32
- internals/util/image.py +0 -10
inference.py
CHANGED
@@ -490,8 +490,6 @@ def replace_bg(task: Task):
         width=task.get_width(),
         height=task.get_height(),
         steps=task.get_steps(),
-        extend_object=task.rbg_extend_object(),
-        product_scale_width=task.get_image_scale(),
         apply_high_res=task.get_high_res_fix(),
         conditioning_scale=task.rbg_controlnet_conditioning_scale(),
         model_type=task.get_modelType(),
@@ -525,7 +523,7 @@ def load_model_by_task(task: Task):
         inpainter.load()
         safety_checker.apply(inpainter)
     elif task.get_type() == TaskType.REPLACE_BG:
-        replace_background.load(
+        replace_background.load(base=text2img_pipe, high_res=high_res)
     else:
         if task.get_type() == TaskType.TILE_UPSCALE:
             controlnet.load_model("tile_upscaler")
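For orientation: the REPLACE_BG branch above now hands the already-loaded text-to-image pipeline to ReplaceBackground, which assembles its ControlNet pipeline from the same components instead of loading a second model from disk. A minimal sketch of that reuse pattern with diffusers, using a placeholder base model id (the repository resolves its own model directory via get_model_dir()):

import torch
from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    StableDiffusionPipeline,
)

# Placeholder model id for illustration only.
base = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16
)

# Reuse the base pipeline's UNet/VAE/text encoder; only the ControlNet
# weights are newly loaded, mirroring the load(base=...) path in this commit.
pipe = StableDiffusionControlNetPipeline(**base.components, controlnet=controlnet)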
inference2.py
CHANGED
@@ -177,8 +177,6 @@ def replace_bg(task: Task):
         width=task.get_width(),
         height=task.get_height(),
         steps=task.get_steps(),
-        extend_object=task.rbg_extend_object(),
-        product_scale_width=task.get_image_scale(),
         conditioning_scale=task.rbg_controlnet_conditioning_scale(),
         model_type=task.get_modelType(),
     )
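Since both entry points drop extend_object and product_scale_width, any external caller still passing them will now raise TypeError. A compatibility sketch (not part of the commit) that filters keyword arguments against the current signature:

import inspect

def call_compatible(fn, **kwargs):
    # Keep only the keyword arguments fn still accepts; extend_object and
    # product_scale_width are silently dropped for callers on the old API.
    accepted = inspect.signature(fn).parameters
    return fn(**{k: v for k, v in kwargs.items() if k in accepted})

# Example: call_compatible(replace_background.replace, extend_object=True, ...)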
internals/pipelines/controlnets.py
CHANGED
@@ -165,7 +165,7 @@ class ControlNet(AbstractPipeline):
         torch.manual_seed(seed)
 
         init_image = download_image(imageUrl).resize((width, height))
-        init_image =
+        init_image = ControlNet.canny_detect_edge(init_image)
 
         kwargs = {
             "prompt": prompt,
@@ -361,7 +361,8 @@ class ControlNet(AbstractPipeline):
         depth = Image.fromarray(depth)
         return depth
 
-
+    @staticmethod
+    def canny_detect_edge(image: Image.Image) -> Image.Image:
         image_array = np.array(image)
 
         low_threshold = 100
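The hunk only shows the head of the new static method; the rest is the standard Canny preprocessing used for ControlNet conditioning. A sketch of what such a helper typically looks like, with the high threshold of 200 assumed (only low_threshold = 100 is visible above):

import cv2
import numpy as np
from PIL import Image

def canny_detect_edge(image: Image.Image) -> Image.Image:
    image_array = np.array(image)

    low_threshold = 100
    high_threshold = 200  # assumption; not visible in the hunk

    edges = cv2.Canny(image_array, low_threshold, high_threshold)
    # ControlNet expects a 3-channel conditioning image, so stack the
    # single-channel edge map three times.
    edges = edges[:, :, None]
    edges = np.concatenate([edges, edges, edges], axis=2)
    return Image.fromarray(edges)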
internals/pipelines/replace_background.py
CHANGED
@@ -7,6 +7,7 @@ from diffusers import (
     ControlNetModel,
     StableDiffusionControlNetInpaintPipeline,
     StableDiffusionInpaintPipeline,
+    StableDiffusionControlNetPipeline,
     UniPCMultistepScheduler,
 )
 from PIL import Image, ImageFilter, ImageOps
@@ -36,25 +37,24 @@ class ReplaceBackground(AbstractPipeline):
         self,
         upscaler: Optional[Upscaler] = None,
         remove_background: Optional[RemoveBackgroundV2] = None,
-
+        base: Optional[AbstractPipeline] = None,
         high_res: Optional[HighRes] = None,
     ):
         if self.__loaded:
             return
         controlnet_model = ControlNetModel.from_pretrained(
-            "lllyasviel/
+            "lllyasviel/control_v11p_sd15_canny",
             torch_dtype=torch.float16,
             cache_dir=get_hf_cache_dir(),
         ).to("cuda")
-        if
-
-
-                **inpainter.pipe.components,
+        if base:
+            pipe = StableDiffusionControlNetPipeline(
+                **base.pipe.components,
                 controlnet=controlnet_model,
             )
         else:
-            pipe =
-
+            pipe = StableDiffusionControlNetPipeline.from_pretrained(
+                get_model_dir(),
                 controlnet=controlnet_model,
                 torch_dtype=torch.float16,
                 cache_dir=get_hf_cache_dir(),
@@ -88,46 +88,32 @@ class ReplaceBackground(AbstractPipeline):
         image: Union[str, Image.Image],
         width: int,
         height: int,
-        product_scale_width: float,
         prompt: List[str],
         negative_prompt: List[str],
-        extend_object: bool,
         conditioning_scale: float,
         seed: int,
         steps: int,
         apply_high_res: bool = False,
         model_type: ModelType = ModelType.REAL,
     ):
-        # image = Image.open("original.png")
         if type(image) is str:
             image = download_image(image)
 
         torch.manual_seed(seed)
         torch.cuda.manual_seed(seed)
 
-        image = image.convert("RGB")
-        if max(image.size) > 1024:
-            image = ImageUtil.resize_image(image, dimension=1024)
-        image = self.remove_background.remove(image, model_type=model_type)
-
         width = int(width)
         height = int(height)
 
-
-        n_height = int(n_width * height // width)
-
-        print(width, height, n_width, n_height)
+        resolution = max(width, height)
 
-
-        if extend_object:
-            condition_image = ControlNet.linearart_condition_image(image)
-            condition_image = ImageUtil.resize_image(condition_image, resolution)
-            condition_image = ImageUtil.padd_image(condition_image, width, height)
-            condition_image = condition_image.convert("RGB")
+        image = image.convert("RGB")
 
         image = ImageUtil.resize_image(image, resolution)
         image = ImageUtil.padd_image(image, width, height)
 
+        image = self.remove_background.remove(image, model_type=model_type)
+
         mask = image.copy()
         pixdata = mask.load()
 
@@ -140,19 +126,15 @@ class ReplaceBackground(AbstractPipeline):
                 else:
                     pixdata[x, y] = (0, 0, 0, 255)
 
-
-        condition_image = ControlNet.linearart_condition_image(image)
+        condition_image = ControlNet.canny_detect_edge(image)
         mask = mask.convert("RGB")
 
         result = self.pipe.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
-            image=
-            mask_image=mask,
-            control_image=condition_image,
+            image=condition_image,
            controlnet_conditioning_scale=conditioning_scale,
            guidance_scale=9,
-            strength=1,
            height=height,
            num_inference_steps=steps,
            width=width,
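The reworked replace() flow resizes and pads first, removes the background, derives a mask from the cutout, and conditions the ControlNet pipeline on Canny edges. The if-branch of the pixel loop sits outside the hunk, so the following mask-building sketch is an assumption about the convention (transparent background painted white, opaque foreground painted black), matching only the visible else-branch:

from PIL import Image

def build_background_mask(cutout: Image.Image) -> Image.Image:
    # cutout is the RGBA result of background removal; transparent pixels
    # are treated as background, opaque pixels as the product to keep.
    mask = cutout.copy()
    pixdata = mask.load()
    w, h = mask.size
    for y in range(h):
        for x in range(w):
            if pixdata[x, y][3] == 0:
                pixdata[x, y] = (255, 255, 255, 255)  # background
            else:
                pixdata[x, y] = (0, 0, 0, 255)        # foreground
    return mask.convert("RGB")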
internals/util/image.py
CHANGED
@@ -45,16 +45,6 @@ def from_bytes(data: bytes) -> Image.Image:
 
 def padd_image(image: Image.Image, to_width: int, to_height: int) -> Image.Image:
     iw, ih = image.size
-
-    value = min(to_width, to_height)
-    # resize Image
-    if iw > ih:
-        image = image.resize((value, int(value * ih / iw)))
-    elif ih > iw:
-        image = image.resize((int(value * iw / ih), value))
-
-    # padd Image
-    iw, ih = image.size
     img = Image.new("RGBA", (to_width, to_height), (0, 0, 0, 0))
     img.paste(image, ((to_width - iw) // 2, (to_height - ih) // 2))
     return img
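padd_image now only centers the image on a transparent canvas; resizing is the caller's responsibility, as the updated replace_background.py does before padding. A small usage sketch, where resize_to_long_edge stands in for ImageUtil.resize_image (an assumption about its behaviour) and the file name is illustrative:

from PIL import Image

from internals.util.image import padd_image

def resize_to_long_edge(image: Image.Image, resolution: int) -> Image.Image:
    # Scale so the longest side equals `resolution`, preserving aspect ratio.
    iw, ih = image.size
    scale = resolution / max(iw, ih)
    return image.resize((max(1, int(iw * scale)), max(1, int(ih * scale))))

product = Image.open("product.png").convert("RGBA")        # illustrative input
product = resize_to_long_edge(product, resolution=512)     # caller resizes first
canvas = padd_image(product, to_width=768, to_height=768)  # then centers and pads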