youssefKadaouiAbbassi committed
Commit 1253198 · verified · 1 Parent(s): 8bf6961

revert: app.py

Files changed (1)
  1. app.py +599 -73
app.py CHANGED
@@ -1,125 +1,651 @@
- import os
- import torch
  import cv2
  import numpy as np
- from insightface.app import FaceAnalysis
- from diffusers import ControlNetModel, StableDiffusionXLInstantIDPipeline
  from diffusers.utils import load_image
- from torchvision.transforms import Compose
  from depth_anything.dpt import DepthAnything
  from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet

- # Suppress ONNX Runtime CPU thread affinity warnings
- os.environ["ORT_DISABLE_CPU_AFFINITY"] = "1"
-
- # Ensure CUDA provider is available for ONNX
- import onnxruntime as ort
- print("Available ONNX Runtime Providers:", ort.get_available_providers())

- # Global variables
  device = "cuda" if torch.cuda.is_available() else "cpu"
- dtype = torch.float16 if device == "cuda" else torch.float32

- # Configure FaceAnalysis with GPU support
  app = FaceAnalysis(
      name="antelopev2",
      root="./",
-     providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
  )
  app.prepare(ctx_id=0, det_size=(640, 640))

- # Initialize DepthAnything for depth map generation
- depth_anything = DepthAnything.from_pretrained("LiheYoung/depth_anything_vitl14").to(device).eval()

  transform = Compose([
-     Resize(width=518, height=518, resize_target=False, keep_aspect_ratio=True, ensure_multiple_of=14),
      NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
      PrepareForNet(),
  ])

- # Load ControlNet models
- controlnet_identitynet = ControlNetModel.from_pretrained("./checkpoints/ControlNetModel", torch_dtype=dtype)
- controlnet_canny = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0", torch_dtype=dtype).to(device)
- controlnet_depth = ControlNetModel.from_pretrained("diffusers/controlnet-depth-sdxl-1.0-small", torch_dtype=dtype).to(device)

- # Load main pipeline
- pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
-     "wangqixun/YamerMIX_v8",
-     controlnet=[controlnet_identitynet],
-     torch_dtype=dtype,
-     safety_checker=None,
-     feature_extractor=None,
- ).to(device)

- pipe.scheduler = diffusers.EulerDiscreteScheduler.from_config(pipe.scheduler.config)
- pipe.load_ip_adapter_instantid("./checkpoints/ip-adapter.bin")
- pipe.cuda()

- # Utility functions
  def get_depth_map(image):
      image = np.array(image) / 255.0
      h, w = image.shape[:2]
      image = transform({'image': image})['image']
-     image = torch.from_numpy(image).unsqueeze(0).to(device)
      with torch.no_grad():
          depth = depth_anything(image)
-     depth = torch.nn.functional.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
-     depth = ((depth - depth.min()) / (depth.max() - depth.min()) * 255.0).cpu().numpy().astype(np.uint8)
-     return Image.fromarray(depth)

  def get_canny_image(image, t1=100, t2=200):
      image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
      edges = cv2.Canny(image, t1, t2)
      return Image.fromarray(edges, "L")

- # Map for controlnet preprocessing
  controlnet_map = {
      "canny": controlnet_canny,
      "depth": controlnet_depth,
  }
-
  controlnet_map_fn = {
      "canny": get_canny_image,
      "depth": get_depth_map,
  }

- # Generate image function
- def generate_image(face_image_path, controlnet_selection, prompt, negative_prompt, num_steps, guidance_scale, seed):
-     face_image = load_image(face_image_path).resize((1024, 1024))
-     face_info = app.get(np.array(face_image))
-     if not face_info:
-         raise ValueError("No face detected in the image!")
-
-     control_images = []
-     for control_type in controlnet_selection:
-         if control_type in controlnet_map_fn:
-             control_images.append(controlnet_map_fn[control_type](face_image))
-
-     pipe.controlnet = [controlnet_identitynet] + [controlnet_map[control_type] for control_type in controlnet_selection]
-     generator = torch.manual_seed(seed)

-     output = pipe(
          prompt=prompt,
          negative_prompt=negative_prompt,
          image=control_images,
-         controlnet_conditioning_scale=1.0,
-         guidance_scale=guidance_scale,
          num_inference_steps=num_steps,
          generator=generator,
      )
-     return output.images[0]
-
- # Example usage
- if __name__ == "__main__":
-     face_image_path = "./examples/yann-lecun_resize.jpg"
-     controlnet_selection = ["canny", "depth"]
-     prompt = "A person in vibrant colors"
-     negative_prompt = "(low quality, blurry)"
-     num_steps = 30
-     guidance_scale = 7.5
-     seed = 42
-
-     output_image = generate_image(
-         face_image_path, controlnet_selection, prompt, negative_prompt, num_steps, guidance_scale, seed
-     )
-     output_image.show()

  import cv2
+ import torch
+ import random
  import numpy as np
+
+ import spaces
+
+ import PIL
+ from PIL import Image
+ from typing import Tuple
+
+ import diffusers
  from diffusers.utils import load_image
+ from diffusers.models import ControlNetModel
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
+
+ from huggingface_hub import hf_hub_download
+
+ from insightface.app import FaceAnalysis
+
+ from style_template import styles
+ from pipeline_stable_diffusion_xl_instantid_full import StableDiffusionXLInstantIDPipeline, draw_kps
+
+ # from controlnet_aux import OpenposeDetector
+
+ import gradio as gr
+
  from depth_anything.dpt import DepthAnything
  from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet

+ import torch.nn.functional as F
+ from torchvision.transforms import Compose

+ # global variable
+ MAX_SEED = np.iinfo(np.int32).max
  device = "cuda" if torch.cuda.is_available() else "cpu"
+ dtype = torch.float16 if str(device).__contains__("cuda") else torch.float32
+ STYLE_NAMES = list(styles.keys())
+ DEFAULT_STYLE_NAME = "Spring Festival"
+ enable_lcm_arg = False
+
+ # download checkpoints
+ from huggingface_hub import hf_hub_download

+ hf_hub_download(repo_id="InstantX/InstantID", filename="ControlNetModel/config.json", local_dir="./checkpoints")
+ hf_hub_download(
+     repo_id="InstantX/InstantID",
+     filename="ControlNetModel/diffusion_pytorch_model.safetensors",
+     local_dir="./checkpoints",
+ )
+ hf_hub_download(repo_id="InstantX/InstantID", filename="ip-adapter.bin", local_dir="./checkpoints")
+
+ # Load face encoder
  app = FaceAnalysis(
      name="antelopev2",
      root="./",
+     providers=["CPUExecutionProvider"],
  )
  app.prepare(ctx_id=0, det_size=(640, 640))

+ # openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
+
+ depth_anything = DepthAnything.from_pretrained('LiheYoung/depth_anything_vitl14').to(device).eval()

  transform = Compose([
+     Resize(
+         width=518,
+         height=518,
+         resize_target=False,
+         keep_aspect_ratio=True,
+         ensure_multiple_of=14,
+         resize_method='lower_bound',
+         image_interpolation_method=cv2.INTER_CUBIC,
+     ),
      NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
      PrepareForNet(),
  ])

+ # Path to InstantID models
+ face_adapter = f"./checkpoints/ip-adapter.bin"
+ controlnet_path = f"./checkpoints/ControlNetModel"

+ # Load pipeline face ControlNetModel
+ controlnet_identitynet = ControlNetModel.from_pretrained(
+     controlnet_path, torch_dtype=dtype
+ )

+ # controlnet-pose/canny/depth
+ # controlnet_pose_model = "thibaud/controlnet-openpose-sdxl-1.0"
+ controlnet_canny_model = "diffusers/controlnet-canny-sdxl-1.0"
+ controlnet_depth_model = "diffusers/controlnet-depth-sdxl-1.0-small"
+
+ # controlnet_pose = ControlNetModel.from_pretrained(
+ #     controlnet_pose_model, torch_dtype=dtype
+ # ).to(device)
+ controlnet_canny = ControlNetModel.from_pretrained(
+     controlnet_canny_model, torch_dtype=dtype
+ ).to(device)
+ controlnet_depth = ControlNetModel.from_pretrained(
+     controlnet_depth_model, torch_dtype=dtype
+ ).to(device)

  def get_depth_map(image):
+
      image = np.array(image) / 255.0
+
      h, w = image.shape[:2]
+
      image = transform({'image': image})['image']
+     image = torch.from_numpy(image).unsqueeze(0).to("cuda")
+
      with torch.no_grad():
          depth = depth_anything(image)
+
+     depth = F.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
+     depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
+
+     depth = depth.cpu().numpy().astype(np.uint8)
+
+     depth_image = Image.fromarray(depth)
+
+     return depth_image

  def get_canny_image(image, t1=100, t2=200):
      image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
      edges = cv2.Canny(image, t1, t2)
      return Image.fromarray(edges, "L")

  controlnet_map = {
+     #"pose": controlnet_pose,
      "canny": controlnet_canny,
      "depth": controlnet_depth,
  }
  controlnet_map_fn = {
+     #"pose": openpose,
      "canny": get_canny_image,
      "depth": get_depth_map,
  }

+ pretrained_model_name_or_path = "wangqixun/YamerMIX_v8"
+
+ pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
+     pretrained_model_name_or_path,
+     controlnet=[controlnet_identitynet],
+     torch_dtype=dtype,
+     safety_checker=None,
+     feature_extractor=None,
+ ).to(device)
+
+ pipe.scheduler = diffusers.EulerDiscreteScheduler.from_config(
+     pipe.scheduler.config
+ )
+
+ # load and disable LCM
+ pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
+ pipe.disable_lora()

+ pipe.cuda()
+ pipe.load_ip_adapter_instantid(face_adapter)
+ pipe.image_proj_model.to("cuda")
+ pipe.unet.to("cuda")
+
+ def toggle_lcm_ui(value):
+     if value:
+         return (
+             gr.update(minimum=0, maximum=100, step=1, value=5),
+             gr.update(minimum=0.1, maximum=20.0, step=0.1, value=1.5),
+         )
+     else:
+         return (
+             gr.update(minimum=5, maximum=100, step=1, value=30),
+             gr.update(minimum=0.1, maximum=20.0, step=0.1, value=5),
+         )
+
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     return seed
+
+ def remove_tips():
+     return gr.update(visible=False)
+
+ def get_example():
+     case = [
+         [
+             "./examples/yann-lecun_resize.jpg",
+             None,
+             "a man",
+             "Spring Festival",
+             "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+         ],
+         [
+             "./examples/musk_resize.jpeg",
+             "./examples/poses/pose2.jpg",
+             "a man flying in the sky in Mars",
+             "Mars",
+             "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+         ],
+         [
+             "./examples/sam_resize.png",
+             "./examples/poses/pose4.jpg",
+             "a man doing a silly pose wearing a suite",
+             "Jungle",
+             "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, gree",
+         ],
+         [
+             "./examples/schmidhuber_resize.png",
+             "./examples/poses/pose3.jpg",
+             "a man sit on a chair",
+             "Neon",
+             "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+         ],
+         [
+             "./examples/kaifu_resize.png",
+             "./examples/poses/pose.jpg",
+             "a man",
+             "Vibrant Color",
+             "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+         ],
+     ]
+     return case
+
+ def run_for_examples(face_file, pose_file, prompt, style, negative_prompt):
+     return generate_image(
+         face_file,
+         pose_file,
+         prompt,
+         negative_prompt,
+         style,
+         20,  # num_steps
+         0.8,  # identitynet_strength_ratio
+         0.8,  # adapter_strength_ratio
+         #0.4, # pose_strength
+         0.3,  # canny_strength
+         0.5,  # depth_strength
+         ["depth", "canny"],  # controlnet_selection
+         5.0,  # guidance_scale
+         42,  # seed
+         "EulerDiscreteScheduler",  # scheduler
+         False,  # enable_LCM
+         True,  # enable_Face_Region
+     )
+
+ def convert_from_cv2_to_image(img: np.ndarray) -> Image:
+     return Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+
+ def convert_from_image_to_cv2(img: Image) -> np.ndarray:
+     return cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
+
+ def resize_img(
+     input_image,
+     max_side=1280,
+     min_side=1024,
+     size=None,
+     pad_to_max_side=False,
+     mode=PIL.Image.BILINEAR,
+     base_pixel_number=64,
+ ):
+     w, h = input_image.size
+     if size is not None:
+         w_resize_new, h_resize_new = size
+     else:
+         ratio = min_side / min(h, w)
+         w, h = round(ratio * w), round(ratio * h)
+         ratio = max_side / max(h, w)
+         input_image = input_image.resize([round(ratio * w), round(ratio * h)], mode)
+         w_resize_new = (round(ratio * w) // base_pixel_number) * base_pixel_number
+         h_resize_new = (round(ratio * h) // base_pixel_number) * base_pixel_number
+     input_image = input_image.resize([w_resize_new, h_resize_new], mode)
+
+     if pad_to_max_side:
+         res = np.ones([max_side, max_side, 3], dtype=np.uint8) * 255
+         offset_x = (max_side - w_resize_new) // 2
+         offset_y = (max_side - h_resize_new) // 2
+         res[
+             offset_y : offset_y + h_resize_new, offset_x : offset_x + w_resize_new
+         ] = np.array(input_image)
+         input_image = Image.fromarray(res)
+     return input_image
+
+ def apply_style(
+     style_name: str, positive: str, negative: str = ""
+ ) -> Tuple[str, str]:
+     p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
+     return p.replace("{prompt}", positive), n + " " + negative
+
+ @spaces.GPU
+ def generate_image(
+     face_image_path,
+     pose_image_path,
+     prompt,
+     negative_prompt,
+     style_name,
+     num_steps,
+     identitynet_strength_ratio,
+     adapter_strength_ratio,
+     #pose_strength,
+     canny_strength,
+     depth_strength,
+     controlnet_selection,
+     guidance_scale,
+     seed,
+     scheduler,
+     enable_LCM,
+     enhance_face_region,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+
+     if enable_LCM:
+         pipe.scheduler = diffusers.LCMScheduler.from_config(pipe.scheduler.config)
+         pipe.enable_lora()
+     else:
+         pipe.disable_lora()
+         scheduler_class_name = scheduler.split("-")[0]
+
+         add_kwargs = {}
+         if len(scheduler.split("-")) > 1:
+             add_kwargs["use_karras_sigmas"] = True
+         if len(scheduler.split("-")) > 2:
+             add_kwargs["algorithm_type"] = "sde-dpmsolver++"
+         scheduler = getattr(diffusers, scheduler_class_name)
+         pipe.scheduler = scheduler.from_config(pipe.scheduler.config, **add_kwargs)
+
+     if face_image_path is None:
+         raise gr.Error(
+             f"Cannot find any input face image! Please upload the face image"
+         )
+
+     if prompt is None:
+         prompt = "a person"
+
+     # apply the style template
+     prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
+
+     face_image = load_image(face_image_path)
+     face_image = resize_img(face_image, max_side=1024)
+     face_image_cv2 = convert_from_image_to_cv2(face_image)
+     height, width, _ = face_image_cv2.shape
+
+     # Extract face features
+     face_info = app.get(face_image_cv2)
+
+     if len(face_info) == 0:
+         raise gr.Error(
+             f"Unable to detect a face in the image. Please upload a different photo with a clear face."
+         )
+
+     face_info = sorted(
+         face_info,
+         key=lambda x: (x["bbox"][2] - x["bbox"][0]) * x["bbox"][3] - x["bbox"][1],
+     )[
+         -1
+     ]  # only use the maximum face
+     face_emb = face_info["embedding"]
+     face_kps = draw_kps(convert_from_cv2_to_image(face_image_cv2), face_info["kps"])
+     img_controlnet = face_image
+     if pose_image_path is not None:
+         pose_image = load_image(pose_image_path)
+         pose_image = resize_img(pose_image, max_side=1024)
+         img_controlnet = pose_image
+         pose_image_cv2 = convert_from_image_to_cv2(pose_image)
+
+         face_info = app.get(pose_image_cv2)
+
+         if len(face_info) == 0:
+             raise gr.Error(
+                 f"Cannot find any face in the reference image! Please upload another person image"
+             )
+
+         face_info = face_info[-1]
+         face_kps = draw_kps(pose_image, face_info["kps"])
+
+         width, height = face_kps.size
+
+     if enhance_face_region:
+         control_mask = np.zeros([height, width, 3])
+         x1, y1, x2, y2 = face_info["bbox"]
+         x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
+         control_mask[y1:y2, x1:x2] = 255
+         control_mask = Image.fromarray(control_mask.astype(np.uint8))
+     else:
+         control_mask = None
+
+     if len(controlnet_selection) > 0:
+         controlnet_scales = {
+             #"pose": pose_strength,
+             "canny": canny_strength,
+             "depth": depth_strength,
+         }
+         pipe.controlnet = MultiControlNetModel(
+             [controlnet_identitynet]
+             + [controlnet_map[s] for s in controlnet_selection]
+         )
+         control_scales = [float(identitynet_strength_ratio)] + [
+             controlnet_scales[s] for s in controlnet_selection
+         ]
+         control_images = [face_kps] + [
+             controlnet_map_fn[s](img_controlnet).resize((width, height))
+             for s in controlnet_selection
+         ]
+     else:
+         pipe.controlnet = controlnet_identitynet
+         control_scales = float(identitynet_strength_ratio)
+         control_images = face_kps
+
+     generator = torch.Generator(device=device).manual_seed(seed)
+
+     print("Start inference...")
+     print(f"[Debug] Prompt: {prompt}, \n[Debug] Neg Prompt: {negative_prompt}")
+
+     pipe.set_ip_adapter_scale(adapter_strength_ratio)
+     images = pipe(
          prompt=prompt,
          negative_prompt=negative_prompt,
+         image_embeds=face_emb,
          image=control_images,
+         control_mask=control_mask,
+         controlnet_conditioning_scale=control_scales,
          num_inference_steps=num_steps,
+         guidance_scale=guidance_scale,
+         height=height,
+         width=width,
          generator=generator,
+     ).images
+
+     return images[0], gr.update(visible=True)
+
+ # Description
+ title = r"""
+ <h1 align="center">InstantID: Zero-shot Identity-Preserving Generation in Seconds</h1>
+ """
+
+ description = r"""
+ <b>Official 🤗 Gradio demo</b> for <a href='https://github.com/InstantID/InstantID' target='_blank'><b>InstantID: Zero-shot Identity-Preserving Generation in Seconds</b></a>.<br>
+ We are organizing a Spring Festival event with HuggingFace from 2.7 to 2.25, and you can now generate pictures of Spring Festival costumes. Happy Dragon Year 🐲 ! Share the joy with your family.<br>
+ How to use:<br>
+ 1. Upload an image with a face. For images with multiple faces, we will only detect the largest face. Ensure the face is not too small and is clearly visible without significant obstructions or blurring.
+ 2. (Optional) You can upload another image as a reference for the face pose. If you don't, we will use the first detected face image to extract facial landmarks. If you use a cropped face at step 1, it is recommended to upload it to define a new face pose.
+ 3. (Optional) You can select multiple ControlNet models to control the generation process. The default is to use the IdentityNet only. The ControlNet models include pose skeleton, canny, and depth. You can adjust the strength of each ControlNet model to control the generation process.
+ 4. Enter a text prompt, as done in normal text-to-image models.
+ 5. Click the <b>Submit</b> button to begin customization.
+ 6. Share your customized photo with your friends and enjoy! 😊"""
+
+ article = r"""
+ ---
+ 📝 **Citation**
+ <br>
+ If our work is helpful for your research or applications, please cite us via:
+ ```bibtex
+ @article{wang2024instantid,
+   title={InstantID: Zero-shot Identity-Preserving Generation in Seconds},
+   author={Wang, Qixun and Bai, Xu and Wang, Haofan and Qin, Zekui and Chen, Anthony},
+   journal={arXiv preprint arXiv:2401.07519},
+   year={2024}
+ }
+ ```
+ 📧 **Contact**
+ <br>
+ If you have any questions, please feel free to open an issue or directly reach us out at <b>[email protected]</b>.
+ """
+
+ tips = r"""
+ ### Usage tips of InstantID
+ 1. If you're not satisfied with the similarity, try increasing the weight of "IdentityNet Strength" and "Adapter Strength."
+ 2. If you feel that the saturation is too high, first decrease the Adapter strength. If it remains too high, then decrease the IdentityNet strength.
+ 3. If you find that text control is not as expected, decrease Adapter strength.
+ 4. If you find that realistic style is not good enough, go for our Github repo and use a more realistic base model.
+ """
+
+ css = """
+ .gradio-container {width: 85% !important}
+ """
+ with gr.Blocks(css=css) as demo:
+     # description
+     gr.Markdown(title)
+     gr.Markdown(description)
+
+     with gr.Row():
+         with gr.Column():
+             with gr.Row(equal_height=True):
+                 # upload face image
+                 face_file = gr.Image(
+                     label="Upload a photo of your face", type="filepath"
+                 )
+                 # optional: upload a reference pose image
+                 pose_file = gr.Image(
+                     label="Upload a reference pose image (Optional)",
+                     type="filepath",
+                 )
+
+             # prompt
+             prompt = gr.Textbox(
+                 label="Prompt",
+                 info="Give simple prompt is enough to achieve good face fidelity",
+                 placeholder="A photo of a person",
+                 value="",
+             )
+
+             submit = gr.Button("Submit", variant="primary")
+             enable_LCM = gr.Checkbox(
+                 label="Enable Fast Inference with LCM", value=enable_lcm_arg,
+                 info="LCM speeds up the inference step, the trade-off is the quality of the generated image. It performs better with portrait face images rather than distant faces",
+             )
+             style = gr.Dropdown(
+                 label="Style template",
+                 choices=STYLE_NAMES,
+                 value=DEFAULT_STYLE_NAME,
+             )
+
+             # strength
+             identitynet_strength_ratio = gr.Slider(
+                 label="IdentityNet strength (for fidelity)",
+                 minimum=0,
+                 maximum=1.5,
+                 step=0.05,
+                 value=0.80,
+             )
+             adapter_strength_ratio = gr.Slider(
+                 label="Image adapter strength (for detail)",
+                 minimum=0,
+                 maximum=1.5,
+                 step=0.05,
+                 value=0.80,
+             )
+             with gr.Accordion("Controlnet"):
+                 controlnet_selection = gr.CheckboxGroup(
+                     ["canny", "depth"], label="Controlnet", value=["depth"],
+                     info="Use pose for skeleton inference, canny for edge detection, and depth for depth map estimation. You can try all three to control the generation process"
+                 )
+                 # pose_strength = gr.Slider(
+                 #     label="Pose strength",
+                 #     minimum=0,
+                 #     maximum=1.5,
+                 #     step=0.05,
+                 #     value=0.40,
+                 # )
+                 canny_strength = gr.Slider(
+                     label="Canny strength",
+                     minimum=0,
+                     maximum=1.5,
+                     step=0.05,
+                     value=0.40,
+                 )
+                 depth_strength = gr.Slider(
+                     label="Depth strength",
+                     minimum=0,
+                     maximum=1.5,
+                     step=0.05,
+                     value=0.40,
+                 )
+             with gr.Accordion(open=False, label="Advanced Options"):
+                 negative_prompt = gr.Textbox(
+                     label="Negative Prompt",
+                     placeholder="low quality",
+                     value="(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+                 )
+                 num_steps = gr.Slider(
+                     label="Number of sample steps",
+                     minimum=1,
+                     maximum=100,
+                     step=1,
+                     value=5 if enable_lcm_arg else 30,
+                 )
+                 guidance_scale = gr.Slider(
+                     label="Guidance scale",
+                     minimum=0.1,
+                     maximum=20.0,
+                     step=0.1,
+                     value=0.0 if enable_lcm_arg else 5.0,
+                 )
+                 seed = gr.Slider(
+                     label="Seed",
+                     minimum=0,
+                     maximum=MAX_SEED,
+                     step=1,
+                     value=42,
+                 )
+                 schedulers = [
+                     "DEISMultistepScheduler",
+                     "HeunDiscreteScheduler",
+                     "EulerDiscreteScheduler",
+                     "DPMSolverMultistepScheduler",
+                     "DPMSolverMultistepScheduler-Karras",
+                     "DPMSolverMultistepScheduler-Karras-SDE",
+                 ]
+                 scheduler = gr.Dropdown(
+                     label="Schedulers",
+                     choices=schedulers,
+                     value="EulerDiscreteScheduler",
+                 )
+                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                 enhance_face_region = gr.Checkbox(label="Enhance non-face region", value=True)
+
+         with gr.Column(scale=1):
+             gallery = gr.Image(label="Generated Images")
+             usage_tips = gr.Markdown(
+                 label="InstantID Usage Tips", value=tips, visible=False
+             )
+
+         submit.click(
+             fn=remove_tips,
+             outputs=usage_tips,
+         ).then(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=generate_image,
+             inputs=[
+                 face_file,
+                 pose_file,
+                 prompt,
+                 negative_prompt,
+                 style,
+                 num_steps,
+                 identitynet_strength_ratio,
+                 adapter_strength_ratio,
+                 #pose_strength,
+                 canny_strength,
+                 depth_strength,
+                 controlnet_selection,
+                 guidance_scale,
+                 seed,
+                 scheduler,
+                 enable_LCM,
+                 enhance_face_region,
+             ],
+             outputs=[gallery, usage_tips],
+         )
+
+         enable_LCM.input(
+             fn=toggle_lcm_ui,
+             inputs=[enable_LCM],
+             outputs=[num_steps, guidance_scale],
+             queue=False,
+         )
+
+         gr.Examples(
+             examples=get_example(),
+             inputs=[face_file, pose_file, prompt, style, negative_prompt],
+             fn=run_for_examples,
+             outputs=[gallery, usage_tips],
+             cache_examples=True,
          )
+
+     gr.Markdown(article)
+
+ demo.queue(api_open=False)
+ demo.launch()
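
For reference, the cached examples in the restored app reach the pipeline through `run_for_examples`, which fixes the strength ratios, scheduler, and other knobs described in the "How to use" notes above. The snippet below is a minimal sketch of an equivalent direct call to `generate_image`, not part of the commit: it assumes this app.py is importable as a module, that the InstantX/InstantID checkpoints and example images have already been downloaded, and the negative prompt and output filename are illustrative only.

```python
# Minimal sketch (not part of the commit): call generate_image() directly with
# the same argument order and values that run_for_examples() uses.
# Assumes app.py is importable and its checkpoints/example images exist locally.
from app import generate_image  # hypothetical import of this module

image, tips_visibility = generate_image(
    "./examples/yann-lecun_resize.jpg",          # face_image_path
    None,                                        # pose_image_path (optional)
    "a man",                                     # prompt
    "(lowres, low quality, worst quality:1.2)",  # negative_prompt (illustrative)
    "Spring Festival",                           # style_name
    20,                                          # num_steps
    0.8,                                         # identitynet_strength_ratio
    0.8,                                         # adapter_strength_ratio
    0.3,                                         # canny_strength
    0.5,                                         # depth_strength
    ["depth", "canny"],                          # controlnet_selection
    5.0,                                         # guidance_scale
    42,                                          # seed
    "EulerDiscreteScheduler",                    # scheduler
    False,                                       # enable_LCM
    True,                                        # enhance_face_region
)
image.save("instantid_output.png")               # illustrative output path
```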