zhiweili committed
Commit c823534
1 Parent(s): 9afe403

change to app_haircolor_inpaint_15

Files changed (2)
  1. app.py +1 -1
  2. app_haircolor_inpaint_15.py +6 -45
app.py CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
 
-from app_haircolor_inpaint_adapter_15 import create_demo as create_demo_haircolor
+from app_haircolor_inpaint_15 import create_demo as create_demo_haircolor
 
 with gr.Blocks(css="style.css") as demo:
     with gr.Tabs():
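For reference, a minimal sketch of how the swapped import is typically wired into the tab layout shown above; the tab label, the create_demo() usage, and the demo.launch() call are assumptions not shown in this commit:

import gradio as gr

from app_haircolor_inpaint_15 import create_demo as create_demo_haircolor

with gr.Blocks(css="style.css") as demo:
    with gr.Tabs():
        with gr.Tab(label="Hair Color"):  # tab label assumed, not part of this commit
            create_demo_haircolor()       # assumed usage: create_demo() builds the tab's UI in place

demo.launch()  # launch call assumed; not shown in the diff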
app_haircolor_inpaint_15.py CHANGED
@@ -10,22 +10,12 @@ from segment_utils import(
     restore_result,
 )
 from diffusers import (
-    StableDiffusionControlNetInpaintPipeline,
-    ControlNetModel,
-    DDIMScheduler,
-    DPMSolverMultistepScheduler,
+    StableDiffusionInpaintPipeline,
     EulerAncestralDiscreteScheduler,
 )
 
-from controlnet_aux import (
-    CannyDetector,
-    LineartDetector,
-    PidiNetDetector,
-    HEDdetector,
-)
-
-BASE_MODEL = "stable-diffusion-v1-5/stable-diffusion-v1-5"
-# BASE_MODEL = "stable-diffusion-v1-5/stable-diffusion-inpainting"
+# BASE_MODEL = "stable-diffusion-v1-5/stable-diffusion-v1-5"
+BASE_MODEL = "stable-diffusion-v1-5/stable-diffusion-inpainting"
 # BASE_MODEL = "SG161222/Realistic_Vision_V2.0"
 
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
@@ -35,34 +25,12 @@ DEFAULT_NEGATIVE_PROMPT = "worst quality, normal quality, low quality, low res,
 
 DEFAULT_CATEGORY = "hair"
 
-canny_detector = CannyDetector()
-lineart_detector = LineartDetector.from_pretrained("lllyasviel/Annotators")
-lineart_detector = lineart_detector.to(DEVICE)
-
-pidiNet_detector = PidiNetDetector.from_pretrained('lllyasviel/Annotators')
-pidiNet_detector = pidiNet_detector.to(DEVICE)
-
-hed_detector = HEDdetector.from_pretrained('lllyasviel/Annotators')
-hed_detector = hed_detector.to(DEVICE)
-
-controlnet = [
-    ControlNetModel.from_pretrained(
-        "lllyasviel/control_v11e_sd15_ip2p",
-        torch_dtype=torch.float16,
-    ),
-    ControlNetModel.from_pretrained(
-        "lllyasviel/control_v11p_sd15_lineart",
-        torch_dtype=torch.float16,
-    ),
-]
-
-basepipeline = StableDiffusionControlNetInpaintPipeline.from_pretrained(
+basepipeline = StableDiffusionInpaintPipeline.from_pretrained(
     BASE_MODEL,
     torch_dtype=torch.float16,
-    use_safetensors=True,
-    controlnet=controlnet,
+    # use_safetensors=True,
 )
-# basepipeline.scheduler = DDIMScheduler.from_config(basepipeline.scheduler.config)
+
 basepipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(basepipeline.scheduler.config)
 
 basepipeline = basepipeline.to(DEVICE)
@@ -84,11 +52,6 @@ def image_to_image(
     run_task_time = 0
     time_cost_str = ''
     run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
-    # canny_image = canny_detector(input_image, int(generate_size*1), generate_size)
-    lineart_image = lineart_detector(input_image, 384, generate_size)
-    run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
-    # pidiNet_image = pidiNet_detector(input_image, 512, generate_size)
-    control_image = [lineart_image, input_image]
 
     generator = torch.Generator(device=DEVICE).manual_seed(seed)
     generated_image = basepipeline(
@@ -97,12 +60,10 @@
         negative_prompt=DEFAULT_NEGATIVE_PROMPT,
         image=input_image,
         mask_image=mask_image,
-        control_image=control_image,
         height=generate_size,
         width=generate_size,
         guidance_scale=guidance_scale,
         num_inference_steps=num_steps,
-        controlnet_conditioning_scale=[cond_scale1, cond_scale2],
     ).images[0]
 
     run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
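Taken together, the commit drops the ControlNet/annotator branch entirely and switches to the plain SD 1.5 inpainting checkpoint. Below is a minimal, self-contained sketch of the simplified flow after this change; the model ID, scheduler, and call arguments follow the new code, while the prompt, file paths, and literal parameter values are illustrative assumptions:

import torch
from diffusers import StableDiffusionInpaintPipeline, EulerAncestralDiscreteScheduler
from PIL import Image

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
BASE_MODEL = "stable-diffusion-v1-5/stable-diffusion-inpainting"

# Plain SD 1.5 inpainting pipeline; fp16 weights assume a CUDA device.
pipeline = StableDiffusionInpaintPipeline.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float16,
)
pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(DEVICE)

# Illustrative inputs: the real app presumably derives the mask from its "hair" segmentation step.
input_image = Image.open("input.png").convert("RGB").resize((512, 512))
mask_image = Image.open("hair_mask.png").convert("L").resize((512, 512))

generator = torch.Generator(device=DEVICE).manual_seed(42)
generated_image = pipeline(
    prompt="vivid red hair",                                              # illustrative prompt
    negative_prompt="worst quality, normal quality, low quality, low res", # abbreviated negative prompt
    image=input_image,
    mask_image=mask_image,
    height=512,                                                           # generate_size in the app
    width=512,
    guidance_scale=7.5,
    num_inference_steps=20,
    generator=generator,
).images[0]
generated_image.save("result.png")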