zhiweili committed on
Commit
81d6134
1 Parent(s): f0a547a

change to img2img

Browse files
Files changed (2) hide show
  1. app.py +1 -1
  2. app_haircolor.py +11 -20
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import gradio as gr
2
 
3
- from app_haircolor_inpaint import create_demo as create_demo_haircolor
4
 
5
  with gr.Blocks(css="style.css") as demo:
6
  with gr.Tabs():
 
1
  import gradio as gr
2
 
3
+ from app_haircolor import create_demo as create_demo_haircolor
4
 
5
  with gr.Blocks(css="style.css") as demo:
6
  with gr.Tabs():
app_haircolor.py CHANGED
@@ -24,7 +24,7 @@ from controlnet_aux import (
24
  BASE_MODEL = "stabilityai/sdxl-turbo"
25
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
26
 
27
- DEFAULT_EDIT_PROMPT = "a woman, blue hair, high detailed"
28
  DEFAULT_NEGATIVE_PROMPT = "worst quality, normal quality, low quality, low res, blurry, text, watermark, logo, banner, extra digits, cropped, jpeg artifacts, signature, username, error, sketch ,duplicate, ugly, monochrome, horror, geometry, mutation, disgusting, poorly drawn face, bad face, fused face, ugly face, worst face, asymmetrical, unrealistic skin texture, bad proportions, out of frame, poorly drawn hands, cloned face, double face"
29
 
30
  DEFAULT_CATEGORY = "hair"
@@ -53,12 +53,7 @@ adapters = MultiAdapter(
53
  "TencentARC/t2i-adapter-canny-sdxl-1.0",
54
  torch_dtype=torch.float16,
55
  varient="fp16",
56
- ),
57
- T2IAdapter.from_pretrained(
58
- "TencentARC/t2i-adapter-sketch-sdxl-1.0",
59
- torch_dtype=torch.float16,
60
- varient="fp16",
61
- ),
62
  ]
63
  )
64
  adapters = adapters.to(torch.float16)
@@ -86,20 +81,17 @@ def image_to_image(
86
  generate_size: int,
87
  lineart_scale: float = 1.0,
88
  canny_scale: float = 0.5,
89
- sketch_scale:float = 0.5,
90
  ):
91
  run_task_time = 0
92
  time_cost_str = ''
93
  run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
94
- lineart_image = lineart_detector(input_image, 384, generate_size)
95
- run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
96
- canny_image = canndy_detector(input_image, 384, generate_size)
97
  run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
98
- sketch_image = pidinet_detector(input_image, 512, generate_size)
99
  run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
100
 
101
- cond_image = [lineart_image, canny_image, sketch_image]
102
- cond_scale = [lineart_scale, canny_scale, sketch_scale]
103
 
104
  generator = torch.Generator(device=DEVICE).manual_seed(seed)
105
  generated_image = basepipeline(
@@ -136,18 +128,17 @@ def create_demo() -> gr.Blocks:
136
  with gr.Row():
137
  with gr.Column():
138
  edit_prompt = gr.Textbox(lines=1, label="Edit Prompt", value=DEFAULT_EDIT_PROMPT)
139
- generate_size = gr.Number(label="Generate Size", value=512)
140
  seed = gr.Number(label="Seed", value=8)
141
  category = gr.Textbox(label="Category", value=DEFAULT_CATEGORY, visible=False)
142
  with gr.Column():
143
- num_steps = gr.Slider(minimum=1, maximum=100, value=10, step=1, label="Num Steps")
144
- guidance_scale = gr.Slider(minimum=0, maximum=30, value=5, step=0.5, label="Guidance Scale")
145
- mask_expansion = gr.Number(label="Mask Expansion", value=50, visible=True)
146
  with gr.Column():
147
  mask_dilation = gr.Slider(minimum=0, maximum=10, value=2, step=1, label="Mask Dilation")
148
  lineart_scale = gr.Slider(minimum=0, maximum=5, value=1, step=0.1, label="Lineart Scale")
149
  canny_scale = gr.Slider(minimum=0, maximum=5, value=0.7, step=0.1, label="Canny Scale")
150
- sketch_scale = gr.Slider(minimum=0, maximum=5, value=1, step=0.1, label="Sketch Scale")
151
  g_btn = gr.Button("Edit Image")
152
 
153
  with gr.Row():
@@ -166,7 +157,7 @@ def create_demo() -> gr.Blocks:
166
  outputs=[origin_area_image, croper],
167
  ).success(
168
  fn=image_to_image,
169
- inputs=[origin_area_image, edit_prompt,seed, num_steps, guidance_scale, generate_size, lineart_scale, canny_scale, sketch_scale],
170
  outputs=[generated_image, generated_cost],
171
  ).success(
172
  fn=restore_result,
 
24
  BASE_MODEL = "stabilityai/sdxl-turbo"
25
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
26
 
27
+ DEFAULT_EDIT_PROMPT = "blue hair"
28
  DEFAULT_NEGATIVE_PROMPT = "worst quality, normal quality, low quality, low res, blurry, text, watermark, logo, banner, extra digits, cropped, jpeg artifacts, signature, username, error, sketch ,duplicate, ugly, monochrome, horror, geometry, mutation, disgusting, poorly drawn face, bad face, fused face, ugly face, worst face, asymmetrical, unrealistic skin texture, bad proportions, out of frame, poorly drawn hands, cloned face, double face"
29
 
30
  DEFAULT_CATEGORY = "hair"
 
53
  "TencentARC/t2i-adapter-canny-sdxl-1.0",
54
  torch_dtype=torch.float16,
55
  varient="fp16",
56
+ )
 
 
 
 
 
57
  ]
58
  )
59
  adapters = adapters.to(torch.float16)
 
81
  generate_size: int,
82
  lineart_scale: float = 1.0,
83
  canny_scale: float = 0.5,
 
84
  ):
85
  run_task_time = 0
86
  time_cost_str = ''
87
  run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
88
+ lineart_image = lineart_detector(input_image, int(generate_size*0.375), generate_size)
 
 
89
  run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
90
+ canny_image = canndy_detector(input_image, int(generate_size*0.375), generate_size)
91
  run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
92
 
93
+ cond_image = [lineart_image, canny_image]
94
+ cond_scale = [lineart_scale, canny_scale]
95
 
96
  generator = torch.Generator(device=DEVICE).manual_seed(seed)
97
  generated_image = basepipeline(
 
128
  with gr.Row():
129
  with gr.Column():
130
  edit_prompt = gr.Textbox(lines=1, label="Edit Prompt", value=DEFAULT_EDIT_PROMPT)
131
+ generate_size = gr.Number(label="Generate Size", value=1024)
132
  seed = gr.Number(label="Seed", value=8)
133
  category = gr.Textbox(label="Category", value=DEFAULT_CATEGORY, visible=False)
134
  with gr.Column():
135
+ num_steps = gr.Slider(minimum=1, maximum=100, value=5, step=1, label="Num Steps")
136
+ guidance_scale = gr.Slider(minimum=0, maximum=30, value=2.5, step=0.5, label="Guidance Scale")
137
+ mask_expansion = gr.Number(label="Mask Expansion", value=20, visible=True)
138
  with gr.Column():
139
  mask_dilation = gr.Slider(minimum=0, maximum=10, value=2, step=1, label="Mask Dilation")
140
  lineart_scale = gr.Slider(minimum=0, maximum=5, value=1, step=0.1, label="Lineart Scale")
141
  canny_scale = gr.Slider(minimum=0, maximum=5, value=0.7, step=0.1, label="Canny Scale")
 
142
  g_btn = gr.Button("Edit Image")
143
 
144
  with gr.Row():
 
157
  outputs=[origin_area_image, croper],
158
  ).success(
159
  fn=image_to_image,
160
+ inputs=[origin_area_image, edit_prompt,seed, num_steps, guidance_scale, generate_size, lineart_scale, canny_scale],
161
  outputs=[generated_image, generated_cost],
162
  ).success(
163
  fn=restore_result,