zhiweili committed
Commit deca47d · Parent: 0577725

modify steps
Files changed (3)
  1. app_base.py +8 -7
  2. enhance_utils.py +3 -1
  3. inversion_run_base.py +0 -1
app_base.py CHANGED
@@ -31,13 +31,16 @@ def create_demo() -> gr.Blocks:
         start_step: int,
         guidance_scale: float,
         generate_size: int,
-        adapter_weights: float,
-        enhance_face: bool = True,
+        pre_enhance: bool = True,
     ):
         w2 = 1.0
         run_task_time = 0
         time_cost_str = ''
         run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
+        if pre_enhance:
+            input_image = enhance_image(input_image, enhance_face=True)
+            input_image = input_image.resize((generate_size, generate_size))
+            run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
         run_model = base_run
         res_image = run_model(
             input_image,
@@ -50,7 +53,6 @@ def create_demo() -> gr.Blocks:
             num_steps,
             start_step,
             guidance_scale,
-            adapter_weights,
         )
         run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
         enhanced_image = enhance_image(res_image, enhance_face)
@@ -81,11 +83,10 @@ def create_demo() -> gr.Blocks:
                 start_step = gr.Slider(minimum=1, maximum=100, value=15, step=1, label="Start Step")
                 with gr.Accordion("Advanced Options", open=False):
                     guidance_scale = gr.Slider(minimum=0, maximum=20, value=0, step=0.5, label="Guidance Scale")
-                    generate_size = gr.Number(label="Generate Size", value=1024)
+                    generate_size = gr.Number(label="Generate Size", value=512)
                     mask_expansion = gr.Number(label="Mask Expansion", value=50, visible=True)
                     mask_dilation = gr.Slider(minimum=0, maximum=10, value=2, step=1, label="Mask Dilation")
-                    enhance_face = gr.Checkbox(label="Enhance Face", value=False)
-                    adapter_weights = gr.Slider(minimum=0, maximum=1, value=0.5, step=0.1, label="Adapter Weights", visible=False)
+                    pre_enhance = gr.Checkbox(label="Pre Enhance", value=True)
             with gr.Column():
                 seed = gr.Number(label="Seed", value=8)
                 w1 = gr.Number(label="W1", value=2)
@@ -109,7 +110,7 @@ def create_demo() -> gr.Blocks:
             outputs=[origin_area_image, croper],
         ).success(
             fn=image_to_image,
-            inputs=[origin_area_image, input_image_prompt, edit_prompt,seed,w1, num_steps, start_step, guidance_scale, generate_size, adapter_weights, enhance_face],
+            inputs=[origin_area_image, input_image_prompt, edit_prompt,seed,w1, num_steps, start_step, guidance_scale, generate_size, pre_enhance],
             outputs=[enhanced_image, generated_image, generated_cost],
         ).success(
             fn=restore_result,
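
Taken together, the app_base.py hunks replace the removed Enhance Face and Adapter Weights controls with a single pre-processing switch. Below is a minimal sketch of that new step, assuming enhance_image from the repo's enhance_utils.py returns a PIL image (the commit itself relies on that by calling .resize() on its result); the wrapper function maybe_pre_enhance is illustrative only and not part of the commit.

from PIL import Image

from enhance_utils import enhance_image  # helper updated in this same commit


def maybe_pre_enhance(input_image: Image.Image, pre_enhance: bool, generate_size: int) -> Image.Image:
    # Mirrors the branch added to image_to_image in app_base.py: when the new
    # "Pre Enhance" checkbox is on, the cropped source image is face-enhanced
    # and then resized back to the generation resolution (default now 512).
    if pre_enhance:
        input_image = enhance_image(input_image, enhance_face=True)
        input_image = input_image.resize((generate_size, generate_size))
    return input_image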
enhance_utils.py CHANGED
@@ -21,12 +21,14 @@ model_path = 'realesr-general-x4v3.pth'
 half = True if torch.cuda.is_available() else False
 upsampler = RealESRGANer(scale=4, model_path=model_path, model=model, tile=0, tile_pad=10, pre_pad=0, half=half)

-face_enhancer = GFPGANer(model_path='GFPGANv1.4.pth', upscale=1, arch='clean', channel_multiplier=2)
+face_enhancer = GFPGANer(model_path='GFPGANv1.4.pth', upscale=2, arch='clean', channel_multiplier=2)

 def enhance_image(
     pil_image: Image,
     enhance_face: bool = True,
+    scale: int = 2,
 ):
+    face_enhancer.upscale = scale
     img = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)

     h, w = img.shape[0:2]
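
The enhance_utils.py change bumps the module-level GFPGANer default from upscale=1 to upscale=2 and exposes it per call through the new scale parameter, which is written onto face_enhancer.upscale before the image is converted to BGR. A hedged usage sketch follows; "face.png" and the result variable names are hypothetical, and only enhance_image, enhance_face, and scale come from the diff.

from PIL import Image

from enhance_utils import enhance_image

source = Image.open("face.png").convert("RGB")  # hypothetical input file

# enhance_face=True routes the image through the module-level GFPGANer;
# scale is the new per-call override applied via face_enhancer.upscale = scale.
restored_2x = enhance_image(source, enhance_face=True, scale=2)

# scale=1 presumably keeps the input resolution while still restoring the face,
# roughly matching the previous constructor default of upscale=1.
restored_1x = enhance_image(source, enhance_face=True, scale=1)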
inversion_run_base.py CHANGED
@@ -79,7 +79,6 @@ def run(
     num_steps:int,
     start_step:int,
     guidance_scale:float,
-    adapter_weights:float,
 ):
     generator = torch.Generator().manual_seed(seed)

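
Since adapter_weights is dropped both from run() here and from its call site in app_base.py above, the two files stay in sync. A small hypothetical check, not part of the commit, that a caller could use to confirm the new signature, assuming inversion_run_base is importable as a module:

import inspect

import inversion_run_base

# After this change the trimmed run() signature should no longer expose
# adapter_weights, while parameters shown in the diff (e.g. guidance_scale) remain.
params = inspect.signature(inversion_run_base.run).parameters
assert "adapter_weights" not in params
assert "guidance_scale" in params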