KenjieDec committed on
Commit 19dcca7 · verified · 1 Parent(s): 4a9cd96

Options Fix

Files changed (1)
  1. app.py +82 -39
app.py CHANGED
@@ -30,7 +30,8 @@ import __init_paths
 from face_enhancement import FaceEnhancement
 from face_colorization import FaceColorization
 from face_inpainting import FaceInpainting
-
+from gradio_imageslider import ImageSlider
+
 def brush_stroke_mask(img, color=(255,255,255)):
     min_num_vertex = 8
     max_num_vertex = 28
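
The newly added import pulls in ImageSlider from the gradio_imageslider custom component. Later in this commit it is given a (before, after) pair of PIL images to render as a draggable comparison. A minimal sketch of that contract, independent of the GPEN models (the compare function below is a hypothetical placeholder, not code from this repository):

import gradio as gr
from PIL import Image
from gradio_imageslider import ImageSlider

def compare(img):
    # Hypothetical stand-in for the real processing; the app runs GPEN here.
    before = Image.fromarray(img)
    after = before.convert("L").convert("RGB")
    return (before, after)

with gr.Blocks() as sketch:
    inp = gr.Image(label="Input")
    slider = ImageSlider(label="Before/After", type="pil")
    gr.Button("Run").click(compare, inputs=inp, outputs=slider)

sketch.launch()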
@@ -87,49 +88,49 @@ def resize(image, width = 1024):
     image = cv2.resize(image, (int(height),int(width)))
     return image
 
-def inference(file, mode):
+def inference(file, mode, res_percentage, zoom, x_shift, y_shift):
 
-    im = cv2.imread(file, cv2.IMREAD_COLOR)
-    im = cv2.resize(im, (0,0), fx=2, fy=2)
-    faceenhancer = FaceEnhancement(size=512, model='GPEN-512', channel_multiplier=2, device='cpu', u=False)
-    img, orig_faces, enhanced_faces = faceenhancer.process(im)
-    cv2.imwrite(os.path.join("e.png"), img)
-
+    im = cv2.resize(file, None, fx = (res_percentage/100), fy = (res_percentage/100))
 
     if mode == "enhance":
-        return os.path.join("e.png")
+        faceenhancer = FaceEnhancement(size=512, model='GPEN-512', channel_multiplier=2, device='cpu', u=False)
+        img, orig_faces, enhanced_faces = faceenhancer.process(im)
+
     elif mode == "colorize":
         model = {'name':'GPEN-1024-Color', 'size':1024}
-        grayf = cv2.imread("e.png", cv2.IMREAD_GRAYSCALE)
+        if len(im.shape) == 3:
+            if im.shape[2] == 1:
+                grayf = im[:, :, 0]
+            elif im.shape[2] == 3:
+                grayf = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
+            elif im.shape[2] == 4:
+                grayf = cv2.cvtColor(im, cv2.COLOR_BGRA2GRAY)
+        grayf = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
+
         grayf = cv2.cvtColor(grayf, cv2.COLOR_GRAY2BGR) # channel: 1->3
         facecolorizer = FaceColorization(size=model['size'], model=model['name'], channel_multiplier=2, device='cpu')
         colorf = facecolorizer.process(grayf)
-
-        colorf = cv2.resize(colorf, (grayf.shape[1], grayf.shape[0]))
-        cv2.imwrite(os.path.join("output.png"), colorf)
-        return os.path.join("output.png")
+        img = cv2.resize(colorf, (grayf.shape[1], grayf.shape[0]))
+
     elif mode == "inpainting":
-        im1 = cv2.imread(file, cv2.IMREAD_COLOR)
         im2 = resize(im1, width = 1024)
         model = {'name':'GPEN-Inpainting-1024', 'size':1024}
         faceinpainter = FaceInpainting(size=model['size'], model=model['name'], channel_multiplier=2, device='cpu')
         im3 = np.asarray(brush_stroke_mask(Image.fromarray(im2)))
-        inpaint = faceinpainter.process(im3)
-
-        cv2.imwrite(os.path.join("output.png"), inpaint)
-        return os.path.join("output.png")
+        img = faceinpainter.process(im3)
+
     elif mode == "selfie":
         model = {'name':'GPEN-BFR-2048', 'size':2048}
-        im = cv2.resize(im, (0,0), fx=2, fy=2)
         faceenhancer = FaceEnhancement(size=model['size'], model=model['name'], channel_multiplier=2, device='cpu')
         img, orig_faces, enhanced_faces = faceenhancer.process(im)
-        cv2.imwrite(os.path.join("output.png"), img)
-        return os.path.join("output.png")
+
     else:
         faceenhancer = FaceEnhancement(size=512, model='GPEN-512', channel_multiplier=2, device='cpu', u=True)
         img, orig_faces, enhanced_faces = faceenhancer.process(im)
-        cv2.imwrite(os.path.join("output.png"), img)
-        return os.path.join("output.png")
+
+    (in_img, out_img) = zoom_image(zoom, x_shift, y_shift, im, img)
+
+    return img, (in_img, out_img)
 
 
 title = "GPEN"
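
The reworked inference receives the image directly as an array from the Gradio input instead of re-reading it from disk, and rescales it by the new resolution-percentage slider through cv2.resize with fx/fy factors before dispatching on the selected mode. A quick standalone check of that resize step (the array shape and slider value are illustrative):

import cv2
import numpy as np

res_percentage = 50                                # slider value in percent
im = np.zeros((400, 600, 3), dtype=np.uint8)       # stand-in 600x400 image
out = cv2.resize(im, None, fx=res_percentage / 100, fy=res_percentage / 100)
print(out.shape)                                   # (200, 300, 3): both axes halved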
@@ -138,18 +139,60 @@ description = "Gradio demo for GAN Prior Embedded Network for Blind Face Restora
 article = "<p style='text-align: center;'><a href='https://arxiv.org/abs/2105.06070' target='_blank'>GAN Prior Embedded Network for Blind Face Restoration in the Wild</a> | <a href='https://github.com/yangxy/GPEN' target='_blank'>Github Repo</a></p><p style='text-align: center;'><img src='https://img.shields.io/badge/Hugging%20Face-Original%20demo-blue' alt='https://huggingface.co/spaces/akhaliq/GPEN' width='172' height='20' /></p>"
 
 
-gr.Interface(
-    inference,
-    [gr.inputs.Image(type="filepath", label="Input"),gr.inputs.Radio(["enhance", "colorize", "inpainting", "selfie", "enhanced+background"], type="value", default="enhance", label="Type")],
-    gr.outputs.Image(type="filepath", label="Output"),
-    title=title,
-    description=description,
-    article=article,
-    examples=[
-        ['enhance.png', 'enhance'],
-        ['color.png', 'colorize'],
-        ['inpainting.png', 'inpainting'],
-        ['selfie.png', 'selfie']
-    ],
-    enable_queue=True
-).launch()
+def zoom_image(zoom, x_shift, y_shift, input_img, output_img = None):
+    if output_img is None:
+        return None
+
+    img = Image.fromarray(input_img)
+    out_img = Image.fromarray(output_img)
+
+    img_w, img_h = img.size
+    zoom_factor = (100 - zoom) / 100
+    x_shift /= 100
+    y_shift /= 100
+
+    zoom_w, zoom_h = int(img_w * zoom_factor), int(img_h * zoom_factor)
+    x_offset = int((img_w - zoom_w) * x_shift)
+    y_offset = int((img_h - zoom_h) * y_shift)
+
+    crop_box = (x_offset, y_offset, x_offset + zoom_w, y_offset + zoom_h)
+    img = img.resize((img_w, img_h), Image.BILINEAR).crop(crop_box)
+    out_img = out_img.resize((img_w, img_h), Image.BILINEAR).crop(crop_box)
+
+    return (img, out_img)
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        input_img = gr.Image(label="Input Image")
+        output_img = gr.Image(label="Result")
+
+    max_res = gr.Slider(1, 200, step=0.5, value=100, label="Output image Resolution Percentage (Higher% = longer processing time)")
+    restore_type = gr.Radio(["enhance", "colorize", "inpainting", "selfie", "enhanced+background"], value="enhance", type="value", label="Type")
+
+    zoom = gr.Slider(0, 100, step=1, value=50, label="Zoom Percentage (0 = original size)")
+    x_shift = gr.Slider(0, 100, step=1, label="Horizontal shift Percentage (Before/After)")
+    y_shift = gr.Slider(0, 100, step=1, label="Vertical shift Percentage (Before/After)")
+
+    run = gr.Button("Run")
+
+    with gr.Row():
+        before_after = ImageSlider(label="Before/After", type="pil", value=None)
+
+    run.click(
+        inference,
+        inputs=[input_img, restore_type, max_res, zoom, x_shift, y_shift],
+        outputs=[output_img, before_after]
+    )
+
+    gr.Examples([
+        ['enhance.png', 'enhance', 100, 0, 0, 0],
+        ['color.png', 'colorize', 100, 0, 0, 0],
+        ['inpainting.png', 'inpainting', 100, 0, 0, 0],
+        ['selfie.png', 'selfie', 100, 0, 0, 0]
+    ], inputs=[input_img, restore_type, max_res, zoom, x_shift, y_shift])
+
+    zoom.release(zoom_image, inputs=[zoom, x_shift, y_shift, input_img, output_img], outputs=[before_after])
+    x_shift.release(zoom_image, inputs=[zoom, x_shift, y_shift, input_img, output_img], outputs=[before_after])
+    y_shift.release(zoom_image, inputs=[zoom, x_shift, y_shift, input_img, output_img], outputs=[before_after])
+
+demo.launch()
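
For reference, the crop arithmetic inside the added zoom_image helper works out as below; the image size and slider values are illustrative, not taken from the app. Because the same crop_box is applied to both the input and the processed image, the two views fed to the ImageSlider stay aligned, and zoom=0 leaves the full frame visible.

# Worked example of zoom_image()'s crop math with assumed numbers.
img_w, img_h = 1000, 800                   # hypothetical image size
zoom, x_shift, y_shift = 50, 50, 50        # slider values in percent

zoom_factor = (100 - zoom) / 100                                     # 0.5
zoom_w, zoom_h = int(img_w * zoom_factor), int(img_h * zoom_factor)  # 500, 400
x_offset = int((img_w - zoom_w) * (x_shift / 100))                   # 250
y_offset = int((img_h - zoom_h) * (y_shift / 100))                   # 200

crop_box = (x_offset, y_offset, x_offset + zoom_w, y_offset + zoom_h)
print(crop_box)                            # (250, 200, 750, 600): a centered half-size crop

The zoom, x_shift and y_shift sliders re-run only this helper on release, so the Before/After view can be adjusted without re-running the models.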