AK391 committed
Commit 01d08da · Parent(s): 0145b71

ffhq

- ffhq_dataset/README.md +6 -0
- ffhq_dataset/__init__.py +0 -0
- ffhq_dataset/face_alignment.py +81 -0
- ffhq_dataset/gen_aligned_image.py +24 -0
- ffhq_dataset/landmarks_detector.py +22 -0
ffhq_dataset/README.md
ADDED
@@ -0,0 +1,6 @@
Download *shape_predictor_68_face_landmarks.dat* here:

```bash
wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
bzip2 -dk shape_predictor_68_face_landmarks.dat.bz2
```
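After decompressing, a quick sanity check that dlib can load the predictor might look like this (a minimal sketch, not part of the commit; assumes dlib is installed and the .dat file sits in the working directory):

```python
import dlib

# Load the decompressed 68-point landmark model; dlib raises a
# RuntimeError if the file is missing or corrupt.
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
```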
ffhq_dataset/__init__.py
ADDED
File without changes
ffhq_dataset/face_alignment.py
ADDED
@@ -0,0 +1,81 @@
import numpy as np
import scipy.ndimage
import os
import PIL.Image


def image_align(image, face_landmarks, output_size=1024, transform_size=4096, enable_padding=True):
    # Align function from FFHQ dataset pre-processing step
    # https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py

    lm = np.array(face_landmarks)
    lm_chin          = lm[0  : 17]  # left-right
    lm_eyebrow_left  = lm[17 : 22]  # left-right
    lm_eyebrow_right = lm[22 : 27]  # left-right
    lm_nose          = lm[27 : 31]  # top-down
    lm_nostrils      = lm[31 : 36]  # top-down
    lm_eye_left      = lm[36 : 42]  # left-clockwise
    lm_eye_right     = lm[42 : 48]  # left-clockwise
    lm_mouth_outer   = lm[48 : 60]  # left-clockwise
    lm_mouth_inner   = lm[60 : 68]  # left-clockwise

    # Calculate auxiliary vectors.
    eye_left     = np.mean(lm_eye_left, axis=0)
    eye_right    = np.mean(lm_eye_right, axis=0)
    eye_avg      = (eye_left + eye_right) * 0.5
    eye_to_eye   = eye_right - eye_left
    mouth_left   = lm_mouth_outer[0]
    mouth_right  = lm_mouth_outer[6]
    mouth_avg    = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg

    # Choose oriented crop rectangle.
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    y = np.flipud(x) * [-1, 1]
    c = eye_avg + eye_to_mouth * 0.1
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2

    img = PIL.Image.fromarray(image)

    # Shrink.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
        img = img.resize(rsize, PIL.Image.ANTIALIAS)
        quad /= shrink
        qsize /= shrink

    # Crop.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]

    # Pad.
    pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
        blur = qsize * 0.02
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
        img = np.uint8(np.clip(np.rint(img), 0, 255))
        img = PIL.Image.fromarray(img, 'RGB')
        quad += pad[:2]

    # Transform.
    img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
    if output_size < transform_size:
        img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)

    img_np = np.array(img)
    return img_np
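For context (not part of the commit): `image_align` expects a uint8 image array plus one face's 68 (x, y) landmarks and returns the 1024×1024 aligned crop. Note that `PIL.Image.ANTIALIAS` is deprecated in newer Pillow releases (removed in 10.0) in favor of `PIL.Image.LANCZOS`, so this file as committed needs Pillow < 10 or a one-line substitution. A hedged usage sketch, with hypothetical file paths:

```python
import cv2
from ffhq_dataset.face_alignment import image_align
from ffhq_dataset.landmarks_detector import LandmarksDetector

detector = LandmarksDetector('shape_predictor_68_face_landmarks.dat')
image = cv2.imread('face.jpg')  # hypothetical input path; cv2 loads BGR
for landmarks in detector.get_landmarks(image):
    # The pipeline handles channels consistently end to end, so writing
    # the result back with cv2.imwrite preserves colors.
    aligned = image_align(image, landmarks)  # 1024x1024 uint8 array
    cv2.imwrite('face_aligned.jpg', aligned)
    break  # align only the first detected face
```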
ffhq_dataset/gen_aligned_image.py
ADDED
@@ -0,0 +1,24 @@
import os

from .face_alignment import image_align
from .landmarks_detector import LandmarksDetector


cur_dir = os.path.split(os.path.realpath(__file__))[0]
model_path = os.path.join(cur_dir, 'shape_predictor_68_face_landmarks.dat')


class FaceAlign:
    def __init__(self, predictor_model_path=model_path):
        self.landmarks_detector = LandmarksDetector(predictor_model_path)

    def get_crop_image(self, image):
        lms = []
        for i, face_landmarks in enumerate(self.landmarks_detector.get_landmarks(image), start=1):
            lms.append(face_landmarks)
        if len(lms) < 1:
            return None
        out_image = image_align(image, lms[0])

        return out_image
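A short usage sketch for the wrapper (not part of the commit; paths hypothetical, and it assumes the predictor .dat from the README sits inside ffhq_dataset/ next to this module):

```python
import cv2
from ffhq_dataset.gen_aligned_image import FaceAlign

fa = FaceAlign()                    # defaults to the bundled model_path
img = cv2.imread('portrait.jpg')    # hypothetical input
aligned = fa.get_crop_image(img)    # None when no face is detected
if aligned is not None:
    cv2.imwrite('portrait_aligned.png', aligned)
```

Only the first detected face is aligned; `get_crop_image` returns None rather than raising when the detector finds nothing.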
ffhq_dataset/landmarks_detector.py
ADDED
@@ -0,0 +1,22 @@
import dlib
import cv2


class LandmarksDetector:
    def __init__(self, predictor_model_path):
        """
        :param predictor_model_path: path to shape_predictor_68_face_landmarks.dat file
        """
        self.detector = dlib.get_frontal_face_detector()
        self.shape_predictor = dlib.shape_predictor(predictor_model_path)

    def get_landmarks(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        dets = self.detector(gray, 1)

        for detection in dets:
            try:
                face_landmarks = [(item.x, item.y) for item in self.shape_predictor(gray, detection).parts()]
                yield face_landmarks
            except:
                print("Exception in get_landmarks()!")
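To see what the generator yields, a hedged visualization sketch (not part of the commit; paths hypothetical): `get_landmarks` yields one list of 68 (x, y) tuples per face detected in the image.

```python
import cv2
from ffhq_dataset.landmarks_detector import LandmarksDetector

det = LandmarksDetector('shape_predictor_68_face_landmarks.dat')
img = cv2.imread('face.jpg')  # hypothetical input path
for points in det.get_landmarks(img):
    for (x, y) in points:
        cv2.circle(img, (x, y), 2, (0, 255, 0), -1)  # mark each landmark
cv2.imwrite('face_landmarks.jpg', img)
```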