kimjy0411 committed
Commit 8988535 · verified · 1 Parent(s): 1484991

Upload src/utils/mp_utils.py with huggingface_hub

Files changed (1)
  1. src/utils/mp_utils.py +95 -0
src/utils/mp_utils.py ADDED
@@ -0,0 +1,95 @@
+ import os
+ import numpy as np
+ import cv2
+ import time
+ from tqdm import tqdm
+ import multiprocessing
+ import glob
+
+ import mediapipe as mp
+ from mediapipe import solutions
+ from mediapipe.framework.formats import landmark_pb2
+ from mediapipe.tasks import python
+ from mediapipe.tasks.python import vision
+ from . import face_landmark
+
+ CUR_DIR = os.path.dirname(__file__)
+
+
+ class LMKExtractor():
+     def __init__(self, FPS=25):
+         # Create a FaceLandmarker object.
+         self.mode = mp.tasks.vision.FaceDetectorOptions.running_mode.IMAGE  # static-image mode by default
+         base_options = python.BaseOptions(model_asset_path=os.path.join(CUR_DIR, 'mp_models/face_landmarker_v2_with_blendshapes.task'))
+         base_options.delegate = mp.tasks.BaseOptions.Delegate.CPU
+         options = vision.FaceLandmarkerOptions(base_options=base_options,
+                                                running_mode=self.mode,
+                                                output_face_blendshapes=True,
+                                                output_facial_transformation_matrixes=True,
+                                                num_faces=1)
+         self.detector = face_landmark.FaceLandmarker.create_from_options(options)
+         self.last_ts = 0
+         self.frame_ms = int(1000 / FPS)  # timestamp step per frame, used in VIDEO mode
+
+         det_base_options = python.BaseOptions(model_asset_path=os.path.join(CUR_DIR, 'mp_models/blaze_face_short_range.tflite'))
+         det_options = vision.FaceDetectorOptions(base_options=det_base_options)
+         self.det_detector = vision.FaceDetector.create_from_options(det_options)  # face detector used to check for a single face
+
+
+     def __call__(self, img):
+         frame = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB input
+         image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame)
+         t0 = time.time()
+         if self.mode == mp.tasks.vision.FaceDetectorOptions.running_mode.VIDEO:
+             det_result = self.det_detector.detect(image)
+             if len(det_result.detections) != 1:
+                 return None
+             self.last_ts += self.frame_ms
+             try:
+                 detection_result, mesh3d = self.detector.detect_for_video(image, timestamp_ms=self.last_ts)
+             except Exception:
+                 return None
+         elif self.mode == mp.tasks.vision.FaceDetectorOptions.running_mode.IMAGE:
+             # det_result = self.det_detector.detect(image)
+
+             # if len(det_result.detections) != 1:
+             #     return None
+             try:
+                 detection_result, mesh3d = self.detector.detect(image)
+             except Exception:
+                 return None
+
+
+         bs_list = detection_result.face_blendshapes
+         if len(bs_list) == 1:
+             bs = bs_list[0]
+             bs_values = []
+             for index in range(len(bs)):
+                 bs_values.append(bs[index].score)
+             bs_values = bs_values[1:]  # remove neutral
+             trans_mat = detection_result.facial_transformation_matrixes[0]
+             face_landmarks_list = detection_result.face_landmarks
+             face_landmarks = face_landmarks_list[0]
+             lmks = []
+             for index in range(len(face_landmarks)):
+                 x = face_landmarks[index].x
+                 y = face_landmarks[index].y
+                 z = face_landmarks[index].z
+                 lmks.append([x, y, z])
+             lmks = np.array(lmks)
+
+             lmks3d = np.array(mesh3d.vertex_buffer)
+             lmks3d = lmks3d.reshape(-1, 5)[:, :3]  # keep (x, y, z), drop texture coordinates
+             mp_tris = np.array(mesh3d.index_buffer).reshape(-1, 3) + 1  # 1-based triangle indices
+
+             return {
+                 "lmks": lmks,
+                 'lmks3d': lmks3d,
+                 "trans_mat": trans_mat,
+                 'faces': mp_tris,
+                 "bs": bs_values
+             }
+         else:
+             # print('multiple faces in the image: {}'.format(img_path))
+             return None
+
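For reference, a minimal usage sketch of the uploaded class (illustrative only, not part of the commit). It assumes the repository root is on the Python path so that src.utils resolves as a package, that the mp_models assets referenced above sit next to this module, and that face.jpg is a placeholder input image containing exactly one face.

import cv2
from src.utils.mp_utils import LMKExtractor  # assumed import path; depends on how the repo is packaged

extractor = LMKExtractor(FPS=25)
frame = cv2.imread("face.jpg")  # BGR image, as expected by __call__
result = extractor(frame)

if result is None:
    print("no single face found, or detection failed")
else:
    print(result["lmks"].shape)       # normalized face landmarks, one (x, y, z) row per point
    print(result["lmks3d"].shape)     # face-geometry mesh vertices (x, y, z)
    print(result["trans_mat"].shape)  # 4x4 facial transformation matrix
    print(result["faces"].shape)      # triangle indices of the mesh (1-based)
    print(len(result["bs"]))          # blendshape scores with the neutral entry removed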