zejunyang committed
Commit: 9a70da5
Parent: e24f684
src/utils/crop_face_single.py CHANGED
@@ -3,7 +3,10 @@ import cv2
 
 
 def crop_face(img, lmk_extractor, expand=1.5):
+    print('****=====1======')
     result = lmk_extractor(img) # cv2 BGR
+
+    print('****=====2======')
 
     if result is None:
         return None
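
The change above only wraps the landmark-extractor call with debug prints. For context, a minimal usage sketch of crop_face follows; the LMKExtractor construction, the test image path, and the assumption that a cropped image comes back on success are all illustrative, since only the top of the function appears in this diff.

import cv2

from src.utils.mp_utils import LMKExtractor
from src.utils.crop_face_single import crop_face

lmk_extractor = LMKExtractor()                        # assumed default construction
img = cv2.imread('test_face.jpg')                     # hypothetical path; cv2 loads BGR, which crop_face expects
cropped = crop_face(img, lmk_extractor, expand=1.5)
if cropped is None:
    print('landmark extraction failed')               # crop_face returns None when the extractor finds nothing
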
src/utils/mp_utils.py CHANGED
@@ -1,4 +1,5 @@
 import os
+import sys
 import numpy as np
 import cv2
 import time
@@ -11,7 +12,7 @@ from mediapipe import solutions
 from mediapipe.framework.formats import landmark_pb2
 from mediapipe.tasks import python
 from mediapipe.tasks.python import vision
-from . import face_landmark
+from src.utils import face_landmark
 
 CUR_DIR = os.path.dirname(__file__)
 
@@ -31,35 +32,36 @@ class LMKExtractor():
         self.last_ts = 0
         self.frame_ms = int(1000 / FPS)
 
-        det_base_options = python.BaseOptions(model_asset_path=os.path.join(CUR_DIR, 'mp_models/blaze_face_short_range.tflite'))
-        det_options = vision.FaceDetectorOptions(base_options=det_base_options)
-        self.det_detector = vision.FaceDetector.create_from_options(det_options)
+        # det_base_options = python.BaseOptions(model_asset_path=os.path.join(CUR_DIR, 'mp_models/blaze_face_short_range.tflite'))
+        # det_options = vision.FaceDetectorOptions(base_options=det_base_options)
+        # self.det_detector = vision.FaceDetector.create_from_options(det_options)
 
 
     def __call__(self, img):
+        print('///=====1======')
         frame = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
         image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame)
-        t0 = time.time()
-        if self.mode == mp.tasks.vision.FaceDetectorOptions.running_mode.VIDEO:
-            det_result = self.det_detector.detect(image)
-            if len(det_result.detections) != 1:
-                return None
-            self.last_ts += self.frame_ms
-            try:
-                detection_result, mesh3d = self.detector.detect_for_video(image, timestamp_ms=self.last_ts)
-            except:
-                return None
-        elif self.mode == mp.tasks.vision.FaceDetectorOptions.running_mode.IMAGE:
+        # t0 = time.time()
+        # if self.mode == mp.tasks.vision.FaceDetectorOptions.running_mode.VIDEO:
+        #     det_result = self.det_detector.detect(image)
+        #     if len(det_result.detections) != 1:
+        #         return None
+        #     self.last_ts += self.frame_ms
+        #     try:
+        #         detection_result, mesh3d = self.detector.detect_for_video(image, timestamp_ms=self.last_ts)
+        #     except:
+        #         return None
+        # elif self.mode == mp.tasks.vision.FaceDetectorOptions.running_mode.IMAGE:
         # det_result = self.det_detector.detect(image)
 
         # if len(det_result.detections) != 1:
         #     return None
-            try:
-                detection_result, mesh3d = self.detector.detect(image)
-            except:
-                return None
+        try:
+            detection_result, mesh3d = self.detector.detect(image)
+        except:
+            return None
 
-
+        print('///=====2======')
         bs_list = detection_result.face_blendshapes
         if len(bs_list) == 1:
             bs = bs_list[0]
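
The mp_utils.py edits switch the relative face_landmark import to an absolute src.utils import (the added import sys suggests a path adjustment not shown in this hunk), comment out the separate BlazeFace pre-detection, and reduce __call__ to the IMAGE-mode path only. For reference, a minimal sketch of the equivalent flow with the stock MediaPipe Tasks FaceLandmarker is below; the model asset path, option values, and input image are assumptions, and note that the repo's custom face_landmark detector differs from the stock API in that its detect() also returns a mesh3d value.

import cv2
import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision

# Assumed model asset; the actual .task file used by LMKExtractor is configured elsewhere in mp_utils.py.
base_options = python.BaseOptions(model_asset_path='mp_models/face_landmarker_v2_with_blendshapes.task')
options = vision.FaceLandmarkerOptions(
    base_options=base_options,
    output_face_blendshapes=True,           # needed for detection_result.face_blendshapes
    running_mode=vision.RunningMode.IMAGE,  # the only mode exercised after this commit
)
landmarker = vision.FaceLandmarker.create_from_options(options)

frame = cv2.cvtColor(cv2.imread('test_face.jpg'), cv2.COLOR_BGR2RGB)  # hypothetical input image, BGR -> RGB
image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame)
result = landmarker.detect(image)           # stock API returns a single FaceLandmarkerResult
bs = result.face_blendshapes[0] if result.face_blendshapes else None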