Create face_reconstruction_main.py
face_reconstruction_main.py (ADDED, +174 -0)
import cv2
import mediapipe as mp
import numpy as np
import time
from pathlib import Path


class FaceMeshDetector:
    def __init__(self, static_mode=False, max_faces=1, min_detection_confidence=0.5, min_tracking_confidence=0.5):
        self.static_mode = static_mode
        self.max_faces = max_faces
        self.min_detection_confidence = min_detection_confidence
        self.min_tracking_confidence = min_tracking_confidence

        # Initialize MediaPipe Face Mesh
        self.mp_face_mesh = mp.solutions.face_mesh
        self.face_mesh = self.mp_face_mesh.FaceMesh(
            static_image_mode=static_mode,
            max_num_faces=max_faces,
            min_detection_confidence=min_detection_confidence,
            min_tracking_confidence=min_tracking_confidence,
            refine_landmarks=True
        )
        self.mp_draw = mp.solutions.drawing_utils
        self.drawing_spec = self.mp_draw.DrawingSpec(thickness=1, circle_radius=1)

        # Store MediaPipe's mesh topology (tesselation edge list)
        self.FACE_CONNECTIONS = self.mp_face_mesh.FACEMESH_TESSELATION

    def detect_faces(self, img, draw=True):
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = self.face_mesh.process(img_rgb)
        faces = []

        if results.multi_face_landmarks:
            for face_landmarks in results.multi_face_landmarks:
                if draw:
                    self.mp_draw.draw_landmarks(
                        image=img,
                        landmark_list=face_landmarks,
                        connections=self.FACE_CONNECTIONS,
                        landmark_drawing_spec=self.drawing_spec,
                        connection_drawing_spec=self.drawing_spec
                    )

                face = []
                for lm in face_landmarks.landmark:
                    # Recenter the normalized landmarks and rescale each axis
                    x = (lm.x - 0.5) * 1.5
                    y = -(lm.y - 0.5) * 1.8
                    z = -lm.z * 2.5
                    face.append([x, y, z])
                faces.append(face)

        return img, faces

    def create_mesh_topology(self, vertices):
        """
        Create mesh faces (triangles) from MediaPipe's tesselation edges.
        """
        faces = []
        seen = set()
        edges = set()

        # Collect the unique, order-normalized edges from MediaPipe's connections
        for connection in self.FACE_CONNECTIONS:
            idx1, idx2 = connection
            if idx1 > idx2:
                idx1, idx2 = idx2, idx1
            edges.add((idx1, idx2))

        # Form triangles from pairs of edges that share a vertex
        for edge in edges:
            connected_vertices = []
            for other_edge in edges:
                if other_edge == edge:
                    continue  # skip the edge itself to avoid degenerate triangles
                if edge[0] in other_edge or edge[1] in other_edge:
                    v = other_edge[0] if other_edge[0] not in edge else other_edge[1]
                    connected_vertices.append(v)

            # Create triangles with the connected vertices, de-duplicated via `seen`
            for v in connected_vertices:
                if v < len(vertices):
                    triangle = tuple(sorted((edge[0], edge[1], v)))
                    if triangle not in seen:
                        seen.add(triangle)
                        faces.append(triangle)

        return np.array(faces)

    def save_obj_file(self, vertices, filename):
        """
        Save the 3D mesh as an OBJ file using MediaPipe's topology.
        """
        Path('output').mkdir(exist_ok=True)
        full_path = f"output/{filename}.obj"

        try:
            # Create faces using MediaPipe's topology
            faces = self.create_mesh_topology(vertices)

            with open(full_path, 'w') as f:
                # Write vertices
                for v in vertices:
                    f.write(f"v {v[0]:.6f} {v[1]:.6f} {v[2]:.6f}\n")

                # Write faces (OBJ indices are 1-based)
                for face in faces:
                    f.write(f"f {face[0]+1} {face[1]+1} {face[2]+1}\n")

                # Append per-face normals (informational only; the face records
                # above do not reference them)
                f.write("# Face normals\n")
                for face in faces:
                    v1, v2, v3 = vertices[face]
                    normal = np.cross(v2 - v1, v3 - v1)
                    normal = normal / (np.linalg.norm(normal) + 1e-10)
                    f.write(f"vn {normal[0]:.6f} {normal[1]:.6f} {normal[2]:.6f}\n")

            print(f"Saved 3D model to {full_path}")
            return True
        except Exception as e:
            print(f"Error saving OBJ file: {e}")
            return False


def main():
    detector = FaceMeshDetector()
    cap = cv2.VideoCapture(0)
    prev_time = time.time()
    model_saved = False
    saved_time = 0.0  # time of the last successful save, used to clear the on-screen message

    print("Press 's' to capture and save 3D model")
    print("Press 'q' to quit")

    while True:
        success, img = cap.read()
        if not success:
            print("Failed to grab frame")
            break

        current_time = time.time()
        fps = 1 / max(current_time - prev_time, 1e-6)
        prev_time = current_time

        img, faces = detector.detect_faces(img)

        y_pos = 30
        cv2.putText(img, f"FPS: {fps:.1f}", (10, y_pos),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
        y_pos += 30

        if model_saved:
            cv2.putText(img, "Model Saved!", (10, y_pos),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
            # Clear the confirmation message two seconds after the save
            if time.time() - saved_time > 2:
                model_saved = False
        else:
            cv2.putText(img, "Press 's' to save 3D model", (10, y_pos),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)

        cv2.imshow("Face Mesh", img)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        elif key == ord('s') and faces and not model_saved:
            vertices = np.array(faces[0]) * 150
            timestamp = time.strftime("%Y%m%d-%H%M%S")
            if detector.save_obj_file(vertices, f"face_mesh_{timestamp}"):
                model_saved = True
                saved_time = time.time()

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
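
For a quick check of an exported file, a minimal sketch along these lines could be used. It is not part of the commit above: the filename shown is hypothetical (the script writes output/face_mesh_<timestamp>.obj), and the expected count of 478 vertices assumes MediaPipe Face Mesh with refine_landmarks=True.

# sanity_check_obj.py - hypothetical helper, not part of the commit above
from pathlib import Path

# Adjust to an actual file produced by face_reconstruction_main.py
obj_path = Path("output") / "face_mesh_20240101-120000.obj"  # example name only

vertex_count = 0
face_count = 0
with open(obj_path) as f:
    for line in f:
        if line.startswith("v "):
            vertex_count += 1
        elif line.startswith("f "):
            face_count += 1

# With refine_landmarks=True, MediaPipe emits 478 landmarks per face,
# so 478 vertices are expected here.
print(f"{vertex_count} vertices, {face_count} triangles")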