TusharLNT1 committed · a565168 · Parent(s): cfaf4fa

Initial commit

app.py
ADDED
@@ -0,0 +1,50 @@
+import streamlit as st
+import mediapipe as mp
+import cv2
+from PIL import Image
+import numpy as np
+
+# Initialize MediaPipe Face Mesh
+mp_face_mesh = mp.solutions.face_mesh
+mp_drawing = mp.solutions.drawing_utils
+drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
+
+# Face Mesh inference
+def process_image(image):
+    with mp_face_mesh.FaceMesh(
+            static_image_mode=True,
+            max_num_faces=2,
+            min_detection_confidence=0.5) as face_mesh:
+        # Convert the image to RGB for MediaPipe processing
+        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+        results = face_mesh.process(rgb_image)
+
+        # Annotate the image
+        annotated_image = image.copy()
+        if results.multi_face_landmarks:
+            for face_landmarks in results.multi_face_landmarks:
+                mp_drawing.draw_landmarks(
+                    image=annotated_image,
+                    landmark_list=face_landmarks,
+                    connections=mp_face_mesh.FACEMESH_TESSELATION,
+                    landmark_drawing_spec=drawing_spec,
+                    connection_drawing_spec=drawing_spec
+                )
+        return annotated_image
+
+# Streamlit interface
+st.title("Face Mesh with MediaPipe")
+st.write("""
+This app uses MediaPipe's Face Mesh to detect facial landmarks.
+Upload an image to get started.
+""")
+
+# Image upload
+uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
+
+if uploaded_file:
46 |
+
image = np.array(Image.open(uploaded_file))
|
47 |
+
|
48 |
+
# Process and display results
|
49 |
+
processed_image = process_image(image)
|
50 |
+
st.image(cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB), caption="Annotated Image", use_container_width=True)
|
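
This commit adds only app.py. For the Space to build, the imported packages also need to be declared; below is a minimal requirements.txt sketch inferred from the imports above (the headless OpenCV build and the unpinned versions are assumptions, not part of this commit):

    streamlit
    mediapipe
    opencv-python-headless  # or opencv-python; headless avoids GUI system libraries
    Pillow
    numpy

With those installed, the app can be tried locally with "streamlit run app.py" before pushing to the Space.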