import streamlit as st
import mediapipe as mp
import cv2
from PIL import Image
import numpy as np

# Initialize MediaPipe Face Mesh
mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
# Face Mesh inference: expects a BGR image (OpenCV convention) and returns an annotated BGR copy
def process_image(image):
    with mp_face_mesh.FaceMesh(
            static_image_mode=True,
            max_num_faces=2,
            min_detection_confidence=0.5) as face_mesh:
        # Convert the image to RGB for MediaPipe processing
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = face_mesh.process(rgb_image)

        # Annotate the image
        annotated_image = image.copy()
        if results.multi_face_landmarks:
            for face_landmarks in results.multi_face_landmarks:
                mp_drawing.draw_landmarks(
                    image=annotated_image,
                    landmark_list=face_landmarks,
                    connections=mp_face_mesh.FACEMESH_TESSELATION,
                    landmark_drawing_spec=drawing_spec,
                    connection_drawing_spec=drawing_spec
                )
        return annotated_image
# Streamlit interface
st.title("Face Mesh with MediaPipe")
st.write("""
This app uses MediaPipe's Face Mesh to detect facial landmarks.
Upload an image to get started.
""")

# Image upload
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
if uploaded_file:
    # PIL decodes uploads as RGB (or RGBA); convert to BGR so the OpenCV-style pipeline stays consistent
    image = cv2.cvtColor(np.array(Image.open(uploaded_file).convert("RGB")), cv2.COLOR_RGB2BGR)

    # Process and display the result (converted back to RGB for Streamlit)
    processed_image = process_image(image)
    st.image(cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB), caption="Annotated Image", use_container_width=True)
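
To sanity-check the landmark pipeline without launching the UI, a minimal sketch along these lines calls process_image directly. It assumes the script above is saved as app.py and that a local test image named sample.jpg exists (both names are placeholders); importing app.py also executes the Streamlit calls at module level, which only prints warnings when run outside streamlit run.

# quick_check.py -- offline sanity check (a sketch; file names are placeholders)
import cv2

from app import process_image  # assumes the Streamlit script above is saved as app.py

bgr = cv2.imread("sample.jpg")           # OpenCV reads the file as a BGR array
if bgr is None:
    raise SystemExit("could not read sample.jpg")

annotated = process_image(bgr)           # BGR in, annotated BGR copy out
cv2.imwrite("annotated.jpg", annotated)  # inspect the result on disk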