"""Gradio app that flags per-frame stress in a video from facial cues:
the distance between the inner eyebrow landmarks (dlib 68-point model)
combined with a Keras facial-emotion classifier."""

import time

import cv2
import dlib
import gradio as gr
import imutils
import numpy as np
from huggingface_hub import hf_hub_download
from imutils import face_utils
from keras.models import load_model
from keras.preprocessing.image import img_to_array
from scipy.spatial import distance as dist

# Running history of eyebrow distances across frames; each new measurement
# is normalized against the range seen so far.
points = []
emotion_classifier = None

repo_id = "jaimin/Age_detection"

# Fetch the dlib facial-landmark predictor and the emotion model from the
# Hugging Face Hub.
predictor_path = hf_hub_download(repo_id=repo_id, filename="shape_predictor_68_face_landmarks.dat")
emotion_model_path = hf_hub_download(repo_id=repo_id, filename="XCEPTION.102-0.66.hdf5")
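# Note: judging by its filename, "XCEPTION.102-0.66.hdf5" is presumably the
# mini-XCEPTION emotion classifier trained on FER-2013 (7 classes, 64x64
# grayscale input); that assumption matches the preprocessing done in
# emotion_finder() below.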


def eye_brow_distance(leye, reye):
    global points
    # Euclidean distance between the inner eyebrow endpoints; the running
    # history in `points` is what normalize_values() scales against.
    distq = dist.euclidean(leye, reye)
    points.append(int(distq))
    return distq


def emotion_finder(faces, frame):
    global emotion_classifier
    # Label order must match the class order the emotion model was trained with.
    EMOTIONS = ["angry", "disgust", "scared", "happy", "sad", "surprised", "neutral"]
    (x, y, w, h) = face_utils.rect_to_bb(faces)
    # Crop the detected face and preprocess it to the model's 64x64 input.
    frame = frame[y:y + h, x:x + w]
    roi = cv2.resize(frame, (64, 64))
    roi = roi.astype("float") / 255.0
    roi = img_to_array(roi)
    roi = np.expand_dims(roi, axis=0)
    preds = emotion_classifier.predict(roi)[0]
    label = EMOTIONS[preds.argmax()]
    return label


def normalize_values(points, disp):
    # Min-max normalize the current distance against the history, then map it
    # through exp(-x): distances near the observed minimum approach 1.0 and
    # distances near the observed maximum approach exp(-1).
    value_range = abs(np.max(points) - np.min(points))
    if value_range == 0:
        # Constant history (e.g. the very first frame): there is no range to
        # normalize against, so fall back to the lowest stress value.
        return np.exp(-1.0)
    normalized_value = abs(disp - np.min(points)) / value_range
    stress_value = np.exp(-normalized_value)
    return stress_value
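
# Worked example for normalize_values() with illustrative numbers: given
# points = [40, 42, 45] and disp = 42, normalized_value = |42 - 40| / |45 - 40|
# = 0.4, so stress_value = exp(-0.4) ~= 0.67. The 0.75 threshold used in
# stress() therefore fires only when the eyebrow distance is close to the
# minimum observed so far (normalized_value <= -ln(0.75) ~= 0.29).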


def stress(video_path, duration):
    global points, emotion_classifier

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    cap = cv2.VideoCapture(video_path)
    points = []
    stress_labels = []
    start_time = time.time()

    # dlib numbers landmarks from the subject's perspective, so the subject's
    # right eyebrow sits on the left side of the image.
    (lBegin, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eyebrow"]
    (rBegin, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eyebrow"]

    while True:
        # Stop after `duration` seconds of wall-clock processing time
        # (not video time).
        if time.time() - start_time >= duration:
            break

        ret, frame = cap.read()
        if not ret:
            break

        frame = cv2.flip(frame, 1)  # mirror horizontally
        frame = imutils.resize(frame, width=500)  # height follows aspect ratio

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        try:
            detections = detector(gray, 0)
            for detection in detections:
                emotion = emotion_finder(detection, gray)
                shape = predictor(gray, detection)
                shape = face_utils.shape_to_np(shape)

                leyebrow = shape[lBegin:lEnd]
                reyebrow = shape[rBegin:rEnd]

                # Distance between the inner endpoints of the two eyebrows.
                distq = eye_brow_distance(leyebrow[-1], reyebrow[0])
                stress_value = normalize_values(points, distq)

                # A frame counts as stressed only when a negative emotion
                # coincides with a high normalized stress value.
                if emotion in ['scared', 'sad', 'angry'] and stress_value >= 0.75:
                    stress_label = 'stressed'
                else:
                    stress_label = 'not stressed'

                stress_labels.append(stress_label)
        except Exception as e:
            print(f'Error: {e}')

    cap.release()

    stressed_count = stress_labels.count('stressed')
    not_stressed_count = stress_labels.count('not stressed')

    if stressed_count > not_stressed_count:
        most_frequent_label = 'stressed'
    else:
        most_frequent_label = 'not stressed'

    return stressed_count, not_stressed_count, most_frequent_label
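
# A minimal usage sketch outside the Gradio UI (hypothetical file path):
#   stressed, not_stressed, label = stress("interview.mp4", duration=10)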


def gradio_interface(video, duration):
    stressed_count, not_stressed_count, most_frequent_label = stress(video, duration)
    return (
        f"Stressed frames: {stressed_count}",
        f"Not stressed frames: {not_stressed_count}",
        f"Most frequent state: {most_frequent_label}",
    )


gr.Interface(
    fn=gradio_interface,
    inputs=[gr.Video(label="Upload a video file"), gr.Number(value=30, label="Duration (seconds)")],
    # One text output per value returned by gradio_interface.
    outputs=[
        gr.Textbox(label="Stressed frames"),
        gr.Textbox(label="Not stressed frames"),
        gr.Textbox(label="Most frequent state"),
    ],
    title="Stress Measurement from Video",
).launch(server_name="0.0.0.0")