Shreyaanp committed
Commit ee79ad8 · 1 Parent(s): 606543a

Upload 2 files

Files changed (2)
  1. best_model.h5 +3 -0
  2. videotester.py +52 -0
best_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3b703591706ef7325381a29aacc66fdd849dfa57f8a47800c373ea6a187019a
+ size 17392056
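The three lines above are a Git LFS pointer, not the model itself: cloning the repo without git-lfs installed leaves this small text stub in place of the ~17 MB weights file. A minimal sketch (not part of the commit) for checking that the real file was fetched, using the oid and size recorded in the pointer:

import hashlib

# Values copied from the LFS pointer above
EXPECTED_OID = "c3b703591706ef7325381a29aacc66fdd849dfa57f8a47800c373ea6a187019a"
EXPECTED_SIZE = 17392056

with open("best_model.h5", "rb") as f:
    data = f.read()

# A pointer stub is only a few hundred bytes, so the size check alone catches a missed LFS download
assert len(data) == EXPECTED_SIZE, "size mismatch: best_model.h5 may still be an LFS pointer"
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("best_model.h5 matches its LFS pointer")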
videotester.py ADDED
@@ -0,0 +1,52 @@
+ import warnings
+ warnings.filterwarnings("ignore")
+
+ import cv2
+ import gradio as gr
+ import numpy as np
+ import tensorflow as tf
+ from keras.models import load_model
+
+ # Load the trained emotion classifier (tracked via Git LFS above)
+ model = load_model('best_model.h5')
+
+ # Haar cascade face detector shipped with OpenCV
+ face_haar_cascade = cv2.CascadeClassifier(
+     cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+
+ def analyze_emotion(frame):
+     # Gradio delivers webcam frames as RGB arrays; Haar cascades expect grayscale
+     gray_img = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
+     faces_detected = face_haar_cascade.detectMultiScale(gray_img, scaleFactor=1.32, minNeighbors=5)
+
+     for (x, y, w, h) in faces_detected:
+         cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), thickness=7)
+         roi = frame[y:y + h, x:x + w]  # crop the face region (rows span the height, columns the width)
+         roi = cv2.resize(roi, (224, 224))
+         img_pixels = tf.keras.preprocessing.image.img_to_array(roi)
+         img_pixels = np.expand_dims(img_pixels, axis=0)
+         img_pixels /= 255.0  # scale pixel values to [0, 1]
+
+         predictions = model.predict(img_pixels)
+
+         # index of the highest-scoring emotion class
+         max_index = np.argmax(predictions[0])
+
+         emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')
+         predicted_emotion = emotions[max_index]
+
+         cv2.putText(frame, predicted_emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+
+     return frame
+
+ # analyze_emotion processes one frame at a time, so take a webcam *image* input;
+ # gr.inputs.Video would pass a file path rather than a numpy frame.
+ inputs = gr.inputs.Image(source="webcam")
+ outputs = gr.outputs.Image(type="numpy")
+
+ iface = gr.Interface(fn=analyze_emotion, inputs=inputs, outputs=outputs, live=True,
+                      title="Facial Emotion Analysis",
+                      description="Detects emotions in real-time from webcam input")
+
+ if __name__ == "__main__":
+     iface.launch()
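Since videotester.py guards its launch() call behind __main__, analyze_emotion can also be exercised against a local webcam without the Gradio UI. A minimal harness sketch (not part of the commit; assumes the default camera sits at index 0 and the script runs from the repo root):

import cv2
from videotester import analyze_emotion

cap = cv2.VideoCapture(0)  # hypothetical local webcam at index 0
while cap.isOpened():
    ok, bgr = cap.read()
    if not ok:
        break
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)  # analyze_emotion expects RGB frames, as Gradio sends
    annotated = analyze_emotion(rgb)
    cv2.imshow("Facial Emotion Analysis", cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))  # imshow wants BGR
    if cv2.waitKey(1) & 0xFF == ord("q"):  # press q to quit
        break
cap.release()
cv2.destroyAllWindows()

Note that model.predict runs once per detected face on every frame, so throughput on CPU will sit well below typical webcam frame rates.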