Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -1,147 +1,64 @@
 import streamlit as st
-from keras.preprocessing.image import ImageDataGenerator
-from keras.models import Sequential
-from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
-import matplotlib.pyplot as plt
 import numpy as np
-import os
-from sklearn.metrics import accuracy_score, confusion_matrix
-import seaborn as sns
-
-# Function to plot the confusion matrix
-def plot_confusion_matrix(cm, class_labels):
-    plt.figure(figsize=(8, 6))
-    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=class_labels, yticklabels=class_labels)
-    plt.ylabel('True label')
-    plt.xlabel('Predicted label')
-    st.pyplot()
-
-# Function to plot the training loss and accuracy
-def plot_training_history(history):
-    epochs = range(1, len(history.history['loss']) + 1)
-    plt.figure(figsize=(10, 6))
-
-    plt.subplot(1, 2, 1)
-    plt.plot(epochs, history.history['loss'], 'y', label='Training loss')
-    plt.plot(epochs, history.history['val_loss'], 'r', label='Validation loss')
-    plt.title('Training and validation loss')
-    plt.xlabel('Epochs')
-    plt.ylabel('Loss')
-    plt.legend()
-
-    plt.subplot(1, 2, 2)
-    plt.plot(epochs, history.history['accuracy'], 'y', label='Training acc')
-    plt.plot(epochs, history.history['val_accuracy'], 'r', label='Validation acc')
-    plt.title('Training and validation accuracy')
-    plt.xlabel('Epochs')
-    plt.ylabel('Accuracy')
-    plt.legend()
-
-    st.pyplot()
-
-# Streamlit UI layout
-st.title('Emotion Detection from Facial Expressions')
-
-# Paths
-train_data_dir = 'data/train/'
-validation_data_dir = 'data/test/'
-
-# Hyperparameters
-IMG_HEIGHT = 48
-IMG_WIDTH = 48
-batch_size = 32
-epochs = 50
-
-# Image generators
-train_datagen = ImageDataGenerator(rescale=1./255,
-                                   rotation_range=30,
-                                   shear_range=0.3,
-                                   zoom_range=0.3,
-                                   horizontal_flip=True,
-                                   fill_mode='nearest')
-
-validation_datagen = ImageDataGenerator(rescale=1./255)
-
-train_generator = train_datagen.flow_from_directory(train_data_dir,
-                                                    color_mode='grayscale',
-                                                    target_size=(IMG_HEIGHT, IMG_WIDTH),
-                                                    batch_size=batch_size,
-                                                    class_mode='categorical',
-                                                    shuffle=True)
-
-validation_generator = validation_datagen.flow_from_directory(validation_data_dir,
-                                                              color_mode='grayscale',
-                                                              target_size=(IMG_HEIGHT, IMG_WIDTH),
-                                                              batch_size=batch_size,
-                                                              class_mode='categorical',
-                                                              shuffle=True)

 class_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']

-# ... (model definition lines truncated in this diff view) ...
-    Dense(7, activation='softmax')
-])
-
-model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
-st.text(model.summary())
-
-train_path = "data/train/"
-test_path = "data/test/"
-
-num_train_imgs = sum([len(files) for r, d, files in os.walk(train_path)])
-num_test_imgs = sum([len(files) for r, d, files in os.walk(test_path)])
-
-# Train the model if button is pressed
-if st.button('Train Model'):
-    history = model.fit(train_generator,
-                        steps_per_epoch=num_train_imgs // batch_size,
-                        epochs=epochs,
-                        validation_data=validation_generator,
-                        validation_steps=num_test_imgs // batch_size)
-    model.save('models/emotion_detection_model_50epochs.h5')
-    st.success("Model trained and saved successfully!")
 import streamlit as st
+from keras.models import load_model
+import cv2
 import numpy as np
+import time
+from PIL import Image

+# Load the pre-trained model
+model = load_model('models/emotion_detection_model_50epochs.h5', compile=False)
 class_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']

+# Function to preprocess each frame for prediction
+def preprocess_frame(frame):
+    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Convert to grayscale
+    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(48, 48))
+
+    if len(faces) > 0:
+        (x, y, w, h) = faces[0]  # Use the first detected face
+        face = gray[y:y+h, x:x+w]
+        face = cv2.resize(face, (48, 48))
+        face = face.astype('float32') / 255.0  # Normalize pixel values
+        face = np.expand_dims(face, axis=0)
+        face = np.expand_dims(face, axis=-1)  # Reshape for the model (48, 48, 1)
+        return face, (x, y, w, h)
+    return None, None

+# Streamlit UI layout
+st.title("Real-time Emotion Detection")
+
+run = st.checkbox('Run Camera')
+
+FRAME_WINDOW = st.image([])
+
+# Start the camera and predict emotion
+cap = cv2.VideoCapture(0)  # Open default camera (change index if necessary)
+
+if run:
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        # Preprocess the frame
+        face, face_coords = preprocess_frame(frame)
+        if face is not None:
+            # Make emotion prediction
+            predictions = model.predict(face)
+            emotion_label = class_labels[np.argmax(predictions)]
+
+            # Draw a rectangle around the face and display the emotion label
+            (x, y, w, h) = face_coords
+            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
+            cv2.putText(frame, emotion_label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)
+
+        # Convert BGR image to RGB
+        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        FRAME_WINDOW.image(frame_rgb)
+
+        # Small delay for smooth output
+        time.sleep(0.03)
+else:
+    cap.release()
+    cv2.destroyAllWindows()
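For a quick offline check of the new inference path, the same preprocessing and prediction steps can be run on a single still image instead of the webcam loop. This is only a sketch, not part of the commit: it assumes the saved model file referenced above is present, and 'sample_face.jpg' is a placeholder path for any test image containing a face.

import cv2
import numpy as np
from keras.models import load_model

# Load the same model and labels used by app.py
model = load_model('models/emotion_detection_model_50epochs.h5', compile=False)
class_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']

# Detect faces with the same Haar cascade, then repeat the crop/resize/normalize steps
frame = cv2.imread('sample_face.jpg')  # placeholder path: any BGR image with a visible face
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(48, 48))

for (x, y, w, h) in faces:
    face = cv2.resize(gray[y:y+h, x:x+w], (48, 48)).astype('float32') / 255.0
    face = face.reshape(1, 48, 48, 1)  # add the batch and channel dimensions the model expects
    predictions = model.predict(face)
    print(class_labels[int(np.argmax(predictions))], (x, y, w, h))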