annapurnapadmaprema-ji committed on
Commit d56bf6b · verified · 1 Parent(s): d98d4e2

Upload 6 files

Files changed (6)
  1. SVMexec_modeltesting113.pkl +3 -0
  2. app.py +119 -0
  3. packages.txt +1 -0
  4. requirements.txt +16 -0
  5. scaler.pkl +3 -0
  6. style.css +8 -0
SVMexec_modeltesting113.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d74e83d3a13350c461631313d215a466465db8fcb64db2a89c530c7a38e2d78
+ size 71814547
app.py ADDED
@@ -0,0 +1,119 @@
+ import os
+ import numpy as np
+ import cv2
+ import librosa
+ import joblib
+ from deepface import DeepFace
+ import streamlit as st
+ from collections import Counter
+ from moviepy.editor import VideoFileClip
+
+
+ emotion_map = {
+     'angry': 0,
+     'disgust': 1,
+     'fear': 2,
+     'happy': 3,
+     'neutral': 4,
+     'sad': 5
+ }
+
+
+ def split_video_into_frames_and_analyze_emotions(video_path, frame_rate=1):
+     cap = cv2.VideoCapture(video_path)
+     if not cap.isOpened():
+         st.error("Error: Could not open video.")
+         return None
+
+     frame_count = 0
+     success, frame = cap.read()
+
+     emotion_counter = Counter()
+
+     while success:
+         if frame_count % frame_rate == 0:
+             try:
+                 analysis = DeepFace.analyze(frame, actions=['emotion'])
+                 if isinstance(analysis, list):
+                     for result in analysis:
+                         dominant_emotion = result['dominant_emotion']
+                         emotion_counter[dominant_emotion] += 1
+                 else:
+                     dominant_emotion = analysis['dominant_emotion']
+                     emotion_counter[dominant_emotion] += 1
+             except Exception:
+                 pass
+
+         success, frame = cap.read()
+         frame_count += 1
+
+     cap.release()
+
+     if emotion_counter:
+         highest_occurring_emotion = emotion_counter.most_common(1)[0][0]
+     else:
+         highest_occurring_emotion = None
+
+     return highest_occurring_emotion
+
+ def extract_audio_from_video(video_path):
+     video_clip = VideoFileClip(video_path)
+     audio_path = "temp_audio.wav"
+     video_clip.audio.write_audiofile(audio_path)
+     audio_array, sr = librosa.load(audio_path, sr=None)
+     os.remove(audio_path)
+     return audio_array, sr
+
+ def extract_features(audio_array, sr, max_length=100):
+     try:
+         mfccs = librosa.feature.mfcc(y=audio_array, sr=sr, n_mfcc=13)
+         chroma = librosa.feature.chroma_stft(y=audio_array, sr=sr)
+         spectral_contrast = librosa.feature.spectral_contrast(y=audio_array, sr=sr)
+
+         features = np.vstack([mfccs, chroma, spectral_contrast])
+         if features.shape[1] < max_length:
+             features = np.pad(features, ((0, 0), (0, max_length - features.shape[1])), mode='constant')
+         elif features.shape[1] > max_length:
+             features = features[:, :max_length]
+         return features.T
+     except Exception as e:
+         st.error(f"Error extracting features from audio: {str(e)}")
+         return None
+
+ def main():
+     with open("style.css") as f:
+         st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
+     st.title("Emotion Detection from Video")
+
+     uploaded_file = st.file_uploader("Upload a video", type=["mp4"])
+     if uploaded_file is not None:
+         video_path = "uploaded_video.mp4"
+         with open(video_path, "wb") as f:
+             f.write(uploaded_file.read())
+
+         st.write("Processing video... please wait")
+         highest_emotion = split_video_into_frames_and_analyze_emotions(video_path)
+         audio_array, sr = extract_audio_from_video(video_path)
+
+         model_path = "SVMexec_modeltesting113.pkl"
+         svm_model = joblib.load(model_path)
+         scaler = joblib.load('scaler.pkl')
+
+         features = extract_features(audio_array, sr)
+         if features is not None:
+             features_2d = features.reshape(1, -1)
+             features_normalized = scaler.transform(features_2d)
+
+             predicted_class = svm_model.predict(features_normalized)[0]
+             emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad']
+             predicted_emotion = emotion_labels[predicted_class]
+
+             if highest_emotion == predicted_emotion:
+                 st.write(f"The person in the video is {predicted_emotion}.")
+             else:
+                 st.write(f"The emotions from the frames and audio do not match, but the facial expression seems to be {highest_emotion}, while the audio emotion seems to be {predicted_emotion}.")
+         else:
+             st.write("Failed to extract features from the audio file.")
+
+ if __name__ == "__main__":
+     main()
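Taken together, app.py combines two signals for each upload: the most frequent DeepFace facial emotion across the video frames, and an SVM prediction on the flattened audio features (13 MFCCs, 12 chroma bins, and spectral contrast, padded or truncated to 100 frames) after scaling with scaler.pkl. A quick local sanity check along these lines (a sketch only, assuming both pickles are scikit-learn estimators exposing n_features_in_) can confirm that the flattened feature length produced by extract_features matches what the committed scaler and SVM were fitted on:

import joblib
import numpy as np

# Load the committed model and scaler (the Git LFS files must be pulled first).
scaler = joblib.load("scaler.pkl")
svm_model = joblib.load("SVMexec_modeltesting113.pkl")

# With the settings in app.py and librosa defaults (13 MFCCs + 12 chroma bins
# + 7 spectral-contrast bands, padded/truncated to 100 frames), the flattened
# vector handed to the scaler has 32 * 100 = 3200 values.
print("scaler expects:", scaler.n_features_in_)
print("SVM expects:", getattr(svm_model, "n_features_in_", "unknown"))

# A shape-compatible dummy vector should pass through transform/predict without error.
dummy = np.zeros((1, scaler.n_features_in_))
print("prediction on an all-zero vector:", svm_model.predict(scaler.transform(dummy)))

If the printed sizes differ from 3200, the feature set or max_length used here no longer matches training, and scaler.transform would raise a shape error at runtime.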
packages.txt ADDED
@@ -0,0 +1 @@
+ libgl1
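requirements.txt installs opencv-python-headless, but deepface typically pulls in the regular opencv-python wheel as a dependency, and that build needs libGL.so.1 at import time; the single apt package listed here, libgl1, is most likely present to satisfy that.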
requirements.txt ADDED
@@ -0,0 +1,16 @@
+ numpy<2
+ librosa
+ joblib
+ torch
+ moviepy
+ scikit-learn
+ opencv-python-headless
+ streamlit
+ Pillow
+ deepface
+ tensorflow
+ tf-keras
+ pydub
+ imageio
+ ffmpeg-python
+
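One version note: app.py uses from moviepy.editor import VideoFileClip, an import path that exists in moviepy 1.x but was removed in moviepy 2.0, so the unpinned moviepy entry above may need to be constrained to moviepy<2 for the app to keep starting. The numpy<2 pin presumably keeps the stack compatible with TensorFlow/DeepFace builds that still expect NumPy 1.x.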
scaler.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d373f3b7e456bf96fec1d03b56b42e99ab43ff10e4623f8e6970ca63bbba27dd
+ size 77415
style.css ADDED
@@ -0,0 +1,8 @@
+ .stApp {
+     background-image: url('https://i.postimg.cc/2yFrwJWM/Blue-And-Pink-Aesthetic-Desktop-Wallpaper.png');
+     background-size: cover;
+     background-position: center;
+     background-repeat: no-repeat;
+
+
+ }