abrar-adnan committed on
Commit
fbbc260
1 Parent(s): 2d50f88

Update app.py

Files changed (1)
  1. app.py +30 -18
app.py CHANGED
@@ -67,6 +67,15 @@ def video_processing(video_file, encoded_video):
     surprise = 0
     neutral = 0
     emotion_count = 0
+    video_emotions = {
+        'angry': 0,
+        'disgust': 0,
+        'fear': 0,
+        'happy': 0,
+        'sad': 0,
+        'surprise': 0,
+        'neutral': 0
+    }

     if encoded_video != "":

@@ -120,7 +129,10 @@ def video_processing(video_file, encoded_video):
             resized_face_image = cv2.resize(face_image, (128,128))

             try:
-                emotion = DeepFace.analyze(color_image,actions=['emotion'],detector_backend = backends[2],enforce_detection = False)# 2,3, 4 works
+                detected_face_emotion = DeepFace.analyze(color_image,actions=['emotion'],detector_backend = backends[2],enforce_detection = False)# 2,3, 4 works
+                for emotion in detected_face_emotion:
+                    for key in video_emotions.keys():
+                        video_emotions[key] += emotion['emotion'][key]
                 emotion_count += 1
             except Exception as e:
                 emotion = 0
@@ -135,13 +147,13 @@ def video_processing(video_file, encoded_video):
                 off_camera += 1
             total += 1

-            angry += emotion[0]['emotion']['angry']
-            disgust += emotion[0]['emotion']['disgust']
-            fear += emotion[0]['emotion']['fear']
-            happy += emotion[0]['emotion']['happy']
-            sad += emotion[0]['emotion']['sad']
-            surprise += emotion[0]['emotion']['surprise']
-            neutral += emotion[0]['emotion']['neutral']
+            # angry += emotion[0]['emotion']['angry']
+            # disgust += emotion[0]['emotion']['disgust']
+            # fear += emotion[0]['emotion']['fear']
+            # happy += emotion[0]['emotion']['happy']
+            # sad += emotion[0]['emotion']['sad']
+            # surprise += emotion[0]['emotion']['surprise']
+            # neutral += emotion[0]['emotion']['neutral']

             try:
                 # your processing code here
@@ -167,18 +179,18 @@ def video_processing(video_file, encoded_video):
     sad = sad / emotion_count
     surprise = surprise / emotion_count
     neutral = neutral / emotion_count
-    emotion = {
-        'angry': angry,
-        'disgust': disgust,
-        'fear': fear,
-        'happy': happy,
-        'sad': sad,
-        'surprise': surprise,
-        'neutral': neutral
-    },
+    # emotion = {
+    #     'angry': angry,
+    #     'disgust': disgust,
+    #     'fear': fear,
+    #     'happy': happy,
+    #     'sad': sad,
+    #     'surprise': surprise,
+    #     'neutral': neutral
+    # },
     final_result_dict = {
         "gaze_percentage" : gaze_percentage,
-        "face_emotion" : emotion,
+        "face_emotion" : video_emotions,
         "text_emotion" : text_emotion,
         "transcription" : transcription,
         "text_sentiment" : text_sentiment