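"""Gradio demo that analyzes an uploaded (or base64-encoded) video.

It combines gaze classification (a fastai learner), facial emotion analysis
(DeepFace), and Whisper speech transcription, then runs text emotion and
sentiment pipelines on the transcript and returns a single JSON summary.
"""
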
import gradio as gr
import os
import cv2
import face_recognition
from fastai.vision.all import load_learner
import time
import base64
from deepface import DeepFace
import torchaudio
import moviepy.editor as mp
from transformers import WhisperProcessor, WhisperForConditionalGeneration, pipeline
# import pathlib
# temp = pathlib.PosixPath
# pathlib.PosixPath = pathlib.WindowsPath
backends = [
    'opencv',
    'ssd',
    'dlib',
    'mtcnn',
    'retinaface',
    'mediapipe'
]
emotion_pipeline = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", return_all_scores=True)
sentiment_pipeline = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
model = load_learner("gaze-recognizer-v4.pkl")


def analyze_emotion(text):
    result = emotion_pipeline(text)
    return result


def analyze_sentiment(text):
    result = sentiment_pipeline(text)
    return result


def getTranscription(path):
    # Extract the audio track from the video and save it as a WAV file
    clip = mp.VideoFileClip(path)
    clip.audio.write_audiofile(r"audio.wav")

    # Load the audio and resample it to the 16 kHz expected by Whisper
    waveform, sample_rate = torchaudio.load("audio.wav")
    resampler = torchaudio.transforms.Resample(sample_rate, 16000)
    waveform = resampler(waveform)[0]

    processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
    model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
    model.config.forced_decoder_ids = None

    input_features = processor(waveform.squeeze(dim=0), sampling_rate=16000, return_tensors="pt").input_features
    predicted_ids = model.generate(input_features)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)

    return transcription[0]


def video_processing(video_file, encoded_video):
    emotion_count = 0
    video_emotions = {
        'angry': 0,
        'disgust': 0,
        'fear': 0,
        'happy': 0,
        'sad': 0,
        'surprise': 0,
        'neutral': 0
    }

    # If the video arrives base64-encoded, write it to a temporary file first
    if encoded_video != "":
        decoded_file_data = base64.b64decode(encoded_video)
        with open("temp_video.mp4", "wb") as f:
            f.write(decoded_file_data)
        video_file = "temp_video.mp4"
    start_time = time.time()

    transcription = getTranscription(video_file)
    print(transcription)
    text_emotion = analyze_emotion(transcription)
    print(text_emotion)
    text_sentiment = analyze_sentiment(transcription)
    print(text_sentiment)

    video_capture = cv2.VideoCapture(video_file)

    on_camera = 0
    off_camera = 0
    total = 0
    while True:
        # Skip ahead: read a few seconds' worth of frames and keep only the last one
        for i in range(24 * 3):
            ret, frame = video_capture.read()
            if not ret:
                break

        # If there are no more frames, break out of the loop
        if not ret:
            break

        # Convert the frame to grayscale for face detection and gaze prediction
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Find all the faces in the frame
        face_locations = face_recognition.face_locations(gray)

        if len(face_locations) > 0:
            for top, right, bottom, left in face_locations:
                # cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                face_image = gray[top:bottom, left:right]
                color_image = frame[top:bottom, left:right]

                # Resize the face crop to the input size expected by the gaze model
                resized_face_image = cv2.resize(face_image, (128, 128))

                # Accumulate DeepFace emotion scores for this face
                try:
                    detected_face_emotion = DeepFace.analyze(
                        color_image,
                        actions=['emotion'],
                        detector_backend=backends[2],  # backends 2, 3 and 4 work
                        enforce_detection=False
                    )
                    for emotion in detected_face_emotion:
                        for key in video_emotions.keys():
                            video_emotions[key] += emotion['emotion'][key]
                    emotion_count += 1
                except Exception:
                    pass

                # Predict whether the subject is looking at the camera
                result = model.predict(resized_face_image)
                print(result[0])
                if result[0] == 'on_camera':
                    on_camera += 1
                elif result[0] == 'off_camera':
                    off_camera += 1
                total += 1
    try:
        gaze_percentage = on_camera / total * 100
    except Exception as e:
        print(f"An error occurred while processing the video: {e}")
        gaze_percentage = 'ERROR : no face detected'

    print(f'Total = {total}, on_camera = {on_camera}, off_camera = {off_camera}')

    # Release the video capture object and close all windows
    video_capture.release()
    cv2.destroyAllWindows()

    end_time = time.time()
    print(f'Time taken: {end_time - start_time}')

    # Clean up the temporary files created during processing
    if os.path.exists("temp_video.mp4"):
        os.remove("temp_video.mp4")
    if os.path.exists("audio.wav"):
        os.remove("audio.wav")

    print(gaze_percentage)
    # Average the accumulated emotion scores over the number of analyzed faces
    if emotion_count > 0:
        for key in video_emotions.keys():
            video_emotions[key] /= emotion_count

    # Rename DeepFace's keys to match the labels used by the text-emotion model
    video_emotions['anger'] = video_emotions.pop('angry')
    video_emotions['joy'] = video_emotions.pop('happy')
    video_emotions['sadness'] = video_emotions.pop('sad')

    final_result_dict = {
        "gaze_percentage": gaze_percentage,
        "face_emotion": video_emotions,
        "text_emotion": text_emotion[0],
        "transcription": transcription,
        "text_sentiment": text_sentiment
    }

    return final_result_dict


demo = gr.Interface(fn=video_processing,
                    inputs=["video", "text"],
                    outputs="json")

if __name__ == "__main__":
    demo.launch()
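# A minimal local sanity check, kept commented out so it does not interfere
# with the Space. It assumes a hypothetical test clip named "sample.mp4" next
# to this script; the empty second argument means the path is used directly
# rather than a base64 payload.
#
#     print(video_processing("sample.mp4", ""))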