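"""Cross-model sentiment analysis demo (Gradio app).

Pipeline: sample one video frame per second and score facial emotions via the
Hugging Face Inference API, transcribe the audio track with faster-whisper,
score each transcript segment with the go_emotions text classifier, then
aggregate the per-segment and per-frame scores into overall sentiment labels.
"""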
import math
import os
from io import BytesIO
import gradio as gr
import cv2
import requests
from pydub import AudioSegment
from faster_whisper import WhisperModel
theme = gr.themes.Base(
    primary_hue="cyan",
    secondary_hue="blue",
    neutral_hue="slate",
)
# Whisper "small" on CPU with int8 quantization keeps memory usage modest.
model = WhisperModel("small", device="cpu", compute_type="int8")

# API_KEY must be set in the environment to a Hugging Face access token.
API_KEY = os.getenv("API_KEY")
FACE_API_URL = "https://api-inference.huggingface.co/models/dima806/facial_emotions_image_detection"
TEXT_API_URL = "https://api-inference.huggingface.co/models/SamLowe/roberta-base-go_emotions"
headers = {"Authorization": f"Bearer {API_KEY}"}
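# Frame sampling: one frame per second (interval == fps). Each sampled frame
# is JPEG-encoded and posted as raw bytes to the facial-emotion endpoint,
# which the code expects to answer with a list of {"label", "score"} entries.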
def extract_frames(video_path):
    cap = cv2.VideoCapture(video_path)
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    interval = fps  # sample one frame per second of video
    images = []
    result = []  # local, so repeated runs don't accumulate stale scores
    for i in range(0, total_frames, interval):
        cap.set(cv2.CAP_PROP_POS_FRAMES, i)
        ret, frame = cap.read()
        if ret:
            _, img_encoded = cv2.imencode('.jpg', frame)
            img_bytes = img_encoded.tobytes()
            response = requests.post(FACE_API_URL, headers=headers, data=img_bytes)
            temp = {item['label']: item['score'] for item in response.json()}
            result.append(temp)
            images.append((cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), f"Sentiments: {temp}"))
    print("Frame extraction completed.")
    cap.release()
    return images, result
def analyze_sentiment(text):
    # The Inference API expects a JSON payload of the form {"inputs": ...};
    # this text-classification model returns a nested list of
    # {"label", "score"} dicts, hence the [0].
    response = requests.post(TEXT_API_URL, headers=headers, json={"inputs": text})
    sentiment_list = response.json()[0]
    sentiment_results = {entry['label']: entry['score'] for entry in sentiment_list}
    return sentiment_results
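# Main processing entry point. Alignment assumption: extract_frames returns
# one sentiment dict per second of video, so Whisper's per-segment start/end
# timestamps (in seconds) can be used directly as indices into
# frames_sentiments when averaging facial emotions over a spoken segment.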
def video_to_audio(input_video):
    cap = cv2.VideoCapture(input_video)
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    cap.release()

    # Extract the audio track and transcribe it in memory.
    audio = AudioSegment.from_file(input_video)
    audio_binary = audio.export(format="wav").read()
    audio_bytesio = BytesIO(audio_binary)
    segments, info = model.transcribe(audio_bytesio, beam_size=5)
    print("Detected language '%s' with probability %f" % (info.language, info.language_probability))

    frames_images, frames_sentiments = extract_frames(input_video)

    transcript = ''
    audio_divide_sentiment = ''
    video_sentiment_markdown = ''
    video_sentiment_final = []
    final_output = []
    for segment in segments:
        transcript = transcript + segment.text + " "
        transcript_segment_sentiment = analyze_sentiment(segment.text)
        audio_divide_sentiment += "[%.2fs -> %.2fs] %s : %s\n" % (segment.start, segment.end, segment.text, transcript_segment_sentiment)
        # Accumulator keyed by the union of the go_emotions text labels and
        # the facial model's labels (angry / happy / sad).
        emotion_totals = dict.fromkeys([
            'admiration', 'amusement', 'angry', 'annoyance', 'approval',
            'caring', 'confusion', 'curiosity', 'desire', 'disappointment',
            'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear',
            'gratitude', 'grief', 'happy', 'love', 'nervousness', 'optimism',
            'pride', 'realization', 'relief', 'remorse', 'sad', 'surprise',
            'neutral',
        ], 0.0)
        counter = 0
        for i in range(math.ceil(segment.start), math.floor(segment.end)):
            if i >= len(frames_sentiments):
                break  # segment runs past the last sampled frame
            for emotion in frames_sentiments[i].keys():
                emotion_totals[emotion] += frames_sentiments[i].get(emotion)
            counter += 1  # count sampled frames, not individual emotions
        if counter:
            for emotion in emotion_totals:
                emotion_totals[emotion] /= counter
        video_sentiment_final.append(emotion_totals)
        video_segment_sentiment = {key: value for key, value in emotion_totals.items() if value != 0.0}
        video_sentiment_markdown += f"Frame {fps*math.ceil(segment.start)} - Frame {fps*math.floor(segment.end)} : {video_segment_sentiment}\n"
        segment_finals = {segment.id: (segment.text, segment.start, segment.end, transcript_segment_sentiment, video_segment_sentiment)}
        final_output.append(segment_finals)
    # Whole-transcript sentiment, keeping only labels scoring at least 0.01.
    total_transcript_sentiment = {key: value for key, value in analyze_sentiment(transcript).items() if value >= 0.01}
    # Video-wide facial-emotion accumulator, same label set as above.
    emotion_finals = dict.fromkeys([
        'admiration', 'amusement', 'angry', 'annoyance', 'approval',
        'caring', 'confusion', 'curiosity', 'desire', 'disappointment',
        'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear',
        'gratitude', 'grief', 'happy', 'love', 'nervousness', 'optimism',
        'pride', 'realization', 'relief', 'remorse', 'sad', 'surprise',
        'neutral',
    ], 0.0)
    # Average the per-segment facial scores across every segment.
    for segment_scores in video_sentiment_final:
        for emotion, score in segment_scores.items():
            emotion_finals[emotion] += score
    if video_sentiment_final:
        for emotion in emotion_finals:
            emotion_finals[emotion] /= len(video_sentiment_final)
    emotion_finals = {key: value for key, value in emotion_finals.items() if value != 0.0}
    print("Processing Completed!!")
    return str(final_output), frames_images, total_transcript_sentiment, audio_divide_sentiment, video_sentiment_markdown, emotion_finals
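# Gradio UI: a header column, an input/output row, and a collapsible accordion
# with the raw per-segment outputs. The custom CSS gives the container a
# translucent "glassmorphism" card look.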
with gr.Blocks(theme=theme, css=".gradio-container { background: rgba(255, 255, 255, 0.2) !important; box-shadow: 0 8px 32px 0 rgba(31, 38, 135, 0.37) !important; backdrop-filter: blur(10px) !important; -webkit-backdrop-filter: blur(10px) !important; border-radius: 10px !important; border: 1px solid rgba(0, 0, 0, 0.5) !important;}") as Video:
    with gr.Column():
        gr.Markdown("""# Cross-Model Machine Learning Model""")
        with gr.Row():
            gr.Markdown("""
            ### A cross-model ML approach to video processing for healthcare sentiment analysis combines several machine learning models to analyze the sentiments expressed in healthcare-related videos.
            - Facial Expression Recognition Model [dima806/facial_emotions_image_detection](https://huggingface.co/dima806/facial_emotions_image_detection)
            - Speech Recognition Model [OpenAI/Whisper](https://github.com/openai/whisper)
            - Text Analysis Model [RoBERTa-base-go-emotions](https://huggingface.co/SamLowe/roberta-base-go_emotions)
            - Contextual Understanding Model (Sentiment Analysis)
            """)
        gr.Markdown("""### By combining the outputs of these models, the cross-model approach captures a more comprehensive view of the sentiment within a healthcare-related video, giving healthcare providers insight into patient experiences and emotions and supporting improvements in healthcare services.""")
    with gr.Row():
        with gr.Column():
            input_video = gr.Video(sources=["upload", "webcam"])
            button = gr.Button("Process", variant="primary")
            gr.Examples(inputs=input_video, examples=[os.path.join(os.path.dirname(__file__), "test_video_1.mp4")])
            with gr.Row():
                overall_score = gr.Label(label="Overall Score")
                video_sentiment_final = gr.Label(label="Video Sentiment Score")
        with gr.Column():
            frames_gallery = gr.Gallery(label="Video Frames", show_label=True, elem_id="gallery", columns=[3], rows=[1], object_fit="contain", height="auto")
    with gr.Accordion(label="JSON Detailed Responses", open=False):
        json_output = gr.Textbox(label="JSON Output", info="Overall scores of the above video in segments.", show_label=True, lines=5, show_copy_button=True, interactive=False)
        audio_sentiment = gr.Textbox(label="Audio Sentiments", info="Outputs of audio processing from the video.", show_label=True, lines=5, show_copy_button=True, interactive=False)
        video_sentiment_markdown = gr.Textbox(label="Video Sentiments", info="Outputs of video-frame processing from the video.", show_label=True, lines=5, show_copy_button=True, interactive=False)

    button.click(
        fn=video_to_audio,
        inputs=input_video,
        outputs=[json_output, frames_gallery, overall_score, audio_sentiment, video_sentiment_markdown, video_sentiment_final],
    )

Video.launch()