diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..371668e1ae533bf2675a1e94e6abb171baee9918 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Elena Ryumina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/README.md b/README.md index 8af9536ad8b9c8d55176603c3b0199fbddc2bdc4..ba963cb4c26d35ac7fe7df8639d7540cb26d4739 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,13 @@ --- -title: Gpu -emoji: 🐨 -colorFrom: red -colorTo: purple +title: Multi-Modal for Emotion and Sentiment Analysis (for GITEX 2024) +emoji: 😀😲😐😥🥴😱😡 +colorFrom: blue +colorTo: pink sdk: gradio -sdk_version: 5.0.2 +sdk_version: '4.24.0' app_file: app.py pinned: false -license: apache-2.0 -short_description: gpu +license: mit --- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..ee64095abb33af73a9219a0c1acd7635a55ec707 --- /dev/null +++ b/app.py @@ -0,0 +1,48 @@ +import gradio as gr +import torch +from tabs.FACS_analysis import create_facs_analysis_tab +from ui_components import CUSTOM_CSS, HEADER_HTML, DISCLAIMER_HTML +import spaces # Importing spaces to utilize Zero GPU + +# Initialize Zero GPU +if torch.cuda.is_available(): + zero = torch.Tensor([0]).cuda() + print(f"Initial device: {zero.device}") +else: + zero = torch.Tensor([0]) + print("CUDA is not available. 
Using CPU.") + +# Define the tab structure +TAB_STRUCTURE = [ + ("Visual Analysis", [ + ("FACS for Stress, Anxiety, Depression", create_facs_analysis_tab), + ]) +] + +# Decorate GPU-dependent function with Zero GPU +@spaces.GPU(duration=120) # Allocates GPU for 120 seconds when needed +def create_demo(): + if torch.cuda.is_available(): + print(f"Device inside create_demo: {zero.device}") + else: + print("CUDA is not available inside create_demo.") + + # Gradio blocks to create the interface + with gr.Blocks(css=CUSTOM_CSS) as demo: + gr.Markdown(HEADER_HTML) + with gr.Tabs(elem_classes=["main-tab"]): + for main_tab, sub_tabs in TAB_STRUCTURE: + with gr.Tab(main_tab): + with gr.Tabs(): + for sub_tab, create_fn in sub_tabs: + with gr.Tab(sub_tab): + create_fn() + gr.HTML(DISCLAIMER_HTML) + + return demo + +# Create the demo instance +demo = create_demo() + +if __name__ == "__main__": + demo.launch() diff --git a/app/__init__.py b/app/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/app/__pycache__/__init__.cpython-310.pyc b/app/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c15410a94c80b460bc1fc01a228fdbbdb827ba4 Binary files /dev/null and b/app/__pycache__/__init__.cpython-310.pyc differ diff --git a/app/__pycache__/__init__.cpython-312.pyc b/app/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f52faa26ade7317c137860171296602da7951f3e Binary files /dev/null and b/app/__pycache__/__init__.cpython-312.pyc differ diff --git a/app/__pycache__/__init__.cpython-38.pyc b/app/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55d756f30d8e47e499e4a900f5c37713c3e67481 Binary files /dev/null and b/app/__pycache__/__init__.cpython-38.pyc differ diff --git a/app/__pycache__/app_utils.cpython-310.pyc b/app/__pycache__/app_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3a198d42b4f53a19ef999edc7e70151dd0e6308 Binary files /dev/null and b/app/__pycache__/app_utils.cpython-310.pyc differ diff --git a/app/__pycache__/app_utils.cpython-312.pyc b/app/__pycache__/app_utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be10a56223cf874d2212e6f6961f1b7b6c06607f Binary files /dev/null and b/app/__pycache__/app_utils.cpython-312.pyc differ diff --git a/app/__pycache__/app_utils.cpython-38.pyc b/app/__pycache__/app_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f575ffaf5fb4802ec4af2a14d11c850f9cd47d9c Binary files /dev/null and b/app/__pycache__/app_utils.cpython-38.pyc differ diff --git a/app/__pycache__/authors.cpython-310.pyc b/app/__pycache__/authors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a35c86dc0507b7b5d120380557fb0c0077db44c Binary files /dev/null and b/app/__pycache__/authors.cpython-310.pyc differ diff --git a/app/__pycache__/authors.cpython-312.pyc b/app/__pycache__/authors.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbc33aeb5a1a65a582b44ccd83356c4ca5c92204 Binary files /dev/null and b/app/__pycache__/authors.cpython-312.pyc differ diff --git a/app/__pycache__/authors.cpython-38.pyc b/app/__pycache__/authors.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dae92066583a36e33bbe043cd290a172cdbcdd71 Binary files 
/dev/null and b/app/__pycache__/authors.cpython-38.pyc differ diff --git a/app/__pycache__/config.cpython-310.pyc b/app/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e6699289b74be5046d542cf98b681171c86000c Binary files /dev/null and b/app/__pycache__/config.cpython-310.pyc differ diff --git a/app/__pycache__/config.cpython-312.pyc b/app/__pycache__/config.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e919d893b0f581c7ae4595f1d03d5efc8e9b6e9 Binary files /dev/null and b/app/__pycache__/config.cpython-312.pyc differ diff --git a/app/__pycache__/config.cpython-38.pyc b/app/__pycache__/config.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff01378c8bbf844391970c090fbd42a334b79148 Binary files /dev/null and b/app/__pycache__/config.cpython-38.pyc differ diff --git a/app/__pycache__/description.cpython-310.pyc b/app/__pycache__/description.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9eeab94eeda9c46e814e5f4ec9a83a6ef859dfc Binary files /dev/null and b/app/__pycache__/description.cpython-310.pyc differ diff --git a/app/__pycache__/description.cpython-312.pyc b/app/__pycache__/description.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05e374d2edad30de42f8d89a72bb4af2e67dbe84 Binary files /dev/null and b/app/__pycache__/description.cpython-312.pyc differ diff --git a/app/__pycache__/description.cpython-38.pyc b/app/__pycache__/description.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4e8a705a29d3db014a2dd8fbf0622241d8d3f07 Binary files /dev/null and b/app/__pycache__/description.cpython-38.pyc differ diff --git a/app/__pycache__/face_utils.cpython-310.pyc b/app/__pycache__/face_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a689a1de75610ffbd6cc1e542e67e324b2daad22 Binary files /dev/null and b/app/__pycache__/face_utils.cpython-310.pyc differ diff --git a/app/__pycache__/face_utils.cpython-312.pyc b/app/__pycache__/face_utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b9f4aadd2b55a1a985fe5138bae5f89782a5d05 Binary files /dev/null and b/app/__pycache__/face_utils.cpython-312.pyc differ diff --git a/app/__pycache__/face_utils.cpython-38.pyc b/app/__pycache__/face_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d9562b5b623b81e26f3d18273e86c585e565e57 Binary files /dev/null and b/app/__pycache__/face_utils.cpython-38.pyc differ diff --git a/app/__pycache__/model.cpython-310.pyc b/app/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d2197e824a011b0f297f98e143a1773b49e3d24 Binary files /dev/null and b/app/__pycache__/model.cpython-310.pyc differ diff --git a/app/__pycache__/model.cpython-312.pyc b/app/__pycache__/model.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e68bec2da5870c4e66df3b9a7d65d7c5969faac0 Binary files /dev/null and b/app/__pycache__/model.cpython-312.pyc differ diff --git a/app/__pycache__/model.cpython-38.pyc b/app/__pycache__/model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5323a0982079af4b3f451986464af152dfce0960 Binary files /dev/null and b/app/__pycache__/model.cpython-38.pyc differ diff --git a/app/__pycache__/model_architectures.cpython-310.pyc 
b/app/__pycache__/model_architectures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a232c450d192cc4f090f125d38e28b756883262 Binary files /dev/null and b/app/__pycache__/model_architectures.cpython-310.pyc differ diff --git a/app/__pycache__/model_architectures.cpython-312.pyc b/app/__pycache__/model_architectures.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3bb410b777d4fad0f22e23f528d38e58ca86943 Binary files /dev/null and b/app/__pycache__/model_architectures.cpython-312.pyc differ diff --git a/app/__pycache__/model_architectures.cpython-38.pyc b/app/__pycache__/model_architectures.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17ba7433c2d14127bcc4a5418a72aeedf5d2eb07 Binary files /dev/null and b/app/__pycache__/model_architectures.cpython-38.pyc differ diff --git a/app/__pycache__/plot.cpython-310.pyc b/app/__pycache__/plot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7d5f56e284ccc256d027346ccdfd84f8e9ba187 Binary files /dev/null and b/app/__pycache__/plot.cpython-310.pyc differ diff --git a/app/__pycache__/plot.cpython-312.pyc b/app/__pycache__/plot.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13f8909c80a0246aaba2004c365319bf6b3114ae Binary files /dev/null and b/app/__pycache__/plot.cpython-312.pyc differ diff --git a/app/__pycache__/plot.cpython-38.pyc b/app/__pycache__/plot.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88f34c91d010ea26dd7f7db3011132455278d3e5 Binary files /dev/null and b/app/__pycache__/plot.cpython-38.pyc differ diff --git a/app/app_utils.py b/app/app_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bb5ae2177ff8ac7e4d917c8bed7410c7159915cd --- /dev/null +++ b/app/app_utils.py @@ -0,0 +1,333 @@ +import torch +import numpy as np +import mediapipe as mp +from PIL import Image +import cv2 +from pytorch_grad_cam.utils.image import show_cam_on_image +import matplotlib.pyplot as plt + +# Importing necessary components for the Gradio app +from app.model import pth_model_static, pth_model_dynamic, cam, pth_processing +from app.face_utils import get_box, display_info +from app.config import DICT_EMO, config_data +from app.plot import statistics_plot + +mp_face_mesh = mp.solutions.face_mesh + +def get_device(): + if torch.backends.mps.is_available(): + return torch.device("mps") + elif torch.cuda.is_available(): + return torch.device("cuda") + else: + return torch.device("cpu") + +device = get_device() +print(f"Using device: {device}") + +# Move models to the selected device +pth_model_static = pth_model_static.to(device) +pth_model_dynamic = pth_model_dynamic.to(device) + +def preprocess_image_and_predict(inp): + inp = np.array(inp) + + if inp is None: + return None, None, None + + try: + h, w = inp.shape[:2] + except Exception: + return None, None, None + + with mp_face_mesh.FaceMesh( + max_num_faces=1, + refine_landmarks=False, + min_detection_confidence=0.5, + min_tracking_confidence=0.5, + ) as face_mesh: + results = face_mesh.process(inp) + if results.multi_face_landmarks: + for fl in results.multi_face_landmarks: + startX, startY, endX, endY = get_box(fl, w, h) + cur_face = inp[startY:endY, startX:endX] + cur_face_n = pth_processing(Image.fromarray(cur_face)).to(device) + with torch.no_grad(): + prediction = ( + torch.nn.functional.softmax(pth_model_static(cur_face_n), dim=1) + .detach() + .cpu() + .numpy()[0] 
+ ) + confidences = {DICT_EMO[i]: float(prediction[i]) for i in range(7)} + grayscale_cam = cam(input_tensor=cur_face_n) + grayscale_cam = grayscale_cam[0, :] + cur_face_hm = cv2.resize(cur_face,(224,224)) + cur_face_hm = np.float32(cur_face_hm) / 255 + heatmap = show_cam_on_image(cur_face_hm, grayscale_cam, use_rgb=True) + + return cur_face, heatmap, confidences + +def preprocess_frame_and_predict_aus(frame): + if len(frame.shape) == 2: + frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB) + elif frame.shape[2] == 4: + frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB) + + with mp_face_mesh.FaceMesh( + max_num_faces=1, + refine_landmarks=False, + min_detection_confidence=0.5, + min_tracking_confidence=0.5 + ) as face_mesh: + results = face_mesh.process(frame) + + if results.multi_face_landmarks: + h, w = frame.shape[:2] + for fl in results.multi_face_landmarks: + startX, startY, endX, endY = get_box(fl, w, h) + cur_face = frame[startY:endY, startX:endX] + cur_face_n = pth_processing(Image.fromarray(cur_face)).to(device) + + with torch.no_grad(): + features = pth_model_static(cur_face_n) + au_intensities = features_to_au_intensities(features) + + grayscale_cam = cam(input_tensor=cur_face_n) + grayscale_cam = grayscale_cam[0, :] + cur_face_hm = cv2.resize(cur_face, (224, 224)) + cur_face_hm = np.float32(cur_face_hm) / 255 + heatmap = show_cam_on_image(cur_face_hm, grayscale_cam, use_rgb=True) + + return cur_face, au_intensities, heatmap + + return None, None, None + +def features_to_au_intensities(features): + features_np = features.detach().cpu().numpy()[0] + au_intensities = (features_np - features_np.min()) / (features_np.max() - features_np.min()) + return au_intensities[:24] # Assuming we want 24 AUs + +def preprocess_video_and_predict(video): + cap = cv2.VideoCapture(video) + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = np.round(cap.get(cv2.CAP_PROP_FPS)) + + path_save_video_face = 'result_face.mp4' + vid_writer_face = cv2.VideoWriter(path_save_video_face, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224)) + + path_save_video_hm = 'result_hm.mp4' + vid_writer_hm = cv2.VideoWriter(path_save_video_hm, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224)) + + lstm_features = [] + count_frame = 1 + count_face = 0 + probs = [] + frames = [] + au_intensities_list = [] + last_output = None + last_heatmap = None + last_au_intensities = None + cur_face = None + + with mp_face_mesh.FaceMesh( + max_num_faces=1, + refine_landmarks=False, + min_detection_confidence=0.5, + min_tracking_confidence=0.5) as face_mesh: + + while cap.isOpened(): + _, frame = cap.read() + if frame is None: break + + frame_copy = frame.copy() + frame_copy.flags.writeable = False + frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB) + results = face_mesh.process(frame_copy) + frame_copy.flags.writeable = True + + if results.multi_face_landmarks: + for fl in results.multi_face_landmarks: + startX, startY, endX, endY = get_box(fl, w, h) + cur_face = frame_copy[startY:endY, startX: endX] + + if count_face%config_data.FRAME_DOWNSAMPLING == 0: + cur_face_copy = pth_processing(Image.fromarray(cur_face)).to(device) + with torch.no_grad(): + features = torch.nn.functional.relu(pth_model_static.extract_features(cur_face_copy)).detach().cpu().numpy() + au_intensities = features_to_au_intensities(pth_model_static(cur_face_copy)) + + grayscale_cam = cam(input_tensor=cur_face_copy) + grayscale_cam = grayscale_cam[0, :] + cur_face_hm = cv2.resize(cur_face,(224,224), interpolation = 
cv2.INTER_AREA) + cur_face_hm = np.float32(cur_face_hm) / 255 + heatmap = show_cam_on_image(cur_face_hm, grayscale_cam, use_rgb=False) + last_heatmap = heatmap + last_au_intensities = au_intensities + + if len(lstm_features) == 0: + lstm_features = [features]*10 + else: + lstm_features = lstm_features[1:] + [features] + + lstm_f = torch.from_numpy(np.vstack(lstm_features)).to(device) + lstm_f = torch.unsqueeze(lstm_f, 0) + with torch.no_grad(): + output = pth_model_dynamic(lstm_f).detach().cpu().numpy() + last_output = output + + if count_face == 0: + count_face += 1 + + else: + if last_output is not None: + output = last_output + heatmap = last_heatmap + au_intensities = last_au_intensities + + elif last_output is None: + output = np.empty((1, 7)) + output[:] = np.nan + au_intensities = np.empty(24) + au_intensities[:] = np.nan + + probs.append(output[0]) + frames.append(count_frame) + au_intensities_list.append(au_intensities) + else: + if last_output is not None: + lstm_features = [] + empty = np.empty((7)) + empty[:] = np.nan + probs.append(empty) + frames.append(count_frame) + au_intensities_list.append(np.full(24, np.nan)) + + if cur_face is not None: + heatmap_f = display_info(heatmap, 'Frame: {}'.format(count_frame), box_scale=.3) + + cur_face = cv2.cvtColor(cur_face, cv2.COLOR_RGB2BGR) + cur_face = cv2.resize(cur_face, (224,224), interpolation = cv2.INTER_AREA) + cur_face = display_info(cur_face, 'Frame: {}'.format(count_frame), box_scale=.3) + vid_writer_face.write(cur_face) + vid_writer_hm.write(heatmap_f) + + count_frame += 1 + if count_face != 0: + count_face += 1 + + vid_writer_face.release() + vid_writer_hm.release() + + stat = statistics_plot(frames, probs) + au_stat = au_statistics_plot(frames, au_intensities_list) + + if not stat or not au_stat: + return None, None, None, None, None + + return video, path_save_video_face, path_save_video_hm, stat, au_stat + +# The rest of the functions remain the same +# ... 
+ +def au_statistics_plot(frames, au_intensities_list): + fig, ax = plt.subplots(figsize=(12, 6)) + au_intensities_array = np.array(au_intensities_list) + + for i in range(au_intensities_array.shape[1]): + ax.plot(frames, au_intensities_array[:, i], label=f'AU{i+1}') + + ax.set_xlabel('Frame') + ax.set_ylabel('AU Intensity') + ax.set_title('Action Unit Intensities Over Time') + ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left') + plt.tight_layout() + return fig + +def preprocess_video_and_predict_sleep_quality(video): + cap = cv2.VideoCapture(video) + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = np.round(cap.get(cv2.CAP_PROP_FPS)) + + path_save_video_original = 'result_original.mp4' + path_save_video_face = 'result_face.mp4' + path_save_video_sleep = 'result_sleep.mp4' + + vid_writer_original = cv2.VideoWriter(path_save_video_original, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer_face = cv2.VideoWriter(path_save_video_face, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224)) + vid_writer_sleep = cv2.VideoWriter(path_save_video_sleep, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224)) + + frames = [] + sleep_quality_scores = [] + eye_bags_images = [] + + with mp_face_mesh.FaceMesh( + max_num_faces=1, + refine_landmarks=False, + min_detection_confidence=0.5, + min_tracking_confidence=0.5) as face_mesh: + + while cap.isOpened(): + ret, frame = cap.read() + if not ret: + break + + frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + results = face_mesh.process(frame_rgb) + + if results.multi_face_landmarks: + for fl in results.multi_face_landmarks: + startX, startY, endX, endY = get_box(fl, w, h) + cur_face = frame_rgb[startY:endY, startX:endX] + + sleep_quality_score, eye_bags_image = analyze_sleep_quality(cur_face) + sleep_quality_scores.append(sleep_quality_score) + eye_bags_images.append(cv2.resize(eye_bags_image, (224, 224))) + + sleep_quality_viz = create_sleep_quality_visualization(cur_face, sleep_quality_score) + + cur_face = cv2.resize(cur_face, (224, 224)) + + vid_writer_face.write(cv2.cvtColor(cur_face, cv2.COLOR_RGB2BGR)) + vid_writer_sleep.write(sleep_quality_viz) + + vid_writer_original.write(frame) + frames.append(len(frames) + 1) + + cap.release() + vid_writer_original.release() + vid_writer_face.release() + vid_writer_sleep.release() + + sleep_stat = sleep_quality_statistics_plot(frames, sleep_quality_scores) + + if eye_bags_images: + average_eye_bags_image = np.mean(np.array(eye_bags_images), axis=0).astype(np.uint8) + else: + average_eye_bags_image = np.zeros((224, 224, 3), dtype=np.uint8) + + return (path_save_video_original, path_save_video_face, path_save_video_sleep, + average_eye_bags_image, sleep_stat) + +def analyze_sleep_quality(face_image): + # Placeholder function - implement your sleep quality analysis here + sleep_quality_score = np.random.random() + eye_bags_image = cv2.resize(face_image, (224, 224)) + return sleep_quality_score, eye_bags_image + +def create_sleep_quality_visualization(face_image, sleep_quality_score): + viz = face_image.copy() + cv2.putText(viz, f"Sleep Quality: {sleep_quality_score:.2f}", (10, 30), + cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2) + return cv2.cvtColor(viz, cv2.COLOR_RGB2BGR) + +def sleep_quality_statistics_plot(frames, sleep_quality_scores): + # Placeholder function - implement your statistics plotting here + fig, ax = plt.subplots() + ax.plot(frames, sleep_quality_scores) + ax.set_xlabel('Frame') + ax.set_ylabel('Sleep Quality Score') + ax.set_title('Sleep 
Quality Over Time') + return fig \ No newline at end of file diff --git a/app/face_utils.py b/app/face_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d58d9b8efe044b667a822240d3ca342f652f08f4 --- /dev/null +++ b/app/face_utils.py @@ -0,0 +1,68 @@ +""" +File: face_utils.py +Author: Elena Ryumina and Dmitry Ryumin +Description: This module contains utility functions related to facial landmarks and image processing. +License: MIT License +""" + +import numpy as np +import math +import cv2 + + +def norm_coordinates(normalized_x, normalized_y, image_width, image_height): + x_px = min(math.floor(normalized_x * image_width), image_width - 1) + y_px = min(math.floor(normalized_y * image_height), image_height - 1) + return x_px, y_px + + +def get_box(fl, w, h): + idx_to_coors = {} + for idx, landmark in enumerate(fl.landmark): + landmark_px = norm_coordinates(landmark.x, landmark.y, w, h) + if landmark_px: + idx_to_coors[idx] = landmark_px + + x_min = np.min(np.asarray(list(idx_to_coors.values()))[:, 0]) + y_min = np.min(np.asarray(list(idx_to_coors.values()))[:, 1]) + endX = np.max(np.asarray(list(idx_to_coors.values()))[:, 0]) + endY = np.max(np.asarray(list(idx_to_coors.values()))[:, 1]) + + (startX, startY) = (max(0, x_min), max(0, y_min)) + (endX, endY) = (min(w - 1, endX), min(h - 1, endY)) + + return startX, startY, endX, endY + +def display_info(img, text, margin=1.0, box_scale=1.0): + img_copy = img.copy() + img_h, img_w, _ = img_copy.shape + line_width = int(min(img_h, img_w) * 0.001) + thickness = max(int(line_width / 3), 1) + + font_face = cv2.FONT_HERSHEY_SIMPLEX + font_color = (0, 0, 0) + font_scale = thickness / 1.5 + + t_w, t_h = cv2.getTextSize(text, font_face, font_scale, None)[0] + + margin_n = int(t_h * margin) + sub_img = img_copy[0 + margin_n: 0 + margin_n + t_h + int(2 * t_h * box_scale), + img_w - t_w - margin_n - int(2 * t_h * box_scale): img_w - margin_n] + + white_rect = np.ones(sub_img.shape, dtype=np.uint8) * 255 + + img_copy[0 + margin_n: 0 + margin_n + t_h + int(2 * t_h * box_scale), + img_w - t_w - margin_n - int(2 * t_h * box_scale):img_w - margin_n] = cv2.addWeighted(sub_img, 0.5, white_rect, .5, 1.0) + + cv2.putText(img=img_copy, + text=text, + org=(img_w - t_w - margin_n - int(2 * t_h * box_scale) // 2, + 0 + margin_n + t_h + int(2 * t_h * box_scale) // 2), + fontFace=font_face, + fontScale=font_scale, + color=font_color, + thickness=thickness, + lineType=cv2.LINE_AA, + bottomLeftOrigin=False) + + return img_copy diff --git a/app/model.py b/app/model.py new file mode 100644 index 0000000000000000000000000000000000000000..97a733f4cef19572da14b21c66eb343d8de06839 --- /dev/null +++ b/app/model.py @@ -0,0 +1,78 @@ +import os +import torch +import torch.nn as nn +import torchvision.transforms as transforms +from pytorch_grad_cam import GradCAM +from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget +import logging +from app.model_architectures import ResNet50, LSTMPyTorch + +# Set up logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Determine the device +device = torch.device('mps' if torch.backends.mps.is_available() else 'cuda' if torch.cuda.is_available() else 'cpu') +logger.info(f"Using device: {device}") + +# Define paths +STATIC_MODEL_PATH = 'assets/models/FER_static_ResNet50_AffectNet.pt' +DYNAMIC_MODEL_PATH = 'assets/models/FER_dynamic_LSTM.pt' + +def load_model(model_class, model_path, *args, **kwargs): + model = model_class(*args, **kwargs).to(device) + if 
os.path.exists(model_path): + try: + model.load_state_dict(torch.load(model_path, map_location=device)) + model.eval() + logger.info(f"Model loaded successfully from {model_path}") + except Exception as e: + logger.error(f"Error loading model from {model_path}: {str(e)}") + logger.info("Initializing with random weights.") + else: + logger.warning(f"Model file not found at {model_path}. Initializing with random weights.") + return model + +# Load the static model +pth_model_static = load_model(ResNet50, STATIC_MODEL_PATH, num_classes=7, channels=3) + +# Load the dynamic model +pth_model_dynamic = load_model(LSTMPyTorch, DYNAMIC_MODEL_PATH, input_size=2048, hidden_size=256, num_layers=2, num_classes=7) + +# Set up GradCAM +target_layers = [pth_model_static.resnet.layer4[-1]] +cam = GradCAM(model=pth_model_static, target_layers=target_layers) + +# Define image preprocessing +pth_transform = transforms.Compose([ + transforms.Resize((224, 224)), + transforms.ToTensor(), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), +]) + +def pth_processing(img): + img = pth_transform(img).unsqueeze(0).to(device) + return img + +def predict_emotion(img): + with torch.no_grad(): + output = pth_model_static(pth_processing(img)) + _, predicted = torch.max(output, 1) + return predicted.item() + +def get_emotion_probabilities(img): + with torch.no_grad(): + output = nn.functional.softmax(pth_model_static(pth_processing(img)), dim=1) + return output.squeeze().cpu().numpy() + +def generate_cam(img): + input_tensor = pth_processing(img) + targets = [ClassifierOutputTarget(predict_emotion(img))] + grayscale_cam = cam(input_tensor=input_tensor, targets=targets) + return grayscale_cam[0, :] + +# Add any other necessary functions or variables here + +if __name__ == "__main__": + logger.info("Model initialization complete.") + # You can add some test code here to verify everything is working correctly \ No newline at end of file diff --git a/app/model_architectures.py b/app/model_architectures.py new file mode 100644 index 0000000000000000000000000000000000000000..259e12d205ecb2bacf8ca7ace8d098c37a23bfb0 --- /dev/null +++ b/app/model_architectures.py @@ -0,0 +1,46 @@ +import torch +import torch.nn as nn +import torchvision.models as models + +class ResNet50(nn.Module): + def __init__(self, num_classes=7, channels=3): + super(ResNet50, self).__init__() + self.resnet = models.resnet50(pretrained=True) + # Modify the first convolutional layer if channels != 3 + if channels != 3: + self.resnet.conv1 = nn.Conv2d(channels, 64, kernel_size=7, stride=2, padding=3, bias=False) + num_features = self.resnet.fc.in_features + self.resnet.fc = nn.Linear(num_features, num_classes) + + def forward(self, x): + return self.resnet(x) + + def extract_features(self, x): + x = self.resnet.conv1(x) + x = self.resnet.bn1(x) + x = self.resnet.relu(x) + x = self.resnet.maxpool(x) + + x = self.resnet.layer1(x) + x = self.resnet.layer2(x) + x = self.resnet.layer3(x) + x = self.resnet.layer4(x) + + x = self.resnet.avgpool(x) + x = torch.flatten(x, 1) + return x + +class LSTMPyTorch(nn.Module): + def __init__(self, input_size, hidden_size, num_layers, num_classes): + super(LSTMPyTorch, self).__init__() + self.hidden_size = hidden_size + self.num_layers = num_layers + self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True) + self.fc = nn.Linear(hidden_size, num_classes) + + def forward(self, x): + h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device) + c0 = 
torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device) + out, _ = self.lstm(x, (h0, c0)) + out = self.fc(out[:, -1, :]) + return out \ No newline at end of file diff --git a/assets/.DS_Store b/assets/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5d027663072f40481ff932c316219f52d467cfcf Binary files /dev/null and b/assets/.DS_Store differ diff --git a/assets/audio/fitness.wav b/assets/audio/fitness.wav new file mode 100644 index 0000000000000000000000000000000000000000..949b02efd9aa877e19b79a1d29c7057c45b1b5af Binary files /dev/null and b/assets/audio/fitness.wav differ diff --git a/assets/images/dyaglogo.webp b/assets/images/dyaglogo.webp new file mode 100644 index 0000000000000000000000000000000000000000..9daa94dd2c5253ef60dbbb8578111e71d8dc66c3 Binary files /dev/null and b/assets/images/dyaglogo.webp differ diff --git a/assets/images/fitness.jpg b/assets/images/fitness.jpg new file mode 100644 index 0000000000000000000000000000000000000000..27d90926b96d64d57d99fab4cc5b8700d0591fec Binary files /dev/null and b/assets/images/fitness.jpg differ diff --git a/assets/resources/README.md b/assets/resources/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ba181d6e11261ff91fea76ae62797bb9793d5f7a --- /dev/null +++ b/assets/resources/README.md @@ -0,0 +1,7 @@ +https://huggingface.co/ElenaRyumina/face_emotion_recognition/tree/main + +https://huggingface.co/ElenaRyumina/face_emotion_recognition/resolve/main/FER_static_ResNet50_AffectNet.pt + +https://huggingface.co/public-data/dlib_face_landmark_model/tree/main + +wget https://huggingface.co/public-data/dlib_face_landmark_model/resolve/main/shape_predictor_68_face_landmarks.dat diff --git a/assets/videos/fitness.mp4 b/assets/videos/fitness.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..e29086ac2ed29ba8142200e0a7fe483df2b65a4b Binary files /dev/null and b/assets/videos/fitness.mp4 differ diff --git a/config.toml b/config.toml new file mode 100644 index 0000000000000000000000000000000000000000..1c849e674c507de82cf616188f06404daa777be2 --- /dev/null +++ b/config.toml @@ -0,0 +1,10 @@ +APP_VERSION = "0.2.0" +FRAME_DOWNSAMPLING = 5 + +[model_static] +url = "https://huggingface.co/ElenaRyumina/face_emotion_recognition/resolve/main/FER_static_ResNet50_AffectNet.pt" +path = "assets/models/FER_static_ResNet50_AffectNet.pt" + +[model_dynamic] +url = "https://huggingface.co/ElenaRyumina/face_emotion_recognition/resolve/main/FER_dinamic_LSTM_IEMOCAP.pt" +path = "assets/models/FER_dinamic_LSTM_IEMOCAP.pt" diff --git a/css/app.css b/css/app.css new file mode 100644 index 0000000000000000000000000000000000000000..22ad2533d59094eb0b1079409811100c93bd8d40 --- /dev/null +++ b/css/app.css @@ -0,0 +1,101 @@ +div.app-flex-container { + display: flex; + align-items: left; +} + +div.app-flex-container > a { + margin-left: 6px; +} + +div.dl1 div.upload-container { + height: 350px; + max-height: 350px; +} + +div.dl2 { + max-height: 200px; +} + +div.dl2 img { + max-height: 200px; +} + +div.dl5 { + max-height: 200px; +} + +div.dl5 img { + max-height: 200px; +} + +div.video1 div.video-container { + height: 500px; +} + +div.video2 { + height: 200px; +} + +div.video3 { + height: 200px; +} + +div.video4 { + height: 200px; +} + +div.stat { + height: 286px; +} + +div.settings-wrapper { + display: none; +} + +.submit { + display: inline-block; + padding: 10px 20px; + font-size: 16px; + font-weight: bold; + text-align: center; + text-decoration: none; + cursor: pointer; + border: 
var(--button-border-width) solid var(--button-primary-border-color); + background: var(--button-primary-background-fill); + color: var(--button-primary-text-color); + border-radius: 8px; + transition: all 0.3s ease; +} + +.submit[disabled] { + cursor: not-allowed; + opacity: 0.6; +} + +.submit:hover:not([disabled]) { + border-color: var(--button-primary-border-color-hover); + background: var(--button-primary-background-fill-hover); + color: var(--button-primary-text-color-hover); +} + +.clear { + display: inline-block; + padding: 10px 20px; + font-size: 16px; + font-weight: bold; + text-align: center; + text-decoration: none; + cursor: pointer; + border-radius: 8px; + transition: all 0.3s ease; +} + +.clear[disabled] { + cursor: not-allowed; + opacity: 0.6; +} + +.submit:active:not([disabled]), +.clear:active:not([disabled]) { + transform: scale(0.98); +} diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..2672738b5ed0a6f0e2599a7187663f31cb264fa6 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,50 @@ +# CUDA-enabled PyTorch packages +torch==2.0.1+cu118 +torchvision==0.15.2+cu118 +torchaudio==2.0.2+cu118 +-f https://download.pytorch.org/whl/torch_stable.html + +# Core dependencies +gradio==4.38.1 +gradio_client==1.1.0 + +# Additional dependencies +absl-py==2.1.0 +aiofiles==23.2.1 +altair==5.3.0 +anyio==4.4.0 +attrs==23.2.0 +audioread==3.0.1 +certifi==2024.7.4 +charset-normalizer==3.3.2 +click==8.1.7 +decorator==4.4.2 +fastapi==0.111.1 +h5py==3.11.0 +huggingface-hub==0.23.5 +idna==3.7 +Jinja2==3.1.4 +joblib==1.4.2 +jsonschema==4.23.0 +kiwisolver==1.4.5 +librosa==0.10.2.post1 +MarkupSafe==2.1.5 +matplotlib==3.9.1 +numpy==1.26.4 +pandas==2.2.2 +Pillow==10.4.0 +pydantic==2.8.2 +python-multipart==0.0.9 +pytz==2024.1 +PyYAML==6.0.1 +requests==2.32.3 +scikit-learn==1.5.1 +scipy==1.14.0 +soundfile==0.12.1 +starlette==0.37.2 +tqdm==4.66.4 +transformers==4.42.4 +uvicorn==0.30.1 + +# Any other necessary dependencies +# Add your additional dependencies here diff --git a/tabs/.DS_Store b/tabs/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..e687a95706c131efef2994c4cb951d03becdaaa8 Binary files /dev/null and b/tabs/.DS_Store differ diff --git a/tabs/FACS_analysis.py b/tabs/FACS_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..cb7a19a712e2499626ae25a35e463e4348cddf55 --- /dev/null +++ b/tabs/FACS_analysis.py @@ -0,0 +1,101 @@ +import gradio as gr +import cv2 +import numpy as np +import matplotlib.pyplot as plt +from app.app_utils import preprocess_frame_and_predict_aus + +# Define the AUs associated with stress, anxiety, and depression +STRESS_AUS = [4, 7, 17, 23, 24] +ANXIETY_AUS = [1, 2, 4, 5, 20] +DEPRESSION_AUS = [1, 4, 15, 17] + +AU_DESCRIPTIONS = { + 1: "Inner Brow Raiser", + 2: "Outer Brow Raiser", + 4: "Brow Lowerer", + 5: "Upper Lid Raiser", + 7: "Lid Tightener", + 15: "Lip Corner Depressor", + 17: "Chin Raiser", + 20: "Lip Stretcher", + 23: "Lip Tightener", + 24: "Lip Pressor" +} + +def normalize_score(score): + return max(0, min(1, (score + 1.5) / 3)) # Adjust the range as needed + +def process_video_for_facs(video_path): + cap = cv2.VideoCapture(video_path) + frames = [] + au_intensities_list = [] + + while True: + ret, frame = cap.read() + if not ret: + break + + processed_frame, au_intensities, _ = preprocess_frame_and_predict_aus(frame) + + if processed_frame is not None and au_intensities is not None: + frames.append(processed_frame) + 
au_intensities_list.append(au_intensities) + + cap.release() + + if not frames: + return None, None + + # Calculate average AU intensities + avg_au_intensities = np.mean(au_intensities_list, axis=0) + + # Calculate and normalize emotional state scores + stress_score = normalize_score(np.mean([avg_au_intensities[au-1] for au in STRESS_AUS if au <= len(avg_au_intensities)])) + anxiety_score = normalize_score(np.mean([avg_au_intensities[au-1] for au in ANXIETY_AUS if au <= len(avg_au_intensities)])) + depression_score = normalize_score(np.mean([avg_au_intensities[au-1] for au in DEPRESSION_AUS if au <= len(avg_au_intensities)])) + + fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10)) + + # Emotional state scores + states = ['Stress', 'Anxiety', 'Depression'] + scores = [stress_score, anxiety_score, depression_score] + bars = ax1.bar(states, scores) + ax1.set_ylim(0, 1) + ax1.set_title('Emotional State Scores') + for bar in bars: + height = bar.get_height() + ax1.text(bar.get_x() + bar.get_width()/2., height, + f'{height:.2f}', ha='center', va='bottom') + + # AU intensities + all_aus = sorted(set(STRESS_AUS + ANXIETY_AUS + DEPRESSION_AUS)) + all_aus = [au for au in all_aus if au <= len(avg_au_intensities)] + au_labels = [f"AU{au}\n{AU_DESCRIPTIONS.get(au, '')}" for au in all_aus] + au_values = [avg_au_intensities[au-1] for au in all_aus] + ax2.bar(range(len(au_labels)), au_values) + ax2.set_xticks(range(len(au_labels))) + ax2.set_xticklabels(au_labels, rotation=45, ha='right') + ax2.set_ylim(0, 1) + ax2.set_title('Average Action Unit Intensities') + + plt.tight_layout() + + return frames[-1], fig # Return the last processed frame and the plot + +def create_facs_analysis_tab(): + with gr.Row(): + with gr.Column(scale=1): + input_video = gr.Video() + gr.Examples(["./assets/videos/fitness.mp4"], inputs=[input_video]) + with gr.Column(scale=2): + output_image = gr.Image(label="Processed Frame") + facs_chart = gr.Plot(label="FACS Analysis for SAD") + + # Automatically trigger the analysis when a video is uploaded + input_video.change( + fn=process_video_for_facs, + inputs=[input_video], + outputs=[output_image, facs_chart] + ) + + return input_video, output_image, facs_chart diff --git a/tabs/__pycache__/FACS_analysis.cpython-310.pyc b/tabs/__pycache__/FACS_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57d7c223bec1344a252fb0a87e76888c5d6498ec Binary files /dev/null and b/tabs/__pycache__/FACS_analysis.cpython-310.pyc differ diff --git a/tabs/__pycache__/audio_emotion_recognition.cpython-310.pyc b/tabs/__pycache__/audio_emotion_recognition.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c31ef926ad47e44ffe476c2b1dc98aabb3fa8d2 Binary files /dev/null and b/tabs/__pycache__/audio_emotion_recognition.cpython-310.pyc differ diff --git a/tabs/__pycache__/blink_detection.cpython-310.pyc b/tabs/__pycache__/blink_detection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfeca50b3326c7d8eb0563c6351686a1d2a1ea12 Binary files /dev/null and b/tabs/__pycache__/blink_detection.cpython-310.pyc differ diff --git a/tabs/__pycache__/body_movement_analysis.cpython-310.pyc b/tabs/__pycache__/body_movement_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6c5f3f95018760a6069b2cf55ecc274c5c62476 Binary files /dev/null and b/tabs/__pycache__/body_movement_analysis.cpython-310.pyc differ diff --git a/tabs/__pycache__/breathing_pattern.cpython-310.pyc 
b/tabs/__pycache__/breathing_pattern.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0703352d30bc2420e77c96cec45b3f4ae78aebbe Binary files /dev/null and b/tabs/__pycache__/breathing_pattern.cpython-310.pyc differ diff --git a/tabs/__pycache__/emotion_analysis.cpython-310.pyc b/tabs/__pycache__/emotion_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5199982b96d590304ce1e0daf68685de350da1d Binary files /dev/null and b/tabs/__pycache__/emotion_analysis.cpython-310.pyc differ diff --git a/tabs/__pycache__/emotion_recognition.cpython-310.pyc b/tabs/__pycache__/emotion_recognition.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16c7feca8bc5df7a1d94c04b653b74d21c14fa1b Binary files /dev/null and b/tabs/__pycache__/emotion_recognition.cpython-310.pyc differ diff --git a/tabs/__pycache__/face_expressions.cpython-310.pyc b/tabs/__pycache__/face_expressions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2b6fdd5362ce88699808b4827a1cb4c3f163325 Binary files /dev/null and b/tabs/__pycache__/face_expressions.cpython-310.pyc differ diff --git a/tabs/__pycache__/facs_analysis_sad.cpython-310.pyc b/tabs/__pycache__/facs_analysis_sad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..533c954bbcfe714084e76492e0b6e83afce502df Binary files /dev/null and b/tabs/__pycache__/facs_analysis_sad.cpython-310.pyc differ diff --git a/tabs/__pycache__/gaze_estimation.cpython-310.pyc b/tabs/__pycache__/gaze_estimation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bc60dfc041a51f9c77d0761104396101ed25e68 Binary files /dev/null and b/tabs/__pycache__/gaze_estimation.cpython-310.pyc differ diff --git a/tabs/__pycache__/head_posture_detection.cpython-310.pyc b/tabs/__pycache__/head_posture_detection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86856848db17d8d916c2489b99d4c9b59d429e25 Binary files /dev/null and b/tabs/__pycache__/head_posture_detection.cpython-310.pyc differ diff --git a/tabs/__pycache__/heart_rate_variability.cpython-310.pyc b/tabs/__pycache__/heart_rate_variability.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3be61f41592015258ca1ee87d7805607420809e Binary files /dev/null and b/tabs/__pycache__/heart_rate_variability.cpython-310.pyc differ diff --git a/tabs/__pycache__/onxxchatbot.cpython-310.pyc b/tabs/__pycache__/onxxchatbot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b8f2df6ba912ab2a9ac733183b5551e65d2e746 Binary files /dev/null and b/tabs/__pycache__/onxxchatbot.cpython-310.pyc differ diff --git a/tabs/__pycache__/posture_analysis.cpython-310.pyc b/tabs/__pycache__/posture_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27753a80727237d0b5b6e378943b488273c72918 Binary files /dev/null and b/tabs/__pycache__/posture_analysis.cpython-310.pyc differ diff --git a/tabs/__pycache__/roberta_chatbot.cpython-310.pyc b/tabs/__pycache__/roberta_chatbot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9844e8430805cbb55309833f7897385d3d196e68 Binary files /dev/null and b/tabs/__pycache__/roberta_chatbot.cpython-310.pyc differ diff --git a/tabs/__pycache__/sentiment_analysis.cpython-310.pyc b/tabs/__pycache__/sentiment_analysis.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..3e2a73060a8c2d01ccdd0d67969b9c39ac86ee7f Binary files /dev/null and b/tabs/__pycache__/sentiment_analysis.cpython-310.pyc differ diff --git a/tabs/__pycache__/sentiment_emotion_analysis.cpython-310.pyc b/tabs/__pycache__/sentiment_emotion_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7a0871049d1e129647374047a4a3bcc4d192b76 Binary files /dev/null and b/tabs/__pycache__/sentiment_emotion_analysis.cpython-310.pyc differ diff --git a/tabs/__pycache__/skin_analysis.cpython-310.pyc b/tabs/__pycache__/skin_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3776e8f75c256cc1b9ac51ca708fe135c6610e4 Binary files /dev/null and b/tabs/__pycache__/skin_analysis.cpython-310.pyc differ diff --git a/tabs/__pycache__/skin_conductance.cpython-310.pyc b/tabs/__pycache__/skin_conductance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ebef426bb0a67fb62e46758a4d57c820ca1ffb8 Binary files /dev/null and b/tabs/__pycache__/skin_conductance.cpython-310.pyc differ diff --git a/tabs/__pycache__/sleep_quality.cpython-310.pyc b/tabs/__pycache__/sleep_quality.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39d7195a88ff4fd101dfedb45ff4213b9c298cb4 Binary files /dev/null and b/tabs/__pycache__/sleep_quality.cpython-310.pyc differ diff --git a/tabs/__pycache__/speech_emotion_recognition.cpython-310.pyc b/tabs/__pycache__/speech_emotion_recognition.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ca5e660b8f20b7ff3b0b590c52c9ede9584fdbb Binary files /dev/null and b/tabs/__pycache__/speech_emotion_recognition.cpython-310.pyc differ diff --git a/tabs/__pycache__/speech_stress_analysis.cpython-310.pyc b/tabs/__pycache__/speech_stress_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2226eac401d8b07f6c30cfc35fa1379f80f310c6 Binary files /dev/null and b/tabs/__pycache__/speech_stress_analysis.cpython-310.pyc differ diff --git a/tabs/__pycache__/text_sentiment_emotion_analysis.cpython-310.pyc b/tabs/__pycache__/text_sentiment_emotion_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5c3901fa27e664cde61955cbc7623568aa14e12 Binary files /dev/null and b/tabs/__pycache__/text_sentiment_emotion_analysis.cpython-310.pyc differ diff --git a/tabs/__pycache__/voice_stress_analysis.cpython-310.pyc b/tabs/__pycache__/voice_stress_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..198196f2a719b7c72e00c6f095c408208a9ca1c4 Binary files /dev/null and b/tabs/__pycache__/voice_stress_analysis.cpython-310.pyc differ diff --git a/ui_components.py b/ui_components.py new file mode 100644 index 0000000000000000000000000000000000000000..01d05f155c36f5f80ca592f6eef9c39da0109d14 --- /dev/null +++ b/ui_components.py @@ -0,0 +1,58 @@ +# ui_components.py + +CUSTOM_CSS = """ +.main-tab > .tab-nav > button { + font-size: 20px; + font-weight: bold; +} +""" + +HEADER_HTML = """ +
+<div class="app-flex-container">
+    <img src="assets/images/dyaglogo.webp" alt="Dyagnosys Logo">
+    <h1>Multi-Modal for Emotion and Sentiment Analysis (GITEX)</h1>
+    <a href="#disclaimer">Important Disclaimer</a>
+</div>
+"""
+
+DISCLAIMER_HTML = '''
+<div id="disclaimer">
+    <h2>IMPORTANT DISCLAIMER</h2>
+
+    <h3>Not a Medical Device</h3>
+    <p>This software is not intended to be a medical device as defined by the FDA, EMA, or other regulatory bodies.
+    It is not designed, intended, or authorized for use in the diagnosis of disease or other conditions, or in the
+    cure, mitigation, treatment, or prevention of disease.</p>
+
+    <h3>Research and Educational Use Only</h3>
+    <p>This software is provided solely for research, educational, and informational purposes. It should not be
+    relied upon for medical advice, diagnosis, or treatment.</p>
+
+    <h3>No Substitute for Professional Medical Advice</h3>
+    <p>The information provided by this software is not a substitute for professional medical advice, diagnosis,
+    or treatment. Always seek the advice of your physician or other qualified health provider with any questions
+    you may have regarding a medical condition.</p>
+
+    <h3>Data Privacy and Security</h3>
+    <p>While we implement reasonable data protection measures, users should be aware of the inherent risks of
+    transmitting information over the internet. By using this software, you acknowledge and accept these risks.</p>
+
+    <h3>No Warranty</h3>
+    <p>This software is provided "as is" without warranty of any kind, either expressed or implied, including, but
+    not limited to, the implied warranties of merchantability and fitness for a particular purpose.</p>
+
+    <h3>Limitation of Liability</h3>
+    <p>In no event shall the creators, copyright holders, or contributors be liable for any direct, indirect,
+    incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of
+    substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on
+    any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise)
+    arising in any way out of the use of this software, even if advised of the possibility of such damage.</p>
+
+    <p>By using this software, you acknowledge that you have read, understood, and agree to be bound by this
+    disclaimer.</p>
+</div>
+'''
\ No newline at end of file
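
For reference, the sketch below is one possible local smoke test of the new FACS tab; it is an assumption and not part of the diff. It presumes the repository root is the working directory, the packages in requirements.txt are installed, and the model weights referenced in config.toml have been downloaded to assets/models/. Only `process_video_for_facs`, `create_facs_analysis_tab`, and the bundled example clip come from the files added above.

```python
# Hypothetical local smoke test for the FACS analysis tab (not part of the diff).
# Assumes: repo root as CWD, requirements.txt installed, and the weights from
# config.toml already downloaded to assets/models/.
import gradio as gr
from tabs.FACS_analysis import create_facs_analysis_tab, process_video_for_facs

# Run the analysis headless on the bundled example clip.
last_frame, fig = process_video_for_facs("./assets/videos/fitness.mp4")
if fig is not None:
    fig.savefig("facs_analysis.png")  # emotional-state scores + average AU intensities
    print("Saved FACS summary plot; last frame shape:", last_frame.shape)
else:
    print("No face detected in the example video.")

# Alternatively, mount just this tab in a bare Gradio app instead of the full app.py.
with gr.Blocks() as demo:
    create_facs_analysis_tab()

if __name__ == "__main__":
    demo.launch()
```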