import os

import gradio as gr
import mediapipe as mp
from mediapipe.tasks.python import vision

# Mapping from the model's English category names to Arabic characters.
# The empty-string key covers the blank category the model emits when
# no character is recognized confidently.
eng_ara_mapping = {
    "": "not clear",
    "ain": "ع",
    "aleff": "أ",
    "bb": "ب",
    "dal": "د",
    "dha": "ظ",
    "dhad": "ض",
    "fa": "ف",
    "gaaf": "ق",
    "ghain": "غ",
    "ha": "ه",
    "haa": "ح",
    "jeem": "ج",
    "kaaf": "ك",
    "laam": "ل",
    "meem": "م",
    "nun": "ن",
    "ra": "ر",
    "saad": "ص",
    "seen": "س",
    "sheen": "ش",
    "ta": "ت",
    "taa": "ط",
    "thaa": "ث",
    "thal": "ذ",
    "waw": "و",
    "ya": "ي",
    "zay": "ز",
    "khaa": "خ",
}

# Load the gesture recognition model once at startup rather than on every request.
model_path = os.path.abspath("arabic_signlanguage_characters_model.task")
recognizer = vision.GestureRecognizer.create_from_model_path(model_path)


def recognize_gesture(image):
    # Gradio passes the uploaded image as an RGB NumPy array, which is
    # exactly what MediaPipe's SRGB image format expects, so it can be
    # wrapped directly without any color conversion.
    mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)

    # Perform gesture recognition on the single image.
    recognition_result = recognizer.recognize(mp_image)

    # No hand/gesture detected at all.
    if not recognition_result.gestures:
        return {"not clear": 1.0}

    # Map each candidate gesture for the first detected hand to its Arabic
    # label and confidence score; gr.Label expects a {label: confidence} dict.
    return {
        eng_ara_mapping.get(gesture.category_name, gesture.category_name): float(gesture.score)
        for gesture in recognition_result.gestures[0]
    }


iface = gr.Interface(
    fn=recognize_gesture,
    inputs=gr.Image(sources=["upload", "clipboard"]),  # Input type: image
    outputs=gr.Label(num_top_classes=4),               # Output type: label
    title="Arabic Sign Language Character Recognition",
    description="Upload an image to recognize the character",
)

iface.launch(share=True)  # Launch the interface; share=True also serves a public URL
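
# A quick offline sanity check (a sketch): the image path "test_sign.jpg" is a
# hypothetical placeholder; point it at any local test photo. Run this instead
# of (or before) iface.launch(), since launch() blocks the thread. cv2 loads
# images as BGR, so convert to RGB first to match what Gradio would pass in.
#
#   import cv2
#   frame = cv2.imread("test_sign.jpg")
#   frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#   print(recognize_gesture(frame))  # e.g. {'ع': 0.93}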