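"""Gradio app for Arabic Sign Language character recognition.

Uses a MediaPipe GestureRecognizer task model to classify a hand-sign
image and maps the predicted English transliteration label to the
corresponding Arabic letter.
"""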
import os

import gradio as gr
import mediapipe as mp
from mediapipe.tasks.python import vision
# Maps the model's English transliteration labels to Arabic characters;
# an empty label means the gesture could not be classified
eng_ara_mapping = {
    "": "not clear",
    "ain": "ع",
    "aleff": "أ",
    "bb": "ب",
    "dal": "د",
    "dha": "ظ",
    "dhad": "ض",
    "fa": "ف",
    "gaaf": "ق",
    "ghain": "غ",
    "ha": "ه",
    "haa": "ح",
    "jeem": "ج",
    "kaaf": "ك",
    "laam": "ل",
    "meem": "م",
    "nun": "ن",
    "ra": "ر",
    "saad": "ص",
    "seen": "س",
    "sheen": "ش",
    "ta": "ت",
    "taa": "ط",
    "thaa": "ث",
    "thal": "ذ",
    "waw": "و",
    "ya": "ي",
    "zay": "ز",
    "khaa": "خ",
}
# Load the gesture recognition model once at startup instead of on every
# call, since model loading is expensive
model_path = os.path.abspath("arabic_signlanguage_characters_model.task")
recognizer = vision.GestureRecognizer.create_from_model_path(model_path)


def recognize_gesture(image):
    # Gradio delivers the uploaded image as an RGB NumPy array, which
    # matches MediaPipe's SRGB image format directly
    mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)

    # Perform gesture recognition
    recognition_result = recognizer.recognize(mp_image)

    # Guard against images where no hand gesture is detected
    if not recognition_result.gestures:
        return "No gesture recognized"

    # Extract the top-ranked gesture for the first detected hand
    top_gesture = recognition_result.gestures[0][0]

    # Return the Arabic character and the model's confidence score,
    # falling back to "not clear" for any unmapped label
    label = eng_ara_mapping.get(top_gesture.category_name, "not clear")
    return f"Gesture recognized: {label} ({top_gesture.score:.2f})"
iface = gr.Interface(
    fn=recognize_gesture,
    inputs=gr.Image(type="numpy"),  # Input: image as an RGB NumPy array
    outputs="text",  # Output: recognized character and confidence score
    title="Arabic Sign Language Character Recognition",
    description="Upload an image of a hand sign to recognize the corresponding Arabic character",
)

iface.launch()  # Launch the interface in a web browser