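"""Gradio app that recognizes Arabic Sign Language characters from hand images
using a MediaPipe gesture recognition model."""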
import os

import gradio as gr
import mediapipe as mp
from mediapipe.tasks.python import vision
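# Map the model's English label names to the corresponding Arabic characters.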
eng_ara_mapping = {
    " ": " ",
    "ain": "ع",
    "aleff": "أ",
    "bb": "ب",
    "dal": "د",
    "dha": "ظ",
    "dhad": "ض",
    "fa": "ف",
    "gaaf": "ق",
    "ghain": "غ",
    "ha": "ه",
    "haa": "ح",
    "jeem": "ج",
    "kaaf": "ك",
    "laam": "ل",
    "meem": "م",
    "nun": "ن",
    "ra": "ر",
    "saad": "ص",
    "seen": "س",
    "sheen": "ش",
    "ta": "ت",
    "taa": "ط",
    "thaa": "ث",
    "thal": "ذ",
    "waw": "و",
    "ya": "ى",
    "zay": "ز",
    "khaa": "خ",
}
# Load the gesture recognition model once at startup instead of on every request
model_path = os.path.abspath("arabic_signlanguage_characters_model.task")
recognizer = vision.GestureRecognizer.create_from_model_path(model_path)


def recognize_gesture(image):
    # Gradio delivers the uploaded image as an RGB NumPy array, which matches
    # the SRGB layout MediaPipe expects; wrap it in a MediaPipe Image
    mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)
    # Perform gesture recognition
    recognition_result = recognizer.recognize(mp_image)
    # Guard against images in which no hand gesture is detected
    if not recognition_result.gestures:
        return "No gesture detected. Please try another image."
    # Extract the top-ranked gesture for the first detected hand
    top_gesture = recognition_result.gestures[0][0]
    # Return the mapped Arabic character and the confidence score
    return f"Character recognized: {eng_ara_mapping[top_gesture.category_name]} (probability: {top_gesture.score:.2f})"
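
# A minimal sketch for testing the function locally without the web UI,
# assuming a sample image sits next to this script (the filename
# "test_sign.jpg" is hypothetical):
#
#   from PIL import Image as PILImage
#   import numpy as np
#   img = np.asarray(PILImage.open("test_sign.jpg").convert("RGB"))
#   print(recognize_gesture(img))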
iface = gr.Interface(
    fn=recognize_gesture,
    inputs=gr.Image(type="numpy"),  # Input: image as an RGB NumPy array
    outputs="text",                 # Output: recognized character as text
    title="Arabic Sign Language Character Recognition",
    description="Upload an image of a hand sign to recognize the Arabic character",
)
iface.launch(share=True) # Launch the interface in a web browser