# Arabic sign language character recognition: a MediaPipe gesture-recognizer
# model served through a Gradio web interface.
import mediapipe as mp
from mediapipe.tasks.python import vision
import gradio as gr
import os
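
# The model reports each sign with an English label; map those labels to the
# Arabic characters they represent. "" covers signs the model cannot classify.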
eng_ara_mapping = {
    "": "not clear",
    "ain": "ع",
    "aleff": "أ",
    "bb": "ب",
    "dal": "د",
    "dha": "ظ",
    "dhad": "ض",
    "fa": "ف",
    "gaaf": "ق",
    "ghain": "غ",
    "ha": "ه",
    "haa": "ح",
    "jeem": "ج",
    "kaaf": "ك",
    "laam": "ل",
    "meem": "م",
    "nun": "ن",
    "ra": "ر",
    "saad": "ص",
    "seen": "س",
    "sheen": "ش",
    "ta": "ت",
    "taa": "ط",
    "thaa": "ث",
    "thal": "ذ",
    "waw": "و",
    "ya": "ي",
    "zay": "ز",
    "khaa": "خ",
}

# Load the gesture recognizer once at startup instead of on every request.
model_path = os.path.abspath("arabic_signlanguage_characters_model.task")
recognizer = vision.GestureRecognizer.create_from_model_path(model_path)


def recognize_gesture(image):
    # Gradio passes the uploaded image as an RGB numpy array, which already
    # matches the SRGB layout that mp.Image expects.
    mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)

    recognition_result = recognizer.recognize(mp_image)

    # The result holds one gesture list per detected hand; it is empty when
    # no hand is found in the image.
    if not recognition_result.gestures:
        return "No gesture recognized"

    top_gesture = recognition_result.gestures[0][0]
    label = eng_ara_mapping.get(top_gesture.category_name, "not clear")
    return f"Gesture recognized: {label} ({top_gesture.score:.2f})"
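
# A quick local sanity check, assuming a test image exists on disk
# ("test_sign.jpg" is a hypothetical filename). OpenCV loads images as BGR,
# so convert to RGB before passing the array in:
#
#     import cv2
#     frame = cv2.cvtColor(cv2.imread("test_sign.jpg"), cv2.COLOR_BGR2RGB)
#     print(recognize_gesture(frame))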

iface = gr.Interface(
    fn=recognize_gesture,
    inputs=gr.Image(),
    outputs="text",
    title="Arabic Sign Language Character Recognition",
    description="Upload an image to recognize the gesture",
)

iface.launch()
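
# launch() serves the app locally (http://127.0.0.1:7860 by default);
# iface.launch(share=True) also creates a temporary public link.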