import os

import gradio as gr
import mediapipe as mp
from mediapipe.tasks.python import vision

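# Map the recognizer's English gesture labels to their Arabic characters.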
eng_ara_mapping = {
    ' ': ' ',
    "ain": 'ع',
    "aleff": 'أ',
    "bb": 'ب',
    "dal": 'د',
    "dha": 'ظ',
    "dhad": 'ض',
    "fa": 'ف',
    "gaaf": 'ق',
    "ghain": 'غ',
    "ha": 'ه',
    "haa": 'ح',
    "jeem": 'ج',
    "kaaf": 'ك',
    "laam": 'ل',
    "meem": 'م',
    "nun": 'ن',
    "ra": 'ر',
    "saad": 'ص',
    "seen": 'س',
    "sheen": 'ش',
    "ta": 'ت',
    "taa": 'ط',
    "thaa": 'ث',
    "thal": 'ذ',
    "waw": 'و',
    "ya": 'ى',
    "zay": 'ز',
    "khaa": 'خ',
}

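# Load the gesture model once at startup so every request reuses the same
# recognizer instead of re-reading the .task file on each call.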
model_path = os.path.abspath("arabic_signlanguage_characters_model.task")
recognizer = vision.GestureRecognizer.create_from_model_path(model_path)


def recognize_gesture(image):
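    # Gradio delivers the "image" input as an RGB numpy array (or None if
    # nothing was uploaded); RGB matches MediaPipe's SRGB image format.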
    if image is None:
        return "Please upload an image first."

    mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)

    recognition_result = recognizer.recognize(mp_image)

    # A frame with no detected hand yields an empty gestures list;
    # indexing it directly would raise an IndexError.
    if not recognition_result.gestures:
        return "No hand gesture detected. Please try another image."

    top_gesture = recognition_result.gestures[0][0]
    return (
        f"Character recognized: {eng_ara_mapping[top_gesture.category_name]} "
        f"probability: ({top_gesture.score:.2f})"
    )

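# Expose the recognizer through a simple Gradio UI: image in, text out.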
iface = gr.Interface(
    fn=recognize_gesture,
    inputs=["image"],
    outputs="text",
    title="Arabic Sign Language Character Recognition",
    description="Upload an image of an Arabic Sign Language hand gesture to recognize the corresponding character",
)

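# share=True serves the app locally and also creates a temporary public link.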
iface.launch(share=True) |