mohamedsaeed823 committed
Commit: be177d0
Parent(s): ac30221
Upload 3 files
- .gitattributes +1 -0
- app.py +37 -0
- arabic_signlanguage_characters_model.task +3 -0
- requirements.txt +2 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+arabic_signlanguage_characters_model.task filter=lfs diff=lfs merge=lfs -text
app.py
ADDED
@@ -0,0 +1,37 @@
+import cv2
+import mediapipe as mp
+from mediapipe.tasks import python
+from mediapipe.tasks.python import vision
+import gradio as gr
+import os
+
+def recognize_gesture(image):
+    # Load the gesture recognition model
+    model_path = os.path.abspath("arabic_signlanguage_characters_model.task")
+    recognizer = vision.GestureRecognizer.create_from_model_path(model_path)
+
+    # Gradio passes the uploaded image as a NumPy array in RGB order,
+    # which matches MediaPipe's SRGB image format
+    image = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)
+
+    # Perform gesture recognition
+    recognition_result = recognizer.recognize(image)
+
+    # Extract the top gesture, guarding against images with no detected hand
+    if not recognition_result.gestures:
+        return "No gesture recognized"
+    top_gesture = recognition_result.gestures[0][0]
+
+    # Return the gesture label and score
+    return f"Gesture recognized: {top_gesture.category_name} ({top_gesture.score:.2f})"
+
+iface = gr.Interface(
+    fn=recognize_gesture,
+    inputs=["image"],  # Input type: image (passed as a NumPy array)
+    outputs="text",  # Output type: text
+    title="Arabic Sign Language Character Recognition",
+    description="Upload an image to recognize the gesture",
+)
+
+iface.launch(share=True)  # Launch the interface in a web browser
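For a quick sanity check outside Gradio, the recognizer can be called directly on a local image. The sketch below is a minimal example, not part of the commit: it assumes a hypothetical test file `hand_sample.jpg` next to the model, and converts OpenCV's BGR output to RGB before wrapping it in `mp.Image`.

```python
import cv2
import mediapipe as mp
from mediapipe.tasks.python import vision

# "hand_sample.jpg" is a hypothetical test image; adjust the path as needed.
bgr = cv2.imread("hand_sample.jpg")
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; SRGB expects RGB

recognizer = vision.GestureRecognizer.create_from_model_path(
    "arabic_signlanguage_characters_model.task"
)
result = recognizer.recognize(mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb))

if result.gestures:
    top = result.gestures[0][0]
    print(f"{top.category_name}: {top.score:.2f}")
else:
    print("No gesture recognized")
```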
arabic_signlanguage_characters_model.task
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd86421edd79714e6091d60bc2c96117ea8ad09521834a6971fd46d107398053
+size 8473894
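The file committed here is a Git LFS pointer: it records only the SHA-256 object id and the size (8,473,894 bytes), and the actual model is downloaded by LFS. After cloning, the pulled file can be checked against those two values; the snippet below is a sketch that assumes the real model has already been materialized in the working directory.

```python
import hashlib
import os

# Values copied from the LFS pointer above
MODEL_PATH = "arabic_signlanguage_characters_model.task"
EXPECTED_OID = "bd86421edd79714e6091d60bc2c96117ea8ad09521834a6971fd46d107398053"
EXPECTED_SIZE = 8473894

with open(MODEL_PATH, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

print("size matches:", os.path.getsize(MODEL_PATH) == EXPECTED_SIZE)
print("sha256 matches:", digest == EXPECTED_OID)
```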
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+mediapipe
+gradio