Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,104 +1,23 @@
-import gradio as gr
 import tensorflow as tf
-import numpy as np
-import cv2
-import os
-from scipy.spatial.distance import cosine
 from tensorflow.keras.models import load_model
-from tensorflow.keras import layers, Model
 
-def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-user_embeddings = []
-user_ids = []
-
-# Threshold
-RECOGNITION_THRESHOLD = 0.1  # Adjust as needed
-
-# Preprocess the image
-def preprocess_image(image):
-    image = cv2.resize(image, (160, 160))  # Resize image to match FaceNet input size
-    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # Convert to RGB (OpenCV loads images in BGR)
-    image = tf.convert_to_tensor(image)  # Convert to TensorFlow tensor
-    image = tf.image.convert_image_dtype(image, tf.float32)  # Normalize pixel values
-    return np.expand_dims(image, axis=0)  # Add batch dimension
-
-# Generate embedding
-def generate_embedding(image):
-    preprocessed_image = preprocess_image(image)
-    return embedding_model.predict(preprocessed_image)[0]
-
-# Register new user
-def register_user(image, user_id):
-    try:
-        embedding = generate_embedding(image)
-        user_embeddings.append(embedding)
-        user_ids.append(user_id)
-        return f"User {user_id} registered successfully."
-    except Exception as e:
-        return f"Error during registration: {str(e)}"
-
-# Recognize user
-def recognize_user(image):
-    try:
-        new_embedding = generate_embedding(image)
-        closest_user_id = None
-        closest_distance = float('inf')
-
-        for user_id, embedding in zip(user_ids, user_embeddings):
-            distance = cosine(new_embedding, embedding)
-            print(f"Distance for {user_id}: {distance}")  # Debug: Print distances for each user
-            if distance < closest_distance:
-                closest_distance = distance
-                closest_user_id = user_id
-
-        print(f"Min distance: {closest_distance}")  # Debug: Print minimum distance
-
-        if closest_distance <= RECOGNITION_THRESHOLD:
-            return f"Recognized User: {closest_user_id}"
-        else:
-            return f"User not recognized. Closest Distance: {closest_distance}"
-    except Exception as e:
-        return f"Error during recognition: {str(e)}"
-
-
-def main():
-    with gr.Blocks() as demo:
-        gr.Markdown("Facial Recognition System")
-        with gr.Tab("Register"):
-            with gr.Row():
-                img_register = gr.Image()
-                user_id = gr.Textbox(label="User ID")
-            register_button = gr.Button("Register")
-            register_output = gr.Textbox()
-            register_button.click(register_user, inputs=[img_register, user_id], outputs=register_output)
-
-        with gr.Tab("Recognize"):
-            with gr.Row():
-                img_recognize = gr.Image()
-            recognize_button = gr.Button("Recognize")
-            recognize_output = gr.Textbox()
-            recognize_button.click(recognize_user, inputs=[img_recognize], outputs=recognize_output)
-
-    demo.launch(share=True)
-
-if __name__ == "__main__":
-    main()
+def preprocess_image(filename, target_shape=(160, 160)):
+    image_string = tf.io.read_file(filename)
+    image = tf.image.decode_jpeg(image_string, channels=3)
+    image = tf.image.convert_image_dtype(image, tf.float32)
+    image = tf.image.resize(image, target_shape)
+    return image
+
+embedding_model_path = 'facenet_siamese_embedding.h5'
+embedding_model = load_model(embedding_model_path)
+
+def generate_embedding(image_path, model):
+    preprocessed_image = preprocess_image(image_path)
+    preprocessed_image = tf.expand_dims(preprocessed_image, axis=0)  # Add batch dimension
+    embedding = model(preprocessed_image)
+    return embedding
+
+# Example usage
+image_path = 'iman.jpg'  # Update with your image's path
+image_embedding = generate_embedding(image_path, embedding_model)
+print("Generated Embedding:", image_embedding.numpy())
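The new preprocessing pipeline replaces the old in-memory route (cv2.resize plus a BGR-to-RGB conversion on the NumPy array Gradio hands over) with a file-path route built on tf.io. Note that tf.image.convert_image_dtype only rescales to [0, 1] when the input is an integer type, and resizing after the dtype conversion avoids uint8 rounding. If both entry points are ever needed again, one helper can accept either form. A minimal sketch, not part of the commit; preprocess_any is a hypothetical name, and tf.io.decode_image is used because it accepts JPEG and PNG alike:

import numpy as np
import tensorflow as tf

def preprocess_any(image, target_shape=(160, 160)):
    # Hypothetical helper: accepts a file path (like the new app.py)
    # or an in-memory RGB array (like the removed Gradio callbacks,
    # which receive uint8 RGB arrays from gr.Image() by default).
    if isinstance(image, str):
        data = tf.io.read_file(image)
        image = tf.io.decode_image(data, channels=3, expand_animations=False)
    else:
        image = tf.convert_to_tensor(np.asarray(image))
    image = tf.image.convert_image_dtype(image, tf.float32)  # integer input -> [0, 1]
    image = tf.image.resize(image, target_shape)
    return tf.expand_dims(image, axis=0)  # add batch dimension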
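The removed recognize_user loop was the core of the old app: scipy.spatial.distance.cosine returns the cosine distance (1 minus cosine similarity), and the nearest registered embedding is accepted only if its distance falls under RECOGNITION_THRESHOLD = 0.1, a strict cut-off that in practice needs tuning against the model's actual score distribution. The same nearest-neighbour logic vectorizes cleanly; a sketch mirroring the removed code (match_embedding is a hypothetical name), with explicit L2 normalization so a dot product gives the cosine similarity:

import numpy as np

def match_embedding(query, embeddings, ids, threshold=0.1):
    # Nearest neighbour by cosine distance, accepted only under the
    # threshold; mirrors the removed recognize_user. Hypothetical helper.
    if not embeddings:
        return None, float('inf')
    mat = np.stack(embeddings).astype(np.float64)      # (n_users, dim)
    mat /= np.linalg.norm(mat, axis=1, keepdims=True)  # L2-normalize rows
    q = np.asarray(query, dtype=np.float64)
    q /= np.linalg.norm(q)
    distances = 1.0 - mat @ q                          # cosine distance per user
    best = int(np.argmin(distances))
    if distances[best] <= threshold:
        return ids[best], float(distances[best])
    return None, float(distances[best])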
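A separate weakness of the removed version: user_embeddings and user_ids were module-level Python lists, so every registration vanished whenever the process restarted. If the register/recognize flow comes back, persisting the two lists is cheap. A sketch under the assumption that a writable path is available (on Spaces, surviving a rebuild additionally requires persistent storage); the file name is hypothetical:

import os
import numpy as np

USERS_FILE = 'registered_users.npz'  # hypothetical path

def save_users(ids, embeddings):
    # Persist the parallel lists so registrations survive a restart.
    np.savez(USERS_FILE, ids=np.array(ids), embeddings=np.stack(embeddings))

def load_users():
    if not os.path.exists(USERS_FILE):
        return [], []
    data = np.load(USERS_FILE)
    return list(data['ids']), list(data['embeddings'])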
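After this commit, app.py loads the model, embeds one hard-coded image, prints the result, and exits: no Gradio interface is built or launched, which on its own leaves a Gradio Space with nothing to serve and is consistent with the "Runtime error" badge above (the script also assumes facenet_siamese_embedding.h5 and iman.jpg exist in the repo). A minimal sketch of how the new path-based code could be re-exposed as an app, assuming the generate_embedding and embedding_model defined above:

import gradio as gr

def embed(image_path):
    # gr.Image(type="filepath") passes the upload as a temp-file path,
    # which matches the new path-based preprocess_image. Assumes
    # generate_embedding and embedding_model from app.py above.
    if image_path is None:
        return "No image provided."
    embedding = generate_embedding(image_path, embedding_model)
    return str(embedding.numpy())

demo = gr.Interface(
    fn=embed,
    inputs=gr.Image(type="filepath"),
    outputs=gr.Textbox(label="Embedding"),
)
demo.launch()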