Spaces: Runtime error
import gradio as gr
import tensorflow as tf
import numpy as np
import cv2
import os
from scipy.spatial.distance import cosine
from tensorflow.keras.applications import resnet
from tensorflow.keras import layers, Model
def create_embedding_model():
    # ResNet50 backbone pretrained on ImageNet, without its classification head
    base_cnn = resnet.ResNet50(weights="imagenet", input_shape=(200, 200, 3), include_top=False)

    flatten = layers.Flatten()(base_cnn.output)
    dense1 = layers.Dense(512, activation="relu")(flatten)
    dense1 = layers.BatchNormalization()(dense1)
    dense2 = layers.Dense(256, activation="relu")(dense1)
    dense2 = layers.BatchNormalization()(dense2)
    output = layers.Dense(256)(dense2)

    embedding_model = Model(base_cnn.input, output, name="Embedding")

    # Freeze the backbone up to conv5_block1_out; layers from there on stay trainable
    trainable = False
    for layer in base_cnn.layers:
        if layer.name == "conv5_block1_out":
            trainable = True
        layer.trainable = trainable

    return embedding_model
# Load the embedding model and its trained weights
embedding_model = create_embedding_model()
embedding_model.load_weights('base_128.h5')
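# Assumption: base_128.h5 is deployed alongside this script; load_weights()
# raises at startup if the file is missing.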
# In-memory database of embeddings and their user IDs
user_embeddings = []
user_ids = []

# Maximum cosine distance for a positive match (adjust as needed)
RECOGNITION_THRESHOLD = 0.1
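# scipy's cosine() returns 1 - cosine_similarity: 0.0 for identical embeddings,
# up to 2.0 for opposed ones, so a 0.1 threshold demands near-identical vectors.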
# Preprocess an RGB image (as delivered by gr.Image) for the embedding model
def preprocess_image(image):
    image = cv2.resize(image, (200, 200))  # Match the model's 200x200 input
    image = tf.keras.applications.resnet50.preprocess_input(image)
    return np.expand_dims(image, axis=0)  # Add the batch dimension
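# resnet50.preprocess_input applies ImageNet channel-mean subtraction (and an
# RGB->BGR flip), matching what the pretrained backbone expects.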
# Generate a 256-dimensional embedding for a single image
def generate_embedding(image):
    preprocessed_image = preprocess_image(image)
    return embedding_model.predict(preprocessed_image)[0]
# Register a new user by storing their embedding and ID
def register_user(image, user_id):
    try:
        embedding = generate_embedding(image)
        user_embeddings.append(embedding)
        user_ids.append(user_id)
        return f"User {user_id} registered successfully."
    except Exception as e:
        return f"Error during registration: {str(e)}"
# Recognize a user by cosine distance to the stored embeddings
def recognize_user(image):
    try:
        if not user_embeddings:
            return "No users registered yet."
        new_embedding = generate_embedding(image)
        closest_user_id = None
        closest_distance = float('inf')
        for user_id, embedding in zip(user_ids, user_embeddings):
            distance = cosine(new_embedding, embedding)
            if distance < closest_distance:
                closest_distance = distance
                closest_user_id = user_id
        if closest_distance <= RECOGNITION_THRESHOLD:
            return f"Recognized User: {closest_user_id}, Distance: {closest_distance:.4f}"
        else:
            return f"User not recognized. Closest Distance: {closest_distance:.4f}"
    except Exception as e:
        return f"Error during recognition: {str(e)}"
def main():
    with gr.Blocks() as demo:
        gr.Markdown("Facial Recognition System")

        with gr.Tab("Register"):
            with gr.Row():
                img_register = gr.Image()
                user_id = gr.Textbox(label="User ID")
            register_button = gr.Button("Register")
            register_output = gr.Textbox()
            register_button.click(register_user, inputs=[img_register, user_id], outputs=register_output)

        with gr.Tab("Recognize"):
            with gr.Row():
                img_recognize = gr.Image()
            recognize_button = gr.Button("Recognize")
            recognize_output = gr.Textbox()
            recognize_button.click(recognize_user, inputs=[img_recognize], outputs=recognize_output)

    demo.launch(share=True)


if __name__ == "__main__":
    main()
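# Illustrative, non-executed example of exercising the handlers without the UI
# (the image path is a hypothetical placeholder):
#
#   img = cv2.cvtColor(cv2.imread("alice.jpg"), cv2.COLOR_BGR2RGB)
#   print(register_user(img, "alice"))
#   print(recognize_user(img))
#
# Assumed deployment files (not shown here): a requirements.txt listing gradio,
# tensorflow, opencv-python-headless, scipy and numpy, plus base_128.h5; a
# missing dependency or weights file is a likely cause of the "Runtime error"
# status shown above.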