# NOTE: this file was recovered from a Hugging Face Spaces page that was
# showing a "Runtime error" status; the page header lines have been removed.
import os

import cv2
import gradio as gr
import numpy as np
import tensorflow as tf
from scipy.spatial.distance import cosine
from tensorflow.keras import layers, Model
from tensorflow.keras.applications import resnet
def create_embedding_model():
    """Build the 256-d face-embedding network.

    A ResNet50 backbone (ImageNet weights, 200x200x3 input, no top) feeds a
    dense head of 512 -> 256 -> 256 units with batch normalization. Backbone
    layers before ``conv5_block1_out`` are frozen; that layer and everything
    after it stay trainable for fine-tuning.
    """
    backbone = resnet.ResNet50(
        weights="imagenet", input_shape=(200, 200, 3), include_top=False
    )

    x = layers.Flatten()(backbone.output)
    x = layers.Dense(512, activation="relu")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(256, activation="relu")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(256)(x)

    model = Model(backbone.input, x, name="Embedding")

    # Flip to trainable once we reach conv5_block1_out; that layer itself
    # (and all later layers) is trainable, everything before it is frozen.
    unfreeze = False
    for layer in backbone.layers:
        if layer.name == "conv5_block1_out":
            unfreeze = True
        layer.trainable = unfreeze

    return model
# K-means clustering
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt

# Maximum distance to the nearest cluster center for a positive match.
# Adjust empirically — presumably against a validation set of known faces.
RECOGNITION_THRESHOLD = 0.1
# Number of K-means clusters; adjust based on your data.
n_clusters = 5
kmeans = KMeans(n_clusters=n_clusters)

# Build the embedding network and load pre-trained weights.
# embedding_model = tf.keras.models.load_model('base_128.h5')
embedding_model = create_embedding_model()
embedding_model.load_weights('base_128.h5')

# In-memory "database": parallel lists — user_embeddings[i] belongs to
# user_ids[i]. Not persisted; lost on restart.
user_embeddings = []
user_ids = []
# Preprocess the image | |
def preprocess_image(image):
    """Prepare one image for the embedding model.

    Resizes to the 200x200 network input, applies ResNet50 preprocessing,
    and prepends a batch axis, yielding shape (1, 200, 200, 3).
    """
    resized = cv2.resize(image, (200, 200))
    normalized = tf.keras.applications.resnet50.preprocess_input(resized)
    return np.expand_dims(normalized, axis=0)
# Generate embedding | |
def generate_embedding(image):
    """Return the 256-d embedding vector for a single image."""
    batch = preprocess_image(image)
    predictions = embedding_model.predict(batch)
    return predictions[0]
# Register new user | |
def register_user(image, user_id):
    """Embed *image* and store it under *user_id* in the in-memory database.

    Returns a human-readable status string for the Gradio output textbox.
    Explicit guards replace the opaque exception messages the original
    produced when the image was missing or the ID empty.
    """
    # Gradio passes None when no image has been provided.
    if image is None:
        return "Error during registration: no image provided."
    if not user_id:
        return "Error during registration: user ID must not be empty."
    try:
        embedding = generate_embedding(image)
    except Exception as e:
        # UI boundary: surface the failure as text instead of crashing the app.
        return f"Error during registration: {str(e)}"
    user_embeddings.append(embedding)
    user_ids.append(user_id)
    return f"User {user_id} registered successfully."
# Recognize user | |
def recognize_user(image):
    """Match *image* against registered users via K-means clustering.

    Returns a status string: the user ID(s) sharing the query's cluster,
    "User not recognized." when the nearest cluster center is farther than
    RECOGNITION_THRESHOLD, or a message when fewer than n_clusters users
    are registered (K-means cannot fit with fewer samples than clusters).
    """
    try:
        query = generate_embedding(image)

        if len(user_embeddings) < n_clusters:
            # Not enough samples to fit K-means. A nearest-neighbor search
            # over the raw embeddings would be a possible fallback here.
            return "Not enough registered users for recognition."

        # Refit on the current database, then place the query embedding.
        kmeans.fit(user_embeddings)
        assigned = kmeans.predict([query])[0]
        center_distances = kmeans.transform([query])[0]

        if np.min(center_distances) > RECOGNITION_THRESHOLD:
            return "User not recognized."

        # Every registered user whose embedding landed in the same cluster
        # counts as a match.
        matches = [
            user_ids[i]
            for i, label in enumerate(kmeans.labels_)
            if label == assigned
        ]
        return f"Recognized User(s): {', '.join(matches)}"
    except Exception as e:
        return f"Error during recognition: {str(e)}"
def plot_clusters():
    """Scatter-plot registered embeddings, colored by K-means cluster label.

    Embeddings are 256-dimensional, so only the first two dimensions are
    shown. BUG FIX: the original ``plt.scatter(*zip(*user_embeddings))``
    unpacked one positional argument per embedding dimension (256 of them)
    into ``scatter``, which raises a TypeError for anything beyond 2-D data.
    """
    points = np.asarray(user_embeddings)
    plt.figure(figsize=(8, 6))
    plt.scatter(points[:, 0], points[:, 1], c=kmeans.labels_)
    plt.title('User Embeddings Clustered by K-Means')
    plt.xlabel('Embedding Dimension 1')
    plt.ylabel('Embedding Dimension 2')
    plt.show()
def main():
    """Build and launch the Gradio UI: a Register tab and a Recognize tab."""
    with gr.Blocks() as demo:
        gr.Markdown("Facial Recognition System")

        with gr.Tab("Register"):
            with gr.Row():
                img_register = gr.Image()
                user_id = gr.Textbox(label="User ID")
            register_button = gr.Button("Register")
            register_output = gr.Textbox()
            register_button.click(
                register_user,
                inputs=[img_register, user_id],
                outputs=register_output,
            )

        with gr.Tab("Recognize"):
            with gr.Row():
                img_recognize = gr.Image()
            recognize_button = gr.Button("Recognize")
            recognize_output = gr.Textbox()
            recognize_button.click(
                recognize_user,
                inputs=[img_recognize],
                outputs=recognize_output,
            )

    # share=True exposes a public Gradio link (required on some hosts).
    demo.launch(share=True)


if __name__ == "__main__":
    main()