Spaces:
Runtime error
Runtime error
File size: 4,366 Bytes
fd4b460 9e516a3 75323f6 e5eb476 d6ffcce fd4b460 9e516a3 fd4b460 e5eb476 fd4b460 c5fe620 fd4b460 9a3eba9 e5eb476 9a3eba9 d6ffcce 8557945 162217b 8557945 162217b 8557945 e5eb476 9a3eba9 2310398 fd4b460 2310398 fd4b460 2310398 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 |
import gradio as gr
import tensorflow as tf
import numpy as np
from scipy.spatial.distance import cosine
import cv2
import os
from tensorflow.keras.applications import resnet
from tensorflow.keras import layers, Model
def create_embedding_model():
    """Build the ResNet50-based embedding network (200x200x3 -> 256-d vector).

    The ImageNet-pretrained backbone is frozen up to (but not including)
    "conv5_block1_out"; that layer and everything after it stay trainable.
    """
    backbone = resnet.ResNet50(
        weights="imagenet", input_shape=(200, 200, 3), include_top=False
    )

    # Projection head: flatten -> 512 -> 256 -> 256-d embedding.
    x = layers.Flatten()(backbone.output)
    x = layers.Dense(512, activation="relu")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(256, activation="relu")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(256)(x)

    # Walk the backbone once; flip to trainable from conv5_block1_out onward.
    unfreeze = False
    for backbone_layer in backbone.layers:
        unfreeze = unfreeze or backbone_layer.name == "conv5_block1_out"
        backbone_layer.trainable = unfreeze

    return Model(backbone.input, x, name="Embedding")
# K-mean Clustering
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt

# Threshold
# NOTE(review): KMeans.transform yields Euclidean distances in the 256-d
# embedding space; 0.1 looks far too small for that scale, so most queries
# will hit the "User not recognized" branch — confirm against real distances.
RECOGNITION_THRESHOLD = 0.1  # Adjust as needed
n_clusters = 5  # You can adjust this based on your data
kmeans = KMeans(n_clusters=n_clusters)

# Load the embedding model
# embedding_model = tf.keras.models.load_model('base_128.h5')
# Rebuild the architecture, then load trained weights from disk
# (module-level side effect: requires 'base_128.h5' next to this script).
embedding_model = create_embedding_model()
embedding_model.load_weights('base_128.h5')

# Database to store embeddings and user IDs
# (in-memory, parallel lists; contents are lost when the process exits)
user_embeddings = []
user_ids = []
# Preprocess the image
def preprocess_image(image):
    """Resize *image* to the model input size and apply ResNet preprocessing.

    Args:
        image: HxWx3 RGB array (as delivered by gradio's Image component —
            TODO confirm the component is configured for RGB numpy output).

    Returns:
        A (1, 200, 200, 3) float32 batch ready for ``embedding_model.predict``.
    """
    image = cv2.resize(image, (200, 200))  # Resize image to 200x200
    # Explicit float cast: preprocess_input would otherwise cast internally,
    # and this guarantees we never hand it an integer array.
    image = image.astype(np.float32)
    # Consistency fix: the model is built from the imported ``resnet`` module,
    # so use its preprocess_input (identical to
    # tf.keras.applications.resnet50.preprocess_input) instead of reaching
    # through the tf namespace.
    image = resnet.preprocess_input(image)
    return np.expand_dims(image, axis=0)
# Generate embedding
def generate_embedding(image):
    """Return the 1-D embedding vector for a single raw RGB image."""
    batch = preprocess_image(image)
    # predict() returns a (1, 256) array; strip the batch dimension.
    predictions = embedding_model.predict(batch)
    return predictions[0]
# Register new user
def register_user(image, user_id):
    """Embed *image* and store the embedding under *user_id*.

    Args:
        image: raw RGB image array from the gradio Image component.
        user_id: identifier typed by the user; must be non-blank.

    Returns:
        A human-readable status string shown in the gradio output textbox.
    """
    # Robustness: reject blank IDs before doing any (expensive) model work;
    # previously an empty ID was silently registered.
    if user_id is None or not user_id.strip():
        return "Error: User ID must not be empty."
    try:
        embedding = generate_embedding(image)
    except Exception as e:  # surfaced to the UI rather than crashing the app
        return f"Error during registration: {str(e)}"
    user_embeddings.append(embedding)
    user_ids.append(user_id)
    return f"User {user_id} registered successfully."
# Recognize user
def recognize_user(image):
    """Try to match *image* against registered users via K-Means clustering.

    Refits the global ``kmeans`` model on every call over all stored
    embeddings, assigns the query embedding to its nearest cluster, and
    returns every registered user ID in that cluster.

    Returns a human-readable status string for the gradio output textbox.
    """
    try:
        new_embedding = generate_embedding(image)
        # KMeans needs at least n_clusters samples to fit.
        if len(user_embeddings) < n_clusters:
            # Handle the case where there are not enough users for K-means
            # For example, you could use nearest neighbor search among existing embeddings
            # Here, I'm just returning a message for simplicity
            return "Not enough registered users for recognition."
        # Update the KMeans model
        # NOTE(review): refitting on every request is O(registered users) and
        # relabels clusters nondeterministically between calls.
        kmeans.fit(user_embeddings)
        cluster_label = kmeans.predict([new_embedding])[0]
        # transform() gives Euclidean distance to every centroid.
        distances = kmeans.transform([new_embedding])[0]
        min_distance = np.min(distances)
        # NOTE(review): RECOGNITION_THRESHOLD (0.1) is compared against a
        # Euclidean distance in 256-d space — likely far too strict; confirm.
        if min_distance > RECOGNITION_THRESHOLD:
            return "User not recognized."
        # Find the user ID(s) in the closest cluster
        # (labels_ is index-aligned with user_embeddings / user_ids)
        recognized_user_ids = [user_ids[i] for i, label in enumerate(kmeans.labels_) if label == cluster_label]
        return f"Recognized User(s): {', '.join(recognized_user_ids)}"
    except Exception as e:
        return f"Error during recognition: {str(e)}"
def plot_clusters():
    """Scatter-plot registered embeddings coloured by their K-Means cluster.

    Embeddings are 256-dimensional (see create_embedding_model), so only the
    first two dimensions are plotted as a rough 2-D projection.

    Fix: the original ``plt.scatter(*zip(*user_embeddings))`` unpacked every
    embedding dimension as a separate positional argument, which crashes for
    anything other than exactly 2-D embeddings.

    Requires ``kmeans`` to have been fitted (i.e. recognize_user ran at least
    once); ``kmeans.labels_`` does not exist before the first fit.
    """
    if not user_embeddings:
        return  # nothing registered yet; nothing to plot
    emb = np.asarray(user_embeddings)
    plt.figure(figsize=(8, 6))
    plt.scatter(emb[:, 0], emb[:, 1], c=kmeans.labels_)
    plt.title('User Embeddings Clustered by K-Means')
    plt.xlabel('Embedding Dimension 1')
    plt.ylabel('Embedding Dimension 2')
    plt.show()
def main():
    """Build and launch the two-tab (Register / Recognize) gradio UI."""
    with gr.Blocks() as demo:
        gr.Markdown("Facial Recognition System")

        with gr.Tab("Register"):
            with gr.Row():
                reg_image = gr.Image()
                reg_user_id = gr.Textbox(label="User ID")
            reg_button = gr.Button("Register")
            reg_status = gr.Textbox()
            reg_button.click(
                register_user,
                inputs=[reg_image, reg_user_id],
                outputs=reg_status,
            )

        with gr.Tab("Recognize"):
            with gr.Row():
                rec_image = gr.Image()
            rec_button = gr.Button("Recognize")
            rec_status = gr.Textbox()
            rec_button.click(
                recognize_user,
                inputs=[rec_image],
                outputs=rec_status,
            )

    # share=True exposes a temporary public gradio link.
    demo.launch(share=True)
# Script entry point: only launch the UI when run directly, not on import.
if __name__ == "__main__":
    main()