import cv2
import gradio as gr
import numpy as np
import tensorflow as tf
from deepface import DeepFace
# --------------------------------------------------------------------------
# global variables
# --------------------------------------------------------------------------
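# load the fp16-quantized mobile-facenet TFLite model once at startup and
# cache its input/output tensor metadata for reuse in predict()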
interpreter = tf.lite.Interpreter(model_path="IR/model_float16_quant.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
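# embeddings and aligned face crops, populated by the Gradio callbacks below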
ref = None
query = None
face_query_ = None
face_ref_ = None
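# Earlier gr.Interface-based version of the UI, kept for reference; the live
# app uses the gr.Blocks layout defined below.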
# demo = gr.Interface(
#     fn=image_classifier,
#     inputs=[
#         gr.Image(shape=(224, 224),
#                  image_mode='RGB',
#                  source='webcam',
#                  type="numpy",
#                  label="Reference Image",
#                  streaming=True,
#                  mirror_webcam=True),
#         gr.Image(shape=(224, 224),
#                  image_mode='RGB',
#                  source='webcam',
#                  type="numpy",
#                  label="Query Image",
#                  streaming=True,
#                  mirror_webcam=True)
#     ],
#     outputs=[
#         gr.Number(label="Cosine Similarity",
#                   precision=5),
#         gr.Plot(label="Reference Embedding Histogram"),
#         gr.Plot(label="Query Embedding Histogram")
#     ],
#     live=False,
#     title="Face Recognition",
#     # description='''
#     # | feature   | description    |
#     # | :-------: | :------------: |
#     # | model     | mobile-facenet |
#     # | precision | fp16           |
#     # | type      | tflite         |
#     # ''',
#     # article="""
#     # - detects face in input image
#     # - resizes face to 112x112
#     # - aligns the face using **deepface MTCNN**
#     # - runs inference on the aligned face
#     # """,
#     allow_flagging="auto",
#     analytics_enabled=True,
# )
# demo.launch(inbrowser=True, auth=("talha", "123"))


def plot_images():
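    """Return the most recently detected reference and query face crops."""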
    global face_query_, face_ref_
    return face_ref_, face_query_


def predict(interpreter, input_details, input_data, output_details):
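    """Run one TFLite inference pass and return the raw output tensor."""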
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    output_data = interpreter.get_tensor(output_details[0]['index'])
    return output_data


def get_ref_vector(image):
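    """Detect, align, and embed the reference face; store the result in ``ref``."""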
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    cv2.imwrite("ref.jpg", image)
    global face_ref_
    face_ref_ = DeepFace.detectFace("ref.jpg", detector_backend="opencv",
                                    align=True, target_size=(112, 112))
    face_ref = face_ref_.copy()
    # detectFace returns an fp32 crop in [0, 1]; rescale it to [0, 255] fp32
    face_ref = cv2.normalize(face_ref, None, 0, 255,
                             cv2.NORM_MINMAX, cv2.CV_32FC3)
    print(
        f"dtype {face_ref.dtype} || max {np.max(face_ref)} || min {np.min(face_ref)}")
    # calculate the embedding on a batch of one: [1, 112, 112, 3]
    face_ref = face_ref[np.newaxis, ...]
    output_data_ref = predict(interpreter, input_details,
                              face_ref, output_details)
    global ref
    ref = output_data_ref
    return (f"shape ---> {face_ref_.shape} dtype ---> {face_ref_.dtype} "
            f"max {face_ref_.max()} min {face_ref_.min()}"), ref


def get_query_vector(image1):
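    """Detect, align, and embed the query face; store the result in ``query``."""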
    image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
    cv2.imwrite("query.jpg", image1)
    global face_query_
    face_query_ = DeepFace.detectFace("query.jpg", detector_backend="opencv",
                                      align=True, target_size=(112, 112))
    face_query = face_query_.copy()
    # detectFace returns an fp32 crop in [0, 1]; rescale it to [0, 255] fp32
    face_query = cv2.normalize(
        face_query, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_32FC3)
    print(
        f"dtype {face_query.dtype} || max {np.max(face_query)} || min {np.min(face_query)}")
    # calculate the embedding on a batch of one: [1, 112, 112, 3]
    face_query = face_query[np.newaxis, ...]
    output_data_query = predict(interpreter, input_details,
                                face_query, output_details)
    global query
    query = output_data_query
    return (f"shape ---> {face_query_.shape} dtype ---> {face_query_.dtype} "
            f"max {face_query_.max()} min {face_query_.min()}"), query


def get_metrics():
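    """Compute cosine similarity and L2 distance between the stored embeddings."""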
    global ref, query
    a, b = np.squeeze(ref), np.squeeze(query)
    # normalize before the dot product so the result is a true cosine similarity
    cos_sim = float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
    return cos_sim, float(np.linalg.norm(a - b))


with gr.Blocks(analytics_enabled=True, title="Face Recognition") as demo:
    # draw a box around children
    with gr.Box():
        gr.Markdown(
            "# First provide the *reference* image and then the *query* image. The **cosine similarity** will be displayed as output.")
        # put both cameras under separate groups
        with gr.Group():
            # components under this scope will have no padding or margin between them
            with gr.Row():
                # reference image
                with gr.Column():
                    inp_ref = gr.Image(shape=(224, 224),
                                       image_mode='RGB',
                                       source='webcam',
                                       type="numpy",
                                       label="Reference Image",
                                       streaming=True,
                                       mirror_webcam=True)
                    out_ref = [gr.Textbox(label="Face capture details"),
                               gr.Dataframe(label="Embedding",
                                            type="pandas", max_cols=512,
                                            headers=None),
                               ]
                    # make button on left column
                    btn_ref = gr.Button("reference_image")
                    btn_ref.click(fn=get_ref_vector,
                                  inputs=inp_ref, outputs=out_ref)
                with gr.Column():
                    inp_query = gr.Image(shape=(224, 224),
                                         image_mode='RGB',
                                         source='webcam',
                                         type="numpy",
                                         label="Query Image",
                                         streaming=True,
                                         mirror_webcam=True)
                    out_query = [gr.Textbox(label="Face capture details"),
                                 gr.Dataframe(label="Embedding",
                                              type="pandas", max_cols=512,
                                              headers=None),
                                 ]
                    # make button on right column
                    btn_query = gr.Button("query_image")
                    btn_query.click(fn=get_query_vector,
                                    inputs=inp_query, outputs=out_query)
    with gr.Box():
        gr.Markdown("# Metrics")
        with gr.Group():
            gr.Markdown(
                "The **cosine similarity** and the **L2 norm of the difference** are displayed here.")
            with gr.Row():
                out_sim = gr.Number(label="Cosine Similarity", precision=5)
                out_d = gr.Number(label="L2 norm distance", precision=5)
            # make button in center, outside row
            btn_sim = gr.Button("Calculate Metrics")
            btn_sim.click(fn=get_metrics, inputs=[], outputs=[out_sim, out_d])
    with gr.Box():
        with gr.Group():
            gr.Markdown("# Detected face results are shown below")
            with gr.Row():
                out_faces = [
                    gr.Image(shape=(112, 112), label="Detected Face Reference"),
                    gr.Image(shape=(112, 112), label="Detected Face Query")
                ]
            # make button outside (below) row
            button_show = gr.Button("Show detected faces")
            button_show.click(fn=plot_images, inputs=[], outputs=out_faces)

demo.launch(share=True)