import io
import urllib.request

import gradio as gr
import numpy as np
import cv2
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import load_model
# Load the pre-trained Haar cascade face detection model
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

# Load the pre-trained classification model
model = load_model('my_model.h5')
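# Assumption (not stated in this file): my_model.h5 is a Keras classifier that
# takes 224x224 RGB inputs scaled to [0, 1] and returns three class
# probabilities ordered [CART, NSFW, SFW], which is the mapping used in
# classify_image below.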
def classify_image(img):
    img_copy = img
    height, width = img_copy.shape[0], img_copy.shape[1]
    img_copy = cv2.resize(img_copy, (500, 500))

    # Convert the image to grayscale for face detection
    gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)

    # Detect faces in the image
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

    # Check if any faces were detected
    if len(faces) > 0:
        face_area_list = []
        # Draw rectangles around the detected faces and record their areas
        for (x, y, w, h) in faces:
            cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 255, 0), 2)
            area = w * h
            face_area_list.append(area)

        # Percentage of the image occupied by the largest detected face
        big_face_area = sorted(face_area_list)[-1]
        img_area = img_copy.shape[0] * img_copy.shape[1]
        perc_area = (big_face_area / img_area) * 100

        if perc_area > 7:
            # Preprocess the image and classify it with the model
            img = image.img_to_array(img)
            img = np.expand_dims(img, axis=0)
            img /= 255.0
            prediction = model.predict(img)[0]
            # Map the predicted class probabilities to labels
            dic = {'NSFW': float(prediction[1]), 'CART': float(prediction[0]), 'SFW': float(prediction[2])}
        else:
            # Largest face is too small; flag the image as NSFW by default
            dic = {'CART': float(0), 'SFW': float(0), 'NSFW': float(1)}
    else:
        # No face detected; flag the image as NSFW by default
        dic = {'CART': float(0), 'SFW': float(0), 'NSFW': float(1)}
        perc_area = "could not detect a face"

    return [dic, perc_area, img_copy]
def classify_url(url):
    # Load the image from the URL and convert it to a NumPy array
    response = urllib.request.urlopen(url)
    img = image.load_img(io.BytesIO(response.read()), target_size=(224, 224))
    return classify_image(np.array(img))
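# Note: classify_url is not wired into the Gradio interface below. A minimal,
# hypothetical way to exercise it from a Python shell (the URL is a
# placeholder, not part of this Space):
#
#   labels, face_area, annotated = classify_url("https://example.com/photo.jpg")
#   print(labels)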
# Example images shipped with the Space
examples = [f"example{i}.jpg" for i in range(1, 9)]

# Define the Gradio output interfaces
# (gr.outputs.* and the shape= argument are Gradio 3.x APIs)
output_interfaces = [
    gr.outputs.Label(num_top_classes=3),
    gr.outputs.Textbox(label="% Area of the largest face in image"),
    gr.outputs.Image(type="numpy", label="Detected Faces")
]

# Define the Gradio app
app = gr.Interface(classify_image,
                   gr.Image(shape=(224, 224)),
                   outputs=output_interfaces,
                   allow_flagging="never",
                   examples=examples,
                   title="NSFW/SFW Classifier")

# Start the Gradio app
app.launch()