import io
import urllib.request

import cv2
import gradio as gr
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image

# Load the pre-trained Haar cascade face detector
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

# Load the pre-trained NSFW/SFW classification model
model = load_model("my_model.h5")


def classify_image(img):
    """Classify an RGB image (numpy array) as SFW or NSFW.

    The image is passed to the classifier only if a sufficiently large
    face is detected; otherwise it is labelled NSFW by default.
    """
    img_copy = cv2.resize(img, (500, 500))

    # Convert the image to grayscale for face detection
    gray = cv2.cvtColor(img_copy, cv2.COLOR_RGB2GRAY)

    # Detect faces in the image
    faces = face_cascade.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
    )

    # Check whether any faces were detected
    if len(faces) > 0:
        face_area_list = []

        # Draw rectangles around the detected faces and record their areas
        for (x, y, w, h) in faces:
            cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 255, 0), 2)
            face_area_list.append(w * h)

        # Area of the largest detected face, as a percentage of the image area
        big_face_area = max(face_area_list)
        img_area = img_copy.shape[0] * img_copy.shape[1]
        perc_area = (big_face_area / img_area) * 100

        if perc_area > 7.5:
            # Preprocess the input image for the classifier
            img = image.img_to_array(img)
            img = np.expand_dims(img, axis=0)
            img /= 255.0

            # Use the model to make a prediction
            prediction = model.predict(img)[0]

            # Map the predicted probabilities to labels
            dic = {"SFW": float(prediction[1]), "NSFW": float(prediction[0])}
        else:
            # The largest face is too small: treat the image as NSFW
            dic = {"SFW": 0.0, "NSFW": 1.0}
    else:
        # No human face detected: treat the image as NSFW
        dic = {"SFW": 0.0, "NSFW": 1.0}

    return dic


def classify_url(url):
    # Download the image and load it as a 224x224 RGB image
    with urllib.request.urlopen(url) as response:
        img = image.load_img(io.BytesIO(response.read()), target_size=(224, 224))
    return classify_image(np.array(img))


# Example images shown in the Gradio interface
examples = [f"example{i}.jpg" for i in range(1, 9)]

# Define the Gradio app
app = gr.Interface(
    classify_image,
    gr.Image(shape=(224, 224)),
    outputs="label",
    allow_flagging="never",
    examples=examples,
    title="NSFW/SFW Classifier",
)

# Start the Gradio app
app.launch()
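
# Note: classify_url is defined above but never exposed in the app. Below is a
# minimal, commented-out sketch of how it could be served alongside the image
# interface, assuming Gradio 3.x with gr.TabbedInterface available; this is an
# illustration, not part of the original app (it would replace the single
# app.launch() call above).
#
# url_app = gr.Interface(
#     classify_url,
#     gr.Textbox(label="Image URL"),
#     outputs="label",
#     allow_flagging="never",
# )
# gr.TabbedInterface([app, url_app], ["Upload", "URL"]).launch()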