Manith Marapperuma
committed on
Update app.py
app.py
CHANGED
@@ -4,114 +4,75 @@ import numpy as np
 from PIL import Image
 from mtcnn import MTCNN
 import cv2
-import os
-
-import subprocess
-
-# Define the package you want to install
-package_name = "opencv-python"
-package_name1 = "mtcnn"
-
-# Run the pip install command within your Python script
-subprocess.call(['pip', 'install', package_name])
-subprocess.call(['pip', 'install', package_name1])

 # Load the model (ensure correct path for loading)
 model = tf.keras.models.load_model('oily_dry.h5')

 # Load the MTCNN face detection model
 mtcnn = MTCNN()

-def detect_and_process_skin(image_path, output_folder):
-    """Detects faces in an image, crops the skin region, and saves it to the output folder."""
-    # Load the image
-    img = cv2.imread(image_path)
-    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

     # Detect faces in the image
     detections = mtcnn.detect_faces(img_rgb)

-    # Check if any faces were detected
     if detections:
         x, y, width, height = detections[0]['box']

         # Crop the face region
-        face_img = img[y:y+height, x:x+width]

-        # Convert the cropped face to a PIL Image
         pil_img = Image.fromarray(cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB))

-
-        output_path = os.path.join(output_folder, os.path.basename(image_path))
-        cv2.imwrite(output_path, cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR))
-
-        return output_path
-
     else:
-
-
-def classify_image(image_path):
-    """Loads, preprocesses, and classifies the image."""
-    # Process the image using face detection and background removal
-    processed_image_path = detect_and_process_skin(image_path, "processed_images")

-
-
-

-
-
-

-
-

-
-        predicted_class = np.argmax(predictions)
-        percentages = predictions[0] * 100
-        dry_percentage, normal_percentage, oily_percentage = percentages
-
-        # Ensure percentages are within 0-100
-        dry_percentage = max(0, min(dry_percentage, 100))
-        oily_percentage = max(0, min(oily_percentage, 100))
-        normal_percentage = max(0, min(normal_percentage, 100))
-
-        return predicted_class, dry_percentage, oily_percentage, normal_percentage
-    else:
-        return None, None, None, None  # Return Nones if no face was detected

 def app():
-    st.title("Oily/Dry Skin Level Predictor")
-    st.write("Coded by Manith Jayaba")

-    st.write("This app can measure the oiliness and dryness of your skin")
-
-    # Get the image file
     image_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

-    # Classify and display the result
     if image_file is not None:
-
-
-
-
-        predicted_class, dry_percentage, oily_percentage, normal_percentage = classify_image(image_path)
-
-        if predicted_class is not None and os.path.exists(os.path.join("processed_images", image_path)):
-            st.image(os.path.join("processed_images", image_path), width=250)

-
-
-
-

-
-            st.image(image_path, width=250)
-            st.error("No face detected in the image. Results can be inaccurate")

-
-
-

 if __name__ == "__main__":
     app()
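The removed block above installed opencv-python and mtcnn at runtime with subprocess.call; on Hugging Face Spaces these dependencies are normally declared in requirements.txt rather than installed from inside app.py. If a runtime fallback were still wanted, a minimal sketch (not part of this commit; ensure_package is a hypothetical helper) would install through the running interpreter only when the import is missing:

import importlib
import subprocess
import sys

def ensure_package(pip_name, module_name=None):
    """Hypothetical helper: install pip_name only if its module cannot be imported."""
    module_name = module_name or pip_name
    try:
        importlib.import_module(module_name)
    except ImportError:
        # Use the current interpreter so the package lands in the active environment.
        subprocess.check_call([sys.executable, "-m", "pip", "install", pip_name])

# Example: ensure_package("opencv-python", module_name="cv2"); ensure_package("mtcnn")

The updated side of the diff follows.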
 from PIL import Image
 from mtcnn import MTCNN
 import cv2
+import io

 # Load the model (ensure correct path for loading)
+# For Hugging Face Spaces, ensure the model file is included in the repository or loaded from a URL
 model = tf.keras.models.load_model('oily_dry.h5')

 # Load the MTCNN face detection model
 mtcnn = MTCNN()

+def detect_and_process_skin(image, output_folder=None):
+    """Detects faces in an image, crops the skin region, and prepares it for classification."""
+    # Convert PIL Image to numpy array
+    img_np = np.array(image)
+    img_rgb = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)

     # Detect faces in the image
     detections = mtcnn.detect_faces(img_rgb)

     if detections:
         x, y, width, height = detections[0]['box']

         # Crop the face region
+        face_img = img_np[y:y+height, x:x+width]

+        # Convert cropped face to PIL Image for compatibility with model preprocessing
         pil_img = Image.fromarray(cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB))

+        return pil_img
     else:
+        # Return the original image if no face was detected
+        return image

+def classify_image(image):
+    """Classifies the image after processing."""
+    processed_image = detect_and_process_skin(image)

+    # Resize and preprocess the image for the model
+    processed_image = processed_image.resize((224, 224))
+    image_array = tf.keras.preprocessing.image.img_to_array(processed_image) / 255.0
+    image_array = np.expand_dims(image_array, axis=0)

+    # Classify the image
+    predictions = model.predict(image_array)
+    predicted_class = np.argmax(predictions)
+    percentages = predictions[0] * 100

+    return predicted_class, *percentages

 def app():
+    st.title("Oily/Dry Skin Level Predictor")

     image_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

     if image_file is not None:
+        # Convert the file to an image
+        image = Image.open(image_file)
+        predicted_class, dry_percentage, oily_percentage, normal_percentage = classify_image(image)

+        # Convert processed image to bytes for display
+        processed_image_bytes = io.BytesIO()
+        image.save(processed_image_bytes, format='JPEG')
+        processed_image_bytes.seek(0)

+        st.image(processed_image_bytes, width=250)

+        # Display the classification results
+        st.write(f"Dry Skin: {dry_percentage:.2f}%")
+        st.write(f"Oily Skin: {oily_percentage:.2f}%")
+        st.write(f"Normal Skin: {normal_percentage:.2f}%")

 if __name__ == "__main__":
     app()
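The new comment near the top of the file notes that on Hugging Face Spaces the oily_dry.h5 model must either be included in the repository or loaded from a URL. A minimal sketch of the remote option using huggingface_hub, assuming the weights live in a separate model repo; the repo_id below is a placeholder, not something this commit specifies:

# Sketch only: fetch oily_dry.h5 from the Hub instead of relying on a local copy.
# "your-username/oily-dry-model" is a placeholder repo id, not confirmed by this commit.
import tensorflow as tf
from huggingface_hub import hf_hub_download

model_path = hf_hub_download(repo_id="your-username/oily-dry-model", filename="oily_dry.h5")
model = tf.keras.models.load_model(model_path)

hf_hub_download caches the file locally and returns its path, so tf.keras.models.load_model can use it exactly as it uses the hard-coded 'oily_dry.h5' path in the committed code.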