import gradio as gr
import cv2
import math
import numpy as np
import os
import mediapipe as mp
import predict
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
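
# Gradio demo: segments hair with MediaPipe's hair_segmenter model and maps the
# score returned by predict.predict() onto the Norwood hair-loss scale.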
# Target height and width used by resize_and_show when displaying images
DESIRED_HEIGHT = 480
DESIRED_WIDTH = 480
# Resizes an image for display and shows it briefly with OpenCV
# (debug helper; only referenced from the commented-out call below)
def resize_and_show(image):
    h, w = image.shape[:2]
    if h < w:
        img = cv2.resize(image, (DESIRED_WIDTH, math.floor(h / (w / DESIRED_WIDTH))))
    else:
        img = cv2.resize(image, (math.floor(w / (h / DESIRED_HEIGHT)), DESIRED_HEIGHT))
    cv2.imshow('color', img)
    cv2.waitKey(1000)
    cv2.destroyAllWindows()
def segmentate(filepath):
    BG_COLOR = (192, 192, 192)  # gray
    MASK_COLOR = (255, 255, 255)  # white

    # Create the options that will be used for ImageSegmenter
    base_options = python.BaseOptions(model_asset_path='./hair_segmenter.tflite')
    options = vision.ImageSegmenterOptions(base_options=base_options, output_category_mask=True)

    # Create the image segmenter
    with vision.ImageSegmenter.create_from_options(options) as segmenter:
        # Create the MediaPipe image that will be segmented
        print(filepath)
        image = mp.Image.create_from_file(filepath)

        # Retrieve the category mask for the segmented image
        # (one class index per pixel, 0 being the background)
        segmentation_result = segmenter.segment(image)
        category_mask = segmentation_result.category_mask

        # Generate solid color images for showing the output segmentation mask
        image_data = image.numpy_view()
        fg_image = np.zeros(image_data.shape, dtype=np.uint8)
        fg_image[:] = MASK_COLOR
        bg_image = np.zeros(image_data.shape, dtype=np.uint8)
        bg_image[:] = BG_COLOR
        # Pixels with a nonzero category (i.e. hair) become white, the rest gray
        condition = np.stack((category_mask.numpy_view(),) * 3, axis=-1) > 0.2
        output_image = np.where(condition, fg_image, bg_image)
        # print(f'Segmentation mask of {name}:')
        # resize_and_show(output_image)
        # Classifier score from the companion predict module
        prediction = predict.predict(filepath)[0][0]
        print(prediction)
        # Thresholds that bin the score into the seven Norwood stages
        limits = [0.002, 0.1, 0.4, 0.95, 0.97, 0.991, 1]
        print(np.where(prediction < limits)[0][0])
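        # Example of the binning (illustrative score): for prediction = 0.05,
        # 0.05 < 0.002 is False but 0.05 < 0.1 is True, so the first matching
        # index is 1 and the stage returned below is 1 + 1 = 2.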
"""
img = cv2.cvtColor(output_image, cv2.COLOR_BGR2GRAY)
# print(np.unique(img))
_, thresh = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY_INV)
# plt.imshow(thresh, cmap='gray')
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
im = cv2.drawContours(img, contours, -1, (0,255,0), 3)
"""
        # Norwood stage (1-7) and the hair segmentation visualization
        return np.where(prediction < limits)[0][0] + 1, output_image
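
# segmentate can also be called directly (hypothetical path shown):
#   stage, mask = segmentate('examples/sample.jpg')
# where stage is the estimated Norwood stage and mask the rendered hair mask.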
# GUI
title = 'Hair loss prediction'
description = 'Automatic prediction of the Norwood scale stage. For more information on the scale, click [here](https://www.medicalnewstoday.com/articles/327001#stages).'
# One value per example row, matching the single image input
examples = [[f'examples/{name}'] for name in sorted(os.listdir('examples'))]
iface = gr.Interface(
    fn=segmentate,
    inputs=[
        gr.Image(type='filepath', label='Input Image')
    ],
    outputs=[
        gr.Number(label='Norwood Scale'), gr.Image(label='Hair Segmentation')
    ],
    examples=examples,
    allow_flagging='never',
    cache_examples=False,
    title=title,
    description=description
)
iface.launch(share=True)
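
# Files this script expects alongside it (as referenced above):
#   hair_segmenter.tflite - MediaPipe hair segmentation model
#   predict.py            - module providing predict(filepath), used for the hair-loss score
#   examples/             - sample images listed in the Gradio interface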