|
import gradio as gr |
|
import tensorflow as tf |
|
import numpy as np |
|
import cv2 |
|
import matplotlib.pyplot as plt |
|
|
|
|
|
|
|
# Directory containing the trained Keras segmentation model (SavedModel format).
model_path = "saved_model"

# Loaded once at import time so every request reuses the same model instance.
segmentation_model = tf.keras.models.load_model(model_path)

# Spatial size the model expects; inputs are resized to this before inference.
# NOTE(review): assumed to match the training resolution — confirm against
# the training pipeline.
TARGET_SHAPE = (256, 256)
|
|
|
|
|
def segment_image(img: np.ndarray) -> np.ndarray:
    """Predict a per-pixel class mask for ``img`` at its original resolution.

    Args:
        img: Input image as a NumPy array — grayscale ``(H, W)``,
            RGB ``(H, W, 3)``, or RGBA ``(H, W, 4)``.

    Returns:
        ``(H, W)`` ``uint8`` array of class ids (argmax over the model's
        per-class output channels), resized back to the input's resolution.
    """
    original_shape = img.shape

    # The model expects 3-channel input: promote grayscale, drop alpha.
    if img.ndim == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    elif img.shape[-1] == 4:
        img = img[..., :3]

    # cv2.resize takes (width, height); TARGET_SHAPE is square so order
    # is irrelevant here.
    img = cv2.resize(img, TARGET_SHAPE)

    # Add a batch dimension, run inference, collapse back to a label map.
    batch = np.expand_dims(img, axis=0)
    prediction = segmentation_model.predict(batch)
    prediction = np.squeeze(prediction, axis=0)
    mask = np.argmax(prediction, axis=-1).astype(np.uint8)

    # BUG FIX: a label mask must be resized with nearest-neighbour
    # interpolation — the default bilinear blend averages integer class
    # ids and invents invalid labels along class boundaries.
    mask = cv2.resize(
        mask,
        (original_shape[1], original_shape[0]),
        interpolation=cv2.INTER_NEAREST,
    )

    return mask
|
|
|
def overlay_mask(img, mask, alpha=0.5):
    """Blend a color-coded class mask onto ``img``.

    Args:
        img: ``uint8`` image, grayscale ``(H, W)`` or RGB ``(H, W, 3)``.
        mask: ``(H, W)`` integer class-id map (classes 0, 1, 2).
        alpha: Overlay opacity in ``[0, 1]``; ``1 - alpha`` weights the image.

    Returns:
        ``(H, W, 3)`` ``uint8`` blended image.
    """
    # Per-class overlay colors: 0 = background, 1 = pet, 2 = boundary.
    colors = {
        0: [255, 0, 0],
        1: [0, 255, 0],
        2: [0, 0, 255],
    }

    # BUG FIX: transform() passes the *original* image, which is 2-D for
    # grayscale uploads; the color assignment below then fails. Promote
    # grayscale to 3 channels first.
    if img.ndim == 2:
        img = np.stack([img] * 3, axis=-1)

    # Paint each class's color into a same-shaped overlay canvas.
    overlay = np.zeros_like(img)
    for class_id, color in colors.items():
        overlay[mask == class_id] = color

    # Weighted blend in float, then round and clamp back to uint8
    # (pure-NumPy equivalent of cv2.addWeighted).
    blended = img.astype(np.float64) * (1.0 - alpha) + overlay.astype(np.float64) * alpha
    return np.clip(np.rint(blended), 0, 255).astype(np.uint8)
|
|
|
|
|
|
|
def transform(img):
    """Segment ``img`` and return it blended with the class-color overlay."""
    predicted_mask = segment_image(img)
    return overlay_mask(img, predicted_mask)
|
|
|
|
|
|
|
# Gradio UI: one image in, the blended segmentation overlay out.
app = gr.Interface(
    fn=transform,
    inputs=gr.Image(label="Input Image"),
    outputs=gr.Image(label="Image with Segmentation Overlay"),
    title="Image Segmentation on Pet Images",
    description="Segment image of a pet animal into three classes: background, pet, and boundary.",
    # Sample inputs shown below the interface; paths are relative to the
    # working directory the app is launched from.
    examples=[
        "example_images/img1.jpg",
        "example_images/img2.jpg",
        "example_images/img3.jpg"
    ]
)

# Start the local Gradio server (blocking call).
app.launch()