Update app.py
app.py CHANGED
@@ -1,40 +1,41 @@
-import gradio as gr
 import cv2
 import torch
 import numpy as np
 from PIL import Image
 from torchvision import transforms
 from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
+import matplotlib.pyplot as plt
 # import segmentation_models_pytorch as smp

-def load_model(model_type):
-    # Model loading simplified for clarity
-    model = sam_model_registry[model_type](checkpoint=f"sam_{model_type}_checkpoint.pth")
-    model.to(device='cuda')
-    return SamAutomaticMaskGenerator(model)

-
-
-
-
-
-
-
-
-
+
+# image= cv2.imread('image_4.png', cv2.IMREAD_COLOR)
+def get_masks(model_type, image):
+    if model_type == 'vit_h':
+        sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
+
+    masks_h = mask_generator_h.generate(image)
+    if model_type == 'vit_b':
+        sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b_01ec64.pth")
+
+    if model_type == 'vit_l':
+        sam = sam_model_registry["vit_l"](checkpoint="sam_vit_l_0b3195.pth")

-
-
+    mask_generator = SamAutomaticMaskGenerator(sam)
+    masks = mask_generator.generate(image)
+    for i, mask_data in enumerate(masks):
         mask = mask_data['segmentation']
-
-
+        color = colors[i]
+        composite_image[mask] = (color[:3] * 255).astype(np.uint8)  # Apply color to mask

-    #
-
-    return
+    # Combine original image with the composite mask image
+    overlayed_image = (composite_image * 0.5 + image_cv.squeeze().permute(1, 2, 0).cpu().numpy() * 0.5).astype(np.uint8)
+    return overlayed_image
+
+

 iface = gr.Interface(
-    fn=
+    fn=get_masks,
     inputs=[gr.inputs.Image(type="pil"), gr.inputs.Dropdown(['vit_h', 'vit_b', 'vit_l'], label="Model Type")],
     outputs=gr.outputs.Image(type="pil"),
     title="SAM Model Segmentation and Classification",
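As committed, get_masks still has several loose ends: the removed `import gradio as gr` is still needed by the `gr.Interface(...)` call, the stray `masks_h = mask_generator_h.generate(image)` line references a generator that is never created, `colors`, `composite_image`, and `image_cv` are never defined, and the signature `get_masks(model_type, image)` receives its arguments in the opposite order from the `inputs=` list (Gradio passes the image first, then the dropdown value). Below is a minimal corrected sketch of the same handler, assuming the three checkpoint files sit next to app.py; the CHECKPOINTS dict name, the HSV colormap, and the 50/50 blend weights are illustrative choices, and the close of the `Interface(...)` call plus `launch()` fall outside the hunk shown above, so they are reconstructed here.

import gradio as gr
import numpy as np
import torch
import matplotlib.pyplot as plt
from PIL import Image
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry

# Checkpoint filenames per model type, taken from the diff above.
CHECKPOINTS = {
    "vit_h": "sam_vit_h_4b8939.pth",
    "vit_b": "sam_vit_b_01ec64.pth",
    "vit_l": "sam_vit_l_0b3195.pth",
}

def get_masks(image, model_type):
    # Gradio passes the PIL image first; SAM's generator expects an HxWx3 uint8 RGB array.
    image_np = np.array(image.convert("RGB"))

    # Build a generator from the selected checkpoint, on GPU when available.
    sam = sam_model_registry[model_type](checkpoint=CHECKPOINTS[model_type])
    sam.to("cuda" if torch.cuda.is_available() else "cpu")
    mask_generator = SamAutomaticMaskGenerator(sam)
    masks = mask_generator.generate(image_np)

    # Paint each mask a distinct colormap color into an initially black composite.
    composite_image = np.zeros_like(image_np)
    colors = plt.cm.hsv(np.linspace(0, 1, max(len(masks), 1)))
    for i, mask_data in enumerate(masks):
        mask = mask_data["segmentation"]  # boolean HxW array
        composite_image[mask] = (colors[i][:3] * 255).astype(np.uint8)

    # Blend the colored masks 50/50 with the original image.
    overlayed_image = (0.5 * composite_image + 0.5 * image_np).astype(np.uint8)
    return Image.fromarray(overlayed_image)

iface = gr.Interface(
    fn=get_masks,
    inputs=[gr.inputs.Image(type="pil"), gr.inputs.Dropdown(['vit_h', 'vit_b', 'vit_l'], label="Model Type")],
    outputs=gr.outputs.Image(type="pil"),
    title="SAM Model Segmentation and Classification",
)
iface.launch()

Loading the checkpoint inside the handler keeps the sketch self-contained but reloads the model on every request; caching one mask generator per model type, roughly what the removed load_model was doing, would be the natural next step for a Space.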