"""Run a Gradio demo of the CaR model on a single image."""

import numpy as np
import argparse
from functools import reduce
import PIL.Image as Image
import torch
from modeling.model.car import CaR
from utils.utils import Config, load_yaml
import matplotlib.pyplot as plt
import colorsys
from modeling.post_process.post_process import match_masks, generate_masks_from_sam
from sam.sam import SAMPipeline
from sam.utils import build_sam_config
import random
import gradio as gr

# set random seed
random.seed(15)
np.random.seed(0)
torch.manual_seed(0)


CFG_PATH = "configs/demo/pokemon.yaml"


def generate_distinct_colors(n):
    colors = []
    # Random hue offset in [0, 1) shared by every color
    random_color_bias = random.random()
    for i in range(n):
        hue = float(i) / n
        hue += random_color_bias
        hue = hue % 1.0
        rgb = colorsys.hsv_to_rgb(hue, 1.0, 1.0)
        # Convert RGB values from [0, 1] range to [0, 255]
        colors.append(tuple(int(val * 255) for val in rgb))
    return colors
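
# Hedged usage sketch: generate_distinct_colors(4) returns four (R, G, B)
# tuples in [0, 255], evenly spaced in hue (plus the shared random offset),
# at full saturation and value:
#   colors = generate_distinct_colors(4)
#   assert len(colors) == 4 and all(0 <= v <= 255 for c in colors for v in c)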


def overlap_masks(masks):
    """
    Overlap masks to generate a single mask for visualization.

    Parameters:
    - masks: list of np.arrays of shape (H, W) representing binary masks for each class

    Returns:
    - overlap_mask: list of np.array of shape (H, W) that have no overlaps
    """
    overlap_mask = torch.zeros_like(masks[0])
    for mask_idx, mask in enumerate(masks):
        overlap_mask[mask > 0] = mask_idx + 1

    clean_masks = [overlap_mask == mask_idx +
                   1 for mask_idx in range(len(masks))]
    clean_masks = torch.stack(clean_masks, dim=0)

    return clean_masks
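
# Minimal sketch of the precedence rule above, with two hypothetical 2x2
# masks: pixels claimed by both go to the later mask in the list.
#   m0 = torch.tensor([[1, 1], [0, 0]])
#   m1 = torch.tensor([[0, 1], [0, 1]])
#   clean = overlap_masks([m0, m1])
#   # clean[0] keeps only (0, 0); clean[1] claims (0, 1) and (1, 1)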


def visualize_segmentation(image,
                           masks,
                           class_names,
                           alpha=0.7,
                           y_list=None,
                           x_list=None):
    """
    Visualize segmentation masks on an image.

    Parameters:
    - image: np.array of shape (H, W, 3) representing the RGB image
    - masks: list of np.arrays of shape (H, W) representing binary masks for each class
    - class_names: list of strings representing names of each class
    - alpha: float, transparency level of masks on the image

    Returns:
    - visualization: plt.figure object
    """
    # Create a figure and axis
    fig, ax = plt.subplots(1, figsize=(12, 9))
    # Display the image
    # Generate distinct colors for each mask
    final_mask = np.zeros(
        (masks.shape[1], masks.shape[2], 3), dtype=np.float32)
    binary_final_mask = np.zeros(
        (masks.shape[1], masks.shape[2]), dtype=np.float32)
    colors = generate_distinct_colors(len(class_names))
    idx = 0
    for mask, color, class_name in zip(masks, colors, class_names):
        # Skip empty masks: they contribute nothing and their centroid is NaN.
        if not np.any(mask):
            idx += 1
            continue
        # Overlay the mask
        final_mask += np.dstack([mask * c for c in color])
        binary_final_mask += mask
        # Find a representative point (e.g., centroid) for placing the label
        if y_list is None or x_list is None:
            y, x = np.argwhere(mask).mean(axis=0)
        else:
            y, x = y_list[idx], x_list[idx]
        ax.text(x, y, class_name, color='white',
                fontsize=22, va='center', ha='center',
                bbox=dict(facecolor='black', alpha=0.7, edgecolor='none'))
        idx += 1

    image[binary_final_mask > 0] = image[binary_final_mask > 0] * (1 - alpha)
    final_image = image + final_mask * alpha
    final_image = final_image.astype(np.uint8)
    plt.subplots_adjust(left=0, right=1, top=1, bottom=0, wspace=0, hspace=0)
    ax.imshow(final_image)
    # Remove axis ticks and labels
    ax.axis('off')
    return fig


def get_sam_masks(cfg,
                  masks,
                  image_path=None,
                  img_sam=None,
                  pipeline=None):
    print("generating sam masks online")
    if img_sam is None and image_path is not None:
        raise ValueError(
            'Please provide either the image path or the image numpy array.')

    mask_tensor, mask_list = generate_masks_from_sam(
        image_path,
        save_path='./',
        pipeline=pipeline,
        img_sam=img_sam,
        visualize=False,
    )
    mask_tensor = mask_tensor.to(masks.device)
    # Only run SAM matching on masks that are not all zero.
    attn_map, mask_ids = [], []
    for mask_id, mask in enumerate(masks):
        if torch.sum(mask) > 0:
            attn_map.append(mask.unsqueeze(0))
            mask_ids.append(mask_id)
    matched_masks = [match_masks(
        mask_tensor,
        attn,
        mask_list,
        iom_thres=cfg.car.iom_thres,
        min_pred_threshold=cfg.sam.min_pred_threshold)
        for attn in attn_map]
    for matched_mask, mask_id in zip(matched_masks, mask_ids):
        sam_masks = np.array([item['segmentation'] for item in matched_mask])
        sam_mask = np.any(sam_masks, axis=0)
        masks[mask_id] = torch.from_numpy(sam_mask).to(masks.device)
    return masks


def load_sam(cfg, device):
    sam_checkpoint, model_type = build_sam_config(cfg)
    pipeline = SAMPipeline(
        sam_checkpoint,
        model_type,
        device=device,
        points_per_side=cfg.sam.points_per_side,
        pred_iou_thresh=cfg.sam.pred_iou_thresh,
        stability_score_thresh=cfg.sam.stability_score_thresh,
        box_nms_thresh=cfg.sam.box_nms_thresh,
    )
    return pipeline


def generate(img,
             class_names,
             clip_thresh,
             mask_thresh,
             confidence_thresh,
             post_process,
             stability_score_thresh,
             box_nms_thresh,
             iom_thres,
             min_pred_threshold):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    cfg = Config(**load_yaml(CFG_PATH))
    cfg.car.clipes_threshold = clip_thresh
    cfg.car.mask_threshold = mask_thresh
    cfg.car.confidence_threshold = confidence_thresh
    cfg.sam.stability_score_thresh = stability_score_thresh
    cfg.sam.box_nms_thresh = box_nms_thresh
    cfg.car.iom_thres = iom_thres
    cfg.sam.min_pred_threshold = min_pred_threshold
    car_model = CaR(cfg,
                    visualize=True,
                    seg_mode='semantic',
                    device=device)

    # Halve the image if its width exceeds 1000 pixels to keep inference fast.
    if img.size[0] > 1000:
        img = img.resize((img.size[0] // 2, img.size[1] // 2))

    y_list, x_list = None, None
    # Strip whitespace so inputs like "cat, dog" parse cleanly.
    class_names = [name.strip() for name in class_names.split(',')]
    sentences = class_names

    pseudo_masks, _ = car_model(img, sentences)

    if post_process == 'SAM':
        pipeline = load_sam(cfg, device)
        pseudo_masks = get_sam_masks(
            cfg,
            pseudo_masks,
            image_path=None,
            img_sam=np.array(img),
            pipeline=pipeline)
        pseudo_masks = overlap_masks(pseudo_masks)

    # visualize segmentation masks
    demo_fig = visualize_segmentation(np.array(img),
                                      pseudo_masks.detach().cpu().numpy(),
                                      class_names,
                                      y_list=y_list,
                                      x_list=x_list)

    # Convert the demo figure to a PIL image via the public Agg buffer
    # rather than the private canvas.renderer._renderer attribute.
    demo_fig.canvas.draw()
    demo_img = np.asarray(demo_fig.canvas.buffer_rgba())
    demo_img = Image.fromarray(demo_img)
    return demo_img
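
# Hedged usage sketch (bypassing the Gradio UI): `generate` accepts the same
# arguments the interface passes, e.g. with the bundled example image:
#   img = Image.open("demo/pokemon.jpg")
#   seg = generate(img, "Pikachu,Eevee", 0.6, 0.6, 0, "SAM",
#                  0.95, 0.7, 0.6, 0.01)
#   seg.save("pokemon_demo.png")  # "pokemon_demo.png" is a hypothetical path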


if __name__ == "__main__":
    parser = argparse.ArgumentParser('car')
    parser.add_argument("--cfg-path",
                        default='configs/local_car.yaml',
                        help="path to configuration file.")
    # Note: --cfg-path is parsed but currently unused; the demo reads CFG_PATH.
    args = parser.parse_args()

    demo = gr.Interface(generate,
                        inputs=[gr.Image(label="upload an image", type="pil"),
                                "text",
                                gr.Slider(label="clip thresh",
                                          minimum=0,
                                          maximum=1,
                                          value=0.4,
                                          step=0.1,
                                          info="the threshold for clip-es adversarial heatmap clipping"),
                                gr.Slider(label="mask thresh",
                                          minimum=0,
                                          maximum=1,
                                          value=0.6,
                                          step=0.1,
                                          info="the binariation threshold for the mask to generate visual prompt"),
                                gr.Slider(label="confidence thresh",
                                          minimum=0,
                                          maximum=1,
                                          value=0,
                                          step=0.1,
                                          info="the threshold for filtering the proposed classes"),
                                gr.Radio(["CRF", "SAM"], label="post process",
                                         value="CRF", info="choose the post process method"),
                                gr.Slider(label="stability score thresh for SAM mask proposal \n(only when SAM is chosen for post process)",
                                          minimum=0,
                                          maximum=1,
                                          value=0.95,
                                          step=0.1),
                                gr.Slider(label="box nms thresh for SAM mask proposal \n(only when SAM is chosen for post process)",
                                          minimum=0, maximum=1, value=0.7, step=0.1),
                                gr.Slider(label="intersection over mask threshold for SAM mask proposal \n(only when SAM is chosen for post process)",
                                          minimum=0, maximum=1, value=0.5, step=0.1),
                                gr.Slider(label="minimum prediction threshold for SAM mask proposal \n(only when SAM is chosen for post process)", minimum=0, maximum=1, value=0.03, step=0.01)],
                        outputs="image",
                        title="CLIP as RNN: Segment Countless Visual Concepts without Training Endeavor",
                        description="This is the official demo for CLIP as RNN. Please upload an image and type in the class names (connected by ',' e.g. cat,dog,human) you want to segment. The model will generate the segmentation masks for the input image. You can also adjust the clip thresh, mask thresh and confidence thresh to get better results.",
                        examples=[["demo/pokemon.jpg", "Pikachu,Eevee", 0.6, 0.6, 0, "SAM", 0.95, 0.7, 0.6, 0.01],
                                  ["demo/Eiffel_tower.jpg", "Eiffel Tower",
                                   0.6, 0.6, 0, "SAM", 0.95, 0.7, 0.6, 0.01],
                                  ["demo/superhero.jpeg", "Batman,Superman,Wonder Woman,Flash,Cyborg",
                                   0.6, 0.6, 0, "SAM", 0.89, 0.65, 0.5, 0.03],
                                  ])
    demo.launch(share=True)
