kadirnar committed on
Commit 45b1530 · 1 Parent(s): f0573ed

update main codes

README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: Metaseg Webui
+title: Segment-Anything-Video
 emoji: 🐨
 colorFrom: blue
 colorTo: yellow
demo.py DELETED
@@ -1,110 +0,0 @@
1
- import cv2
2
- import numpy as np
3
- import torch
4
-
5
- from metaseg import SamAutomaticMaskGenerator, sam_model_registry
6
- from metaseg.utils.file import download_model
7
-
8
-
9
- class SegAutoMaskGenerator:
10
- def __init__(self):
11
- self.model = None
12
- self.device = "cuda" if torch.cuda.is_available() else "cpu"
13
-
14
- def load_model(self, model_type):
15
- if self.model is None:
16
- model_path = download_model(model_type)
17
- model = sam_model_registry[model_type](checkpoint=model_path)
18
- model.to(device=self.device)
19
- self.model = model
20
-
21
- return self.model
22
-
23
- def load_image(self, image_path):
24
- image = cv2.imread(image_path)
25
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
26
- return image
27
-
28
- def load_video(self, video_path):
29
- cap = cv2.VideoCapture(video_path)
30
- frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
31
- frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
32
- fourcc = cv2.VideoWriter_fourcc(*"XVID")
33
- fps = int(cap.get(cv2.CAP_PROP_FPS))
34
- out = cv2.VideoWriter("output.mp4", fourcc, fps, (frame_width, frame_height))
35
-
36
- return cap, out
37
-
38
- def predict(self, frame, model_type, points_per_side, points_per_batch):
39
- model = self.load_model(model_type)
40
- mask_generator = SamAutomaticMaskGenerator(
41
- model, points_per_side=points_per_side, points_per_batch=points_per_batch
42
- )
43
- masks = mask_generator.generate(frame)
44
-
45
- return frame, masks
46
-
47
- def save_image(self, source, model_type, points_per_side, points_per_batch):
48
- read_image = self.load_image(source)
49
- image, anns = self.predict(read_image, model_type, points_per_side, points_per_batch)
50
- if len(anns) == 0:
51
- return
52
-
53
- sorted_anns = sorted(anns, key=(lambda x: x["area"]), reverse=True)
54
- mask_image = np.zeros((anns[0]["segmentation"].shape[0], anns[0]["segmentation"].shape[1], 3), dtype=np.uint8)
55
- colors = np.random.randint(0, 255, size=(256, 3), dtype=np.uint8)
56
- for i, ann in enumerate(sorted_anns):
57
- m = ann["segmentation"]
58
- img = np.ones((m.shape[0], m.shape[1], 3), dtype=np.uint8)
59
- color = colors[i % 256]
60
- for i in range(3):
61
- img[:, :, 0] = color[0]
62
- img[:, :, 1] = color[1]
63
- img[:, :, 2] = color[2]
64
- img = cv2.bitwise_and(img, img, mask=m.astype(np.uint8))
65
- img = cv2.addWeighted(img, 0.35, np.zeros_like(img), 0.65, 0)
66
- mask_image = cv2.add(mask_image, img)
67
-
68
- combined_mask = cv2.add(image, mask_image)
69
- cv2.imwrite("output.jpg", combined_mask)
70
-
71
- return "output.jpg"
72
-
73
- def save_video(self, source, model_type, points_per_side, points_per_batch, min_area, max_area):
74
- cap, out = self.load_video(source)
75
- colors = np.random.randint(0, 255, size=(256, 3), dtype=np.uint8)
76
-
77
- while True:
78
- ret, frame = cap.read()
79
- if not ret:
80
- break
81
-
82
- image, anns = self.predict(frame, model_type, points_per_side, points_per_batch)
83
- if len(anns) == 0:
84
- continue
85
-
86
- sorted_anns = sorted(anns, key=(lambda x: x["area"]), reverse=True)
87
- mask_image = np.zeros(
88
- (anns[0]["segmentation"].shape[0], anns[0]["segmentation"].shape[1], 3), dtype=np.uint8
89
- )
90
-
91
- for i, ann in enumerate(sorted_anns):
92
- if max_area > ann["area"] > min_area:
93
- m = ann["segmentation"]
94
- color = colors[i % 256]  # Use a different color for each object
95
- img = np.zeros((m.shape[0], m.shape[1], 3), dtype=np.uint8)
96
- img[:, :, 0] = color[0]
97
- img[:, :, 1] = color[1]
98
- img[:, :, 2] = color[2]
99
- img = cv2.bitwise_and(img, img, mask=m.astype(np.uint8))
100
- img = cv2.addWeighted(img, 0.35, np.zeros_like(img), 0.65, 0)
101
- mask_image = cv2.add(mask_image, img)
102
-
103
- combined_mask = cv2.add(frame, mask_image)
104
- out.write(combined_mask)
105
-
106
- out.release()
107
- cap.release()
108
- cv2.destroyAllWindows()
109
-
110
- return "output.mp4"
metaseg/__init__.py DELETED
@@ -1,12 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from metaseg.automatic_mask_generator import SamAutomaticMaskGenerator
8
- from metaseg.build_sam import build_sam, build_sam_vit_b, build_sam_vit_h, build_sam_vit_l, sam_model_registry
9
- from metaseg.demo import SegAutoMaskGenerator
10
- from metaseg.predictor import SamPredictor
11
-
12
- __version__ = "0.2.3"
metaseg/app.py DELETED
@@ -1,121 +0,0 @@
1
- import gradio as gr
2
-
3
- from metaseg import SegAutoMaskGenerator
4
-
5
-
6
- def image_app():
7
- with gr.Blocks():
8
- with gr.Row():
9
- with gr.Column():
10
- seg_automask_image_file = gr.Image(type="filepath").style(height=260)
11
-
12
- with gr.Row():
13
- with gr.Column():
14
- seg_automask_image_model_type = gr.Dropdown(
15
- choices=[
16
- "vit_h",
17
- "vit_l",
18
- "vit_b",
19
- ],
20
- value="vit_l",
21
- label="Model Type",
22
- )
23
-
24
- seg_automask_image_points_per_side = gr.Slider(
25
- minimum=0,
26
- maximum=32,
27
- step=2,
28
- value=16,
29
- label="Points per Side",
30
- )
31
-
32
- seg_automask_image_points_per_batch = gr.Slider(
33
- minimum=0,
34
- maximum=64,
35
- step=2,
36
- value=64,
37
- label="Points per Batch",
38
- )
39
-
40
- seg_automask_image_predict = gr.Button(value="Generator")
41
-
42
- with gr.Column():
43
- output_image = gr.Image()
44
-
45
- seg_automask_image_predict.click(
46
- fn=SegAutoMaskGenerator().save_image,
47
- inputs=[
48
- seg_automask_image_file,
49
- seg_automask_image_model_type,
50
- seg_automask_image_points_per_side,
51
- seg_automask_image_points_per_batch,
52
- ],
53
- outputs=[output_image],
54
- )
55
-
56
-
57
- def video_app():
58
- with gr.Blocks():
59
- with gr.Row():
60
- with gr.Column():
61
- seg_automask_video_file = gr.Video().style(height=260)
62
-
63
- with gr.Row():
64
- with gr.Column():
65
- seg_automask_video_model_type = gr.Dropdown(
66
- choices=[
67
- "vit_h",
68
- "vit_l",
69
- "vit_b",
70
- ],
71
- value="vit_l",
72
- label="Model Type",
73
- )
74
-
75
- seg_automask_video_points_per_side = gr.Slider(
76
- minimum=0,
77
- maximum=32,
78
- step=2,
79
- value=16,
80
- label="Points per Side",
81
- )
82
- seg_automask_video_points_per_batch = gr.Slider(
83
- minimum=0,
84
- maximum=64,
85
- step=2,
86
- value=64,
87
- label="Points per Batch",
88
- )
89
-
90
- seg_automask_video_predict = gr.Button(value="Generator")
91
- with gr.Column():
92
- output_video = gr.Video()
93
-
94
- seg_automask_video_predict.click(
95
- fn=SegAutoMaskGenerator().save_image,
96
- inputs=[
97
- seg_automask_video_file,
98
- seg_automask_video_model_type,
99
- seg_automask_video_points_per_side,
100
- seg_automask_video_points_per_batch,
101
- ],
102
- outputs=[output_video],
103
- )
104
-
105
-
106
- def metaseg_app():
107
- app = gr.Blocks()
108
- with app:
109
- with gr.Row():
110
- with gr.Column():
111
- with gr.Tab("Image"):
112
- image_app()
113
- with gr.Tab("Video"):
114
- video_app()
115
-
116
- app.queue(concurrency_count=2)
117
- app.launch(debug=True, enable_queue=True)
118
-
119
-
120
- if __name__ == "__main__":
121
- metaseg_app()
metaseg/automatic_mask_generator.py DELETED
@@ -1,368 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from typing import Any, Dict, List, Optional, Tuple
8
-
9
- import numpy as np
10
- import torch
11
- from torchvision.ops.boxes import batched_nms, box_area # type: ignore
12
-
13
- from metaseg.modeling import Sam
14
- from metaseg.predictor import SamPredictor
15
- from metaseg.utils.amg import (
16
- MaskData,
17
- area_from_rle,
18
- batch_iterator,
19
- batched_mask_to_box,
20
- box_xyxy_to_xywh,
21
- build_all_layer_point_grids,
22
- calculate_stability_score,
23
- coco_encode_rle,
24
- generate_crop_boxes,
25
- is_box_near_crop_edge,
26
- mask_to_rle_pytorch,
27
- remove_small_regions,
28
- rle_to_mask,
29
- uncrop_boxes_xyxy,
30
- uncrop_masks,
31
- uncrop_points,
32
- )
33
-
34
-
35
- class SamAutomaticMaskGenerator:
36
- def __init__(
37
- self,
38
- model: Sam,
39
- points_per_side: Optional[int] = 32,
40
- points_per_batch: int = 64,
41
- pred_iou_thresh: float = 0.88,
42
- stability_score_thresh: float = 0.95,
43
- stability_score_offset: float = 1.0,
44
- box_nms_thresh: float = 0.7,
45
- crop_n_layers: int = 0,
46
- crop_nms_thresh: float = 0.7,
47
- crop_overlap_ratio: float = 512 / 1500,
48
- crop_n_points_downscale_factor: int = 1,
49
- point_grids: Optional[List[np.ndarray]] = None,
50
- min_mask_region_area: int = 0,
51
- output_mode: str = "binary_mask",
52
- ) -> None:
53
- """
54
- Using a SAM model, generates masks for the entire image.
55
- Generates a grid of point prompts over the image, then filters
56
- low quality and duplicate masks. The default settings are chosen
57
- for SAM with a ViT-H backbone.
58
-
59
- Arguments:
60
- model (Sam): The SAM model to use for mask prediction.
61
- points_per_side (int or None): The number of points to be sampled
62
- along one side of the image. The total number of points is
63
- points_per_side**2. If None, 'point_grids' must provide explicit
64
- point sampling.
65
- points_per_batch (int): Sets the number of points run simultaneously
66
- by the model. Higher numbers may be faster but use more GPU memory.
67
- pred_iou_thresh (float): A filtering threshold in [0,1], using the
68
- model's predicted mask quality.
69
- stability_score_thresh (float): A filtering threshold in [0,1], using
70
- the stability of the mask under changes to the cutoff used to binarize
71
- the model's mask predictions.
72
- stability_score_offset (float): The amount to shift the cutoff when
73
- calculated the stability score.
74
- box_nms_thresh (float): The box IoU cutoff used by non-maximal
75
- suppression to filter duplicate masks.
76
- crops_n_layers (int): If >0, mask prediction will be run again on
77
- crops of the image. Sets the number of layers to run, where each
78
- layer has 2**i_layer number of image crops.
79
- crops_nms_thresh (float): The box IoU cutoff used by non-maximal
80
- suppression to filter duplicate masks between different crops.
81
- crop_overlap_ratio (float): Sets the degree to which crops overlap.
82
- In the first crop layer, crops will overlap by this fraction of
83
- the image length. Later layers with more crops scale down this overlap.
84
- crop_n_points_downscale_factor (int): The number of points-per-side
85
- sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
86
- point_grids (list(np.ndarray) or None): A list over explicit grids
87
- of points used for sampling, normalized to [0,1]. The nth grid in the
88
- list is used in the nth crop layer. Exclusive with points_per_side.
89
- min_mask_region_area (int): If >0, postprocessing will be applied
90
- to remove disconnected regions and holes in masks with area smaller
91
- than min_mask_region_area. Requires opencv.
92
- output_mode (str): The form masks are returned in. Can be 'binary_mask',
93
- 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
94
- For large resolutions, 'binary_mask' may consume large amounts of
95
- memory.
96
- """
97
-
98
- assert (points_per_side is None) != (
99
- point_grids is None
100
- ), "Exactly one of points_per_side or point_grid must be provided."
101
- if points_per_side is not None:
102
- self.point_grids = build_all_layer_point_grids(
103
- points_per_side,
104
- crop_n_layers,
105
- crop_n_points_downscale_factor,
106
- )
107
- elif point_grids is not None:
108
- self.point_grids = point_grids
109
- else:
110
- raise ValueError("Can't have both points_per_side and point_grid be None.")
111
-
112
- assert output_mode in [
113
- "binary_mask",
114
- "uncompressed_rle",
115
- "coco_rle",
116
- ], f"Unknown output_mode {output_mode}."
117
- if output_mode == "coco_rle":
118
- from pycocotools import mask as mask_utils # type: ignore # noqa: F401
119
-
120
- if min_mask_region_area > 0:
121
- import cv2 # type: ignore # noqa: F401
122
-
123
- self.predictor = SamPredictor(model)
124
- self.points_per_batch = points_per_batch
125
- self.pred_iou_thresh = pred_iou_thresh
126
- self.stability_score_thresh = stability_score_thresh
127
- self.stability_score_offset = stability_score_offset
128
- self.box_nms_thresh = box_nms_thresh
129
- self.crop_n_layers = crop_n_layers
130
- self.crop_nms_thresh = crop_nms_thresh
131
- self.crop_overlap_ratio = crop_overlap_ratio
132
- self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
133
- self.min_mask_region_area = min_mask_region_area
134
- self.output_mode = output_mode
135
-
136
- @torch.no_grad()
137
- def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
138
- """
139
- Generates masks for the given image.
140
-
141
- Arguments:
142
- image (np.ndarray): The image to generate masks for, in HWC uint8 format.
143
-
144
- Returns:
145
- list(dict(str, any)): A list over records for masks. Each record is
146
- a dict containing the following keys:
147
- segmentation (dict(str, any) or np.ndarray): The mask. If
148
- output_mode='binary_mask', is an array of shape HW. Otherwise,
149
- is a dictionary containing the RLE.
150
- bbox (list(float)): The box around the mask, in XYWH format.
151
- area (int): The area in pixels of the mask.
152
- predicted_iou (float): The model's own prediction of the mask's
153
- quality. This is filtered by the pred_iou_thresh parameter.
154
- point_coords (list(list(float))): The point coordinates input
155
- to the model to generate this mask.
156
- stability_score (float): A measure of the mask's quality. This
157
- is filtered on using the stability_score_thresh parameter.
158
- crop_box (list(float)): The crop of the image used to generate
159
- the mask, given in XYWH format.
160
- """
161
-
162
- # Generate masks
163
- mask_data = self._generate_masks(image)
164
-
165
- # Filter small disconnected regions and holes in masks
166
- if self.min_mask_region_area > 0:
167
- mask_data = self.postprocess_small_regions(
168
- mask_data,
169
- self.min_mask_region_area,
170
- max(self.box_nms_thresh, self.crop_nms_thresh),
171
- )
172
-
173
- # Encode masks
174
- if self.output_mode == "coco_rle":
175
- mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]]
176
- elif self.output_mode == "binary_mask":
177
- mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
178
- else:
179
- mask_data["segmentations"] = mask_data["rles"]
180
-
181
- # Write mask records
182
- curr_anns = []
183
- for idx in range(len(mask_data["segmentations"])):
184
- ann = {
185
- "segmentation": mask_data["segmentations"][idx],
186
- "area": area_from_rle(mask_data["rles"][idx]),
187
- "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
188
- "predicted_iou": mask_data["iou_preds"][idx].item(),
189
- "point_coords": [mask_data["points"][idx].tolist()],
190
- "stability_score": mask_data["stability_score"][idx].item(),
191
- "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
192
- }
193
- curr_anns.append(ann)
194
-
195
- return curr_anns
196
-
197
- def _generate_masks(self, image: np.ndarray) -> MaskData:
198
- orig_size = image.shape[:2]
199
- crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio)
200
-
201
- # Iterate over image crops
202
- data = MaskData()
203
- for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
204
- crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
205
- data.cat(crop_data)
206
-
207
- # Remove duplicate masks between crops
208
- if len(crop_boxes) > 1:
209
- # Prefer masks from smaller crops
210
- scores = 1 / box_area(data["crop_boxes"])
211
- scores = scores.to(data["boxes"].device)
212
- keep_by_nms = batched_nms(
213
- data["boxes"].float(),
214
- scores,
215
- torch.zeros(len(data["boxes"])), # categories
216
- iou_threshold=self.crop_nms_thresh,
217
- )
218
- data.filter(keep_by_nms)
219
-
220
- data.to_numpy()
221
- return data
222
-
223
- def _process_crop(
224
- self,
225
- image: np.ndarray,
226
- crop_box: List[int],
227
- crop_layer_idx: int,
228
- orig_size: Tuple[int, ...],
229
- ) -> MaskData:
230
- # Crop the image and calculate embeddings
231
- x0, y0, x1, y1 = crop_box
232
- cropped_im = image[y0:y1, x0:x1, :]
233
- cropped_im_size = cropped_im.shape[:2]
234
- self.predictor.set_image(cropped_im)
235
-
236
- # Get points for this crop
237
- points_scale = np.array(cropped_im_size)[None, ::-1]
238
- points_for_image = self.point_grids[crop_layer_idx] * points_scale
239
-
240
- # Generate masks for this crop in batches
241
- data = MaskData()
242
- for (points,) in batch_iterator(self.points_per_batch, points_for_image):
243
- batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size)
244
- data.cat(batch_data)
245
- del batch_data
246
- self.predictor.reset_image()
247
-
248
- # Remove duplicates within this crop.
249
- keep_by_nms = batched_nms(
250
- data["boxes"].float(),
251
- data["iou_preds"],
252
- torch.zeros(len(data["boxes"])), # categories
253
- iou_threshold=self.box_nms_thresh,
254
- )
255
- data.filter(keep_by_nms)
256
-
257
- # Return to the original image frame
258
- data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
259
- data["points"] = uncrop_points(data["points"], crop_box)
260
- data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])
261
-
262
- return data
263
-
264
- def _process_batch(
265
- self,
266
- points: np.ndarray,
267
- im_size: Tuple[int, ...],
268
- crop_box: List[int],
269
- orig_size: Tuple[int, ...],
270
- ) -> MaskData:
271
- orig_h, orig_w = orig_size
272
-
273
- # Run model on this batch
274
- transformed_points = self.predictor.transform.apply_coords(points, im_size)
275
- in_points = torch.as_tensor(transformed_points, device=self.predictor.device)
276
- in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)
277
- masks, iou_preds, _ = self.predictor.predict_torch(
278
- in_points[:, None, :],
279
- in_labels[:, None],
280
- multimask_output=True,
281
- return_logits=True,
282
- )
283
-
284
- # Serialize predictions and store in MaskData
285
- data = MaskData(
286
- masks=masks.flatten(0, 1),
287
- iou_preds=iou_preds.flatten(0, 1),
288
- points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)),
289
- )
290
- del masks
291
-
292
- # Filter by predicted IoU
293
- if self.pred_iou_thresh > 0.0:
294
- keep_mask = data["iou_preds"] > self.pred_iou_thresh
295
- data.filter(keep_mask)
296
-
297
- # Calculate stability score
298
- data["stability_score"] = calculate_stability_score(
299
- data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset
300
- )
301
- if self.stability_score_thresh > 0.0:
302
- keep_mask = data["stability_score"] >= self.stability_score_thresh
303
- data.filter(keep_mask)
304
-
305
- # Threshold masks and calculate boxes
306
- data["masks"] = data["masks"] > self.predictor.model.mask_threshold
307
- data["boxes"] = batched_mask_to_box(data["masks"])
308
-
309
- # Filter boxes that touch crop boundaries
310
- keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h])
311
- if not torch.all(keep_mask):
312
- data.filter(keep_mask)
313
-
314
- # Compress to RLE
315
- data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
316
- data["rles"] = mask_to_rle_pytorch(data["masks"])
317
- del data["masks"]
318
-
319
- return data
320
-
321
- @staticmethod
322
- def postprocess_small_regions(mask_data: MaskData, min_area: int, nms_thresh: float) -> MaskData:
323
- """
324
- Removes small disconnected regions and holes in masks, then reruns
325
- box NMS to remove any new duplicates.
326
-
327
- Edits mask_data in place.
328
-
329
- Requires open-cv as a dependency.
330
- """
331
- if len(mask_data["rles"]) == 0:
332
- return mask_data
333
-
334
- # Filter small disconnected regions and holes
335
- new_masks = []
336
- scores = []
337
- for rle in mask_data["rles"]:
338
- mask = rle_to_mask(rle)
339
-
340
- mask, changed = remove_small_regions(mask, min_area, mode="holes")
341
- unchanged = not changed
342
- mask, changed = remove_small_regions(mask, min_area, mode="islands")
343
- unchanged = unchanged and not changed
344
-
345
- new_masks.append(torch.as_tensor(mask).unsqueeze(0))
346
- # Give score=0 to changed masks and score=1 to unchanged masks
347
- # so NMS will prefer ones that didn't need postprocessing
348
- scores.append(float(unchanged))
349
-
350
- # Recalculate boxes and remove any new duplicates
351
- masks = torch.cat(new_masks, dim=0)
352
- boxes = batched_mask_to_box(masks)
353
- keep_by_nms = batched_nms(
354
- boxes.float(),
355
- torch.as_tensor(scores),
356
- torch.zeros(len(boxes)), # categories
357
- iou_threshold=nms_thresh,
358
- )
359
-
360
- # Only recalculate RLEs for masks that have changed
361
- for i_mask in keep_by_nms:
362
- if scores[i_mask] == 0.0:
363
- mask_torch = masks[i_mask].unsqueeze(0)
364
- mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
365
- mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly
366
- mask_data.filter(keep_by_nms)
367
-
368
- return mask_data
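As a quick illustration of the record format documented in generate() above, a hedged sketch of driving the mask generator directly (the checkpoint and image paths are placeholders, and the imports assume the pre-deletion package layout):

import cv2
from metaseg import SamAutomaticMaskGenerator, sam_model_registry

sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b.pth")  # placeholder checkpoint path
mask_generator = SamAutomaticMaskGenerator(sam, points_per_side=16, min_mask_region_area=100)

# generate() expects an HWC uint8 image.
image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)  # placeholder image path
masks = mask_generator.generate(image)

# Each record carries the keys documented in the docstring above.
for record in masks[:3]:
    print(record["area"], record["bbox"], record["predicted_iou"], record["stability_score"])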
metaseg/build_sam.py DELETED
@@ -1,107 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from functools import partial
8
-
9
- import torch
10
-
11
- from metaseg.modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer
12
-
13
-
14
- def build_sam_vit_h(checkpoint=None):
15
- return _build_sam(
16
- encoder_embed_dim=1280,
17
- encoder_depth=32,
18
- encoder_num_heads=16,
19
- encoder_global_attn_indexes=[7, 15, 23, 31],
20
- checkpoint=checkpoint,
21
- )
22
-
23
-
24
- build_sam = build_sam_vit_h
25
-
26
-
27
- def build_sam_vit_l(checkpoint=None):
28
- return _build_sam(
29
- encoder_embed_dim=1024,
30
- encoder_depth=24,
31
- encoder_num_heads=16,
32
- encoder_global_attn_indexes=[5, 11, 17, 23],
33
- checkpoint=checkpoint,
34
- )
35
-
36
-
37
- def build_sam_vit_b(checkpoint=None):
38
- return _build_sam(
39
- encoder_embed_dim=768,
40
- encoder_depth=12,
41
- encoder_num_heads=12,
42
- encoder_global_attn_indexes=[2, 5, 8, 11],
43
- checkpoint=checkpoint,
44
- )
45
-
46
-
47
- sam_model_registry = {
48
- "default": build_sam,
49
- "vit_h": build_sam,
50
- "vit_l": build_sam_vit_l,
51
- "vit_b": build_sam_vit_b,
52
- }
53
-
54
-
55
- def _build_sam(
56
- encoder_embed_dim,
57
- encoder_depth,
58
- encoder_num_heads,
59
- encoder_global_attn_indexes,
60
- checkpoint=None,
61
- ):
62
- prompt_embed_dim = 256
63
- image_size = 1024
64
- vit_patch_size = 16
65
- image_embedding_size = image_size // vit_patch_size
66
- sam = Sam(
67
- image_encoder=ImageEncoderViT(
68
- depth=encoder_depth,
69
- embed_dim=encoder_embed_dim,
70
- img_size=image_size,
71
- mlp_ratio=4,
72
- norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
73
- num_heads=encoder_num_heads,
74
- patch_size=vit_patch_size,
75
- qkv_bias=True,
76
- use_rel_pos=True,
77
- global_attn_indexes=encoder_global_attn_indexes,
78
- window_size=14,
79
- out_chans=prompt_embed_dim,
80
- ),
81
- prompt_encoder=PromptEncoder(
82
- embed_dim=prompt_embed_dim,
83
- image_embedding_size=(image_embedding_size, image_embedding_size),
84
- input_image_size=(image_size, image_size),
85
- mask_in_chans=16,
86
- ),
87
- mask_decoder=MaskDecoder(
88
- num_multimask_outputs=3,
89
- transformer=TwoWayTransformer(
90
- depth=2,
91
- embedding_dim=prompt_embed_dim,
92
- mlp_dim=2048,
93
- num_heads=8,
94
- ),
95
- transformer_dim=prompt_embed_dim,
96
- iou_head_depth=3,
97
- iou_head_hidden_dim=256,
98
- ),
99
- pixel_mean=[123.675, 116.28, 103.53],
100
- pixel_std=[58.395, 57.12, 57.375],
101
- )
102
- sam.eval()
103
- if checkpoint is not None:
104
- with open(checkpoint, "rb") as f:
105
- state_dict = torch.load(f)
106
- sam.load_state_dict(state_dict)
107
- return sam
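For reference, a hedged sketch of how the builders above are consumed (not part of this commit; the checkpoint path is a placeholder and the import assumes the pre-deletion package layout):

import torch
from metaseg import build_sam_vit_b, sam_model_registry

# "default" and "vit_h" both map to the ViT-H builder; the three builders differ
# only in encoder width, depth, heads, and global-attention indexes.
assert sam_model_registry["vit_b"] is build_sam_vit_b

# Passing a checkpoint loads the state dict; the model is returned in eval() mode.
sam = build_sam_vit_b(checkpoint="sam_vit_b.pth")  # placeholder path
sam.to("cuda" if torch.cuda.is_available() else "cpu")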
metaseg/demo.py DELETED
@@ -1,112 +0,0 @@
1
- from typing import Optional
2
-
3
- import cv2
4
- import numpy as np
5
- import torch
6
-
7
- from metaseg import SamAutomaticMaskGenerator, sam_model_registry
8
- from metaseg.utils.file import download_model
9
-
10
-
11
- class SegAutoMaskGenerator:
12
- def __init__(self):
13
- self.model = None
14
- self.device = "cuda" if torch.cuda.is_available() else "cpu"
15
-
16
- def load_model(self, model_type):
17
- if self.model is None:
18
- model_path = download_model(model_type)
19
- model = sam_model_registry[self.model_type](checkpoint=model_path)
20
- model.to(device=self.device)
21
- self.model = model
22
-
23
- return self.model
24
-
25
- def load_image(self, image_path):
26
- image = cv2.imread(image_path)
27
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
28
- return image
29
-
30
- def load_video(self, video_path):
31
- cap = cv2.VideoCapture(video_path)
32
- frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
33
- frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
34
- fourcc = cv2.VideoWriter_fourcc(*"XVID")
35
- fps = int(cap.get(cv2.CAP_PROP_FPS))
36
- out = cv2.VideoWriter("output.mp4", fourcc, fps, (frame_width, frame_height))
37
-
38
- return cap, out
39
-
40
- def predict(self, frame, model_type, points_per_side, points_per_batch):
41
- model = self.load_model(model_type)
42
- mask_generator = SamAutomaticMaskGenerator(
43
- model, points_per_side=points_per_side, points_per_batch=points_per_batch
44
- )
45
- masks = mask_generator.generate(frame)
46
-
47
- return frame, masks
48
-
49
- def save_image(self, source, model_type, points_per_side, points_per_batch):
50
- read_image = self.load_image(source)
51
- image, anns = self.predict(read_image, model_type, points_per_side, points_per_batch)
52
- if len(anns) == 0:
53
- return
54
-
55
- sorted_anns = sorted(anns, key=(lambda x: x["area"]), reverse=True)
56
- mask_image = np.zeros((anns[0]["segmentation"].shape[0], anns[0]["segmentation"].shape[1], 3), dtype=np.uint8)
57
- colors = np.random.randint(0, 255, size=(256, 3), dtype=np.uint8)
58
- for i, ann in enumerate(sorted_anns):
59
- m = ann["segmentation"]
60
- img = np.ones((m.shape[0], m.shape[1], 3), dtype=np.uint8)
61
- color = colors[i % 256]
62
- for i in range(3):
63
- img[:, :, 0] = color[0]
64
- img[:, :, 1] = color[1]
65
- img[:, :, 2] = color[2]
66
- img = cv2.bitwise_and(img, img, mask=m.astype(np.uint8))
67
- img = cv2.addWeighted(img, 0.35, np.zeros_like(img), 0.65, 0)
68
- mask_image = cv2.add(mask_image, img)
69
-
70
- combined_mask = cv2.add(image, mask_image)
71
- cv2.imwrite("output.jpg", combined_mask)
72
-
73
- return "output.jpg"
74
-
75
- def save_video(self, source, model_type, points_per_side, points_per_batch):
76
- cap, out = self.load_video()
77
- colors = np.random.randint(0, 255, size=(256, 3), dtype=np.uint8)
78
-
79
- while True:
80
- ret, frame = cap.read()
81
- if not ret:
82
- break
83
-
84
- image, anns = self.predict(frame)
85
- if len(anns) == 0:
86
- continue
87
-
88
- sorted_anns = sorted(anns, key=(lambda x: x["area"]), reverse=True)
89
- mask_image = np.zeros(
90
- (anns[0]["segmentation"].shape[0], anns[0]["segmentation"].shape[1], 3), dtype=np.uint8
91
- )
92
-
93
- for i, ann in enumerate(sorted_anns):
94
- if ann["area"] > 5000:
95
- m = ann["segmentation"]
96
- color = colors[i % 256] # Her nesne için farklı bir renk kullan
97
- img = np.zeros((m.shape[0], m.shape[1], 3), dtype=np.uint8)
98
- img[:, :, 0] = color[0]
99
- img[:, :, 1] = color[1]
100
- img[:, :, 2] = color[2]
101
- img = cv2.bitwise_and(img, img, mask=m.astype(np.uint8))
102
- img = cv2.addWeighted(img, 0.35, np.zeros_like(img), 0.65, 0)
103
- mask_image = cv2.add(mask_image, img)
104
-
105
- combined_mask = cv2.add(frame, mask_image)
106
- out.write(combined_mask)
107
-
108
- out.release()
109
- cap.release()
110
- cv2.destroyAllWindows()
111
-
112
- return "output.mp4"
metaseg/modeling/__init__.py DELETED
@@ -1,11 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from metaseg.modeling.image_encoder import ImageEncoderViT
8
- from metaseg.modeling.mask_decoder import MaskDecoder
9
- from metaseg.modeling.prompt_encoder import PromptEncoder
10
- from metaseg.modeling.sam import Sam
11
- from metaseg.modeling.transformer import TwoWayTransformer
metaseg/modeling/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (484 Bytes)
 
metaseg/modeling/__pycache__/common.cpython-310.pyc DELETED
Binary file (1.75 kB)
 
metaseg/modeling/__pycache__/image_encoder.cpython-310.pyc DELETED
Binary file (12.6 kB)
 
metaseg/modeling/__pycache__/mask_decoder.cpython-310.pyc DELETED
Binary file (5.46 kB)
 
metaseg/modeling/__pycache__/prompt_encoder.cpython-310.pyc DELETED
Binary file (7.68 kB)
 
metaseg/modeling/__pycache__/sam.cpython-310.pyc DELETED
Binary file (6.74 kB)
 
metaseg/modeling/__pycache__/transformer.cpython-310.pyc DELETED
Binary file (6.59 kB)
 
metaseg/modeling/common.py DELETED
@@ -1,43 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from typing import Type
8
-
9
- import torch
10
- import torch.nn as nn
11
-
12
-
13
- class MLPBlock(nn.Module):
14
- def __init__(
15
- self,
16
- embedding_dim: int,
17
- mlp_dim: int,
18
- act: Type[nn.Module] = nn.GELU,
19
- ) -> None:
20
- super().__init__()
21
- self.lin1 = nn.Linear(embedding_dim, mlp_dim)
22
- self.lin2 = nn.Linear(mlp_dim, embedding_dim)
23
- self.act = act()
24
-
25
- def forward(self, x: torch.Tensor) -> torch.Tensor:
26
- return self.lin2(self.act(self.lin1(x)))
27
-
28
-
29
- # From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
30
- # Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
31
- class LayerNorm2d(nn.Module):
32
- def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
33
- super().__init__()
34
- self.weight = nn.Parameter(torch.ones(num_channels))
35
- self.bias = nn.Parameter(torch.zeros(num_channels))
36
- self.eps = eps
37
-
38
- def forward(self, x: torch.Tensor) -> torch.Tensor:
39
- u = x.mean(1, keepdim=True)
40
- s = (x - u).pow(2).mean(1, keepdim=True)
41
- x = (x - u) / torch.sqrt(s + self.eps)
42
- x = self.weight[:, None, None] * x + self.bias[:, None, None]
43
- return x
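A small sanity-check sketch for the LayerNorm2d defined above (illustrative; it assumes the module is still importable from the pre-deletion layout): the layer normalizes an NCHW tensor over its channel dimension, which matches nn.LayerNorm applied channels-last.

import torch
import torch.nn as nn
from metaseg.modeling.common import LayerNorm2d

x = torch.randn(2, 8, 4, 4)  # NCHW

ln2d = LayerNorm2d(8)            # weight=1, bias=0 at init
ref = nn.LayerNorm(8, eps=1e-6)  # same eps as LayerNorm2d's default

# Apply the reference layer channels-last, then permute back for comparison.
expected = ref(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
print(torch.allclose(ln2d(x), expected, atol=1e-5))  # True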
metaseg/modeling/image_encoder.py DELETED
@@ -1,389 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from typing import Optional, Tuple, Type
8
-
9
- import torch
10
- import torch.nn as nn
11
- import torch.nn.functional as F
12
-
13
- from metaseg.modeling.common import LayerNorm2d, MLPBlock
14
-
15
-
16
- # This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
17
- class ImageEncoderViT(nn.Module):
18
- def __init__(
19
- self,
20
- img_size: int = 1024,
21
- patch_size: int = 16,
22
- in_chans: int = 3,
23
- embed_dim: int = 768,
24
- depth: int = 12,
25
- num_heads: int = 12,
26
- mlp_ratio: float = 4.0,
27
- out_chans: int = 256,
28
- qkv_bias: bool = True,
29
- norm_layer: Type[nn.Module] = nn.LayerNorm,
30
- act_layer: Type[nn.Module] = nn.GELU,
31
- use_abs_pos: bool = True,
32
- use_rel_pos: bool = False,
33
- rel_pos_zero_init: bool = True,
34
- window_size: int = 0,
35
- global_attn_indexes: Tuple[int, ...] = (),
36
- ) -> None:
37
- """
38
- Args:
39
- img_size (int): Input image size.
40
- patch_size (int): Patch size.
41
- in_chans (int): Number of input image channels.
42
- embed_dim (int): Patch embedding dimension.
43
- depth (int): Depth of ViT.
44
- num_heads (int): Number of attention heads in each ViT block.
45
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
46
- qkv_bias (bool): If True, add a learnable bias to query, key, value.
47
- norm_layer (nn.Module): Normalization layer.
48
- act_layer (nn.Module): Activation layer.
49
- use_abs_pos (bool): If True, use absolute positional embeddings.
50
- use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
51
- rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
52
- window_size (int): Window size for window attention blocks.
53
- global_attn_indexes (list): Indexes for blocks using global attention.
54
- """
55
- super().__init__()
56
- self.img_size = img_size
57
-
58
- self.patch_embed = PatchEmbed(
59
- kernel_size=(patch_size, patch_size),
60
- stride=(patch_size, patch_size),
61
- in_chans=in_chans,
62
- embed_dim=embed_dim,
63
- )
64
-
65
- self.pos_embed: Optional[nn.Parameter] = None
66
- if use_abs_pos:
67
- # Initialize absolute positional embedding with pretrain image size.
68
- self.pos_embed = nn.Parameter(torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim))
69
-
70
- self.blocks = nn.ModuleList()
71
- for i in range(depth):
72
- block = Block(
73
- dim=embed_dim,
74
- num_heads=num_heads,
75
- mlp_ratio=mlp_ratio,
76
- qkv_bias=qkv_bias,
77
- norm_layer=norm_layer,
78
- act_layer=act_layer,
79
- use_rel_pos=use_rel_pos,
80
- rel_pos_zero_init=rel_pos_zero_init,
81
- window_size=window_size if i not in global_attn_indexes else 0,
82
- input_size=(img_size // patch_size, img_size // patch_size),
83
- )
84
- self.blocks.append(block)
85
-
86
- self.neck = nn.Sequential(
87
- nn.Conv2d(
88
- embed_dim,
89
- out_chans,
90
- kernel_size=1,
91
- bias=False,
92
- ),
93
- LayerNorm2d(out_chans),
94
- nn.Conv2d(
95
- out_chans,
96
- out_chans,
97
- kernel_size=3,
98
- padding=1,
99
- bias=False,
100
- ),
101
- LayerNorm2d(out_chans),
102
- )
103
-
104
- def forward(self, x: torch.Tensor) -> torch.Tensor:
105
- x = self.patch_embed(x)
106
- if self.pos_embed is not None:
107
- x = x + self.pos_embed
108
-
109
- for blk in self.blocks:
110
- x = blk(x)
111
-
112
- x = self.neck(x.permute(0, 3, 1, 2))
113
-
114
- return x
115
-
116
-
117
- class Block(nn.Module):
118
- """Transformer blocks with support of window attention and residual propagation blocks"""
119
-
120
- def __init__(
121
- self,
122
- dim: int,
123
- num_heads: int,
124
- mlp_ratio: float = 4.0,
125
- qkv_bias: bool = True,
126
- norm_layer: Type[nn.Module] = nn.LayerNorm,
127
- act_layer: Type[nn.Module] = nn.GELU,
128
- use_rel_pos: bool = False,
129
- rel_pos_zero_init: bool = True,
130
- window_size: int = 0,
131
- input_size: Optional[Tuple[int, int]] = None,
132
- ) -> None:
133
- """
134
- Args:
135
- dim (int): Number of input channels.
136
- num_heads (int): Number of attention heads in each ViT block.
137
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
138
- qkv_bias (bool): If True, add a learnable bias to query, key, value.
139
- norm_layer (nn.Module): Normalization layer.
140
- act_layer (nn.Module): Activation layer.
141
- use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
142
- rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
143
- window_size (int): Window size for window attention blocks. If it equals 0, then
144
- use global attention.
145
- input_size (int or None): Input resolution for calculating the relative positional
146
- parameter size.
147
- """
148
- super().__init__()
149
- self.norm1 = norm_layer(dim)
150
- self.attn = Attention(
151
- dim,
152
- num_heads=num_heads,
153
- qkv_bias=qkv_bias,
154
- use_rel_pos=use_rel_pos,
155
- rel_pos_zero_init=rel_pos_zero_init,
156
- input_size=input_size if window_size == 0 else (window_size, window_size),
157
- )
158
-
159
- self.norm2 = norm_layer(dim)
160
- self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
161
-
162
- self.window_size = window_size
163
-
164
- def forward(self, x: torch.Tensor) -> torch.Tensor:
165
- shortcut = x
166
- x = self.norm1(x)
167
- # Window partition
168
- if self.window_size > 0:
169
- H, W = x.shape[1], x.shape[2]
170
- x, pad_hw = window_partition(x, self.window_size)
171
-
172
- x = self.attn(x)
173
- # Reverse window partition
174
- if self.window_size > 0:
175
- x = window_unpartition(x, self.window_size, pad_hw, (H, W))
176
-
177
- x = shortcut + x
178
- x = x + self.mlp(self.norm2(x))
179
-
180
- return x
181
-
182
-
183
- class Attention(nn.Module):
184
- """Multi-head Attention block with relative position embeddings."""
185
-
186
- def __init__(
187
- self,
188
- dim: int,
189
- num_heads: int = 8,
190
- qkv_bias: bool = True,
191
- use_rel_pos: bool = False,
192
- rel_pos_zero_init: bool = True,
193
- input_size: Optional[Tuple[int, int]] = None,
194
- ) -> None:
195
- """
196
- Args:
197
- dim (int): Number of input channels.
198
- num_heads (int): Number of attention heads.
199
- qkv_bias (bool: If True, add a learnable bias to query, key, value.
200
- rel_pos (bool): If True, add relative positional embeddings to the attention map.
201
- rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
202
- input_size (int or None): Input resolution for calculating the relative positional
203
- parameter size.
204
- """
205
- super().__init__()
206
- self.num_heads = num_heads
207
- head_dim = dim // num_heads
208
- self.scale = head_dim ** -0.5
209
-
210
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
211
- self.proj = nn.Linear(dim, dim)
212
-
213
- self.use_rel_pos = use_rel_pos
214
- if self.use_rel_pos:
215
- assert input_size is not None, "Input size must be provided if using relative positional encoding."
216
- # initialize relative positional embeddings
217
- self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
218
- self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
219
-
220
- def forward(self, x: torch.Tensor) -> torch.Tensor:
221
- B, H, W, _ = x.shape
222
- # qkv with shape (3, B, nHead, H * W, C)
223
- qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
224
- # q, k, v with shape (B * nHead, H * W, C)
225
- q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
226
-
227
- attn = (q * self.scale) @ k.transpose(-2, -1)
228
-
229
- if self.use_rel_pos:
230
- attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
231
-
232
- attn = attn.softmax(dim=-1)
233
- x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
234
- x = self.proj(x)
235
-
236
- return x
237
-
238
-
239
- def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
240
- """
241
- Partition into non-overlapping windows with padding if needed.
242
- Args:
243
- x (tensor): input tokens with [B, H, W, C].
244
- window_size (int): window size.
245
-
246
- Returns:
247
- windows: windows after partition with [B * num_windows, window_size, window_size, C].
248
- (Hp, Wp): padded height and width before partition
249
- """
250
- B, H, W, C = x.shape
251
-
252
- pad_h = (window_size - H % window_size) % window_size
253
- pad_w = (window_size - W % window_size) % window_size
254
- if pad_h > 0 or pad_w > 0:
255
- x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
256
- Hp, Wp = H + pad_h, W + pad_w
257
-
258
- x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
259
- windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
260
- return windows, (Hp, Wp)
261
-
262
-
263
- def window_unpartition(
264
- windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]
265
- ) -> torch.Tensor:
266
- """
267
- Window unpartition into original sequences and removing padding.
268
- Args:
269
- x (tensor): input tokens with [B * num_windows, window_size, window_size, C].
270
- window_size (int): window size.
271
- pad_hw (Tuple): padded height and width (Hp, Wp).
272
- hw (Tuple): original height and width (H, W) before padding.
273
-
274
- Returns:
275
- x: unpartitioned sequences with [B, H, W, C].
276
- """
277
- Hp, Wp = pad_hw
278
- H, W = hw
279
- B = windows.shape[0] // (Hp * Wp // window_size // window_size)
280
- x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
281
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
282
-
283
- if Hp > H or Wp > W:
284
- x = x[:, :H, :W, :].contiguous()
285
- return x
286
-
287
-
288
- def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
289
- """
290
- Get relative positional embeddings according to the relative positions of
291
- query and key sizes.
292
- Args:
293
- q_size (int): size of query q.
294
- k_size (int): size of key k.
295
- rel_pos (Tensor): relative position embeddings (L, C).
296
-
297
- Returns:
298
- Extracted positional embeddings according to relative positions.
299
- """
300
- max_rel_dist = int(2 * max(q_size, k_size) - 1)
301
- # Interpolate rel pos if needed.
302
- if rel_pos.shape[0] != max_rel_dist:
303
- # Interpolate rel pos.
304
- rel_pos_resized = F.interpolate(
305
- rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
306
- size=max_rel_dist,
307
- mode="linear",
308
- )
309
- rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
310
- else:
311
- rel_pos_resized = rel_pos
312
-
313
- # Scale the coords with short length if shapes for q and k are different.
314
- q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
315
- k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
316
- relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
317
-
318
- return rel_pos_resized[relative_coords.long()]
319
-
320
-
321
- def add_decomposed_rel_pos(
322
- attn: torch.Tensor,
323
- q: torch.Tensor,
324
- rel_pos_h: torch.Tensor,
325
- rel_pos_w: torch.Tensor,
326
- q_size: Tuple[int, int],
327
- k_size: Tuple[int, int],
328
- ) -> torch.Tensor:
329
- """
330
- Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
331
- https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
332
- Args:
333
- attn (Tensor): attention map.
334
- q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
335
- rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
336
- rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
337
- q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
338
- k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
339
-
340
- Returns:
341
- attn (Tensor): attention map with added relative positional embeddings.
342
- """
343
- q_h, q_w = q_size
344
- k_h, k_w = k_size
345
- Rh = get_rel_pos(q_h, k_h, rel_pos_h)
346
- Rw = get_rel_pos(q_w, k_w, rel_pos_w)
347
-
348
- B, _, dim = q.shape
349
- r_q = q.reshape(B, q_h, q_w, dim)
350
- rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
351
- rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
352
-
353
- attn = (attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]).view(
354
- B, q_h * q_w, k_h * k_w
355
- )
356
-
357
- return attn
358
-
359
-
360
- class PatchEmbed(nn.Module):
361
- """
362
- Image to Patch Embedding.
363
- """
364
-
365
- def __init__(
366
- self,
367
- kernel_size: Tuple[int, int] = (16, 16),
368
- stride: Tuple[int, int] = (16, 16),
369
- padding: Tuple[int, int] = (0, 0),
370
- in_chans: int = 3,
371
- embed_dim: int = 768,
372
- ) -> None:
373
- """
374
- Args:
375
- kernel_size (Tuple): kernel size of the projection layer.
376
- stride (Tuple): stride of the projection layer.
377
- padding (Tuple): padding size of the projection layer.
378
- in_chans (int): Number of input image channels.
379
- embed_dim (int): embed_dim (int): Patch embedding dimension.
380
- """
381
- super().__init__()
382
-
383
- self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)
384
-
385
- def forward(self, x: torch.Tensor) -> torch.Tensor:
386
- x = self.proj(x)
387
- # B C H W -> B H W C
388
- x = x.permute(0, 2, 3, 1)
389
- return x
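To make the tensor flow above concrete, a hedged sketch of pushing a dummy batch through the encoder with ViT-B-like settings (illustrative; the shapes follow from img_size=1024 and patch_size=16, i.e. a 64x64 token grid, and the neck's out_chans=256):

import torch
from metaseg.modeling import ImageEncoderViT

encoder = ImageEncoderViT(
    img_size=1024, patch_size=16, embed_dim=768, depth=12, num_heads=12,
    out_chans=256, window_size=14, global_attn_indexes=(2, 5, 8, 11),
)
with torch.no_grad():
    features = encoder(torch.zeros(1, 3, 1024, 1024))
print(features.shape)  # torch.Size([1, 256, 64, 64])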
metaseg/modeling/mask_decoder.py DELETED
@@ -1,169 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from typing import List, Tuple, Type
8
-
9
- import torch
10
- from torch import nn
11
- from torch.nn import functional as F
12
-
13
- from metaseg.modeling.common import LayerNorm2d
14
-
15
-
16
- class MaskDecoder(nn.Module):
17
- def __init__(
18
- self,
19
- *,
20
- transformer_dim: int,
21
- transformer: nn.Module,
22
- num_multimask_outputs: int = 3,
23
- activation: Type[nn.Module] = nn.GELU,
24
- iou_head_depth: int = 3,
25
- iou_head_hidden_dim: int = 256,
26
- ) -> None:
27
- """
28
- Predicts masks given an image and prompt embeddings, using a
29
- tranformer architecture.
30
-
31
- Arguments:
32
- transformer_dim (int): the channel dimension of the transformer
33
- transformer (nn.Module): the transformer used to predict masks
34
- num_multimask_outputs (int): the number of masks to predict
35
- when disambiguating masks
36
- activation (nn.Module): the type of activation to use when
37
- upscaling masks
38
- iou_head_depth (int): the depth of the MLP used to predict
39
- mask quality
40
- iou_head_hidden_dim (int): the hidden dimension of the MLP
41
- used to predict mask quality
42
- """
43
- super().__init__()
44
- self.transformer_dim = transformer_dim
45
- self.transformer = transformer
46
-
47
- self.num_multimask_outputs = num_multimask_outputs
48
-
49
- self.iou_token = nn.Embedding(1, transformer_dim)
50
- self.num_mask_tokens = num_multimask_outputs + 1
51
- self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
52
-
53
- self.output_upscaling = nn.Sequential(
54
- nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
55
- LayerNorm2d(transformer_dim // 4),
56
- activation(),
57
- nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
58
- activation(),
59
- )
60
- self.output_hypernetworks_mlps = nn.ModuleList(
61
- [MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for i in range(self.num_mask_tokens)]
62
- )
63
-
64
- self.iou_prediction_head = MLP(transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth)
65
-
66
- def forward(
67
- self,
68
- image_embeddings: torch.Tensor,
69
- image_pe: torch.Tensor,
70
- sparse_prompt_embeddings: torch.Tensor,
71
- dense_prompt_embeddings: torch.Tensor,
72
- multimask_output: bool,
73
- ) -> Tuple[torch.Tensor, torch.Tensor]:
74
- """
75
- Predict masks given image and prompt embeddings.
76
-
77
- Arguments:
78
- image_embeddings (torch.Tensor): the embeddings from the image encoder
79
- image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
80
- sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
81
- dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
82
- multimask_output (bool): Whether to return multiple masks or a single
83
- mask.
84
-
85
- Returns:
86
- torch.Tensor: batched predicted masks
87
- torch.Tensor: batched predictions of mask quality
88
- """
89
- masks, iou_pred = self.predict_masks(
90
- image_embeddings=image_embeddings,
91
- image_pe=image_pe,
92
- sparse_prompt_embeddings=sparse_prompt_embeddings,
93
- dense_prompt_embeddings=dense_prompt_embeddings,
94
- )
95
-
96
- # Select the correct mask or masks for outptu
97
- if multimask_output:
98
- mask_slice = slice(1, None)
99
- else:
100
- mask_slice = slice(0, 1)
101
- masks = masks[:, mask_slice, :, :]
102
- iou_pred = iou_pred[:, mask_slice]
103
-
104
- # Prepare output
105
- return masks, iou_pred
106
-
107
- def predict_masks(
108
- self,
109
- image_embeddings: torch.Tensor,
110
- image_pe: torch.Tensor,
111
- sparse_prompt_embeddings: torch.Tensor,
112
- dense_prompt_embeddings: torch.Tensor,
113
- ) -> Tuple[torch.Tensor, torch.Tensor]:
114
- """Predicts masks. See 'forward' for more details."""
115
- # Concatenate output tokens
116
- output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
117
- output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
118
- tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
119
-
120
- # Expand per-image data in batch direction to be per-mask
121
- src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
122
- src = src + dense_prompt_embeddings
123
- pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
124
- b, c, h, w = src.shape
125
-
126
- # Run the transformer
127
- hs, src = self.transformer(src, pos_src, tokens)
128
- iou_token_out = hs[:, 0, :]
129
- mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
130
-
131
- # Upscale mask embeddings and predict masks using the mask tokens
132
- src = src.transpose(1, 2).view(b, c, h, w)
133
- upscaled_embedding = self.output_upscaling(src)
134
- hyper_in_list: List[torch.Tensor] = []
135
- for i in range(self.num_mask_tokens):
136
- hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
137
- hyper_in = torch.stack(hyper_in_list, dim=1)
138
- b, c, h, w = upscaled_embedding.shape
139
- masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
140
-
141
- # Generate mask quality predictions
142
- iou_pred = self.iou_prediction_head(iou_token_out)
143
-
144
- return masks, iou_pred
145
-
146
-
147
- # Lightly adapted from
148
- # https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
149
- class MLP(nn.Module):
150
- def __init__(
151
- self,
152
- input_dim: int,
153
- hidden_dim: int,
154
- output_dim: int,
155
- num_layers: int,
156
- sigmoid_output: bool = False,
157
- ) -> None:
158
- super().__init__()
159
- self.num_layers = num_layers
160
- h = [hidden_dim] * (num_layers - 1)
161
- self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
162
- self.sigmoid_output = sigmoid_output
163
-
164
- def forward(self, x):
165
- for i, layer in enumerate(self.layers):
166
- x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
167
- if self.sigmoid_output:
168
- x = F.sigmoid(x)
169
- return x
metaseg/modeling/prompt_encoder.py DELETED
@@ -1,212 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from typing import Any, Optional, Tuple, Type
8
-
9
- import numpy as np
10
- import torch
11
- from torch import nn
12
-
13
- from metaseg.modeling.common import LayerNorm2d
14
-
15
-
16
- class PromptEncoder(nn.Module):
17
- def __init__(
18
- self,
19
- embed_dim: int,
20
- image_embedding_size: Tuple[int, int],
21
- input_image_size: Tuple[int, int],
22
- mask_in_chans: int,
23
- activation: Type[nn.Module] = nn.GELU,
24
- ) -> None:
25
- """
26
- Encodes prompts for input to SAM's mask decoder.
27
-
28
- Arguments:
29
- embed_dim (int): The prompts' embedding dimension
30
- image_embedding_size (tuple(int, int)): The spatial size of the
31
- image embedding, as (H, W).
32
- input_image_size (int): The padded size of the image as input
33
- to the image encoder, as (H, W).
34
- mask_in_chans (int): The number of hidden channels used for
35
- encoding input masks.
36
- activation (nn.Module): The activation to use when encoding
37
- input masks.
38
- """
39
- super().__init__()
40
- self.embed_dim = embed_dim
41
- self.input_image_size = input_image_size
42
- self.image_embedding_size = image_embedding_size
43
- self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
44
-
45
- self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
46
- point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
47
- self.point_embeddings = nn.ModuleList(point_embeddings)
48
- self.not_a_point_embed = nn.Embedding(1, embed_dim)
49
-
50
- self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
51
- self.mask_downscaling = nn.Sequential(
52
- nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
53
- LayerNorm2d(mask_in_chans // 4),
54
- activation(),
55
- nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
56
- LayerNorm2d(mask_in_chans),
57
- activation(),
58
- nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
59
- )
60
- self.no_mask_embed = nn.Embedding(1, embed_dim)
61
-
62
- def get_dense_pe(self) -> torch.Tensor:
63
- """
64
- Returns the positional encoding used to encode point prompts,
65
- applied to a dense set of points the shape of the image encoding.
66
-
67
- Returns:
68
- torch.Tensor: Positional encoding with shape
69
- 1x(embed_dim)x(embedding_h)x(embedding_w)
70
- """
71
- return self.pe_layer(self.image_embedding_size).unsqueeze(0)
72
-
73
- def _embed_points(
74
- self,
75
- points: torch.Tensor,
76
- labels: torch.Tensor,
77
- pad: bool,
78
- ) -> torch.Tensor:
79
- """Embeds point prompts."""
80
- points = points + 0.5 # Shift to center of pixel
81
- if pad:
82
- padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
83
- padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
84
- points = torch.cat([points, padding_point], dim=1)
85
- labels = torch.cat([labels, padding_label], dim=1)
86
- point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
87
- point_embedding[labels == -1] = 0.0
88
- point_embedding[labels == -1] += self.not_a_point_embed.weight
89
- point_embedding[labels == 0] += self.point_embeddings[0].weight
90
- point_embedding[labels == 1] += self.point_embeddings[1].weight
91
- return point_embedding
92
-
93
- def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
94
- """Embeds box prompts."""
95
- boxes = boxes + 0.5 # Shift to center of pixel
96
- coords = boxes.reshape(-1, 2, 2)
97
- corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
98
- corner_embedding[:, 0, :] += self.point_embeddings[2].weight
99
- corner_embedding[:, 1, :] += self.point_embeddings[3].weight
100
- return corner_embedding
101
-
102
- def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
103
- """Embeds mask inputs."""
104
- mask_embedding = self.mask_downscaling(masks)
105
- return mask_embedding
106
-
107
- def _get_batch_size(
108
- self,
109
- points: Optional[Tuple[torch.Tensor, torch.Tensor]],
110
- boxes: Optional[torch.Tensor],
111
- masks: Optional[torch.Tensor],
112
- ) -> int:
113
- """
114
- Gets the batch size of the output given the batch size of the input prompts.
115
- """
116
- if points is not None:
117
- return points[0].shape[0]
118
- elif boxes is not None:
119
- return boxes.shape[0]
120
- elif masks is not None:
121
- return masks.shape[0]
122
- else:
123
- return 1
124
-
125
- def _get_device(self) -> torch.device:
126
- return self.point_embeddings[0].weight.device
127
-
128
- def forward(
129
- self,
130
- points: Optional[Tuple[torch.Tensor, torch.Tensor]],
131
- boxes: Optional[torch.Tensor],
132
- masks: Optional[torch.Tensor],
133
- ) -> Tuple[torch.Tensor, torch.Tensor]:
134
- """
135
- Embeds different types of prompts, returning both sparse and dense
136
- embeddings.
137
-
138
- Arguments:
139
- points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
140
- and labels to embed.
141
- boxes (torch.Tensor or none): boxes to embed
142
- masks (torch.Tensor or none): masks to embed
143
-
144
- Returns:
145
- torch.Tensor: sparse embeddings for the points and boxes, with shape
146
- BxNx(embed_dim), where N is determined by the number of input points
147
- and boxes.
148
- torch.Tensor: dense embeddings for the masks, in the shape
149
- Bx(embed_dim)x(embed_H)x(embed_W)
150
- """
151
- bs = self._get_batch_size(points, boxes, masks)
152
- sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
153
- if points is not None:
154
- coords, labels = points
155
- point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
156
- sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
157
- if boxes is not None:
158
- box_embeddings = self._embed_boxes(boxes)
159
- sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
160
-
161
- if masks is not None:
162
- dense_embeddings = self._embed_masks(masks)
163
- else:
164
- dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
165
- bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
166
- )
167
-
168
- return sparse_embeddings, dense_embeddings
169
-
170
-
171
- class PositionEmbeddingRandom(nn.Module):
172
- """
173
- Positional encoding using random spatial frequencies.
174
- """
175
-
176
- def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
177
- super().__init__()
178
- if scale is None or scale <= 0.0:
179
- scale = 1.0
180
- self.register_buffer(
181
- "positional_encoding_gaussian_matrix",
182
- scale * torch.randn((2, num_pos_feats)),
183
- )
184
-
185
- def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
186
- """Positionally encode points that are normalized to [0,1]."""
187
- # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
188
- coords = 2 * coords - 1
189
- coords = coords @ self.positional_encoding_gaussian_matrix
190
- coords = 2 * np.pi * coords
191
- # outputs d_1 x ... x d_n x C shape
192
- return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
193
-
194
- def forward(self, size: Tuple[int, int]) -> torch.Tensor:
195
- """Generate positional encoding for a grid of the specified size."""
196
- h, w = size
197
- device: Any = self.positional_encoding_gaussian_matrix.device
198
- grid = torch.ones((h, w), device=device, dtype=torch.float32)
199
- y_embed = grid.cumsum(dim=0) - 0.5
200
- x_embed = grid.cumsum(dim=1) - 0.5
201
- y_embed = y_embed / h
202
- x_embed = x_embed / w
203
-
204
- pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
205
- return pe.permute(2, 0, 1) # C x H x W
206
-
207
- def forward_with_coords(self, coords_input: torch.Tensor, image_size: Tuple[int, int]) -> torch.Tensor:
208
- """Positionally encode points that are not normalized to [0,1]."""
209
- coords = coords_input.clone()
210
- coords[:, :, 0] = coords[:, :, 0] / image_size[1]
211
- coords[:, :, 1] = coords[:, :, 1] / image_size[0]
212
- return self._pe_encoding(coords.to(torch.float)) # B x N x C
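
For reference, a minimal sketch of how the PromptEncoder removed above is constructed and called; the constructor sizes below are illustrative defaults, not values taken from this diff:

    import torch
    from metaseg.modeling.prompt_encoder import PromptEncoder

    # Illustrative sizes: 256-d embeddings, a 64x64 image embedding for a padded 1024x1024 input.
    encoder = PromptEncoder(
        embed_dim=256,
        image_embedding_size=(64, 64),
        input_image_size=(1024, 1024),
        mask_in_chans=16,
    )
    # One foreground click at pixel (x=500, y=300); label 1 = foreground, 0 = background.
    coords = torch.tensor([[[500.0, 300.0]]])   # B x N x 2
    labels = torch.tensor([[1]])                # B x N
    sparse, dense = encoder(points=(coords, labels), boxes=None, masks=None)
    # sparse: B x (N + 1 padding point) x embed_dim, dense: B x embed_dim x 64 x 64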
 
metaseg/modeling/sam.py DELETED
@@ -1,174 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from typing import Any, Dict, List, Tuple
8
-
9
- import torch
10
- from torch import nn
11
- from torch.nn import functional as F
12
-
13
- from metaseg.modeling.image_encoder import ImageEncoderViT
14
- from metaseg.modeling.mask_decoder import MaskDecoder
15
- from metaseg.modeling.prompt_encoder import PromptEncoder
16
-
17
-
18
- class Sam(nn.Module):
19
- mask_threshold: float = 0.0
20
- image_format: str = "RGB"
21
-
22
- def __init__(
23
- self,
24
- image_encoder: ImageEncoderViT,
25
- prompt_encoder: PromptEncoder,
26
- mask_decoder: MaskDecoder,
27
- pixel_mean: List[float] = [123.675, 116.28, 103.53],
28
- pixel_std: List[float] = [58.395, 57.12, 57.375],
29
- ) -> None:
30
- """
31
- SAM predicts object masks from an image and input prompts.
32
-
33
- Arguments:
34
- image_encoder (ImageEncoderViT): The backbone used to encode the
35
- image into image embeddings that allow for efficient mask prediction.
36
- prompt_encoder (PromptEncoder): Encodes various types of input prompts.
37
- mask_decoder (MaskDecoder): Predicts masks from the image embeddings
38
- and encoded prompts.
39
- pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
40
- pixel_std (list(float)): Std values for normalizing pixels in the input image.
41
- """
42
- super().__init__()
43
- self.image_encoder = image_encoder
44
- self.prompt_encoder = prompt_encoder
45
- self.mask_decoder = mask_decoder
46
- self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
47
- self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
48
-
49
- @property
50
- def device(self) -> Any:
51
- return self.pixel_mean.device
52
-
53
- @torch.no_grad()
54
- def forward(
55
- self,
56
- batched_input: List[Dict[str, Any]],
57
- multimask_output: bool,
58
- ) -> List[Dict[str, torch.Tensor]]:
59
- """
60
- Predicts masks end-to-end from provided images and prompts.
61
- If prompts are not known in advance, using SamPredictor is
62
- recommended over calling the model directly.
63
-
64
- Arguments:
65
- batched_input (list(dict)): A list over input images, each a
66
- dictionary with the following keys. A prompt key can be
67
- excluded if it is not present.
68
- 'image': The image as a torch tensor in 3xHxW format,
69
- already transformed for input to the model.
70
- 'original_size': (tuple(int, int)) The original size of
71
- the image before transformation, as (H, W).
72
- 'point_coords': (torch.Tensor) Batched point prompts for
73
- this image, with shape BxNx2. Already transformed to the
74
- input frame of the model.
75
- 'point_labels': (torch.Tensor) Batched labels for point prompts,
76
- with shape BxN.
77
- 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
78
- Already transformed to the input frame of the model.
79
- 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
80
- in the form Bx1xHxW.
81
- multimask_output (bool): Whether the model should predict multiple
82
- disambiguating masks, or return a single mask.
83
-
84
- Returns:
85
- (list(dict)): A list over input images, where each element is
86
- a dictionary with the following keys.
87
- 'masks': (torch.Tensor) Batched binary mask predictions,
88
- with shape BxCxHxW, where B is the number of input prompts,
89
- C is determined by multimask_output, and (H, W) is the
90
- original size of the image.
91
- 'iou_predictions': (torch.Tensor) The model's predictions
92
- of mask quality, in shape BxC.
93
- 'low_res_logits': (torch.Tensor) Low resolution logits with
94
- shape BxCxHxW, where H=W=256. Can be passed as mask input
95
- to subsequent iterations of prediction.
96
- """
97
- input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0)
98
- image_embeddings = self.image_encoder(input_images)
99
-
100
- outputs = []
101
- for image_record, curr_embedding in zip(batched_input, image_embeddings):
102
- if "point_coords" in image_record:
103
- points = (image_record["point_coords"], image_record["point_labels"])
104
- else:
105
- points = None
106
- sparse_embeddings, dense_embeddings = self.prompt_encoder(
107
- points=points,
108
- boxes=image_record.get("boxes", None),
109
- masks=image_record.get("mask_inputs", None),
110
- )
111
- low_res_masks, iou_predictions = self.mask_decoder(
112
- image_embeddings=curr_embedding.unsqueeze(0),
113
- image_pe=self.prompt_encoder.get_dense_pe(),
114
- sparse_prompt_embeddings=sparse_embeddings,
115
- dense_prompt_embeddings=dense_embeddings,
116
- multimask_output=multimask_output,
117
- )
118
- masks = self.postprocess_masks(
119
- low_res_masks,
120
- input_size=image_record["image"].shape[-2:],
121
- original_size=image_record["original_size"],
122
- )
123
- masks = masks > self.mask_threshold
124
- outputs.append(
125
- {
126
- "masks": masks,
127
- "iou_predictions": iou_predictions,
128
- "low_res_logits": low_res_masks,
129
- }
130
- )
131
- return outputs
132
-
133
- def postprocess_masks(
134
- self,
135
- masks: torch.Tensor,
136
- input_size: Tuple[int, ...],
137
- original_size: Tuple[int, ...],
138
- ) -> torch.Tensor:
139
- """
140
- Remove padding and upscale masks to the original image size.
141
-
142
- Arguments:
143
- masks (torch.Tensor): Batched masks from the mask_decoder,
144
- in BxCxHxW format.
145
- input_size (tuple(int, int)): The size of the image input to the
146
- model, in (H, W) format. Used to remove padding.
147
- original_size (tuple(int, int)): The original size of the image
148
- before resizing for input to the model, in (H, W) format.
149
-
150
- Returns:
151
- (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
152
- is given by original_size.
153
- """
154
- masks = F.interpolate(
155
- masks,
156
- (self.image_encoder.img_size, self.image_encoder.img_size),
157
- mode="bilinear",
158
- align_corners=False,
159
- )
160
- masks = masks[..., : input_size[0], : input_size[1]]
161
- masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
162
- return masks
163
-
164
- def preprocess(self, x: torch.Tensor) -> torch.Tensor:
165
- """Normalize pixel values and pad to a square input."""
166
- # Normalize colors
167
- x = (x - self.pixel_mean) / self.pixel_std
168
-
169
- # Pad
170
- h, w = x.shape[-2:]
171
- padh = self.image_encoder.img_size - h
172
- padw = self.image_encoder.img_size - w
173
- x = F.pad(x, (0, padw, 0, padh))
174
- return x
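
For context, a hedged sketch of the batched interface that the Sam.forward above exposes; the model object, sizes, and prompt values are placeholders (a 1024-pixel input frame is assumed):

    import torch

    # `sam` is assumed to be an already-built model, e.g. sam_model_registry["vit_b"](checkpoint=...).
    batched_input = [
        {
            "image": torch.zeros(3, 1024, 1024),              # already resized to the model's input frame
            "original_size": (480, 640),                      # (H, W) before resizing
            "point_coords": torch.tensor([[[512.0, 512.0]]]), # BxNx2, in the input frame
            "point_labels": torch.tensor([[1]]),              # BxN, 1 = foreground
        }
    ]
    outputs = sam(batched_input, multimask_output=True)
    masks = outputs[0]["masks"]             # BxCxHxW boolean masks at original_size
    scores = outputs[0]["iou_predictions"]  # BxC predicted mask quality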
 
metaseg/modeling/transformer.py DELETED
@@ -1,232 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import math
8
- from typing import Tuple, Type
9
-
10
- import torch
11
- from torch import Tensor, nn
12
-
13
- from metaseg.modeling.common import MLPBlock
14
-
15
-
16
- class TwoWayTransformer(nn.Module):
17
- def __init__(
18
- self,
19
- depth: int,
20
- embedding_dim: int,
21
- num_heads: int,
22
- mlp_dim: int,
23
- activation: Type[nn.Module] = nn.ReLU,
24
- attention_downsample_rate: int = 2,
25
- ) -> None:
26
- """
27
- A transformer decoder that attends to an input image using
28
- queries whose positional embedding is supplied.
29
-
30
- Args:
31
- depth (int): number of layers in the transformer
32
- embedding_dim (int): the channel dimension for the input embeddings
33
- num_heads (int): the number of heads for multihead attention. Must
34
- divide embedding_dim
35
- mlp_dim (int): the channel dimension internal to the MLP block
36
- activation (nn.Module): the activation to use in the MLP block
37
- """
38
- super().__init__()
39
- self.depth = depth
40
- self.embedding_dim = embedding_dim
41
- self.num_heads = num_heads
42
- self.mlp_dim = mlp_dim
43
- self.layers = nn.ModuleList()
44
-
45
- for i in range(depth):
46
- self.layers.append(
47
- TwoWayAttentionBlock(
48
- embedding_dim=embedding_dim,
49
- num_heads=num_heads,
50
- mlp_dim=mlp_dim,
51
- activation=activation,
52
- attention_downsample_rate=attention_downsample_rate,
53
- skip_first_layer_pe=(i == 0),
54
- )
55
- )
56
-
57
- self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
58
- self.norm_final_attn = nn.LayerNorm(embedding_dim)
59
-
60
- def forward(
61
- self,
62
- image_embedding: Tensor,
63
- image_pe: Tensor,
64
- point_embedding: Tensor,
65
- ) -> Tuple[Tensor, Tensor]:
66
- """
67
- Args:
68
- image_embedding (torch.Tensor): image to attend to. Should be shape
69
- B x embedding_dim x h x w for any h and w.
70
- image_pe (torch.Tensor): the positional encoding to add to the image. Must
71
- have the same shape as image_embedding.
72
- point_embedding (torch.Tensor): the embedding to add to the query points.
73
- Must have shape B x N_points x embedding_dim for any N_points.
74
-
75
- Returns:
76
- torch.Tensor: the processed point_embedding
77
- torch.Tensor: the processed image_embedding
78
- """
79
- # BxCxHxW -> BxHWxC == B x N_image_tokens x C
80
- bs, c, h, w = image_embedding.shape
81
- image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
82
- image_pe = image_pe.flatten(2).permute(0, 2, 1)
83
-
84
- # Prepare queries
85
- queries = point_embedding
86
- keys = image_embedding
87
-
88
- # Apply transformer blocks and final layernorm
89
- for layer in self.layers:
90
- queries, keys = layer(
91
- queries=queries,
92
- keys=keys,
93
- query_pe=point_embedding,
94
- key_pe=image_pe,
95
- )
96
-
97
- # Apply the final attention layer from the points to the image
98
- q = queries + point_embedding
99
- k = keys + image_pe
100
- attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
101
- queries = queries + attn_out
102
- queries = self.norm_final_attn(queries)
103
-
104
- return queries, keys
105
-
106
-
107
- class TwoWayAttentionBlock(nn.Module):
108
- def __init__(
109
- self,
110
- embedding_dim: int,
111
- num_heads: int,
112
- mlp_dim: int = 2048,
113
- activation: Type[nn.Module] = nn.ReLU,
114
- attention_downsample_rate: int = 2,
115
- skip_first_layer_pe: bool = False,
116
- ) -> None:
117
- """
118
- A transformer block with four layers: (1) self-attention of sparse
119
- inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
120
- block on sparse inputs, and (4) cross attention of dense inputs to sparse
121
- inputs.
122
-
123
- Arguments:
124
- embedding_dim (int): the channel dimension of the embeddings
125
- num_heads (int): the number of heads in the attention layers
126
- mlp_dim (int): the hidden dimension of the mlp block
127
- activation (nn.Module): the activation of the mlp block
128
- skip_first_layer_pe (bool): skip the PE on the first layer
129
- """
130
- super().__init__()
131
- self.self_attn = Attention(embedding_dim, num_heads)
132
- self.norm1 = nn.LayerNorm(embedding_dim)
133
-
134
- self.cross_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
135
- self.norm2 = nn.LayerNorm(embedding_dim)
136
-
137
- self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
138
- self.norm3 = nn.LayerNorm(embedding_dim)
139
-
140
- self.norm4 = nn.LayerNorm(embedding_dim)
141
- self.cross_attn_image_to_token = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
142
-
143
- self.skip_first_layer_pe = skip_first_layer_pe
144
-
145
- def forward(self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor) -> Tuple[Tensor, Tensor]:
146
- # Self attention block
147
- if self.skip_first_layer_pe:
148
- queries = self.self_attn(q=queries, k=queries, v=queries)
149
- else:
150
- q = queries + query_pe
151
- attn_out = self.self_attn(q=q, k=q, v=queries)
152
- queries = queries + attn_out
153
- queries = self.norm1(queries)
154
-
155
- # Cross attention block, tokens attending to image embedding
156
- q = queries + query_pe
157
- k = keys + key_pe
158
- attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
159
- queries = queries + attn_out
160
- queries = self.norm2(queries)
161
-
162
- # MLP block
163
- mlp_out = self.mlp(queries)
164
- queries = queries + mlp_out
165
- queries = self.norm3(queries)
166
-
167
- # Cross attention block, image embedding attending to tokens
168
- q = queries + query_pe
169
- k = keys + key_pe
170
- attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
171
- keys = keys + attn_out
172
- keys = self.norm4(keys)
173
-
174
- return queries, keys
175
-
176
-
177
- class Attention(nn.Module):
178
- """
179
- An attention layer that allows for downscaling the size of the embedding
180
- after projection to queries, keys, and values.
181
- """
182
-
183
- def __init__(
184
- self,
185
- embedding_dim: int,
186
- num_heads: int,
187
- downsample_rate: int = 1,
188
- ) -> None:
189
- super().__init__()
190
- self.embedding_dim = embedding_dim
191
- self.internal_dim = embedding_dim // downsample_rate
192
- self.num_heads = num_heads
193
- assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim."
194
-
195
- self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
196
- self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
197
- self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
198
- self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
199
-
200
- def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
201
- b, n, c = x.shape
202
- x = x.reshape(b, n, num_heads, c // num_heads)
203
- return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
204
-
205
- def _recombine_heads(self, x: Tensor) -> Tensor:
206
- b, n_heads, n_tokens, c_per_head = x.shape
207
- x = x.transpose(1, 2)
208
- return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
209
-
210
- def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
211
- # Input projections
212
- q = self.q_proj(q)
213
- k = self.k_proj(k)
214
- v = self.v_proj(v)
215
-
216
- # Separate into heads
217
- q = self._separate_heads(q, self.num_heads)
218
- k = self._separate_heads(k, self.num_heads)
219
- v = self._separate_heads(v, self.num_heads)
220
-
221
- # Attention
222
- _, _, _, c_per_head = q.shape
223
- attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens
224
- attn = attn / math.sqrt(c_per_head)
225
- attn = torch.softmax(attn, dim=-1)
226
-
227
- # Get output
228
- out = attn @ v
229
- out = self._recombine_heads(out)
230
- out = self.out_proj(out)
231
-
232
- return out
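
To make the shape bookkeeping above concrete, a small sketch of the Attention layer with a downsample rate (all dimensions are illustrative):

    import torch
    from metaseg.modeling.transformer import Attention

    attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
    q = torch.randn(1, 5, 256)      # B x N_query_tokens x C
    kv = torch.randn(1, 4096, 256)  # e.g. a 64x64 image embedding flattened to tokens
    out = attn(q=q, k=kv, v=kv)     # projected to 128 channels internally, back to 256 on output
    print(out.shape)                # torch.Size([1, 5, 256])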
 
metaseg/predictor.py DELETED
@@ -1,264 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from typing import Optional, Tuple
8
-
9
- import numpy as np
10
- import torch
11
-
12
- from metaseg.modeling import Sam
13
- from metaseg.utils.transforms import ResizeLongestSide
14
-
15
-
16
- class SamPredictor:
17
- def __init__(
18
- self,
19
- sam_model: Sam,
20
- ) -> None:
21
- """
22
- Uses SAM to calculate the image embedding for an image, and then
23
- allow repeated, efficient mask prediction given prompts.
24
-
25
- Arguments:
26
- sam_model (Sam): The model to use for mask prediction.
27
- """
28
- super().__init__()
29
- self.model = sam_model
30
- self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)
31
- self.reset_image()
32
-
33
- def set_image(
34
- self,
35
- image: np.ndarray,
36
- image_format: str = "RGB",
37
- ) -> None:
38
- """
39
- Calculates the image embeddings for the provided image, allowing
40
- masks to be predicted with the 'predict' method.
41
-
42
- Arguments:
43
- image (np.ndarray): The image for calculating masks. Expects an
44
- image in HWC uint8 format, with pixel values in [0, 255].
45
- image_format (str): The color format of the image, in ['RGB', 'BGR'].
46
- """
47
- assert image_format in [
48
- "RGB",
49
- "BGR",
50
- ], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
51
- if image_format != self.model.image_format:
52
- image = image[..., ::-1]
53
-
54
- # Transform the image to the form expected by the model
55
- input_image = self.transform.apply_image(image)
56
- input_image_torch = torch.as_tensor(input_image, device=self.device)
57
- input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
58
-
59
- self.set_torch_image(input_image_torch, image.shape[:2])
60
-
61
- @torch.no_grad()
62
- def set_torch_image(
63
- self,
64
- transformed_image: torch.Tensor,
65
- original_image_size: Tuple[int, ...],
66
- ) -> None:
67
- """
68
- Calculates the image embeddings for the provided image, allowing
69
- masks to be predicted with the 'predict' method. Expects the input
70
- image to be already transformed to the format expected by the model.
71
-
72
- Arguments:
73
- transformed_image (torch.Tensor): The input image, with shape
74
- 1x3xHxW, which has been transformed with ResizeLongestSide.
75
- original_image_size (tuple(int, int)): The size of the image
76
- before transformation, in (H, W) format.
77
- """
78
- assert (
79
- len(transformed_image.shape) == 4
80
- and transformed_image.shape[1] == 3
81
- and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size
82
- ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}."
83
- self.reset_image()
84
-
85
- self.original_size = original_image_size
86
- self.input_size = tuple(transformed_image.shape[-2:])
87
- input_image = self.model.preprocess(transformed_image)
88
- self.features = self.model.image_encoder(input_image)
89
- self.is_image_set = True
90
-
91
- def predict(
92
- self,
93
- point_coords: Optional[np.ndarray] = None,
94
- point_labels: Optional[np.ndarray] = None,
95
- box: Optional[np.ndarray] = None,
96
- mask_input: Optional[np.ndarray] = None,
97
- multimask_output: bool = True,
98
- return_logits: bool = False,
99
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
100
- """
101
- Predict masks for the given input prompts, using the currently set image.
102
-
103
- Arguments:
104
- point_coords (np.ndarray or None): A Nx2 array of point prompts to the
105
- model. Each point is in (X,Y) in pixels.
106
- point_labels (np.ndarray or None): A length N array of labels for the
107
- point prompts. 1 indicates a foreground point and 0 indicates a
108
- background point.
109
- box (np.ndarray or None): A length 4 array given a box prompt to the
110
- model, in XYXY format.
111
- mask_input (np.ndarray): A low resolution mask input to the model, typically
112
- coming from a previous prediction iteration. Has form 1xHxW, where
113
- for SAM, H=W=256.
114
- multimask_output (bool): If true, the model will return three masks.
115
- For ambiguous input prompts (such as a single click), this will often
116
- produce better masks than a single prediction. If only a single
117
- mask is needed, the model's predicted quality score can be used
118
- to select the best mask. For non-ambiguous prompts, such as multiple
119
- input prompts, multimask_output=False can give better results.
120
- return_logits (bool): If true, returns un-thresholded masks logits
121
- instead of a binary mask.
122
-
123
- Returns:
124
- (np.ndarray): The output masks in CxHxW format, where C is the
125
- number of masks, and (H, W) is the original image size.
126
- (np.ndarray): An array of length C containing the model's
127
- predictions for the quality of each mask.
128
- (np.ndarray): An array of shape CxHxW, where C is the number
129
- of masks and H=W=256. These low resolution logits can be passed to
130
- a subsequent iteration as mask input.
131
- """
132
- if not self.is_image_set:
133
- raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
134
-
135
- # Transform input prompts
136
- coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None
137
- if point_coords is not None:
138
- assert point_labels is not None, "point_labels must be supplied if point_coords is supplied."
139
- point_coords = self.transform.apply_coords(point_coords, self.original_size)
140
- coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)
141
- labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
142
- coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]
143
- if box is not None:
144
- box = self.transform.apply_boxes(box, self.original_size)
145
- box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)
146
- box_torch = box_torch[None, :]
147
- if mask_input is not None:
148
- mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)
149
- mask_input_torch = mask_input_torch[None, :, :, :]
150
-
151
- masks, iou_predictions, low_res_masks = self.predict_torch(
152
- coords_torch,
153
- labels_torch,
154
- box_torch,
155
- mask_input_torch,
156
- multimask_output,
157
- return_logits=return_logits,
158
- )
159
-
160
- masks = masks[0].detach().cpu().numpy()
161
- iou_predictions = iou_predictions[0].detach().cpu().numpy()
162
- low_res_masks = low_res_masks[0].detach().cpu().numpy()
163
- return masks, iou_predictions, low_res_masks
164
-
165
- @torch.no_grad()
166
- def predict_torch(
167
- self,
168
- point_coords: Optional[torch.Tensor],
169
- point_labels: Optional[torch.Tensor],
170
- boxes: Optional[torch.Tensor] = None,
171
- mask_input: Optional[torch.Tensor] = None,
172
- multimask_output: bool = True,
173
- return_logits: bool = False,
174
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
175
- """
176
- Predict masks for the given input prompts, using the currently set image.
177
- Input prompts are batched torch tensors and are expected to already be
178
- transformed to the input frame using ResizeLongestSide.
179
-
180
- Arguments:
181
- point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
182
- model. Each point is in (X,Y) in pixels.
183
- point_labels (torch.Tensor or None): A BxN array of labels for the
184
- point prompts. 1 indicates a foreground point and 0 indicates a
185
- background point.
186
- box (np.ndarray or None): A Bx4 array given a box prompt to the
187
- model, in XYXY format.
188
- mask_input (np.ndarray): A low resolution mask input to the model, typically
189
- coming from a previous prediction iteration. Has form Bx1xHxW, where
190
- for SAM, H=W=256. Masks returned by a previous iteration of the
191
- predict method do not need further transformation.
192
- multimask_output (bool): If true, the model will return three masks.
193
- For ambiguous input prompts (such as a single click), this will often
194
- produce better masks than a single prediction. If only a single
195
- mask is needed, the model's predicted quality score can be used
196
- to select the best mask. For non-ambiguous prompts, such as multiple
197
- input prompts, multimask_output=False can give better results.
198
- return_logits (bool): If true, returns un-thresholded masks logits
199
- instead of a binary mask.
200
-
201
- Returns:
202
- (torch.Tensor): The output masks in BxCxHxW format, where C is the
203
- number of masks, and (H, W) is the original image size.
204
- (torch.Tensor): An array of shape BxC containing the model's
205
- predictions for the quality of each mask.
206
- (torch.Tensor): An array of shape BxCxHxW, where C is the number
207
- of masks and H=W=256. These low res logits can be passed to
208
- a subsequent iteration as mask input.
209
- """
210
- if not self.is_image_set:
211
- raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
212
-
213
- if point_coords is not None:
214
- points = (point_coords, point_labels)
215
- else:
216
- points = None
217
-
218
- # Embed prompts
219
- sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
220
- points=points,
221
- boxes=boxes,
222
- masks=mask_input,
223
- )
224
-
225
- # Predict masks
226
- low_res_masks, iou_predictions = self.model.mask_decoder(
227
- image_embeddings=self.features,
228
- image_pe=self.model.prompt_encoder.get_dense_pe(),
229
- sparse_prompt_embeddings=sparse_embeddings,
230
- dense_prompt_embeddings=dense_embeddings,
231
- multimask_output=multimask_output,
232
- )
233
-
234
- # Upscale the masks to the original image resolution
235
- masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)
236
-
237
- if not return_logits:
238
- masks = masks > self.model.mask_threshold
239
-
240
- return masks, iou_predictions, low_res_masks
241
-
242
- def get_image_embedding(self) -> torch.Tensor:
243
- """
244
- Returns the image embeddings for the currently set image, with
245
- shape 1xCxHxW, where C is the embedding dimension and (H,W) are
246
- the embedding spatial dimension of SAM (typically C=256, H=W=64).
247
- """
248
- if not self.is_image_set:
249
- raise RuntimeError("An image must be set with .set_image(...) to generate an embedding.")
250
- assert self.features is not None, "Features must exist if an image has been set."
251
- return self.features
252
-
253
- @property
254
- def device(self) -> torch.device:
255
- return self.model.device
256
-
257
- def reset_image(self) -> None:
258
- """Resets the currently set image."""
259
- self.is_image_set = False
260
- self.features = None
261
- self.orig_h = None
262
- self.orig_w = None
263
- self.input_h = None
264
- self.input_w = None
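
For reference, a minimal usage sketch of the SamPredictor removed above; the model object, image path, and click coordinates are placeholders:

    import cv2
    import numpy as np
    from metaseg.predictor import SamPredictor

    # `sam` is assumed to be an already-built Sam model.
    predictor = SamPredictor(sam)

    image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
    predictor.set_image(image)  # computes the image embedding once

    masks, scores, low_res_logits = predictor.predict(
        point_coords=np.array([[250, 187]]),  # one (x, y) click in pixels
        point_labels=np.array([1]),           # 1 = foreground
        multimask_output=True,
    )
    # masks: CxHxW boolean masks, scores: per-mask quality estimates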
 
metaseg/utils/__init__.py DELETED
@@ -1,5 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
 
metaseg/utils/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (159 Bytes)
 
metaseg/utils/__pycache__/amg.cpython-310.pyc DELETED
Binary file (12 kB)
 
metaseg/utils/__pycache__/file.cpython-310.pyc DELETED
Binary file (1.16 kB)
 
metaseg/utils/__pycache__/transforms.cpython-310.pyc DELETED
Binary file (3.9 kB)
 
metaseg/utils/amg.py DELETED
@@ -1,330 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import math
8
- from copy import deepcopy
9
- from itertools import product
10
- from typing import Any, Dict, Generator, ItemsView, List, Tuple
11
-
12
- import numpy as np
13
- import torch
14
-
15
-
16
- class MaskData:
17
- """
18
- A structure for storing masks and their related data in batched format.
19
- Implements basic filtering and concatenation.
20
- """
21
-
22
- def __init__(self, **kwargs) -> None:
23
- for v in kwargs.values():
24
- assert isinstance(
25
- v, (list, np.ndarray, torch.Tensor)
26
- ), "MaskData only supports list, numpy arrays, and torch tensors."
27
- self._stats = dict(**kwargs)
28
-
29
- def __setitem__(self, key: str, item: Any) -> None:
30
- assert isinstance(
31
- item, (list, np.ndarray, torch.Tensor)
32
- ), "MaskData only supports list, numpy arrays, and torch tensors."
33
- self._stats[key] = item
34
-
35
- def __delitem__(self, key: str) -> None:
36
- del self._stats[key]
37
-
38
- def __getitem__(self, key: str) -> Any:
39
- return self._stats[key]
40
-
41
- def items(self) -> ItemsView[str, Any]:
42
- return self._stats.items()
43
-
44
- def filter(self, keep: torch.Tensor) -> None:
45
- for k, v in self._stats.items():
46
- if v is None:
47
- self._stats[k] = None
48
- elif isinstance(v, torch.Tensor):
49
- self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
50
- elif isinstance(v, np.ndarray):
51
- self._stats[k] = v[keep.detach().cpu().numpy()]
52
- elif isinstance(v, list) and keep.dtype == torch.bool:
53
- self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
54
- elif isinstance(v, list):
55
- self._stats[k] = [v[i] for i in keep]
56
- else:
57
- raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
58
-
59
- def cat(self, new_stats: "MaskData") -> None:
60
- for k, v in new_stats.items():
61
- if k not in self._stats or self._stats[k] is None:
62
- self._stats[k] = deepcopy(v)
63
- elif isinstance(v, torch.Tensor):
64
- self._stats[k] = torch.cat([self._stats[k], v], dim=0)
65
- elif isinstance(v, np.ndarray):
66
- self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
67
- elif isinstance(v, list):
68
- self._stats[k] = self._stats[k] + deepcopy(v)
69
- else:
70
- raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
71
-
72
- def to_numpy(self) -> None:
73
- for k, v in self._stats.items():
74
- if isinstance(v, torch.Tensor):
75
- self._stats[k] = v.detach().cpu().numpy()
76
-
77
-
78
- def is_box_near_crop_edge(
79
- boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
80
- ) -> torch.Tensor:
81
- """Filter masks at the edge of a crop, but not at the edge of the original image."""
82
- crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
83
- orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
84
- boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
85
- near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
86
- near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
87
- near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
88
- return torch.any(near_crop_edge, dim=1)
89
-
90
-
91
- def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
92
- box_xywh = deepcopy(box_xyxy)
93
- box_xywh[2] = box_xywh[2] - box_xywh[0]
94
- box_xywh[3] = box_xywh[3] - box_xywh[1]
95
- return box_xywh
96
-
97
-
98
- def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
99
- assert len(args) > 0 and all(
100
- len(a) == len(args[0]) for a in args
101
- ), "Batched iteration must have inputs of all the same size."
102
- n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
103
- for b in range(n_batches):
104
- yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
105
-
106
-
107
- def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
108
- """
109
- Encodes masks to an uncompressed RLE, in the format expected by
110
- pycocotools.
111
- """
112
- # Put in fortran order and flatten h,w
113
- b, h, w = tensor.shape
114
- tensor = tensor.permute(0, 2, 1).flatten(1)
115
-
116
- # Compute change indices
117
- diff = tensor[:, 1:] ^ tensor[:, :-1]
118
- change_indices = diff.nonzero()
119
-
120
- # Encode run length
121
- out = []
122
- for i in range(b):
123
- cur_idxs = change_indices[change_indices[:, 0] == i, 1]
124
- cur_idxs = torch.cat(
125
- [
126
- torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
127
- cur_idxs + 1,
128
- torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
129
- ]
130
- )
131
- btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
132
- counts = [] if tensor[i, 0] == 0 else [0]
133
- counts.extend(btw_idxs.detach().cpu().tolist())
134
- out.append({"size": [h, w], "counts": counts})
135
- return out
136
-
137
-
138
- def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
139
- """Compute a binary mask from an uncompressed RLE."""
140
- h, w = rle["size"]
141
- mask = np.empty(h * w, dtype=bool)
142
- idx = 0
143
- parity = False
144
- for count in rle["counts"]:
145
- mask[idx : idx + count] = parity
146
- idx += count
147
- parity ^= True
148
- mask = mask.reshape(w, h)
149
- return mask.transpose() # Put in C order
150
-
151
-
152
- def area_from_rle(rle: Dict[str, Any]) -> int:
153
- return sum(rle["counts"][1::2])
154
-
155
-
156
- def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, threshold_offset: float) -> torch.Tensor:
157
- """
158
- Computes the stability score for a batch of masks. The stability
159
- score is the IoU between the binary masks obtained by thresholding
160
- the predicted mask logits at high and low values.
161
- """
162
- # One mask is always contained inside the other.
163
- # Save memory by preventing unnecessary cast to torch.int64
164
- intersections = (masks > (mask_threshold + threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
165
- unions = (masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
166
- return intersections / unions
167
-
168
-
169
- def build_point_grid(n_per_side: int) -> np.ndarray:
170
- """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
171
- offset = 1 / (2 * n_per_side)
172
- points_one_side = np.linspace(offset, 1 - offset, n_per_side)
173
- points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
174
- points_y = np.tile(points_one_side[:, None], (1, n_per_side))
175
- points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
176
- return points
177
-
178
-
179
- def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer: int) -> List[np.ndarray]:
180
- """Generates point grids for all crop layers."""
181
- points_by_layer = []
182
- for i in range(n_layers + 1):
183
- n_points = int(n_per_side / (scale_per_layer ** i))
184
- points_by_layer.append(build_point_grid(n_points))
185
- return points_by_layer
186
-
187
-
188
- def generate_crop_boxes(
189
- im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
190
- ) -> Tuple[List[List[int]], List[int]]:
191
- """
192
- Generates a list of crop boxes of different sizes. Each layer
193
- has (2**i)**2 boxes for the ith layer.
194
- """
195
- crop_boxes, layer_idxs = [], []
196
- im_h, im_w = im_size
197
- short_side = min(im_h, im_w)
198
-
199
- # Original image
200
- crop_boxes.append([0, 0, im_w, im_h])
201
- layer_idxs.append(0)
202
-
203
- def crop_len(orig_len, n_crops, overlap):
204
- return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))
205
-
206
- for i_layer in range(n_layers):
207
- n_crops_per_side = 2 ** (i_layer + 1)
208
- overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
209
-
210
- crop_w = crop_len(im_w, n_crops_per_side, overlap)
211
- crop_h = crop_len(im_h, n_crops_per_side, overlap)
212
-
213
- crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
214
- crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
215
-
216
- # Crops in XYXY format
217
- for x0, y0 in product(crop_box_x0, crop_box_y0):
218
- box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
219
- crop_boxes.append(box)
220
- layer_idxs.append(i_layer + 1)
221
-
222
- return crop_boxes, layer_idxs
223
-
224
-
225
- def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
226
- x0, y0, _, _ = crop_box
227
- offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
228
- # Check if boxes has a channel dimension
229
- if len(boxes.shape) == 3:
230
- offset = offset.unsqueeze(1)
231
- return boxes + offset
232
-
233
-
234
- def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
235
- x0, y0, _, _ = crop_box
236
- offset = torch.tensor([[x0, y0]], device=points.device)
237
- # Check if points has a channel dimension
238
- if len(points.shape) == 3:
239
- offset = offset.unsqueeze(1)
240
- return points + offset
241
-
242
-
243
- def uncrop_masks(masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int) -> torch.Tensor:
244
- x0, y0, x1, y1 = crop_box
245
- if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
246
- return masks
247
- # Coordinate transform masks
248
- pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
249
- pad = (x0, pad_x - x0, y0, pad_y - y0)
250
- return torch.nn.functional.pad(masks, pad, value=0)
251
-
252
-
253
- def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> Tuple[np.ndarray, bool]:
254
- """
255
- Removes small disconnected regions and holes in a mask. Returns the
256
- mask and an indicator of if the mask has been modified.
257
- """
258
- import cv2 # type: ignore
259
-
260
- assert mode in ["holes", "islands"]
261
- correct_holes = mode == "holes"
262
- working_mask = (correct_holes ^ mask).astype(np.uint8)
263
- n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
264
- sizes = stats[:, -1][1:] # Row 0 is background label
265
- small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
266
- if len(small_regions) == 0:
267
- return mask, False
268
- fill_labels = [0] + small_regions
269
- if not correct_holes:
270
- fill_labels = [i for i in range(n_labels) if i not in fill_labels]
271
- # If every region is below threshold, keep largest
272
- if len(fill_labels) == 0:
273
- fill_labels = [int(np.argmax(sizes)) + 1]
274
- mask = np.isin(regions, fill_labels)
275
- return mask, True
276
-
277
-
278
- def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
279
- from pycocotools import mask as mask_utils # type: ignore
280
-
281
- h, w = uncompressed_rle["size"]
282
- rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
283
- rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json
284
- return rle
285
-
286
-
287
- def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
288
- """
289
- Calculates boxes in XYXY format around masks. Return [0,0,0,0] for
290
- an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
291
- """
292
- # torch.max below raises an error on empty inputs, just skip in this case
293
- if torch.numel(masks) == 0:
294
- return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
295
-
296
- # Normalize shape to CxHxW
297
- shape = masks.shape
298
- h, w = shape[-2:]
299
- if len(shape) > 2:
300
- masks = masks.flatten(0, -3)
301
- else:
302
- masks = masks.unsqueeze(0)
303
-
304
- # Get top and bottom edges
305
- in_height, _ = torch.max(masks, dim=-1)
306
- in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
307
- bottom_edges, _ = torch.max(in_height_coords, dim=-1)
308
- in_height_coords = in_height_coords + h * (~in_height)
309
- top_edges, _ = torch.min(in_height_coords, dim=-1)
310
-
311
- # Get left and right edges
312
- in_width, _ = torch.max(masks, dim=-2)
313
- in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
314
- right_edges, _ = torch.max(in_width_coords, dim=-1)
315
- in_width_coords = in_width_coords + w * (~in_width)
316
- left_edges, _ = torch.min(in_width_coords, dim=-1)
317
-
318
- # If the mask is empty the right edge will be to the left of the left edge.
319
- # Replace these boxes with [0, 0, 0, 0]
320
- empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
321
- out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
322
- out = out * (~empty_filter).unsqueeze(-1)
323
-
324
- # Return to original shape
325
- if len(shape) > 2:
326
- out = out.reshape(*shape[:-2], 4)
327
- else:
328
- out = out[0]
329
-
330
- return out
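
As a quick illustration of the RLE helpers above, a round trip through the uncompressed RLE format (shapes are illustrative):

    import torch
    from metaseg.utils.amg import area_from_rle, mask_to_rle_pytorch, rle_to_mask

    mask = torch.zeros(1, 4, 6, dtype=torch.bool)  # a batch of one 4x6 mask
    mask[0, 1:3, 2:5] = True                       # a 2x3 foreground rectangle

    rles = mask_to_rle_pytorch(mask)   # list with one uncompressed RLE dict
    print(area_from_rle(rles[0]))      # 6 foreground pixels
    recovered = rle_to_mask(rles[0])   # back to an HxW numpy bool array
    assert (recovered == mask[0].numpy()).all()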
 
metaseg/utils/file.py DELETED
@@ -1,32 +0,0 @@
1
- import os
2
- import urllib.request
3
-
4
-
5
- def download_model(model_type):
6
- """
7
- model_type: str, A string representing the model type. It can be 'vit_h', 'vit_l', or 'vit_b'.
8
- """
9
-
10
- # A dictionary containing model types as keys and their respective URLs as values
11
- model_urls = {
12
- "vit_h": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth",
13
- "vit_l": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth",
14
- "vit_b": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth",
15
- }
16
-
17
- # Check if the model file already exists and model_type is in model_urls
18
- filename = f"{model_type}.pth"
19
- if not os.path.exists(filename) and model_type in model_urls:
20
- url = model_urls[model_type]
21
- print(f"Downloading {model_type} model from {url}...")
22
- urllib.request.urlretrieve(url, filename)
23
- print(f"{model_type} model has been successfully downloaded and saved as '{filename}'.")
24
- elif os.path.exists(filename):
25
- print(f"{model_type} model already exists as '{filename}'. Skipping download.")
26
- else:
27
- raise ValueError("Invalid model type. It should be 'vit_h', 'vit_l', or 'vit_b'.")
28
-
29
- return filename
30
-
31
-
32
- download_model("vit_b")
 
metaseg/utils/onnx.py DELETED
@@ -1,138 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from typing import Tuple
8
-
9
- import torch
10
- import torch.nn as nn
11
- from torch.nn import functional as F
12
-
13
- from metaseg.modeling import Sam
14
- from metaseg.utils.amg import calculate_stability_score
15
-
16
-
17
- class SamOnnxModel(nn.Module):
18
- """
19
- This model should not be called directly, but is used in ONNX export.
20
- It combines the prompt encoder, mask decoder, and mask postprocessing of Sam,
21
- with some functions modified to enable model tracing. Also supports extra
22
- options controlling what information. See the ONNX export script for details.
23
- """
24
-
25
- def __init__(
26
- self,
27
- model: Sam,
28
- return_single_mask: bool,
29
- use_stability_score: bool = False,
30
- return_extra_metrics: bool = False,
31
- ) -> None:
32
- super().__init__()
33
- self.mask_decoder = model.mask_decoder
34
- self.model = model
35
- self.img_size = model.image_encoder.img_size
36
- self.return_single_mask = return_single_mask
37
- self.use_stability_score = use_stability_score
38
- self.stability_score_offset = 1.0
39
- self.return_extra_metrics = return_extra_metrics
40
-
41
- @staticmethod
42
- def resize_longest_image_size(input_image_size: torch.Tensor, longest_side: int) -> torch.Tensor:
43
- input_image_size = input_image_size.to(torch.float32)
44
- scale = longest_side / torch.max(input_image_size)
45
- transformed_size = scale * input_image_size
46
- transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64)
47
- return transformed_size
48
-
49
- def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor:
50
- point_coords = point_coords + 0.5
51
- point_coords = point_coords / self.img_size
52
- point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords)
53
- point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding)
54
-
55
- point_embedding = point_embedding * (point_labels != -1)
56
- point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * (point_labels == -1)
57
-
58
- for i in range(self.model.prompt_encoder.num_point_embeddings):
59
- point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[i].weight * (
60
- point_labels == i
61
- )
62
-
63
- return point_embedding
64
-
65
- def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor:
66
- mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask)
67
- mask_embedding = mask_embedding + (1 - has_mask_input) * self.model.prompt_encoder.no_mask_embed.weight.reshape(
68
- 1, -1, 1, 1
69
- )
70
- return mask_embedding
71
-
72
- def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor:
73
- masks = F.interpolate(
74
- masks,
75
- size=(self.img_size, self.img_size),
76
- mode="bilinear",
77
- align_corners=False,
78
- )
79
-
80
- prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size)
81
- masks = masks[..., : int(prepadded_size[0]), : int(prepadded_size[1])]
82
-
83
- orig_im_size = orig_im_size.to(torch.int64)
84
- h, w = orig_im_size[0], orig_im_size[1]
85
- masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False)
86
- return masks
87
-
88
- def select_masks(
89
- self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int
90
- ) -> Tuple[torch.Tensor, torch.Tensor]:
91
- # Determine if we should return the multiclick mask or not from the number of points.
92
- # The reweighting is used to avoid control flow.
93
- score_reweight = torch.tensor([[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)]).to(
94
- iou_preds.device
95
- )
96
- score = iou_preds + (num_points - 2.5) * score_reweight
97
- best_idx = torch.argmax(score, dim=1)
98
- masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1)
99
- iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1)
100
-
101
- return masks, iou_preds
102
-
103
- @torch.no_grad()
104
- def forward(
105
- self,
106
- image_embeddings: torch.Tensor,
107
- point_coords: torch.Tensor,
108
- point_labels: torch.Tensor,
109
- mask_input: torch.Tensor,
110
- has_mask_input: torch.Tensor,
111
- orig_im_size: torch.Tensor,
112
- ):
113
- sparse_embedding = self._embed_points(point_coords, point_labels)
114
- dense_embedding = self._embed_masks(mask_input, has_mask_input)
115
-
116
- masks, scores = self.model.mask_decoder.predict_masks(
117
- image_embeddings=image_embeddings,
118
- image_pe=self.model.prompt_encoder.get_dense_pe(),
119
- sparse_prompt_embeddings=sparse_embedding,
120
- dense_prompt_embeddings=dense_embedding,
121
- )
122
-
123
- if self.use_stability_score:
124
- scores = calculate_stability_score(masks, self.model.mask_threshold, self.stability_score_offset)
125
-
126
- if self.return_single_mask:
127
- masks, scores = self.select_masks(masks, scores, point_coords.shape[1])
128
-
129
- upscaled_masks = self.mask_postprocessing(masks, orig_im_size)
130
-
131
- if self.return_extra_metrics:
132
- stability_scores = calculate_stability_score(
133
- upscaled_masks, self.model.mask_threshold, self.stability_score_offset
134
- )
135
- areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1)
136
- return upscaled_masks, scores, stability_scores, areas, masks
137
-
138
- return upscaled_masks, scores, masks
 
metaseg/utils/transforms.py DELETED
@@ -1,92 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from copy import deepcopy
8
- from typing import Tuple
9
-
10
- import numpy as np
11
- import torch
12
- from torch.nn import functional as F
13
- from torchvision.transforms.functional import resize, to_pil_image # type: ignore
14
-
15
-
16
- class ResizeLongestSide:
17
- """
18
- Resizes images to longest side 'target_length', as well as provides
19
- methods for resizing coordinates and boxes. Provides methods for
20
- transforming both numpy array and batched torch tensors.
21
- """
22
-
23
- def __init__(self, target_length: int) -> None:
24
- self.target_length = target_length
25
-
26
- def apply_image(self, image: np.ndarray) -> np.ndarray:
27
- """
28
- Expects a numpy array with shape HxWxC in uint8 format.
29
- """
30
- target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)
31
- return np.array(resize(to_pil_image(image), target_size))
32
-
33
- def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
34
- """
35
- Expects a numpy array of length 2 in the final dimension. Requires the
36
- original image size in (H, W) format.
37
- """
38
- old_h, old_w = original_size
39
- new_h, new_w = self.get_preprocess_shape(original_size[0], original_size[1], self.target_length)
40
- coords = deepcopy(coords).astype(float)
41
- coords[..., 0] = coords[..., 0] * (new_w / old_w)
42
- coords[..., 1] = coords[..., 1] * (new_h / old_h)
43
- return coords
44
-
45
- def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
46
- """
47
- Expects a numpy array shape Bx4. Requires the original image size
48
- in (H, W) format.
49
- """
50
- boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)
51
- return boxes.reshape(-1, 4)
52
-
53
- def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:
54
- """
55
- Expects batched images with shape BxCxHxW and float format. This
56
- transformation may not exactly match apply_image. apply_image is
57
- the transformation expected by the model.
58
- """
59
- # Expects an image in BCHW format. May not exactly match apply_image.
60
- target_size = self.get_preprocess_shape(image.shape[2], image.shape[3], self.target_length)
61
- return F.interpolate(image, target_size, mode="bilinear", align_corners=False, antialias=True)
62
-
63
- def apply_coords_torch(self, coords: torch.Tensor, original_size: Tuple[int, ...]) -> torch.Tensor:
64
- """
65
- Expects a torch tensor with length 2 in the last dimension. Requires the
66
- original image size in (H, W) format.
67
- """
68
- old_h, old_w = original_size
69
- new_h, new_w = self.get_preprocess_shape(original_size[0], original_size[1], self.target_length)
70
- coords = deepcopy(coords).to(torch.float)
71
- coords[..., 0] = coords[..., 0] * (new_w / old_w)
72
- coords[..., 1] = coords[..., 1] * (new_h / old_h)
73
- return coords
74
-
75
- def apply_boxes_torch(self, boxes: torch.Tensor, original_size: Tuple[int, ...]) -> torch.Tensor:
76
- """
77
- Expects a torch tensor with shape Bx4. Requires the original image
78
- size in (H, W) format.
79
- """
80
- boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)
81
- return boxes.reshape(-1, 4)
82
-
83
- @staticmethod
84
- def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]:
85
- """
86
- Compute the output size given input size and target long side length.
87
- """
88
- scale = long_side_length * 1.0 / max(oldh, oldw)
89
- newh, neww = oldh * scale, oldw * scale
90
- neww = int(neww + 0.5)
91
- newh = int(newh + 0.5)
92
- return (newh, neww)
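
Note (illustrative sketch, not part of the diff): the transform above always maps the longest image side to `target_length` and rescales coordinates by the per-axis ratio of new size to old size. A quick worked example, assuming the SAM default target length of 1024 and a 1500x2250 image:

```python
def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int):
    # Same math as ResizeLongestSide.get_preprocess_shape above.
    scale = long_side_length * 1.0 / max(oldh, oldw)
    return int(oldh * scale + 0.5), int(oldw * scale + 0.5)

newh, neww = get_preprocess_shape(1500, 2250, 1024)   # -> (683, 1024)

# A point at (x=1125, y=750) in the original image lands at:
x = 1125 * (neww / 2250)                              # -> 512.0
y = 750 * (newh / 1500)                               # -> 341.5
```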
 
requirements.txt CHANGED
@@ -1,8 +1,4 @@
1
- onnxruntime
2
- pycocotools
3
- torch>=1.7
4
- torchvision>=0.8
5
- opencv-python
6
 
7
  # code formatting
8
  black==21.7b0
 
1
+ metaseg==0.3.0
2
 
3
  # code formatting
4
  black==21.7b0
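
Note (sketch, not part of the diff): with the vendored library files and scripts removed, the Space now depends on the pinned `metaseg==0.3.0` package. A minimal mask-generation call through that package, mirroring the API used by the deleted `scripts/amg.py` below; the checkpoint and image paths are placeholders.

```python
import cv2
from metaseg import SamAutomaticMaskGenerator, sam_model_registry

# Placeholder checkpoint path; model types follow the registry used below
# ("default", "vit_l", "vit_b").
sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b.pth")
sam.to(device="cuda")

generator = SamAutomaticMaskGenerator(sam, points_per_side=16, points_per_batch=64)

image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
masks = generator.generate(image)   # list of dicts: "segmentation", "area", "bbox", ...
```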
scripts/amg.py DELETED
@@ -1,233 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import argparse
8
- import json
9
- import os
10
- from typing import Any, Dict, List
11
-
12
- import cv2 # type: ignore
13
-
14
- from metaseg import SamAutomaticMaskGenerator, sam_model_registry
15
-
16
- parser = argparse.ArgumentParser(
17
- description=(
18
- "Runs automatic mask generation on an input image or directory of images, "
19
- "and outputs masks as either PNGs or COCO-style RLEs. Requires open-cv, "
20
- "as well as pycocotools if saving in RLE format."
21
- )
22
- )
23
-
24
- parser.add_argument(
25
- "--input",
26
- type=str,
27
- required=True,
28
- help="Path to either a single input image or folder of images.",
29
- )
30
-
31
- parser.add_argument(
32
- "--output",
33
- type=str,
34
- required=True,
35
- help=(
36
- "Path to the directory where masks will be output. Output will be either a folder "
37
- "of PNGs per image or a single json with COCO-style masks."
38
- ),
39
- )
40
-
41
- parser.add_argument(
42
- "--model-type",
43
- type=str,
44
- default="default",
45
- help="The type of model to load, in ['default', 'vit_l', 'vit_b']",
46
- )
47
-
48
- parser.add_argument(
49
- "--checkpoint",
50
- type=str,
51
- required=True,
52
- help="The path to the SAM checkpoint to use for mask generation.",
53
- )
54
-
55
- parser.add_argument("--device", type=str, default="cuda", help="The device to run generation on.")
56
-
57
- parser.add_argument(
58
- "--convert-to-rle",
59
- action="store_true",
60
- help=("Save masks as COCO RLEs in a single json instead of as a folder of PNGs. " "Requires pycocotools."),
61
- )
62
-
63
- amg_settings = parser.add_argument_group("AMG Settings")
64
-
65
- amg_settings.add_argument(
66
- "--points-per-side",
67
- type=int,
68
- default=None,
69
- help="Generate masks by sampling a grid over the image with this many points to a side.",
70
- )
71
-
72
- amg_settings.add_argument(
73
- "--points-per-batch",
74
- type=int,
75
- default=None,
76
- help="How many input points to process simultaneously in one batch.",
77
- )
78
-
79
- amg_settings.add_argument(
80
- "--pred-iou-thresh",
81
- type=float,
82
- default=None,
83
- help="Exclude masks with a predicted score from the model that is lower than this threshold.",
84
- )
85
-
86
- amg_settings.add_argument(
87
- "--stability-score-thresh",
88
- type=float,
89
- default=None,
90
- help="Exclude masks with a stability score lower than this threshold.",
91
- )
92
-
93
- amg_settings.add_argument(
94
- "--stability-score-offset",
95
- type=float,
96
- default=None,
97
- help="Larger values perturb the mask more when measuring stability score.",
98
- )
99
-
100
- amg_settings.add_argument(
101
- "--box-nms-thresh",
102
- type=float,
103
- default=None,
104
- help="The overlap threshold for excluding a duplicate mask.",
105
- )
106
-
107
- amg_settings.add_argument(
108
- "--crop-n-layers",
109
- type=int,
110
- default=None,
111
- help=(
112
- "If >0, mask generation is run on smaller crops of the image to generate more masks. "
113
- "The value sets how many different scales to crop at."
114
- ),
115
- )
116
-
117
- amg_settings.add_argument(
118
- "--crop-nms-thresh",
119
- type=float,
120
- default=None,
121
- help="The overlap threshold for excluding duplicate masks across different crops.",
122
- )
123
-
124
- amg_settings.add_argument(
125
- "--crop-overlap-ratio",
126
- type=int,
127
- default=None,
128
- help="Larger numbers mean image crops will overlap more.",
129
- )
130
-
131
- amg_settings.add_argument(
132
- "--crop-n-points-downscale-factor",
133
- type=int,
134
- default=None,
135
- help="The number of points-per-side in each layer of crop is reduced by this factor.",
136
- )
137
-
138
- amg_settings.add_argument(
139
- "--min-mask-region-area",
140
- type=int,
141
- default=None,
142
- help=(
143
- "Disconnected mask regions or holes with area smaller than this value "
144
- "in pixels are removed by postprocessing."
145
- ),
146
- )
147
-
148
-
149
- def write_masks_to_folder(masks: List[Dict[str, Any]], path: str) -> None:
150
- header = "id,area,bbox_x0,bbox_y0,bbox_w,bbox_h,point_input_x,point_input_y,predicted_iou,stability_score,crop_box_x0,crop_box_y0,crop_box_w,crop_box_h" # noqa
151
- metadata = [header]
152
- for i, mask_data in enumerate(masks):
153
- mask = mask_data["segmentation"]
154
- filename = f"{i}.png"
155
- cv2.imwrite(os.path.join(path, filename), mask * 255)
156
- mask_metadata = [
157
- str(i),
158
- str(mask_data["area"]),
159
- *[str(x) for x in mask_data["bbox"]],
160
- *[str(x) for x in mask_data["point_coords"][0]],
161
- str(mask_data["predicted_iou"]),
162
- str(mask_data["stability_score"]),
163
- *[str(x) for x in mask_data["crop_box"]],
164
- ]
165
- row = ",".join(mask_metadata)
166
- metadata.append(row)
167
- metadata_path = os.path.join(path, "metadata.csv")
168
- with open(metadata_path, "w") as f:
169
- f.write("\n".join(metadata))
170
-
171
- return
172
-
173
-
174
- def get_amg_kwargs(args):
175
- amg_kwargs = {
176
- "points_per_side": args.points_per_side,
177
- "points_per_batch": args.points_per_batch,
178
- "pred_iou_thresh": args.pred_iou_thresh,
179
- "stability_score_thresh": args.stability_score_thresh,
180
- "stability_score_offset": args.stability_score_offset,
181
- "box_nms_thresh": args.box_nms_thresh,
182
- "crop_n_layers": args.crop_n_layers,
183
- "crop_nms_thresh": args.crop_nms_thresh,
184
- "crop_overlap_ratio": args.crop_overlap_ratio,
185
- "crop_n_points_downscale_factor": args.crop_n_points_downscale_factor,
186
- "min_mask_region_area": args.min_mask_region_area,
187
- }
188
- amg_kwargs = {k: v for k, v in amg_kwargs.items() if v is not None}
189
- return amg_kwargs
190
-
191
-
192
- def main(args: argparse.Namespace) -> None:
193
- print("Loading model...")
194
- sam = sam_model_registry[args.model_type](checkpoint=args.checkpoint)
195
- _ = sam.to(device=args.device)
196
- output_mode = "coco_rle" if args.convert_to_rle else "binary_mask"
197
- amg_kwargs = get_amg_kwargs(args)
198
- generator = SamAutomaticMaskGenerator(sam, output_mode=output_mode, **amg_kwargs)
199
-
200
- if not os.path.isdir(args.input):
201
- targets = [args.input]
202
- else:
203
- targets = [f for f in os.listdir(args.input) if not os.path.isdir(os.path.join(args.input, f))]
204
- targets = [os.path.join(args.input, f) for f in targets]
205
-
206
- os.makedirs(args.output, exist_ok=True)
207
-
208
- for t in targets:
209
- print(f"Processing '{t}'...")
210
- image = cv2.imread(t)
211
- if image is None:
212
- print(f"Could not load '{t}' as an image, skipping...")
213
- continue
214
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
215
-
216
- masks = generator.generate(image)
217
-
218
- base = os.path.basename(t)
219
- base = os.path.splitext(base)[0]
220
- save_base = os.path.join(args.output, base)
221
- if output_mode == "binary_mask":
222
- os.makedirs(save_base, exist_ok=False)
223
- write_masks_to_folder(masks, save_base)
224
- else:
225
- save_file = save_base + ".json"
226
- with open(save_file, "w") as f:
227
- json.dump(masks, f)
228
- print("Done!")
229
-
230
-
231
- if __name__ == "__main__":
232
- args = parser.parse_args()
233
- main(args)
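
Note (sketch, not part of the diff): when the script above is run with `--convert-to-rle`, each image produces a single JSON file of COCO-style RLEs instead of a folder of PNGs. Assuming pycocotools is installed (as the script's description notes), those RLEs can be turned back into binary masks roughly like this; the output path is a placeholder.

```python
import json

from pycocotools import mask as mask_utils

with open("output/example.json") as f:   # placeholder output path
    anns = json.load(f)

# Each annotation's "segmentation" is an RLE dict; decode() returns
# an HxW uint8 array per mask.
binary_masks = [mask_utils.decode(ann["segmentation"]) for ann in anns]
```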
 
scripts/code_format.sh DELETED
@@ -1,2 +0,0 @@
1
- black . --config pyproject.toml
2
- isort .
 
 
 
scripts/export_onnx_model.py DELETED
@@ -1,198 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import argparse
8
- import warnings
9
-
10
- import torch
11
-
12
- from metaseg import build_sam, build_sam_vit_b, build_sam_vit_l
13
- from metaseg.utils.onnx import SamOnnxModel
14
-
15
- try:
16
- import onnxruntime # type: ignore
17
-
18
- onnxruntime_exists = True
19
- except ImportError:
20
- onnxruntime_exists = False
21
-
22
- parser = argparse.ArgumentParser(description="Export the SAM prompt encoder and mask decoder to an ONNX model.")
23
-
24
- parser.add_argument("--checkpoint", type=str, required=True, help="The path to the SAM model checkpoint.")
25
-
26
- parser.add_argument("--output", type=str, required=True, help="The filename to save the ONNX model to.")
27
-
28
- parser.add_argument(
29
- "--model-type",
30
- type=str,
31
- default="default",
32
- help="In ['default', 'vit_b', 'vit_l']. Which type of SAM model to export.",
33
- )
34
-
35
- parser.add_argument(
36
- "--return-single-mask",
37
- action="store_true",
38
- help=(
39
- "If true, the exported ONNX model will only return the best mask, "
40
- "instead of returning multiple masks. For high resolution images "
41
- "this can improve runtime when upscaling masks is expensive."
42
- ),
43
- )
44
-
45
- parser.add_argument(
46
- "--opset",
47
- type=int,
48
- default=17,
49
- help="The ONNX opset version to use. Must be >=11",
50
- )
51
-
52
- parser.add_argument(
53
- "--quantize-out",
54
- type=str,
55
- default=None,
56
- help=(
57
- "If set, will quantize the model and save it with this name. "
58
- "Quantization is performed with quantize_dynamic from onnxruntime.quantization.quantize."
59
- ),
60
- )
61
-
62
- parser.add_argument(
63
- "--gelu-approximate",
64
- action="store_true",
65
- help=(
66
- "Replace GELU operations with approximations using tanh. Useful "
67
- "for some runtimes that have slow or unimplemented erf ops, used in GELU."
68
- ),
69
- )
70
-
71
- parser.add_argument(
72
- "--use-stability-score",
73
- action="store_true",
74
- help=(
75
- "Replaces the model's predicted mask quality score with the stability "
76
- "score calculated on the low resolution masks using an offset of 1.0. "
77
- ),
78
- )
79
-
80
- parser.add_argument(
81
- "--return-extra-metrics",
82
- action="store_true",
83
- help=(
84
- "The model will return five results: (masks, scores, stability_scores, "
85
- "areas, low_res_logits) instead of the usual three. This can be "
86
- "significantly slower for high resolution outputs."
87
- ),
88
- )
89
-
90
-
91
- def run_export(
92
- model_type: str,
93
- checkpoint: str,
94
- output: str,
95
- opset: int,
96
- return_single_mask: bool,
97
- gelu_approximate: bool = False,
98
- use_stability_score: bool = False,
99
- return_extra_metrics=False,
100
- ):
101
- print("Loading model...")
102
- if model_type == "vit_b":
103
- sam = build_sam_vit_b(checkpoint)
104
- elif model_type == "vit_l":
105
- sam = build_sam_vit_l(checkpoint)
106
- else:
107
- sam = build_sam(checkpoint)
108
-
109
- onnx_model = SamOnnxModel(
110
- model=sam,
111
- return_single_mask=return_single_mask,
112
- use_stability_score=use_stability_score,
113
- return_extra_metrics=return_extra_metrics,
114
- )
115
-
116
- if gelu_approximate:
117
- for n, m in onnx_model.named_modules():
118
- if isinstance(m, torch.nn.GELU):
119
- m.approximate = "tanh"
120
-
121
- dynamic_axes = {
122
- "point_coords": {1: "num_points"},
123
- "point_labels": {1: "num_points"},
124
- }
125
-
126
- embed_dim = sam.prompt_encoder.embed_dim
127
- embed_size = sam.prompt_encoder.image_embedding_size
128
- mask_input_size = [4 * x for x in embed_size]
129
- dummy_inputs = {
130
- "image_embeddings": torch.randn(1, embed_dim, *embed_size, dtype=torch.float),
131
- "point_coords": torch.randint(low=0, high=1024, size=(1, 5, 2), dtype=torch.float),
132
- "point_labels": torch.randint(low=0, high=4, size=(1, 5), dtype=torch.float),
133
- "mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float),
134
- "has_mask_input": torch.tensor([1], dtype=torch.float),
135
- "orig_im_size": torch.tensor([1500, 2250], dtype=torch.float),
136
- }
137
-
138
- _ = onnx_model(**dummy_inputs)
139
-
140
- output_names = ["masks", "iou_predictions", "low_res_masks"]
141
-
142
- with warnings.catch_warnings():
143
- warnings.filterwarnings("ignore", category=torch.jit.TracerWarning)
144
- warnings.filterwarnings("ignore", category=UserWarning)
145
- with open(output, "wb") as f:
146
- print(f"Exporting onnx model to {output}...")
147
- torch.onnx.export(
148
- onnx_model,
149
- tuple(dummy_inputs.values()),
150
- f,
151
- export_params=True,
152
- verbose=False,
153
- opset_version=opset,
154
- do_constant_folding=True,
155
- input_names=list(dummy_inputs.keys()),
156
- output_names=output_names,
157
- dynamic_axes=dynamic_axes,
158
- )
159
-
160
- if onnxruntime_exists:
161
- ort_inputs = {k: to_numpy(v) for k, v in dummy_inputs.items()}
162
- ort_session = onnxruntime.InferenceSession(output)
163
- _ = ort_session.run(None, ort_inputs)
164
- print("Model has successfully been run with ONNXRuntime.")
165
-
166
-
167
- def to_numpy(tensor):
168
- return tensor.cpu().numpy()
169
-
170
-
171
- if __name__ == "__main__":
172
- args = parser.parse_args()
173
- run_export(
174
- model_type=args.model_type,
175
- checkpoint=args.checkpoint,
176
- output=args.output,
177
- opset=args.opset,
178
- return_single_mask=args.return_single_mask,
179
- gelu_approximate=args.gelu_approximate,
180
- use_stability_score=args.use_stability_score,
181
- return_extra_metrics=args.return_extra_metrics,
182
- )
183
-
184
- if args.quantize_out is not None:
185
- assert onnxruntime_exists, "onnxruntime is required to quantize the model."
186
- from onnxruntime.quantization import QuantType # type: ignore
187
- from onnxruntime.quantization.quantize import quantize_dynamic # type: ignore
188
-
189
- print(f"Quantizing model and writing to {args.quantize_out}...")
190
- quantize_dynamic(
191
- model_input=args.output,
192
- model_output=args.quantize_out,
193
- optimize_model=True,
194
- per_channel=False,
195
- reduce_range=False,
196
- weight_type=QuantType.QUInt8,
197
- )
198
- print("Done!")
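
Note (sketch, not part of the diff): once exported (and optionally quantized), the decoder can be driven from onnxruntime alone, mirroring the script's own sanity check above. The sizes assume the default SAM encoder (256-dim, 64x64 embeddings), the filename is a placeholder, and point coordinates are expected in the resized longest-side-1024 frame (e.g. via `ResizeLongestSide.apply_coords` from the removed transforms module).

```python
import numpy as np
import onnxruntime

session = onnxruntime.InferenceSession("sam_decoder.onnx")   # placeholder filename

ort_inputs = {
    "image_embeddings": np.random.randn(1, 256, 64, 64).astype(np.float32),
    "point_coords": np.array([[[512.0, 512.0], [0.0, 0.0]]], dtype=np.float32),
    "point_labels": np.array([[1.0, -1.0]], dtype=np.float32),   # -1 marks a padding point
    "mask_input": np.zeros((1, 1, 256, 256), dtype=np.float32),
    "has_mask_input": np.array([0.0], dtype=np.float32),
    "orig_im_size": np.array([1500.0, 2250.0], dtype=np.float32),
}

# Output order matches the export script's output_names.
masks, iou_predictions, low_res_masks = session.run(None, ort_inputs)
```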
 
scripts/package.sh DELETED
@@ -1,2 +0,0 @@
1
- python setup.py sdist
2
- twine upload dist/*