Commit 9b2b882 · Parent: ee32f46

Update README.md
README.md (changed)
@@ -37,25 +37,29 @@ Here is how to use this model:
Before (removed):

```python
import torch
from PIL import Image
import requests
from transformers import SamModel, SamProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
model = SamModel.from_pretrained("facebook/sam-vit-huge").to(device)
processor = SamProcessor.from_pretrained("facebook/sam-vit-huge")

# the diff view does not render the removed lines that defined raw_image
# and input_points; the placeholders below are hypothetical stand-ins
img_url = "https://example.com/car.png"  # hypothetical URL
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
input_points = [[[450, 600]]]  # hypothetical (x, y) prompt point

inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(**inputs)
```
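The hunk also removed a few trailing post-processing lines that the diff view does not render. As a sketch of what they likely did, the usual pattern with the Transformers `SamProcessor` API looks like the following; none of it is recovered from this commit:

```python
# assumed SAM post-processing, not recovered from the diff:
# upscale the predicted masks back to the original image size
masks = processor.image_processor.post_process_masks(
    outputs.pred_masks.cpu(),
    inputs["original_sizes"].cpu(),
    inputs["reshaped_input_sizes"].cpu(),
)
scores = outputs.iou_scores  # the model's own IoU estimate per mask
```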
After (added):

```python
import torch
import requests  # needed for requests.get below; not shown in the hunk
from PIL import Image
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation

# load Mask2Former fine-tuned on COCO panoptic segmentation
processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-large-coco-panoptic")
model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-large-coco-panoptic")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# model predicts class_queries_logits of shape `(batch_size, num_queries, num_labels + 1)`
# and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
class_queries_logits = outputs.class_queries_logits
masks_queries_logits = outputs.masks_queries_logits

# you can pass them to processor for postprocessing
result = processor.post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
# we refer to the demo notebooks for visualization (see "Resources" section in the Mask2Former docs)
predicted_panoptic_map = result["segmentation"]
```
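Since the added snippet stops at `predicted_panoptic_map`, a minimal follow-up sketch may help: it reads each segment's class name from `result["segments_info"]` via `model.config.id2label` and renders the map, assuming matplotlib is installed. This is illustrative, not part of the commit:

```python
import matplotlib.pyplot as plt

# each entry in segments_info describes one predicted segment
for segment in result["segments_info"]:
    label = model.config.id2label[segment["label_id"]]
    print(f"segment {segment['id']}: {label} (score {segment['score']:.2f})")

# the panoptic map assigns one integer segment id per pixel
plt.imshow(predicted_panoptic_map.numpy())
plt.axis("off")
plt.show()
```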
For more code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/master/en/model_doc/mask2former).
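A final note: unlike the removed SAM snippet, the new example keeps everything on CPU. Moving it to GPU, when one is available, is a small change along these lines (illustrative, not part of the commit):

```python
# optional GPU placement for the same model and inputs as above
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
inputs = {k: v.to(device) for k, v in inputs.items()}
with torch.no_grad():
    outputs = model(**inputs)
```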