# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TODO: Add a description here.""" from typing import Dict, List import evaluate import datasets import numpy as np from seametrics.panoptic import PanopticQuality from seametrics.payload import Payload _CITATION = """\ @inproceedings{DBLP:conf/cvpr/KirillovHGRD19, author = {Alexander Kirillov and Kaiming He and Ross B. Girshick and Carsten Rother and Piotr Doll{\'{a}}r}, title = {Panoptic Segmentation}, booktitle = {{IEEE} Conference on Computer Vision and Pattern Recognition, {CVPR} 2019, Long Beach, CA, USA, June 16-20, 2019}, pages = {9404--9413}, publisher = {Computer Vision Foundation / {IEEE}}, year = {2019}, url = {http://openaccess.thecvf.com/content\_CVPR\_2019/html/Kirillov\_Panoptic\_Segmentation\_CVPR\_2019\_paper.html } """ _DESCRIPTION = """\ This evaluation metric calculates Panoptic Quality (PQ) for panoptic segmentation masks. """ _KWARGS_DESCRIPTION = """ Calculates PQ-score given predicted and ground truth panoptic segmentation masks. Args: predictions: a 4-d array of shape (batch_size, img_height, img_width, 2). The last dimension should hold the category index at position 0, and the instance ID at position 1. references: a 4-d array of shape (batch_size, img_height, img_width, 2). The last dimension should hold the category index at position 0, and the instance ID at position 1. Returns: A single float number in range [0, 1] that represents the PQ score. 1 is perfect panoptic segmentation, 0 is worst possible panoptic segmentation. Examples: >>> import evaluate >>> from seametrics.payload.processor import PayloadProcessor >>> MODEL_FIELD = ["maskformer-27k-100ep"] >>> payload = PayloadProcessor("SAILING_PANOPTIC_DATASET_QA", >>> gt_field="ground_truth_det", >>> models=MODEL_FIELD, >>> sequence_list=["Trip_55_Seq_2", "Trip_197_Seq_1", "Trip_197_Seq_68"], >>> excluded_classes=[""]).payload >>> module = evaluate.load("SEA-AI/PanopticQuality") >>> module.add_payload(payload, model_name=MODEL_FIELD[0]) >>> module.compute() 100%|██████████| 3/3 [00:03<00:00, 1.30s/it] Added data ... Start computing ... Finished! """ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class PQMetric(evaluate.Metric): def __init__( self, label2id: Dict[str, int] = None, stuff: List[str] = None, per_class: bool = True, split_sq_rq: bool = True, **kwargs ): super().__init__(**kwargs) DEFAULT_LABEL2ID = {'WATER': 0, 'SKY': 1, 'LAND': 2, 'MOTORBOAT': 3, 'FAR_AWAY_OBJECT': 4, 'SAILING_BOAT_WITH_CLOSED_SAILS': 5, 'SHIP': 6, 'WATERCRAFT': 7, 'SPHERICAL_BUOY': 8, 'CONSTRUCTION': 9, 'FLOTSAM': 10, 'SAILING_BOAT_WITH_OPEN_SAILS': 11, 'CONTAINER': 12, 'PILLAR_BUOY': 13, 'AERIAL_ANIMAL': 14, 'HUMAN_IN_WATER': 15, 'OWN_BOAT': 16, 'WOODEN_LOG': 17, 'MARITIME_ANIMAL': 18} DEFAULT_STUFF = ["WATER", "SKY", "LAND", "CONSTRUCTION", "ICE", "OWN_BOAT"] self.label2id = label2id if label2id is not None else DEFAULT_LABEL2ID self.stuff = stuff if stuff is not None else DEFAULT_STUFF self.per_class = per_class self.split_sq_rq = split_sq_rq self.pq_metric = PanopticQuality( things=set([self.label2id[label] for label in self.label2id.keys() if label not in self.stuff]), stuffs=set([self.label2id[label] for label in self.label2id.keys() if label in self.stuff]), return_per_class=per_class, return_sq_and_rq=split_sq_rq ) def _info(self): return evaluate.MetricInfo( # This is the description that will appear on the modules page. module_type="metric", description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, # This defines the format of each prediction and reference features=datasets.Features( { "predictions": datasets.Sequence( datasets.Sequence( datasets.Sequence( datasets.Sequence(datasets.Value("float")) ) ), ), "references": datasets.Sequence( # batch datasets.Sequence( # img height datasets.Sequence( # img width datasets.Sequence(datasets.Value("float")) # 2 ) ), ), } ), # Additional links to the codebase or references codebase_urls=[ "https://lightning.ai/docs/torchmetrics/stable/detection/panoptic_quality.html" ], ) def add(self, *, prediction, reference, **kwargs): """Adds a batch of predictions and references to the metric""" # in case the inputs are lists, convert them to numpy arrays self.pq_metric.update(prediction, reference) # does not impact the metric, but is required for the interface x_x super(evaluate.Metric, self).add( prediction=self._postprocess(prediction), references=self._postprocess(reference), **kwargs ) def _compute(self, *, predictions, references, **kwargs): """Called within the evaluate.Metric.compute() method""" tp = self.pq_metric.metric.true_positives.clone() fp = self.pq_metric.metric.false_positives.clone() fn = self.pq_metric.metric.false_negatives.clone() iou = self.pq_metric.metric.iou_sum.clone() id2label = {id: label for label, id in self.label2id.items()} things_stuffs = sorted(self.pq_metric.things) + sorted(self.pq_metric.stuffs) # compute scores result = self.pq_metric.compute() # shape : (n_classes (sorted things + sorted stuffs), scores (pq, sq, rq)) result_dict = dict() if self.per_class: if not self.split_sq_rq: result = result.T result_dict["scores"] = {id2label[numeric_label]: result[i].tolist() \ for i, numeric_label in enumerate(things_stuffs)} result_dict["scores"].update({"ALL": result.mean(axis=0).tolist()}) result_dict["numbers"] = {id2label[numeric_label]: [tp[i].item(), fp[i].item(), fn[i].item(), iou[i].item()] \ for i, numeric_label in enumerate(things_stuffs)} result_dict["numbers"].update({"ALL": [tp.sum().item(), fp.sum().item(), fn.sum().item(), iou.sum().item()]}) else: result_dict["scores"] = {"ALL": result.tolist() if self.split_sq_rq else [result.tolist()]} result_dict["numbers"] = {"ALL": [tp.sum().item(), fp.sum().item(), fn.sum().item(), iou.sum().item()]} return result_dict def add_payload(self, payload: Payload, model_name: str = None): """Converts the payload to the format expected by the metric""" # import only if needed since fiftyone is not a direct dependency from seametrics.panoptic.utils import payload_to_seg_metric predictions, references, label2id = payload_to_seg_metric(payload, model_name, self.label2id) self.label2id = label2id self.add(prediction=predictions, reference=references) def _postprocess(self, np_array): """Converts the numpy arrays to lists for type checking""" # add fake data to avoid out of memory problem # only reuqired for interface, not used by metric anyway return np.zeros((1,1,1,1)).tolist()