franzi2505 committed on
Commit
dd0aa4a
1 Parent(s): bada1b4
Files changed (1) hide show
  1. PanopticQuality.py +11 -11
PanopticQuality.py CHANGED
@@ -170,28 +170,28 @@ class PQMetric(evaluate.Metric):
170
 
171
  def _compute(self, *, predictions, references, **kwargs):
172
  """Called within the evaluate.Metric.compute() method"""
173
- tp = self.pq_metric.metric.true_positives.clone()
174
- fp = self.pq_metric.metric.false_positives.clone()
175
- fn = self.pq_metric.metric.false_negatives.clone()
176
- iou = self.pq_metric.metric.iou_sum.clone()
177
 
178
  # compute scores
179
- result = self.pq_metric.compute() # shape : (area_rngs, n_classes (sorted things + sorted stuffs), scores (pq, sq, rq))
180
 
181
  result_dict = dict()
182
 
183
  if self.per_class:
184
  if not self.split_sq_rq:
185
  result = result.unsqueeze(0)
186
- result_dict["scores"] = {self.id2label[numeric_label]: result[:,:, i].tolist() \
187
  for i, numeric_label in enumerate(self.things_stuffs)}
188
- result_dict["scores"].update({"ALL": result.mean(dim=-1).tolist()})
189
- result_dict["numbers"] = {self.id2label[numeric_label]: [tp[:, i].tolist(), fp[:, i].tolist(), fn[:, i].tolist(), iou[:, i].tolist()] \
190
  for i, numeric_label in enumerate(self.things_stuffs)}
191
- result_dict["numbers"].update({"ALL": [tp.sum(dim=1).tolist(), fp.sum(dim=1).tolist(), fn.sum(dim=1).tolist(), iou.sum(dim=1).tolist()]})
192
  else:
193
- result_dict["scores"] = {"ALL": result.tolist() if self.split_sq_rq else ([result.tolist()] if len(self.pq_metric.get_areas())>1 else [[result.tolist()]])}
194
- result_dict["numbers"] = {"ALL": [tp.sum(dim=-1).tolist(), fp.sum(dim=-1).tolist(), fn.sum(dim=-1).tolist(), iou.sum(dim=-1).tolist()]}
195
 
196
  return result_dict
197
 
 
170
 
171
  def _compute(self, *, predictions, references, **kwargs):
172
  """Called within the evaluate.Metric.compute() method"""
173
+ tp = self.pq_metric.metric.true_positives.clone().cpu()
174
+ fp = self.pq_metric.metric.false_positives.clone().cpu()
175
+ fn = self.pq_metric.metric.false_negatives.clone().cpu()
176
+ iou = self.pq_metric.metric.iou_sum.clone().cpu()
177
 
178
  # compute scores
179
+ result = self.pq_metric.compute().cpu() # shape : (area_rngs, n_classes (sorted things + sorted stuffs), scores (pq, sq, rq))
180
 
181
  result_dict = dict()
182
 
183
  if self.per_class:
184
  if not self.split_sq_rq:
185
  result = result.unsqueeze(0)
186
+ result_dict["scores"] = {self.id2label[numeric_label]: result[:,:, i].numpy() \
187
  for i, numeric_label in enumerate(self.things_stuffs)}
188
+ result_dict["scores"].update({"ALL": result.mean(dim=-1).numpy()})
189
+ result_dict["numbers"] = {self.id2label[numeric_label]: np.stack([tp[:, i].numpy(), fp[:, i].numpy(), fn[:, i].numpy(), iou[:, i].numpy()])\
190
  for i, numeric_label in enumerate(self.things_stuffs)}
191
+ result_dict["numbers"].update({"ALL": np.stack([tp.sum(dim=1).numpy(), fp.sum(dim=1).numpy(), fn.sum(dim=1).numpy(), iou.sum(dim=1).numpy()])})
192
  else:
193
+ result_dict["scores"] = {"ALL": result.numpy() if self.split_sq_rq else (result.numpy()[np.newaxis, ...] if len(self.pq_metric.get_areas())>1 else result.numpy()[np.newaxis, np.newaxis, ...])}
194
+ result_dict["numbers"] = {"ALL": np.stack([tp.sum(dim=-1).numpy(), fp.sum(dim=-1).numpy(), fn.sum(dim=-1).numpy(), iou.sum(dim=-1).numpy()])}
195
 
196
  return result_dict
197