import numpy as np

# Silence divide-by-zero and 0/0 warnings: classes that never occur produce
# NaN entries, which the np.nanmean calls below simply skip.
np.seterr(divide='ignore', invalid='ignore')


def hist_info(n_cl, pred, gt):
    """Return the confusion matrix plus labeled / correct pixel counts."""
    assert pred.shape == gt.shape
    # Only pixels whose ground-truth label is a valid class id are scored.
    k = (gt >= 0) & (gt < n_cl)
    labeled = np.sum(k)
    correct = np.sum(pred[k] == gt[k])

    # n_cl x n_cl confusion matrix: rows are ground truth, columns predictions.
    return np.bincount(n_cl * gt[k].astype(int) + pred[k].astype(int),
                       minlength=n_cl ** 2).reshape(n_cl, n_cl), labeled, correct


def compute_score(hist, correct, labeled):
    # Per-class IoU from the confusion matrix: TP / (TP + FP + FN).
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
    # Per-class recall (class accuracy).
    acc = np.diag(hist) / hist.sum(1)
    mean_acc = np.nanmean(acc)
    mean_IU = np.nanmean(iu)
    # Class 0 is treated as the background class and excluded here.
    mean_IU_no_back = np.nanmean(iu[1:])
    # Frequency-weighted IoU (computed for reference, not returned).
    freq = hist.sum(1) / hist.sum()
    freq_IU = (iu[freq > 0] * freq[freq > 0]).sum()
    mean_pixel_acc = correct / labeled

    return iu, mean_IU, mean_IU_no_back, mean_pixel_acc, mean_acc


def meanIoU(area_intersection, area_union):
    # area_intersection / area_union are expected as 2-D arrays with classes
    # along axis 0; axis=1 (e.g. one column per image) is summed out.
    iou = 1.0 * np.sum(area_intersection, axis=1) / np.sum(area_union, axis=1)
    meaniou = np.nanmean(iou)
    meaniou_no_back = np.nanmean(iou[1:])

    return iou, meaniou, meaniou_no_back


def intersectionAndUnion(imPred, imLab, numClass):
    # Zero out predictions on ignored pixels (label < 0) so they fall outside
    # the histogram range below; valid class ids are 1..numClass.
    imPred = imPred * (imLab >= 0)

    # Pixels where prediction and label agree keep their class id; everything
    # else becomes 0 and is excluded by range=(1, numClass).
    intersection = imPred * (imPred == imLab)
    (area_intersection, _) = np.histogram(intersection, bins=numClass,
                                          range=(1, numClass))

    # Per-class pixel counts for prediction and label, then the union.
    (area_pred, _) = np.histogram(imPred, bins=numClass, range=(1, numClass))
    (area_lab, _) = np.histogram(imLab, bins=numClass, range=(1, numClass))
    area_union = area_pred + area_lab - area_intersection

    return area_intersection, area_union
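

# The sketch below is an assumption, not part of the original module: it shows
# how intersectionAndUnion and meanIoU are typically combined, with per-image
# class areas stacked along axis 1 before meanIoU reduces over that axis.
# The random labels and the helper name are illustrative only.
def _demo_intersection_union(num_images=4, num_classes=5, size=(32, 32)):
    rng = np.random.default_rng(0)
    inters, unions = [], []
    for _ in range(num_images):
        # Class ids 1..num_classes, matching the histogram range above.
        pred = rng.integers(1, num_classes + 1, size=size)
        lab = rng.integers(1, num_classes + 1, size=size)
        inter, union = intersectionAndUnion(pred, lab, num_classes)
        inters.append(inter)
        unions.append(union)
    # Shape (num_classes, num_images), as expected by meanIoU's axis=1 sums.
    return meanIoU(np.stack(inters, axis=1), np.stack(unions, axis=1))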


def mean_pixel_accuracy(pixel_correct, pixel_labeled):
    # np.spacing(1) (machine epsilon) guards against division by zero.
    mean_pixel_accuracy = 1.0 * np.sum(pixel_correct) / (
            np.spacing(1) + np.sum(pixel_labeled))

    return mean_pixel_accuracy


def pixelAccuracy(imPred, imLab):
    # Pixels with label < 0 are treated as unlabeled and excluded.
    pixel_labeled = np.sum(imLab >= 0)
    pixel_correct = np.sum((imPred == imLab) * (imLab >= 0))
    pixel_accuracy = 1.0 * pixel_correct / pixel_labeled

    return pixel_accuracy, pixel_correct, pixel_labeled
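

# Another illustrative sketch (an assumption, not part of the original
# module): run pixelAccuracy per image and aggregate the correct/labeled
# counts with mean_pixel_accuracy. Random labels are placeholders only.
def _demo_pixel_accuracy(num_images=4, num_classes=5, size=(32, 32)):
    rng = np.random.default_rng(1)
    corrects, labeleds = [], []
    for _ in range(num_images):
        pred = rng.integers(0, num_classes, size=size)
        lab = rng.integers(0, num_classes, size=size)
        _, correct, labeled = pixelAccuracy(pred, lab)
        corrects.append(correct)
        labeleds.append(labeled)
    return mean_pixel_accuracy(np.array(corrects), np.array(labeleds))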


def compute_metrics(results, num_classes):
    # Aggregate the per-image statistics produced by hist_info.
    hist = np.zeros((num_classes, num_classes))
    correct = 0
    labeled = 0
    count = 0
    for d in results:
        hist += d['hist']
        correct += d['correct']
        labeled += d['labeled']
        count += d['count']

    _, mean_IU, _, mean_pixel_acc, mean_acc = compute_score(hist, correct,
                                                            labeled)
    return mean_IU, mean_pixel_acc, mean_acc
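

# Minimal end-to-end sketch (an assumption, not in the original module):
# build per-image result dicts with the keys compute_metrics expects
# ('hist', 'correct', 'labeled', 'count') from random data, then aggregate.
# Real evaluation code would fill these from model predictions instead.
if __name__ == '__main__':
    rng = np.random.default_rng(2)
    num_classes, results = 5, []
    for _ in range(4):
        pred = rng.integers(0, num_classes, size=(32, 32))
        gt = rng.integers(0, num_classes, size=(32, 32))
        hist, labeled, correct = hist_info(num_classes, pred, gt)
        results.append({'hist': hist, 'correct': correct,
                        'labeled': labeled, 'count': 1})
    mean_IU, mean_pixel_acc, mean_acc = compute_metrics(results, num_classes)
    print('mean IoU: %.4f  mean pixel acc: %.4f  mean acc: %.4f'
          % (mean_IU, mean_pixel_acc, mean_acc))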