import contextlib
import io
import itertools
import json
import tempfile
import time

from loguru import logger
from tabulate import tabulate
from tqdm import tqdm

import numpy as np

import torch

from .utils import (
    gather,
    is_main_process,
    postprocess,
    synchronize,
    time_synchronized,
    xyxy2xywh
)

def per_class_AR_table(coco_eval, class_names, headers=["class", "AR"], colums=6):
    per_class_AR = {}
    recalls = coco_eval.eval["recall"]
    # recalls has shape (num_iou_thrs, num_classes, num_area_ranges, num_max_dets)
    assert len(class_names) == recalls.shape[1]

    for idx, name in enumerate(class_names):
        # recall for the "all" area range and the largest max-dets setting
        recall = recalls[:, idx, 0, -1]
        recall = recall[recall > -1]
        ar = np.mean(recall) if recall.size else float("nan")
        per_class_AR[name] = float(ar * 100)

    num_cols = min(colums, len(per_class_AR) * len(headers))
    result_pair = [x for pair in per_class_AR.items() for x in pair]
    row_pair = itertools.zip_longest(*[result_pair[i::num_cols] for i in range(num_cols)])
    table_headers = headers * (num_cols // len(headers))
    table = tabulate(
        row_pair, tablefmt="pipe", floatfmt=".3f", headers=table_headers, numalign="left",
    )
    return table

def per_class_AP_table(coco_eval, class_names, headers=["class", "AP"], colums=6):
    per_class_AP = {}
    precisions = coco_eval.eval["precision"]
    # precisions has shape (num_iou_thrs, num_recall_thrs, num_classes, num_area_ranges, num_max_dets)
    assert len(class_names) == precisions.shape[2]

    for idx, name in enumerate(class_names):
        # precision for the "all" area range and the largest max-dets setting
        precision = precisions[:, :, idx, 0, -1]
        precision = precision[precision > -1]
        ap = np.mean(precision) if precision.size else float("nan")
        per_class_AP[name] = float(ap * 100)

    num_cols = min(colums, len(per_class_AP) * len(headers))
    result_pair = [x for pair in per_class_AP.items() for x in pair]
    row_pair = itertools.zip_longest(*[result_pair[i::num_cols] for i in range(num_cols)])
    table_headers = headers * (num_cols // len(headers))
    table = tabulate(
        row_pair, tablefmt="pipe", floatfmt=".3f", headers=table_headers, numalign="left",
    )
    return table

class COCOEvaluator:
    """
    COCO AP evaluation class. All the data in the validation dataset are
    processed and evaluated by the COCO API.
    """

    def __init__(
        self,
        dataloader,
        img_size,
        confthre: float,
        nmsthre: float,
        num_classes: int,
        testdev: bool = False,
        per_class_AP: bool = False,
        per_class_AR: bool = False,
    ):
        """
        Args:
            dataloader (Dataloader): evaluate dataloader.
            img_size (tuple): (height, width) of images after preprocessing; used
                to undo the resize when converting boxes back to original image coordinates.
            confthre (float): confidence threshold ranging from 0 to 1, which
                is defined in the config file.
            nmsthre (float): IoU threshold of non-max suppression ranging from 0 to 1.
            num_classes (int): number of classes passed to postprocessing.
            testdev (bool): whether to dump results in COCO test-dev format. Defaults to False.
            per_class_AP (bool): show per-class AP during evaluation or not. Defaults to False.
            per_class_AR (bool): show per-class AR during evaluation or not. Defaults to False.
        """
        self.dataloader = dataloader
        self.img_size = img_size
        self.confthre = confthre
        self.nmsthre = nmsthre
        self.num_classes = num_classes
        self.testdev = testdev
        self.per_class_AP = per_class_AP
        self.per_class_AR = per_class_AR

    def evaluate(
        self,
        model,
        distributed=False,
        half=False,
        trt_file=None,
        decoder=None,
        test_size=None,
    ):
        """
        COCO average precision (AP) evaluation. Runs inference over the whole
        evaluation dataset and evaluates the results with the COCO API.

        NOTE: This function switches the model to eval mode; save and restore
        the training state beforehand if needed.

        Args:
            model: model to evaluate.

        Returns:
            ap50_95 (float): COCO AP at IoU=0.50:0.95
            ap50 (float): COCO AP at IoU=0.50
            summary (str): summary info of evaluation.
        """

        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor
        model = model.eval()
        if half:
            model = model.half()
        ids = []
        data_list = []

        inference_time = 0
        nms_time = 0
        # the last (possibly partial) batch is excluded from the timing statistics
        n_samples = max(len(self.dataloader) - 1, 1)

        if trt_file is not None:
            from torch2trt import TRTModule

            model_trt = TRTModule()
            model_trt.load_state_dict(torch.load(trt_file))

            x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()
            model(x)
            model = model_trt

        # show a progress bar only on the main process
        if is_main_process():
            progress_bar = tqdm(
                self.dataloader, dynamic_ncols=True, leave=False, total=len(self.dataloader)
            )
        else:
            progress_bar = iter(self.dataloader)

        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(progress_bar):
            with torch.no_grad():
                imgs = imgs.type(tensor_type)

                # skip timing for the last iteration, whose batch may be smaller
                is_time_record = cur_iter < len(self.dataloader) - 1
                if is_time_record:
                    start = time.time()

                outputs = model(imgs)
                if decoder is not None:
                    outputs = decoder(outputs, dtype=outputs.type())

                if is_time_record:
                    infer_end = time_synchronized()
                    inference_time += infer_end - start

                outputs = postprocess(
                    outputs, self.num_classes, self.confthre, self.nmsthre
                )
                if is_time_record:
                    nms_end = time_synchronized()
                    nms_time += nms_end - infer_end

            data_list.extend(self.convert_to_coco_format(outputs, info_imgs, ids, imgs))

        statistics = torch.cuda.FloatTensor([inference_time, nms_time, n_samples])
        if distributed:
            # collect predictions and timing statistics from all ranks on rank 0
            data_list = gather(data_list, dst=0)
            data_list = list(itertools.chain(*data_list))
            torch.distributed.reduce(statistics, dst=0)

        eval_results = self.evaluate_prediction(data_list, statistics)
        synchronize()
        return eval_results

    def convert_to_coco_format(self, outputs, info_imgs, ids, input_imgs=None):
        data_list = []

        # unwrap dataset wrappers to reach the object that owns `class_ids`
        _d = self.dataloader.dataset
        if _d.__class__.__name__ == 'MergedDataset':
            raise NotImplementedError("MergedDataset is not supported here")
        from data import ABDataset
        if _d.__class__.__name__ == '_AugWrapperForDataset':
            _d = _d.raw_dataset
        if isinstance(_d, ABDataset):
            _d = _d.dataset
        if _d.__class__.__name__ == '_SplitDataset':
            raise NotImplementedError("_SplitDataset is not supported here")
        class_ids = _d.class_ids

        # input_imgs is kept for API compatibility but is not used here
        for (output, img_h, img_w, img_id) in zip(
            outputs, info_imgs[0], info_imgs[1], ids
        ):
            if output is None:
                continue
            output = output.cpu()

            bboxes = output[:, 0:4]

            # undo the resize applied during preprocessing
            scale = min(
                self.img_size[0] / float(img_h), self.img_size[1] / float(img_w)
            )
            bboxes /= scale
            bboxes = xyxy2xywh(bboxes)

            cls = output[:, 6]
            scores = output[:, 4] * output[:, 5]
            for ind in range(bboxes.shape[0]):
                if int(cls[ind]) >= len(class_ids):
                    raise RuntimeError(
                        "predicted class index {} is out of range for {} classes".format(
                            int(cls[ind]), len(class_ids)
                        )
                    )
                label = class_ids[int(cls[ind])]
                pred_data = {
                    "image_id": int(img_id),
                    "category_id": label,
                    "bbox": bboxes[ind].numpy().tolist(),
                    "score": scores[ind].numpy().item(),
                    "segmentation": [],
                }
                data_list.append(pred_data)

        return data_list

    def evaluate_prediction(self, data_dict, statistics):
        if not is_main_process():
            return 0, 0, None

        annType = ["segm", "bbox", "keypoints"]

        inference_time = statistics[0].item()
        nms_time = statistics[1].item()
        n_samples = statistics[2].item()

        # average latency in milliseconds per image
        a_infer_time = 1000 * inference_time / (n_samples * self.dataloader.batch_size)
        a_nms_time = 1000 * nms_time / (n_samples * self.dataloader.batch_size)

        time_info = ", ".join(
            [
                "Average {} time: {:.2f} ms".format(k, v)
                for k, v in zip(
                    ["forward", "NMS", "inference"],
                    [a_infer_time, a_nms_time, (a_infer_time + a_nms_time)],
                )
            ]
        )

        info = time_info + "\n"

        if len(data_dict) > 0:
            # unwrap dataset wrappers to reach the object that owns the COCO ground truth
            _d = self.dataloader.dataset
            if _d.__class__.__name__ == 'MergedDataset':
                raise NotImplementedError("MergedDataset is not supported here")
            from data import ABDataset
            if _d.__class__.__name__ == '_AugWrapperForDataset':
                _d = _d.raw_dataset
            if isinstance(_d, ABDataset):
                _d = _d.dataset
            if _d.__class__.__name__ == '_SplitDataset':
                raise NotImplementedError("_SplitDataset is not supported here")
            cocoGt = _d.coco

            if self.testdev:
                json.dump(data_dict, open("./yolox_testdev_2017.json", "w"))
                cocoDt = cocoGt.loadRes("./yolox_testdev_2017.json")
            else:
                _, tmp = tempfile.mkstemp()
                json.dump(data_dict, open(tmp, "w"))
                cocoDt = cocoGt.loadRes(tmp)

            from pycocotools.cocoeval import COCOeval

            logger.warning("Use standard COCOeval.")

            cocoEval = COCOeval(cocoGt, cocoDt, annType[1])
            cocoEval.evaluate()
            cocoEval.accumulate()
            redirect_string = io.StringIO()
            with contextlib.redirect_stdout(redirect_string):
                cocoEval.summarize()
            info += redirect_string.getvalue()
            cat_ids = list(cocoGt.cats.keys())
            cat_names = [cocoGt.cats[catId]['name'] for catId in sorted(cat_ids)]
            if self.per_class_AP:
                AP_table = per_class_AP_table(cocoEval, class_names=cat_names)
                info += "per class AP:\n" + AP_table + "\n"
            if self.per_class_AR:
                AR_table = per_class_AR_table(cocoEval, class_names=cat_names)
                info += "per class AR:\n" + AR_table + "\n"
            return cocoEval.stats[0], cocoEval.stats[1], info
        else:
            return 0, 0, info