Skip to content

Commit

Permalink
prep calc auc
Browse files Browse the repository at this point in the history
  • Loading branch information
MiXaiLL76 committed Jun 26, 2024
1 parent 09eaa2f commit 986c14c
Show file tree
Hide file tree
Showing 3 changed files with 55 additions and 19 deletions.
1 change: 1 addition & 0 deletions csrc/faster_eval_api/faster_eval_api.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@ namespace coco_eval

m.def("deepcopy", &COCOeval::deepcopy, "COCOeval::deepcopy");
m.def("_summarize", &COCOeval::_summarize, "COCOeval::_summarize");
m.def("calc_auc", &COCOeval::calc_auc, "COCOeval::calc_auc");

pybind11::class_<COCOeval::Dataset>(m, "Dataset").def(pybind11::init<>())
.def("append", &COCOeval::Dataset::append)
Expand Down
57 changes: 38 additions & 19 deletions faster_coco_eval/core/faster_eval_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

import logging
import time
from typing import List, Union

import numpy as np

Expand Down Expand Up @@ -139,8 +140,10 @@ def accumulate(self):
def math_matches(self):
"""For each ground truth, find the best matching detection and set the
detection as matched."""
for gidx, ground_truth_matches in enumerate(self.ground_truth_matches):
gt_ids = self.ground_truth_orig_id[gidx]
for gidx, ground_truth_matches in enumerate(
self.ground_truth_matches[::-1]
):
gt_ids = self.ground_truth_orig_id[-gidx - 1]

for idx, dt_id in enumerate(ground_truth_matches):
if dt_id == 0:
Expand All @@ -152,6 +155,11 @@ def math_matches(self):

_gt_ann = self.cocoGt.anns[gt_id]
_dt_ann = self.cocoDt.anns[dt_id]

if self.params.useCats:
if _gt_ann["category_id"] != _dt_ann["category_id"]:
continue

_img_id = self.cocoGt.ann_img_map[gt_id]
_catId = _gt_ann["category_id"] if self.params.useCats else -1

Expand All @@ -173,6 +181,8 @@ def math_matches(self):
continue

iou = self.ious[(_img_id, _catId)][iou_dt_id, iou_gt_id]
if (iou <= 0) and self.params.useCats:
continue

if not _gt_ann.get("matched", False):
_dt_ann["tp"] = True
Expand Down Expand Up @@ -211,8 +221,6 @@ def compute_mIoU(self) -> float:
ious.append(dt_ann["iou"])
return sum(ious) / len(ious)

# TODO:
# Convert to C++
def compute_mAUC(self) -> float:
"""Compute the mAUC metric."""
aucs = []
Expand Down Expand Up @@ -290,26 +298,37 @@ def stats_as_dict(self):
_label: float(self.all_stats[i]) for i, _label in enumerate(labels)
}

# TODO:
# Convert to C++
@staticmethod
def calc_auc(recall_list, precision_list):
"""
Calculate area under precision recall curve
recall_list: list of recall values
precision_list: list of precision values
def calc_auc(
recall_list: Union[List[float], np.ndarray],
precision_list: Union[List[float], np.ndarray],
method: str = "c++",
):
"""Calculate area under precision recall curve.
Args:
recall_list (Union[List[float], np.ndarray]):
list of recall values
precision_list (Union[List[float], np.ndarray]):
list of precision values
method (str, optional): method to calculate auc. Defaults to "c++".
Returns:
float: area under precision recall curve
"""
# https://towardsdatascience.com/how-to-efficiently-implement-area-under-precision-recall-curve-pr-auc-a85872fd7f14
# mrec = np.concatenate(([0.], recall_list, [1.]))
# mpre = np.concatenate(([0.], precision_list, [0.]))
mrec = recall_list
mpre = precision_list
if method == "c++":
return round(_C.calc_auc(recall_list, precision_list), 15)
else:
mrec = recall_list
mpre = precision_list

for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

i = np.where(mrec[1:] != mrec[:-1])[0]
return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
i = np.where(mrec[1:] != mrec[:-1])[0]
return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])


# Reassignment, for smooth operation of pycocotools replacement
Expand Down
16 changes: 16 additions & 0 deletions tests/basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@
import os
import unittest

import numpy as np

from faster_coco_eval import COCO, COCOeval_faster
from faster_coco_eval.extra import PreviewResults

Expand Down Expand Up @@ -126,6 +128,20 @@ def test_confusion_matrix(self):

self.assertEqual(result_cm, prepared_result)

def test_auc(self):
    """Both AUC backends must reproduce the sklearn reference value."""
    recall = np.linspace(0, 0.55, 100)
    precision = np.linspace(0, 2, 100) + 0.1

    cpp_auc = COCOeval_faster.calc_auc(recall, precision)
    py_auc = COCOeval_faster.calc_auc(recall, precision, method="py")
    # sklearn not in test space!
    # from sklearn import metrics
    # orig_auc = metrics.auc(x, y)
    reference_auc = 1.1550000000000005

    for computed in (cpp_auc, py_auc):
        self.assertAlmostEqual(computed, reference_auc, places=8)


if __name__ == "__main__":
unittest.main()

0 comments on commit 986c14c

Please sign in to comment.