From 2e373ace8923d7f4e2fa8253f30745bfee798914 Mon Sep 17 00:00:00 2001 From: Sasha Harrison Date: Tue, 19 Apr 2022 14:28:31 -0700 Subject: [PATCH 1/3] add warning for unspecified arguments, and change default values --- nucleus/metrics/cuboid_metrics.py | 40 +++++++++++++++++++++++++------ nucleus/metrics/cuboid_utils.py | 2 ++ 2 files changed, 35 insertions(+), 7 deletions(-) diff --git a/nucleus/metrics/cuboid_metrics.py b/nucleus/metrics/cuboid_metrics.py index a6a11d03..b1ad2cd8 100644 --- a/nucleus/metrics/cuboid_metrics.py +++ b/nucleus/metrics/cuboid_metrics.py @@ -1,4 +1,5 @@ import sys +import warnings from abc import abstractmethod from typing import List, Optional, Union @@ -10,6 +11,9 @@ from .filtering import ListOfAndFilters, ListOfOrAndFilters from .filters import confidence_filter +DEFAULT_IOU_THRESHOLD = 0.1 +DEFAULT_CONFIDENCE_THRESHOLD = 0.0 + class CuboidMetric(Metric): """Abstract class for metrics of cuboids. @@ -28,7 +32,7 @@ class CuboidMetric(Metric): def __init__( self, enforce_label_match: bool = False, - confidence_threshold: float = 0.0, + confidence_threshold: Optional[float] = None, annotation_filters: Optional[ Union[ListOfOrAndFilters, ListOfAndFilters] ] = None, @@ -54,6 +58,12 @@ def __init__( (AND), forming a more selective and multiple column predicate. Finally, the most outer list combines these filters as a disjunction (OR). """ + print("confidence threshold: ", confidence_threshold) + if not confidence_threshold: + confidence_threshold = DEFAULT_CONFIDENCE_THRESHOLD + warnings.warn( + f"Got confidence_threshold value of `None`. In this case, we set the confidence_threshold to {confidence_threshold} (include all predictions, regardless of confidence). Consider specifying this value explicitly during metric initialization" + ) self.enforce_label_match = enforce_label_match assert 0 <= confidence_threshold <= 1 self.confidence_threshold = confidence_threshold @@ -99,8 +109,8 @@ class CuboidIOU(CuboidMetric): def __init__( self, enforce_label_match: bool = True, - iou_threshold: float = 0.0, - confidence_threshold: float = 0.0, + iou_threshold: Optional[float] = None, + confidence_threshold: Optional[float] = None, iou_2d: bool = False, annotation_filters: Optional[ Union[ListOfOrAndFilters, ListOfAndFilters] ] = None, @@ -127,6 +137,11 @@ def __init__( interpreted as a conjunction (AND), forming a more selective and multiple column predicate. Finally, the most outer list combines these filters as a disjunction (OR). """ + if not iou_threshold: + iou_threshold = DEFAULT_IOU_THRESHOLD + warnings.warn( + f"The IoU threshold used for matching was initialized to `None`. In this case, the value of iou_threshold defaults to {iou_threshold}. If this value would produce unexpected behavior, consider specifying the iou_threshold argument during metric initialization" + ) assert ( 0 <= iou_threshold <= 1 ), "IoU threshold must be between 0 and 1." @@ -166,8 +181,8 @@ class CuboidPrecision(CuboidMetric): def __init__( self, enforce_label_match: bool = True, - iou_threshold: float = 0.0, - confidence_threshold: float = 0.0, + iou_threshold: Optional[float] = None, + confidence_threshold: Optional[float] = None, annotation_filters: Optional[ Union[ListOfOrAndFilters, ListOfAndFilters] ] = None, @@ -192,6 +207,11 @@ def __init__( interpreted as a conjunction (AND), forming a more selective and multiple column predicate. Finally, the most outer list combines these filters as a disjunction (OR). 
""" + if not iou_threshold: + iou_threshold = DEFAULT_IOU_THRESHOLD + warnings.warn( + f"The IoU threshold used for matching was initialized to `None`. In this case, the value of iou_threshold defaults to {iou_threshold}. If this values will produce unexpected behavior, consider specifying the iou_threshold argument during metric initialization" + ) assert ( 0 <= iou_threshold <= 1 ), "IoU threshold must be between 0 and 1." @@ -213,6 +233,7 @@ def eval( annotations, threshold_in_overlap_ratio=self.iou_threshold, ) + print(stats) weight = stats["tp_sum"] + stats["fp_sum"] precision = stats["tp_sum"] / max(weight, sys.float_info.epsilon) return ScalarResult(precision, weight) @@ -225,8 +246,8 @@ class CuboidRecall(CuboidMetric): def __init__( self, enforce_label_match: bool = True, - iou_threshold: float = 0.0, - confidence_threshold: float = 0.0, + iou_threshold: Optional[float] = None, + confidence_threshold: Optional[float] = None, annotation_filters: Optional[ Union[ListOfOrAndFilters, ListOfAndFilters] ] = None, @@ -241,6 +262,11 @@ def __init__( iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.0 confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0 """ + if not iou_threshold: + iou_threshold = DEFAULT_IOU_THRESHOLD + warnings.warn( + f"The IoU threshold used for matching was initialized to `None`. In this case, the value of iou_threshold defaults to {iou_threshold}. If this values will produce unexpected behavior, consider specifying the iou_threshold argument during metric initialization" + ) assert ( 0 <= iou_threshold <= 1 ), "IoU threshold must be between 0 and 1." diff --git a/nucleus/metrics/cuboid_utils.py b/nucleus/metrics/cuboid_utils.py index e23821de..27f4650e 100644 --- a/nucleus/metrics/cuboid_utils.py +++ b/nucleus/metrics/cuboid_utils.py @@ -314,6 +314,8 @@ def recall_precision( threshold_in_overlap_ratio=threshold_in_overlap_ratio, ) + print("mapping: ", mapping) + for pred_id, gt_id in mapping: if fn[gt_id] == 0: continue From 5f726e226b5637cfb443a9b64ceb24c012fbb812 Mon Sep 17 00:00:00 2001 From: Sasha Harrison Date: Tue, 19 Apr 2022 15:56:50 -0700 Subject: [PATCH 2/3] add unit test coveragE --- nucleus/metrics/base.py | 10 + nucleus/metrics/cuboid_metrics.py | 16 +- nucleus/metrics/cuboid_utils.py | 33 ++- tests/metrics/test_cuboid_metrics.py | 307 ++++++++++++++++++++++++++- 4 files changed, 351 insertions(+), 15 deletions(-) diff --git a/nucleus/metrics/base.py b/nucleus/metrics/base.py index b316e1be..ce05230a 100644 --- a/nucleus/metrics/base.py +++ b/nucleus/metrics/base.py @@ -11,6 +11,8 @@ ) from nucleus.prediction import PredictionList +EPSILON = 10 ** -4 # 0.0001 + class MetricResult(ABC): """Base MetricResult class""" @@ -41,6 +43,14 @@ def aggregate(results: Iterable["ScalarResult"]) -> "ScalarResult": value = total_value / max(total_weight, sys.float_info.epsilon) return ScalarResult(value, total_weight) + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return ( + abs(self.value - other.value) < EPSILON + and self.weight == other.weight + ) + class Metric(ABC): """Abstract class for defining a metric, which takes a list of annotations diff --git a/nucleus/metrics/cuboid_metrics.py b/nucleus/metrics/cuboid_metrics.py index b1ad2cd8..8d23176f 100644 --- a/nucleus/metrics/cuboid_metrics.py +++ b/nucleus/metrics/cuboid_metrics.py @@ -58,7 +58,6 @@ def __init__( (AND), forming a more selective and multiple column predicate. 
Finally, the most outer list combines these filters as a disjunction (OR). """ - print("confidence threshold: ", confidence_threshold) if not confidence_threshold: confidence_threshold = DEFAULT_CONFIDENCE_THRESHOLD warnings.warn( @@ -162,13 +161,15 @@ def eval( iou_3d_metric, iou_2d_metric = detection_iou( predictions, annotations, - threshold_in_overlap_ratio=self.iou_threshold, + self.iou_threshold, + self.enforce_label_match, ) - weight = max(len(annotations), len(predictions)) if self.iou_2d: + weight = len(iou_2d_metric) avg_iou = iou_2d_metric.sum() / max(weight, sys.float_info.epsilon) else: + weight = len(iou_3d_metric) avg_iou = iou_3d_metric.sum() / max(weight, sys.float_info.epsilon) return ScalarResult(avg_iou, weight) @@ -231,9 +232,10 @@ def eval( stats = recall_precision( predictions, annotations, - threshold_in_overlap_ratio=self.iou_threshold, + self.iou_threshold, + self.confidence_threshold, + self.enforce_label_match, ) - print(stats) weight = stats["tp_sum"] + stats["fp_sum"] precision = stats["tp_sum"] / max(weight, sys.float_info.epsilon) return ScalarResult(precision, weight) @@ -286,7 +288,9 @@ def eval( stats = recall_precision( predictions, annotations, - threshold_in_overlap_ratio=self.iou_threshold, + self.iou_threshold, + self.confidence_threshold, + self.enforce_label_match, ) weight = stats["tp_sum"] + stats["fn_sum"] recall = stats["tp_sum"] / max(weight, sys.float_info.epsilon) diff --git a/nucleus/metrics/cuboid_utils.py b/nucleus/metrics/cuboid_utils.py index 27f4650e..cdf720b3 100644 --- a/nucleus/metrics/cuboid_utils.py +++ b/nucleus/metrics/cuboid_utils.py @@ -101,18 +101,25 @@ def wrapper( return wrapper -def process_dataitem(dataitem): +def process_dataitem(item_list, confidence_threshold=None): + if confidence_threshold: + item_list = [ + item + for item in item_list + if item.confidence >= confidence_threshold + ] processed_item = {} processed_item["xyz"] = np.array( - [[ann.position.x, ann.position.y, ann.position.z] for ann in dataitem] + [[ann.position.x, ann.position.y, ann.position.z] for ann in item_list] ) processed_item["wlh"] = np.array( [ [ann.dimensions.x, ann.dimensions.y, ann.dimensions.z] - for ann in dataitem + for ann in item_list ] ) - processed_item["yaw"] = np.array([ann.yaw for ann in dataitem]) + processed_item["yaw"] = np.array([ann.yaw for ann in item_list]) + processed_item["labels"] = [ann.label for ann in item_list] return processed_item @@ -278,6 +285,8 @@ def recall_precision( prediction: List[CuboidPrediction], groundtruth: List[CuboidAnnotation], threshold_in_overlap_ratio: float, + confidence_threshold: float, + enforce_label_match: bool, ) -> Dict[str, float]: """ Calculates the precision and recall of each lidar frame. 
@@ -295,7 +304,7 @@ def recall_precision( num_instances = 0 gt_items = process_dataitem(groundtruth) - pred_items = process_dataitem(prediction) + pred_items = process_dataitem(prediction, confidence_threshold) num_predicted += pred_items["xyz"].shape[0] num_instances += gt_items["xyz"].shape[0] @@ -314,11 +323,13 @@ def recall_precision( threshold_in_overlap_ratio=threshold_in_overlap_ratio, ) - print("mapping: ", mapping) - for pred_id, gt_id in mapping: if fn[gt_id] == 0: continue + if enforce_label_match and not ( + gt_items["labels"][gt_id] == pred_items["labels"][pred_id] + ): + continue tp[pred_id] = 1 fp[pred_id] = 0 fn[gt_id] = 0 @@ -342,6 +353,7 @@ def detection_iou( prediction: List[CuboidPrediction], groundtruth: List[CuboidAnnotation], threshold_in_overlap_ratio: float, + enforce_label_match: bool, ) -> Tuple[np.ndarray, np.ndarray]: """ Calculates the 2D IOU and 3D IOU overlap between predictions and groundtruth. @@ -372,8 +384,13 @@ def detection_iou( ) for i, m in enumerate(iou_3d.max(axis=1)): + j = iou_3d[i].argmax() + if ( + enforce_label_match + and gt_items["labels"][i] != pred_items["labels"][j] + ): + continue if m >= threshold_in_overlap_ratio: - j = iou_3d[i].argmax() meter_3d.append(iou_3d[i, j]) meter_2d.append(iou_2d[i, j]) diff --git a/tests/metrics/test_cuboid_metrics.py b/tests/metrics/test_cuboid_metrics.py index 8c85063a..5fdf1636 100644 --- a/tests/metrics/test_cuboid_metrics.py +++ b/tests/metrics/test_cuboid_metrics.py @@ -1,5 +1,14 @@ import pytest +from nucleus.annotation import CuboidAnnotation, Point3D +from nucleus.metrics.base import ScalarResult +from nucleus.metrics.cuboid_metrics import ( + CuboidIOU, + CuboidPrecision, + CuboidRecall, +) +from nucleus.prediction import CuboidPrediction + try: import shapely except ModuleNotFoundError: @@ -8,4 +17,300 @@ allow_module_level=True, ) -# TODO(gunnar): Add Cuboid tests! 
+CAR_LABEL = "car" +PEDESTRIAN_LABEL = "pedestrian" + + +def test_cuboid_metrics_simple(): + # single item, perfect predictions + annotations = [ + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(0, 0, 0), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(1000, 1000, 1000), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), + ] + predictions = [ + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(0, 0, 0), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(1000, 1000, 1000), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), + ] + assert CuboidIOU().eval(annotations, predictions) == ScalarResult( + 1.0, len(annotations) + ), "Unexpected Cuboid IoU result" + assert CuboidPrecision().eval(annotations, predictions) == ScalarResult( + 1.0, len(annotations) + ), "Unexpected Cuboid Precision result" + assert CuboidRecall().eval(annotations, predictions) == ScalarResult( + 1.0, len(annotations) + ), "Unexpected Cuboid Recall result" + + +def test_cuboid_metrics_numerical_check(): + # single item, realistic predictions w/ matches and non-matches + annotations = [ + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(0, 0, 0), + dimensions=Point3D(10, 10, 5), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(1000, 1000, 1000), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(-100, -100, -100), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), # false negative + ] + predictions = [ + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(1.0, 1.0, 1.0), + dimensions=Point3D(10, 10, 5), + yaw=0.0, + reference_id="item_A", + ), + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(999, 999, 999), + dimensions=Point3D(8, 8, 6), + yaw=0.0, + reference_id="item_A", + ), + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(250, 250, 250), + dimensions=Point3D(2, 2, 2), + yaw=0.0, + reference_id="item_A", + ), # false positive + ] + cuboid_iou_result = CuboidIOU().eval(annotations, predictions) + cuboid_precision_result = CuboidPrecision().eval(annotations, predictions) + cuboid_recall_result = CuboidRecall().eval(annotations, predictions) + assert cuboid_iou_result == ScalarResult( + 0.4316, 2 + ), f"Unexpected Cuboid IoU result: {cuboid_iou_result}" + assert cuboid_precision_result == ScalarResult( + 2.0 / 3.0, len(predictions) + ), f"Unexpected Cuboid Precision result {cuboid_precision_result}" + assert cuboid_recall_result == ScalarResult( + 2.0 / 3.0, len(annotations) + ), f"Unexpected Cuboid Recall result {cuboid_recall_result}" + + +def test_cuboid_metrics_class_labels(): + annotations = [ + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(0, 0, 0), + dimensions=Point3D(10, 10, 5), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(1000, 1000, 1000), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(-100, -100, -100), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), # false negative + ] + predictions = [ + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(1.0, 1.0, 1.0), + dimensions=Point3D(10, 10, 5), + yaw=0.0, + reference_id="item_A", + 
), + CuboidPrediction( + label=PEDESTRIAN_LABEL, + position=Point3D(999, 999, 999), + dimensions=Point3D(8, 8, 6), + yaw=0.0, + reference_id="item_A", + ), + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(250, 250, 250), + dimensions=Point3D(2, 2, 2), + yaw=0.0, + reference_id="item_A", + ), # false positive + ] + + cuboid_iou_result1 = CuboidIOU().eval(annotations, predictions) + cuboid_precision_result1 = CuboidPrecision().eval(annotations, predictions) + cuboid_recall_result1 = CuboidRecall().eval(annotations, predictions) + assert cuboid_iou_result1 == ScalarResult( + 0.47928, 1 + ), f"Unexpected Cuboid IoU result: {cuboid_iou_result1}" + assert cuboid_precision_result1 == ScalarResult( + 1.0 / 3.0, len(predictions) + ), f"Unexpected Cuboid Precision result {cuboid_precision_result1}" + assert cuboid_recall_result1 == ScalarResult( + 1.0 / 3.0, len(annotations) + ), f"Unexpected Cuboid Recall result {cuboid_recall_result1}" + + cuboid_iou_result2 = CuboidIOU(enforce_label_match=False).eval( + annotations, predictions + ) + cuboid_precision_result2 = CuboidPrecision(enforce_label_match=False).eval( + annotations, predictions + ) + cuboid_recall_result2 = CuboidRecall(enforce_label_match=False).eval( + annotations, predictions + ) + assert cuboid_iou_result2 == ScalarResult( + 0.4316, 2 + ), f"Unexpected Cuboid IoU result: {cuboid_iou_result2}" + assert cuboid_precision_result2 == ScalarResult( + 2.0 / 3.0, len(predictions) + ), f"Unexpected Cuboid Precision result {cuboid_precision_result2}" + assert cuboid_recall_result2 == ScalarResult( + 2.0 / 3.0, len(annotations) + ), f"Unexpected Cuboid Recall result {cuboid_recall_result2}" + + +def test_cuboid_metrics_multi_item(): + # single item, perfect precision + annotations = [ + # first item + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(0, 0, 0), + dimensions=Point3D(10, 10, 5), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(1000, 1000, 1000), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(-100, -100, -100), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), # false negative + # second item + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(0, 0, 0), + dimensions=Point3D(10, 10, 5), + yaw=0.0, + reference_id="item_B", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(30, 50, 120), + dimensions=Point3D(1, 2.5, 3), + yaw=0.0, + reference_id="item_B", + ), + ] + predictions = [ + # first item + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(1.0, 1.0, 1.0), + dimensions=Point3D(10, 10, 5), + yaw=0.0, + reference_id="item_A", + ), + CuboidPrediction( + label=PEDESTRIAN_LABEL, + position=Point3D(999, 999, 999), + dimensions=Point3D(8, 8, 6), + yaw=0.0, + reference_id="item_A", + ), + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(250, 250, 250), + dimensions=Point3D(2, 2, 2), + yaw=0.0, + reference_id="item_A", + ), # false positive + # second item + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(250, 250, 250), + dimensions=Point3D(2, 2, 2), + yaw=0.0, + reference_id="item_B", + ), # false positive + ] + + cuboid_iou_result1 = CuboidIOU().eval(annotations, predictions) + cuboid_precision_result1 = CuboidPrecision().eval(annotations, predictions) + cuboid_recall_result1 = CuboidRecall().eval(annotations, predictions) + assert cuboid_iou_result1 == ScalarResult( + 0.47928, 1 + ), f"Unexpected Cuboid IoU result: 
{cuboid_iou_result1}" + assert cuboid_precision_result1 == ScalarResult( + 1.0 / len(predictions), len(predictions) + ), f"Unexpected Cuboid Precision result {cuboid_precision_result1}" + assert cuboid_recall_result1 == ScalarResult( + 1.0 / len(annotations), len(annotations) + ), f"Unexpected Cuboid Recall result {cuboid_recall_result1}" + + cuboid_iou_result2 = CuboidIOU(enforce_label_match=False).eval( + annotations, predictions + ) + cuboid_precision_result2 = CuboidPrecision(enforce_label_match=False).eval( + annotations, predictions + ) + cuboid_recall_result2 = CuboidRecall(enforce_label_match=False).eval( + annotations, predictions + ) + assert cuboid_iou_result2 == ScalarResult( + 0.4316, 2 + ), f"Unexpected Cuboid IoU result: {cuboid_iou_result2}" + assert cuboid_precision_result2 == ScalarResult( + 2.0 / len(predictions), len(predictions) + ), f"Unexpected Cuboid Precision result {cuboid_precision_result2}" + assert cuboid_recall_result2 == ScalarResult( + 2.0 / len(annotations), len(annotations) + ), f"Unexpected Cuboid Recall result {cuboid_recall_result2}" From af6c06e3da94470c01e72ed5f393be849f7a45b2 Mon Sep 17 00:00:00 2001 From: Sasha Harrison Date: Sat, 23 Apr 2022 14:07:33 -0700 Subject: [PATCH 3/3] address PR comments --- nucleus/metrics/cuboid_metrics.py | 10 +- nucleus/metrics/cuboid_utils.py | 77 ++- .../available_eval_functions.py | 19 +- tests/metrics/test_cuboid_metrics.py | 498 ++++++++++-------- 4 files changed, 338 insertions(+), 266 deletions(-) diff --git a/nucleus/metrics/cuboid_metrics.py b/nucleus/metrics/cuboid_metrics.py index 8d23176f..a06a91ce 100644 --- a/nucleus/metrics/cuboid_metrics.py +++ b/nucleus/metrics/cuboid_metrics.py @@ -31,6 +31,7 @@ class CuboidMetric(Metric): def __init__( self, + iou_threshold: float, enforce_label_match: bool = False, confidence_threshold: Optional[float] = None, annotation_filters: Optional[ @@ -148,6 +149,7 @@ def __init__( self.iou_2d = iou_2d super().__init__( enforce_label_match=enforce_label_match, + iou_threshold=iou_threshold, confidence_threshold=confidence_threshold, annotation_filters=annotation_filters, prediction_filters=prediction_filters, @@ -162,15 +164,15 @@ def eval( predictions, annotations, self.iou_threshold, - self.enforce_label_match, ) + # If there are zero IoU matches, avg_iou defaults to value 0 if self.iou_2d: weight = len(iou_2d_metric) - avg_iou = iou_2d_metric.sum() / max(weight, sys.float_info.epsilon) + avg_iou = iou_2d_metric.sum() / weight if weight > 0 else 0.0 else: weight = len(iou_3d_metric) - avg_iou = iou_3d_metric.sum() / max(weight, sys.float_info.epsilon) + avg_iou = iou_3d_metric.sum() / weight if weight > 0 else 0.0 return ScalarResult(avg_iou, weight) @@ -219,6 +221,7 @@ def __init__( self.iou_threshold = iou_threshold super().__init__( enforce_label_match=enforce_label_match, + iou_threshold=iou_threshold, confidence_threshold=confidence_threshold, annotation_filters=annotation_filters, prediction_filters=prediction_filters, @@ -275,6 +278,7 @@ def __init__( self.iou_threshold = iou_threshold super().__init__( enforce_label_match=enforce_label_match, + iou_threshold=iou_threshold, confidence_threshold=confidence_threshold, annotation_filters=annotation_filters, prediction_filters=prediction_filters, diff --git a/nucleus/metrics/cuboid_utils.py b/nucleus/metrics/cuboid_utils.py index cdf720b3..24523e60 100644 --- a/nucleus/metrics/cuboid_utils.py +++ b/nucleus/metrics/cuboid_utils.py @@ -1,4 +1,5 @@ from functools import wraps +from dataclasses import dataclass from typing 
import Dict, List, Tuple import numpy as np @@ -35,6 +36,14 @@ def __init__(self, *args, **kwargs): from .base import ScalarResult +@dataclass +class ProcessedCuboids: + xyz: np.array + wlh: np.array + yaw: np.array + labels: List[str] + + def group_cuboids_by_label( annotations: List[CuboidAnnotation], predictions: List[CuboidPrediction], @@ -101,26 +110,25 @@ def wrapper( return wrapper -def process_dataitem(item_list, confidence_threshold=None): +def process_cuboids(item_list, confidence_threshold=None): if confidence_threshold: item_list = [ item for item in item_list if item.confidence >= confidence_threshold ] - processed_item = {} - processed_item["xyz"] = np.array( + xyz = np.array( [[ann.position.x, ann.position.y, ann.position.z] for ann in item_list] ) - processed_item["wlh"] = np.array( + wlh = np.array( [ [ann.dimensions.x, ann.dimensions.y, ann.dimensions.z] for ann in item_list ] ) - processed_item["yaw"] = np.array([ann.yaw for ann in item_list]) - processed_item["labels"] = [ann.label for ann in item_list] - return processed_item + yaw = np.array([ann.yaw for ann in item_list]) + labels = [ann.label for ann in item_list] + return ProcessedCuboids(xyz, wlh, yaw, labels) def compute_outer_iou( @@ -185,7 +193,6 @@ def compute_outer_iou( .intersection(polygon_1) .area ) - intersection = height_intersection * area_intersection area_0 = wlh_0[:, 0] * wlh_0[:, 1] area_1 = wlh_1[:, 0] * wlh_1[:, 1] @@ -303,33 +310,29 @@ def recall_precision( num_predicted = 0 num_instances = 0 - gt_items = process_dataitem(groundtruth) - pred_items = process_dataitem(prediction, confidence_threshold) + gt_items = process_cuboids(groundtruth) + pred_items = process_cuboids(prediction, confidence_threshold) - num_predicted += pred_items["xyz"].shape[0] - num_instances += gt_items["xyz"].shape[0] + num_predicted += pred_items.xyz.shape[0] + num_instances += gt_items.xyz.shape[0] - tp = np.zeros(pred_items["xyz"].shape[0]) - fp = np.ones(pred_items["xyz"].shape[0]) - fn = np.ones(gt_items["xyz"].shape[0]) + tp = np.zeros(pred_items.xyz.shape[0]) + fp = np.ones(pred_items.xyz.shape[0]) + fn = np.ones(gt_items.xyz.shape[0]) mapping = associate_cuboids_on_iou( - pred_items["xyz"], - pred_items["wlh"], - pred_items["yaw"] + np.pi / 2, - gt_items["xyz"], - gt_items["wlh"], - gt_items["yaw"] + np.pi / 2, + pred_items.xyz, + pred_items.wlh, + pred_items.yaw + np.pi / 2, + gt_items.xyz, + gt_items.wlh, + gt_items.yaw + np.pi / 2, threshold_in_overlap_ratio=threshold_in_overlap_ratio, ) for pred_id, gt_id in mapping: if fn[gt_id] == 0: continue - if enforce_label_match and not ( - gt_items["labels"][gt_id] == pred_items["labels"][pred_id] - ): - continue tp[pred_id] = 1 fp[pred_id] = 0 fn[gt_id] = 0 @@ -353,7 +356,6 @@ def detection_iou( prediction: List[CuboidPrediction], groundtruth: List[CuboidAnnotation], threshold_in_overlap_ratio: float, - enforce_label_match: bool, ) -> Tuple[np.ndarray, np.ndarray]: """ Calculates the 2D IOU and 3D IOU overlap between predictions and groundtruth. @@ -365,31 +367,26 @@ def detection_iou( :param threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. 
""" - gt_items = process_dataitem(groundtruth) - pred_items = process_dataitem(prediction) + gt_items = process_cuboids(groundtruth) + pred_items = process_cuboids(prediction) meter_2d = [] meter_3d = [] - if gt_items["xyz"].shape[0] == 0 or pred_items["xyz"].shape[0] == 0: + if gt_items.xyz.shape[0] == 0 or pred_items.xyz.shape[0] == 0: return np.array([0.0]), np.array([0.0]) iou_3d, iou_2d = compute_outer_iou( - gt_items["xyz"], - gt_items["wlh"], - gt_items["yaw"], - pred_items["xyz"], - pred_items["wlh"], - pred_items["yaw"], + gt_items.xyz, + gt_items.wlh, + gt_items.yaw, + pred_items.xyz, + pred_items.wlh, + pred_items.yaw, ) for i, m in enumerate(iou_3d.max(axis=1)): j = iou_3d[i].argmax() - if ( - enforce_label_match - and gt_items["labels"][i] != pred_items["labels"][j] - ): - continue if m >= threshold_in_overlap_ratio: meter_3d.append(iou_3d[i, j]) meter_2d.append(iou_2d[i, j]) diff --git a/nucleus/validate/eval_functions/available_eval_functions.py b/nucleus/validate/eval_functions/available_eval_functions.py index 80cb2dd7..06691477 100644 --- a/nucleus/validate/eval_functions/available_eval_functions.py +++ b/nucleus/validate/eval_functions/available_eval_functions.py @@ -10,12 +10,15 @@ from ..data_transfer_objects.eval_function import EvalFunctionEntry from ..errors import EvalFunctionNotAvailableError +DEFAULT_2D_IOU_THRESHOLD = 0.5 +DEFAULT_3D_IOU_THRESHOLD = 0.1 + class PolygonIOUConfig(EvalFunctionConfig): def __call__( self, enforce_label_match: bool = False, - iou_threshold: float = 0.0, + iou_threshold: float = DEFAULT_2D_IOU_THRESHOLD, confidence_threshold: float = 0.0, annotation_filters: Optional[ Union[ListOfOrAndFilters, ListOfAndFilters] @@ -77,7 +80,7 @@ def expected_name(cls) -> str: class PolygonMAPConfig(EvalFunctionConfig): def __call__( self, - iou_threshold: float = 0.5, + iou_threshold: float = DEFAULT_2D_IOU_THRESHOLD, annotation_filters: Optional[ Union[ListOfOrAndFilters, ListOfAndFilters] ] = None, @@ -135,7 +138,7 @@ class PolygonRecallConfig(EvalFunctionConfig): def __call__( self, enforce_label_match: bool = False, - iou_threshold: float = 0.5, + iou_threshold: float = DEFAULT_2D_IOU_THRESHOLD, confidence_threshold: float = 0.0, annotation_filters: Optional[ Union[ListOfOrAndFilters, ListOfAndFilters] @@ -198,7 +201,7 @@ class PolygonPrecisionConfig(EvalFunctionConfig): def __call__( self, enforce_label_match: bool = False, - iou_threshold: float = 0.5, + iou_threshold: float = DEFAULT_2D_IOU_THRESHOLD, confidence_threshold: float = 0.0, annotation_filters: Optional[ Union[ListOfOrAndFilters, ListOfAndFilters] @@ -261,7 +264,7 @@ class CuboidIOU2DConfig(EvalFunctionConfig): def __call__( self, enforce_label_match: bool = True, - iou_threshold: float = 0.0, + iou_threshold: float = DEFAULT_2D_IOU_THRESHOLD, confidence_threshold: float = 0.0, annotation_filters: Optional[ Union[ListOfOrAndFilters, ListOfAndFilters] @@ -315,7 +318,7 @@ class CuboidIOU3DConfig(EvalFunctionConfig): def __call__( self, enforce_label_match: bool = True, - iou_threshold: float = 0.0, + iou_threshold: float = DEFAULT_3D_IOU_THRESHOLD, confidence_threshold: float = 0.0, annotation_filters: Optional[ Union[ListOfOrAndFilters, ListOfAndFilters] @@ -370,7 +373,7 @@ class CuboidPrecisionConfig(EvalFunctionConfig): def __call__( self, enforce_label_match: bool = True, - iou_threshold: float = 0.0, + iou_threshold: float = DEFAULT_3D_IOU_THRESHOLD, confidence_threshold: float = 0.0, annotation_filters: Optional[ Union[ListOfOrAndFilters, ListOfAndFilters] @@ -424,7 +427,7 @@ 
class CuboidRecallConfig(EvalFunctionConfig): def __call__( self, enforce_label_match: bool = True, - iou_threshold: float = 0.0, + iou_threshold: float = DEFAULT_3D_IOU_THRESHOLD, confidence_threshold: float = 0.0, annotation_filters: Optional[ Union[ListOfOrAndFilters, ListOfAndFilters] diff --git a/tests/metrics/test_cuboid_metrics.py b/tests/metrics/test_cuboid_metrics.py index 5fdf1636..b951feeb 100644 --- a/tests/metrics/test_cuboid_metrics.py +++ b/tests/metrics/test_cuboid_metrics.py @@ -1,13 +1,13 @@ import pytest -from nucleus.annotation import CuboidAnnotation, Point3D +from nucleus.annotation import CuboidAnnotation, Point3D, AnnotationList from nucleus.metrics.base import ScalarResult from nucleus.metrics.cuboid_metrics import ( CuboidIOU, CuboidPrecision, CuboidRecall, ) -from nucleus.prediction import CuboidPrediction +from nucleus.prediction import CuboidPrediction, PredictionList try: import shapely @@ -19,104 +19,113 @@ CAR_LABEL = "car" PEDESTRIAN_LABEL = "pedestrian" +DEFAULT_90_DEGREE_ROTATION = 1.57079 def test_cuboid_metrics_simple(): # single item, perfect predictions - annotations = [ - CuboidAnnotation( - label=CAR_LABEL, - position=Point3D(0, 0, 0), - dimensions=Point3D(10, 10, 10), - yaw=0.0, - reference_id="item_A", - ), - CuboidAnnotation( - label=CAR_LABEL, - position=Point3D(1000, 1000, 1000), - dimensions=Point3D(10, 10, 10), - yaw=0.0, - reference_id="item_A", - ), - ] - predictions = [ - CuboidPrediction( - label=CAR_LABEL, - position=Point3D(0, 0, 0), - dimensions=Point3D(10, 10, 10), - yaw=0.0, - reference_id="item_A", - ), - CuboidPrediction( - label=CAR_LABEL, - position=Point3D(1000, 1000, 1000), - dimensions=Point3D(10, 10, 10), - yaw=0.0, - reference_id="item_A", - ), - ] - assert CuboidIOU().eval(annotations, predictions) == ScalarResult( + annotations = AnnotationList( + cuboid_annotations=[ + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(0, 0, 0), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(1000, 1000, 1000), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), + ] + ) + predictions = PredictionList( + cuboid_predictions=[ + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(0, 0, 0), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(1000, 1000, 1000), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), + ] + ) + assert CuboidIOU()(annotations, predictions) == ScalarResult( 1.0, len(annotations) ), "Unexpected Cuboid IoU result" - assert CuboidPrecision().eval(annotations, predictions) == ScalarResult( + assert CuboidPrecision()(annotations, predictions) == ScalarResult( 1.0, len(annotations) ), "Unexpected Cuboid Precision result" - assert CuboidRecall().eval(annotations, predictions) == ScalarResult( + assert CuboidRecall()(annotations, predictions) == ScalarResult( 1.0, len(annotations) ), "Unexpected Cuboid Recall result" def test_cuboid_metrics_numerical_check(): # single item, realistic predictions w/ matches and non-matches - annotations = [ - CuboidAnnotation( - label=CAR_LABEL, - position=Point3D(0, 0, 0), - dimensions=Point3D(10, 10, 5), - yaw=0.0, - reference_id="item_A", - ), - CuboidAnnotation( - label=CAR_LABEL, - position=Point3D(1000, 1000, 1000), - dimensions=Point3D(10, 10, 10), - yaw=0.0, - reference_id="item_A", - ), - CuboidAnnotation( - label=CAR_LABEL, - position=Point3D(-100, -100, -100), - 
dimensions=Point3D(10, 10, 10), - yaw=0.0, - reference_id="item_A", - ), # false negative - ] - predictions = [ - CuboidPrediction( - label=CAR_LABEL, - position=Point3D(1.0, 1.0, 1.0), - dimensions=Point3D(10, 10, 5), - yaw=0.0, - reference_id="item_A", - ), - CuboidPrediction( - label=CAR_LABEL, - position=Point3D(999, 999, 999), - dimensions=Point3D(8, 8, 6), - yaw=0.0, - reference_id="item_A", - ), - CuboidPrediction( - label=CAR_LABEL, - position=Point3D(250, 250, 250), - dimensions=Point3D(2, 2, 2), - yaw=0.0, - reference_id="item_A", - ), # false positive - ] - cuboid_iou_result = CuboidIOU().eval(annotations, predictions) - cuboid_precision_result = CuboidPrecision().eval(annotations, predictions) - cuboid_recall_result = CuboidRecall().eval(annotations, predictions) + annotations = AnnotationList( + cuboid_annotations=[ + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(0, 0, 0), + dimensions=Point3D(10, 10, 5), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(1000, 1000, 1000), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(-100, -100, -100), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), # false negative + ] + ) + predictions = PredictionList( + cuboid_predictions=[ + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(1.0, 1.0, 1.0), + dimensions=Point3D(10, 10, 5), + yaw=0.0, + reference_id="item_A", + ), + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(999, 999, 999), + dimensions=Point3D(8, 8, 6), + yaw=0.0, + reference_id="item_A", + ), + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(250, 250, 250), + dimensions=Point3D(2, 2, 2), + yaw=0.0, + reference_id="item_A", + ), # false positive + ] + ) + cuboid_iou_result = CuboidIOU()(annotations, predictions) + cuboid_precision_result = CuboidPrecision()(annotations, predictions) + cuboid_recall_result = CuboidRecall()(annotations, predictions) assert cuboid_iou_result == ScalarResult( 0.4316, 2 ), f"Unexpected Cuboid IoU result: {cuboid_iou_result}" @@ -128,57 +137,112 @@ def test_cuboid_metrics_numerical_check(): ), f"Unexpected Cuboid Recall result {cuboid_recall_result}" +def test_cuboid_metrics_numerical_check_rotation(): + annotations = AnnotationList( + cuboid_annotations=[ + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(0, 0, 0), + dimensions=Point3D(10, 5, 5), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(1000, 1000, 1000), + dimensions=Point3D(8, 4, 6), + yaw=DEFAULT_90_DEGREE_ROTATION, + reference_id="item_A", + ), + ] + ) + predictions = PredictionList( + cuboid_predictions=[ + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(0, 0, 0), + dimensions=Point3D(10, 5, 5), + yaw=DEFAULT_90_DEGREE_ROTATION, + reference_id="item_A", + ), + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(1000, 1000, 1000), + dimensions=Point3D(8, 4, 6), + yaw=0.0, + reference_id="item_A", + ), + ] + ) + cuboid_iou_result = CuboidIOU()(annotations, predictions) + cuboid_precision_result = CuboidPrecision()(annotations, predictions) + cuboid_recall_result = CuboidRecall()(annotations, predictions) + assert cuboid_iou_result == ScalarResult( + 1.0 / 3.0, 2 + ), f"Unexpected Cuboid IoU result: {cuboid_iou_result}" + + def test_cuboid_metrics_class_labels(): - annotations = [ - CuboidAnnotation( - label=CAR_LABEL, - position=Point3D(0, 0, 0), - dimensions=Point3D(10, 10, 5), - 
yaw=0.0, - reference_id="item_A", - ), - CuboidAnnotation( - label=CAR_LABEL, - position=Point3D(1000, 1000, 1000), - dimensions=Point3D(10, 10, 10), - yaw=0.0, - reference_id="item_A", - ), - CuboidAnnotation( - label=CAR_LABEL, - position=Point3D(-100, -100, -100), - dimensions=Point3D(10, 10, 10), - yaw=0.0, - reference_id="item_A", - ), # false negative - ] - predictions = [ - CuboidPrediction( - label=CAR_LABEL, - position=Point3D(1.0, 1.0, 1.0), - dimensions=Point3D(10, 10, 5), - yaw=0.0, - reference_id="item_A", - ), - CuboidPrediction( - label=PEDESTRIAN_LABEL, - position=Point3D(999, 999, 999), - dimensions=Point3D(8, 8, 6), - yaw=0.0, - reference_id="item_A", - ), - CuboidPrediction( - label=CAR_LABEL, - position=Point3D(250, 250, 250), - dimensions=Point3D(2, 2, 2), - yaw=0.0, - reference_id="item_A", - ), # false positive - ] + annotations = AnnotationList( + cuboid_annotations=[ + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(0, 0, 0), + dimensions=Point3D(10, 10, 5), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(1000, 1000, 1000), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(-100, -100, -100), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), # false negative + ] + ) + predictions = PredictionList( + cuboid_predictions=[ + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(1.0, 1.0, 1.0), + dimensions=Point3D(10, 10, 5), + yaw=0.0, + reference_id="item_A", + ), + CuboidPrediction( + label=PEDESTRIAN_LABEL, + position=Point3D(999, 999, 999), + dimensions=Point3D(8, 8, 6), + yaw=0.0, + reference_id="item_A", + ), + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(250, 250, 250), + dimensions=Point3D(2, 2, 2), + yaw=0.0, + reference_id="item_A", + ), # false positive + ] + ) - cuboid_iou_result1 = CuboidIOU().eval(annotations, predictions) - cuboid_precision_result1 = CuboidPrecision().eval(annotations, predictions) - cuboid_recall_result1 = CuboidRecall().eval(annotations, predictions) + cuboid_iou_result1 = CuboidIOU(enforce_label_match=True)( + annotations, predictions + ) + cuboid_precision_result1 = CuboidPrecision(enforce_label_match=True)( + annotations, predictions + ) + cuboid_recall_result1 = CuboidRecall(enforce_label_match=True)( + annotations, predictions + ) assert cuboid_iou_result1 == ScalarResult( 0.47928, 1 ), f"Unexpected Cuboid IoU result: {cuboid_iou_result1}" @@ -189,13 +253,13 @@ def test_cuboid_metrics_class_labels(): 1.0 / 3.0, len(annotations) ), f"Unexpected Cuboid Recall result {cuboid_recall_result1}" - cuboid_iou_result2 = CuboidIOU(enforce_label_match=False).eval( + cuboid_iou_result2 = CuboidIOU(enforce_label_match=False)( annotations, predictions ) - cuboid_precision_result2 = CuboidPrecision(enforce_label_match=False).eval( + cuboid_precision_result2 = CuboidPrecision(enforce_label_match=False)( annotations, predictions ) - cuboid_recall_result2 = CuboidRecall(enforce_label_match=False).eval( + cuboid_recall_result2 = CuboidRecall(enforce_label_match=False)( annotations, predictions ) assert cuboid_iou_result2 == ScalarResult( @@ -211,81 +275,85 @@ def test_cuboid_metrics_class_labels(): def test_cuboid_metrics_multi_item(): # single item, perfect precision - annotations = [ - # first item - CuboidAnnotation( - label=CAR_LABEL, - position=Point3D(0, 0, 0), - dimensions=Point3D(10, 10, 5), - yaw=0.0, - reference_id="item_A", - ), - CuboidAnnotation( - 
label=CAR_LABEL, - position=Point3D(1000, 1000, 1000), - dimensions=Point3D(10, 10, 10), - yaw=0.0, - reference_id="item_A", - ), - CuboidAnnotation( - label=CAR_LABEL, - position=Point3D(-100, -100, -100), - dimensions=Point3D(10, 10, 10), - yaw=0.0, - reference_id="item_A", - ), # false negative - # second item - CuboidAnnotation( - label=CAR_LABEL, - position=Point3D(0, 0, 0), - dimensions=Point3D(10, 10, 5), - yaw=0.0, - reference_id="item_B", - ), - CuboidAnnotation( - label=CAR_LABEL, - position=Point3D(30, 50, 120), - dimensions=Point3D(1, 2.5, 3), - yaw=0.0, - reference_id="item_B", - ), - ] - predictions = [ - # first item - CuboidPrediction( - label=CAR_LABEL, - position=Point3D(1.0, 1.0, 1.0), - dimensions=Point3D(10, 10, 5), - yaw=0.0, - reference_id="item_A", - ), - CuboidPrediction( - label=PEDESTRIAN_LABEL, - position=Point3D(999, 999, 999), - dimensions=Point3D(8, 8, 6), - yaw=0.0, - reference_id="item_A", - ), - CuboidPrediction( - label=CAR_LABEL, - position=Point3D(250, 250, 250), - dimensions=Point3D(2, 2, 2), - yaw=0.0, - reference_id="item_A", - ), # false positive - # second item - CuboidPrediction( - label=CAR_LABEL, - position=Point3D(250, 250, 250), - dimensions=Point3D(2, 2, 2), - yaw=0.0, - reference_id="item_B", - ), # false positive - ] + annotations = AnnotationList( + cuboid_annotations=[ + # first item + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(0, 0, 0), + dimensions=Point3D(10, 10, 5), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(1000, 1000, 1000), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(-100, -100, -100), + dimensions=Point3D(10, 10, 10), + yaw=0.0, + reference_id="item_A", + ), # false negative + # second item + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(0, 0, 0), + dimensions=Point3D(10, 10, 5), + yaw=0.0, + reference_id="item_B", + ), + CuboidAnnotation( + label=CAR_LABEL, + position=Point3D(30, 50, 120), + dimensions=Point3D(1, 2.5, 3), + yaw=0.0, + reference_id="item_B", + ), + ] + ) + predictions = PredictionList( + cuboid_predictions=[ + # first item + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(1.0, 1.0, 1.0), + dimensions=Point3D(10, 10, 5), + yaw=0.0, + reference_id="item_A", + ), + CuboidPrediction( + label=PEDESTRIAN_LABEL, + position=Point3D(999, 999, 999), + dimensions=Point3D(8, 8, 6), + yaw=0.0, + reference_id="item_A", + ), + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(250, 250, 250), + dimensions=Point3D(2, 2, 2), + yaw=0.0, + reference_id="item_A", + ), # false positive + # second item + CuboidPrediction( + label=CAR_LABEL, + position=Point3D(250, 250, 250), + dimensions=Point3D(2, 2, 2), + yaw=0.0, + reference_id="item_B", + ), # false positive + ] + ) - cuboid_iou_result1 = CuboidIOU().eval(annotations, predictions) - cuboid_precision_result1 = CuboidPrecision().eval(annotations, predictions) - cuboid_recall_result1 = CuboidRecall().eval(annotations, predictions) + cuboid_iou_result1 = CuboidIOU()(annotations, predictions) + cuboid_precision_result1 = CuboidPrecision()(annotations, predictions) + cuboid_recall_result1 = CuboidRecall()(annotations, predictions) assert cuboid_iou_result1 == ScalarResult( 0.47928, 1 ), f"Unexpected Cuboid IoU result: {cuboid_iou_result1}" @@ -296,13 +364,13 @@ def test_cuboid_metrics_multi_item(): 1.0 / len(annotations), len(annotations) ), f"Unexpected Cuboid Recall result {cuboid_recall_result1}" - 
cuboid_iou_result2 = CuboidIOU(enforce_label_match=False).eval( + cuboid_iou_result2 = CuboidIOU(enforce_label_match=False)( annotations, predictions ) - cuboid_precision_result2 = CuboidPrecision(enforce_label_match=False).eval( + cuboid_precision_result2 = CuboidPrecision(enforce_label_match=False)( annotations, predictions ) - cuboid_recall_result2 = CuboidRecall(enforce_label_match=False).eval( + cuboid_recall_result2 = CuboidRecall(enforce_label_match=False)( annotations, predictions ) assert cuboid_iou_result2 == ScalarResult(