From 58d1c6786fbcb736b01a2020eabccd6b801fa7aa Mon Sep 17 00:00:00 2001
From: ktro2828
Date: Wed, 10 Jan 2024 22:28:55 +0900
Subject: [PATCH 1/3] docs: update documents for result

Signed-off-by: ktro2828
---
 .../result/perception/__init__.py          |   2 +-
 .../result/perception/frame_config.py      |  40 ++---
 .../result/perception/frame_result.py      |  43 +++--
 .../result/perception/object_result.py     | 170 +++++++++---------
 .../result/perception/pass_fail_result.py  |  48 ++---
 .../result/sensing/__init__.py             |   2 +-
 .../result/sensing/frame_config.py         |  12 +-
 .../result/sensing/frame_result.py         |  17 +-
 .../result/sensing/object_result.py        |  26 +--
 9 files changed, 158 insertions(+), 202 deletions(-)

diff --git a/perception_eval/perception_eval/result/perception/__init__.py b/perception_eval/perception_eval/result/perception/__init__.py
index 799ab305..caaa01dc 100644
--- a/perception_eval/perception_eval/result/perception/__init__.py
+++ b/perception_eval/perception_eval/result/perception/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2023 TIER IV, Inc.
+# Copyright 2022-2024 TIER IV, Inc.
 
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/perception_eval/perception_eval/result/perception/frame_config.py b/perception_eval/perception_eval/result/perception/frame_config.py
index 2e310d80..8374d58a 100644
--- a/perception_eval/perception_eval/result/perception/frame_config.py
+++ b/perception_eval/perception_eval/result/perception/frame_config.py
@@ -1,4 +1,4 @@
-# Copyright 2022 TIER IV, Inc.
+# Copyright 2022-2024 TIER IV, Inc.
 
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -30,26 +30,7 @@
 
 
 class PerceptionFrameConfig:
-    """
-    Config class for critical object filter
-
-    Attributes:
-        self.target_labels (List[str]): Target list
-        self.max_x_position_list (Optional[List[float]]):
-            Maximum x position threshold list for each label. Defaults to None.
-        self.max_y_position_list (Optional[List[float]]):
-            Maximum y position threshold list for each label. Defaults to None.
-        self.max_distance_list (Optional[List[float]]]):
-            Maximum distance threshold list for each label. Defaults to None.
-        self.min_distance_list (Optional[List[float]]):
-            Minimum distance threshold list for object. Defaults to None.
-        self.min_point_numbers (Optional[List[int]]):
-            Minimum number of points to be included in object's box. Defaults to None.
-        self.confidence_threshold_list (Optional[List[float]]):
-            The list of confidence threshold for each label. Defaults to None.
-        self.target_uuids (Optional[List[str]]): The list of target uuid. Defaults to None.
-        self.filtering_params: (Dict[str, Any]): The container of filtering parameters.
-    """
+    """Frame level configuration for perception evaluation."""
 
     def __init__(
         self,
@@ -67,8 +48,9 @@ def __init__(
     ) -> None:
         """
         Args:
+        -----
            evaluator_config (PerceptionEvaluationConfig): Evaluation config
-            target_labels (List[str]): The list of target label.
+            target_labels (List[Union[str, LabelType]]): The list of target labels.
            max_x_position_list (Optional[List[float]]):
                Maximum x position threshold list for each label. Defaults to None.
            max_y_position_list (Optional[List[float]]):
                Maximum y position threshold list for each label. Defaults to None.
            max_distance_list (Optional[List[float]]]):
                Maximum distance threshold list for each label. Defaults to None.
            min_distance_list (Optional[List[float]]):
                Minimum distance threshold list for object. Defaults to None.
            min_point_numbers (Optional[List[int]]):
                Minimum number of points to be included in object's box. Defaults to None.
            confidence_threshold_list (Optional[List[float]]):
                The list of confidence threshold for each label. Defaults to None.
            target_uuids (Optional[List[str]]): The list of target uuid. Defaults to None.
+            ignore_attributes (Optional[List[str]]): The list of attribute names to ignore. Defaults to None.
+            success_thresholds (Optional[List[float]]): The list of thresholds to determine success. Defaults to None.
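+
+        Example:
+            An illustrative sketch, not a test fixture; `evaluator_config` is
+            assumed to be a pre-built `PerceptionEvaluationConfig`:
+
+            >>> frame_config = PerceptionFrameConfig(
+            ...     evaluator_config,
+            ...     target_labels=["car", "pedestrian"],
+            ...     success_thresholds=[1.0, 1.0],
+            ... )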
         """
         self.evaluation_task = evaluator_config.evaluation_task
         if all([isinstance(label, str) for label in target_labels]):
@@ -110,6 +94,18 @@ def __init__(
 
     @classmethod
    def from_eval_cfg(cls, eval_cfg: PerceptionEvaluationConfig) -> PerceptionFrameConfig:
+        """Constructs from scene level configuration.
+
+        This method should be used if frame level configuration was not specified by the user.
+
+        Args:
+        -----
+            eval_cfg (PerceptionEvaluationConfig): Scene level configuration.
+
+        Returns:
+        --------
+            PerceptionFrameConfig: Frame level configuration.
+        """
        if eval_cfg.evaluation_task.is_3d():
            success_thresholds = eval_cfg.metrics_param.plane_distance_thresholds
        else:
diff --git a/perception_eval/perception_eval/result/perception/frame_result.py b/perception_eval/perception_eval/result/perception/frame_result.py
index 5b432d29..a1085c83 100644
--- a/perception_eval/perception_eval/result/perception/frame_result.py
+++ b/perception_eval/perception_eval/result/perception/frame_result.py
@@ -1,4 +1,4 @@
-# Copyright 2022 TIER IV, Inc.
+# Copyright 2022-2024 TIER IV, Inc.
 
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -37,25 +37,15 @@
 
 
 class PerceptionFrameResult:
-    """The result for 1 frame (the pair of estimated objects and ground truth objects)
-
-    Attributes:
-        object_results (List[PerceptionObjectResult]): Filtered object results to each estimated object.
-        frame_ground_truth (FrameGroundTruth): Filtered ground truth of frame.
-        frame_number (int): The file name of frame in the datasets.
-        unix_time (int): The unix time for frame [us].
-        target_labels (List[AutowareLabel]): The list of target label.
-        metrics_score (MetricsScore): Metrics score results.
-        pass_fail_result (PassFailResult): Pass fail results.
+    """Frame level result for perception evaluation.
 
     Args:
-        object_results (List[PerceptionObjectResult]): The list of object result.
-        frame_ground_truth (FrameGroundTruth): FrameGroundTruth instance.
-        metrics_config (MetricsScoreConfig): Metrics config class.
-        critical_object_filter_config (CriticalObjectFilterConfig): Critical object filter config.
-        frame_pass_fail_config (PerceptionPassFailConfig): Frame pass fail config.
-        unix_time (int): The unix time for frame [us]
-        target_labels (List[AutowareLabel]): The list of target label.
+    -----
+        unix_time (int): Unix timestamp.
+        frame_config (PerceptionFrameConfig): Frame level configuration.
+        metrics_config (MetricsScoreConfig): Configuration for metrics score.
+        object_results (List[PerceptionObjectResult]): List of object results.
+        frame_ground_truth (FrameGroundTruth): Ground truth at one frame.
     """
 
     def __init__(
@@ -99,10 +89,17 @@ def evaluate_frame(
         previous_result: Optional[PerceptionFrameResult] = None,
     ) -> None:
         """
-        Evaluate a frame from the pair of estimated objects and ground truth objects
+        Evaluate one frame.
+
+        Internally, this method runs the following.
+        1. Calculate metrics score if the corresponding metrics config exists.
+        2. Evaluate pass/fail result.
+
         Args:
-            ros_critical_ground_truth_objects (List[ObjectType]): The list of Ground truth objects filtered by ROS node.
-            previous_result (Optional[PerceptionFrameResult]): The previous frame result. If None, set it as empty list []. Defaults to None.
+        -----
+            critical_ground_truth_objects (List[ObjectType]): List of ground truth objects.
+            previous_result (Optional[PerceptionFrameResult]): If the previous result exists,
+                it should be given to perform time-series evaluation. Defaults to None.
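+
+        Example:
+            An illustrative sketch; `frame_result` and the critical GT list are
+            assumed to be prepared by the caller:
+
+            >>> frame_result.evaluate_frame(critical_ground_truth_objects)
+            >>> frame_result.pass_fail_result.get_num_fail()  # e.g. 0 when nothing is missed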
         """
         # Divide objects by label to dict
         object_results_dict = objects_filter.divide_objects(self.object_results, self.target_labels)
@@ -140,10 +137,12 @@ def get_object_status(frame_results: List[PerceptionFrameResult]) -> List[Ground
     """Returns the number of TP/FP/TN/FN ratios per frame as tuple.
 
     Args:
+    -----
         frame_results (List[PerceptionFrameResult]): List of frame results.
 
     Returns:
-        List[GroundTruthStatus]: Sequence of matching status ratios for each GT.
+    --------
+        List[GroundTruthStatus]: List of matching status ratios for each GT.
     """
     status_infos: List[GroundTruthStatus] = []
     for frame_result in frame_results:
diff --git a/perception_eval/perception_eval/result/perception/object_result.py b/perception_eval/perception_eval/result/perception/object_result.py
index 72f8870e..0e61199b 100644
--- a/perception_eval/perception_eval/result/perception/object_result.py
+++ b/perception_eval/perception_eval/result/perception/object_result.py
@@ -1,4 +1,4 @@
-# Copyright 2022 TIER IV, Inc.
+# Copyright 2022-2024 TIER IV, Inc.
 
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -42,17 +42,15 @@
 
 
 class PerceptionObjectResult:
-    """Object result class for perception evaluation.
+    """Object level result for perception evaluation.
 
-    Attributes:
+    This class consists of the pair of estimated and ground truth objects.
+    If the ground truth object is `None`, the estimated object is regarded as FP regardless of the matching score.
+
+    Args:
+    -----
         estimated_object (ObjectType): Estimated object.
-        ground_truth_object (Optional[ObjectType]): Ground truth object.
-        is_label_correct (bool): Whether the label both of `estimated_object` and `ground_truth_object` are same.
-        center_distance (Optional[CenterDistanceMatching]): CenterDistanceMatching instance.
-        plane_distance (Optional[PlaneDistanceMatching]): PlaneDistanceMatching instance.
-            In 2D evaluation, this is None.
-        iou_2d (IOU2dMatching): IOU2dMatching instance.
-        iou_3d (IOU3dMatching): IOU3dMatching instance. In 2D evaluation, this is None.
+        ground_truth_object (Optional[ObjectType]): Ground truth object. `None` is allowed.
     """
 
     def __init__(
@@ -60,13 +58,6 @@ def __init__(
         estimated_object: ObjectType,
         ground_truth_object: Optional[ObjectType],
     ) -> None:
-        """[summary]
-        Evaluation result for an object estimated object.
-
-        Args:
-            estimated_object (ObjectType): The estimated object by inference like CenterPoint
-            ground_truth_objects (Optional[ObjectType]): The list of Ground truth objects
-        """
         if ground_truth_object is not None:
             assert type(estimated_object) is type(
                 ground_truth_object
@@ -108,11 +99,22 @@ def get_status(
     ) -> Tuple[MatchingStatus, Optional[MatchingStatus]]:
         """Returns matching status both of estimation and GT as `tuple`.
 
+        If the ground truth is `None`, the GT status is `None` and the estimation status is FP.
+        Otherwise, the status is determined by the following rules.
+        1. The matching score is better than the threshold:
+            a. Evaluation task is FP validation: (FP, TN)
+            b. Otherwise: (TP, TP)
+        2. Otherwise:
+            a. Evaluation task is FP validation: (FP, FP)
+            b. Otherwise: (FP, FN)
+
         Args:
-            matching_mode (MatchingMode): Matching policy.
+        -----
+            matching_mode (MatchingMode): Matching mode.
+            matching_threshold (float): Matching threshold.
 
         Returns:
+        --------
             Tuple[MatchingStatus, Optional[MatchingStatus]]: Matching status of estimation and GT.
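+
+        Example:
+            An illustrative sketch; `result` is assumed to be a matched
+            `PerceptionObjectResult` in a detection task:
+
+            >>> est_status, gt_status = result.get_status(
+            ...     MatchingMode.CENTERDISTANCE, matching_threshold=1.0
+            ... )  # e.g. (TP, TP) when the center distance is within 1.0 [m]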
matching_threshold (float): Matching threshold. Returns: + -------- Tuple[MatchingStatus, Optional[MatchingStatus]]: Matching status of estimation and GT. """ if self.ground_truth_object is None: @@ -131,24 +133,18 @@ def get_status( else (MatchingStatus.FP, MatchingStatus.FN) ) - def is_result_correct( - self, - matching_mode: MatchingMode, - matching_threshold: Optional[float], - ) -> bool: - """The function judging whether the result is target or not. - Return `False`, if label of GT is "FP" and matching. + def is_result_correct(self, matching_mode: MatchingMode, matching_threshold: Optional[float]) -> bool: + """The method judging whether the result is target or not. + + Returns `False`, if label of GT is "FP" and matching. Args: - matching_mode (MatchingMode): - The matching mode to evaluate. - matching_threshold (float): - The matching threshold to evaluate. - For example, if matching_mode = IOU3d and matching_threshold = 0.5, - and IoU of the object is higher than "matching_threshold", - this function appends to return objects. + ----- + matching_mode (MatchingMode): The matching mode to evaluate. + matching_threshold (float): The matching threshold to evaluate. Returns: + -------- bool: If label is correct and satisfy matching threshold, return True """ if self.ground_truth_object is None: @@ -167,15 +163,18 @@ def is_result_correct( return not is_matching if self.ground_truth_object.semantic_label.is_fp() else is_matching def get_matching(self, matching_mode: MatchingMode) -> Optional[MatchingMethod]: - """Get MatchingMethod instance with corresponding MatchingMode. + """Returns the matching module for corresponding matching mode. - Args: - matching_mode (MatchingMode): MatchingMode instance. + Note that, + - 2D detection/tracking tasks, input `PLANEDISTANCE` and `IOU3D` return `None`. + - 2D classification task, all return `None`. - Raises: - NotImplementedError: When unexpected MatchingMode is input. + Args: + ----- + matching_mode (MatchingMode): The matching mode. Returns: + -------- Optional[MatchingMethod]: Corresponding MatchingMethods instance. """ if matching_mode == MatchingMode.CENTERDISTANCE: @@ -187,16 +186,17 @@ def get_matching(self, matching_mode: MatchingMode) -> Optional[MatchingMethod]: elif matching_mode == MatchingMode.IOU3D: return self.iou_3d else: - raise NotImplementedError + raise NotImplementedError(f"Unexpected matching mode: {matching_mode}") @property def distance_error_bev(self) -> Optional[float]: - """Get error center distance between ground truth and estimated object in BEV space. + """Returns the center distance error in BEV coords between estimation and GT. - If `self.ground_truth_object=None`, returns None. + If GT is `None` returns `None`. Returns: - Optional[float]: error center distance between ground truth and estimated object. + -------- + Optional[float]: Calculated error [m]. """ if self.ground_truth_object is None: return None @@ -204,12 +204,13 @@ def distance_error_bev(self) -> Optional[float]: @property def distance_error(self) -> Optional[float]: - """Get error center distance between ground truth and estimated object. + """Returns the center distance error in BEV coords between estimation and GT. - If `self.ground_truth_object=None`, returns None. + If GT is `None` returns `None`. Returns: - Optional[float]: error center distance between ground truth and estimated object. + -------- + Optional[float]: Calculated error [m]. 
""" if self.ground_truth_object is None: return None @@ -217,41 +218,36 @@ def distance_error(self) -> Optional[float]: @property def position_error(self) -> Optional[Tuple[float, float, float]]: - """Get the position error vector from estimated to ground truth object. + """Returns the position error vector from estimated to ground truth object. - If `self.ground_truth_object=None`, returns None. + If GT is `None` returns `None`. Returns: - float: x-axis position error[m]. - float: y-axis position error[m]. - float: z-axis position error[m]. + -------- + Optional[Tuple[float, float, float]]: Errors ordering (x, y, z) [m]. """ return self.estimated_object.get_position_error(self.ground_truth_object) @property def heading_error(self) -> Optional[Tuple[float, float, float]]: - """Get the heading error vector from estimated to ground truth object. + """Returns the heading error vector from estimated to ground truth object. - If `self.ground_truth_object=None`, returns None. + If GT is `None` returns `None`. Returns: - float: Roll error, in [-pi, pi]. - float: Pitch error, in [-pi, pi]. - float: Yaw error, in [-pi, pi]. + Optional[Tuple[float, float, float]]: Errors ordering (roll, pitch, yaw) [rad] in `[-pi, pi]`. """ return self.estimated_object.get_heading_error(self.ground_truth_object) @property def velocity_error(self) -> Optional[Tuple[float, float, float]]: - """Get the velocity error vector from estimated to ground truth object. + """Returns the velocity error vector from estimated to ground truth object. - If `self.ground_truth_object=None`, returns None. + If GT is `None` returns `None`. Also, velocity of estimated or ground truth object is None, returns None too. Returns: - float: x-axis velocity error[m/s]. - float: y-axis velocity error[m/s]. - float: z-axis velocity error[m/s]. + Optional[Tuple[float, float, float]]: Errors ordering (x, y, z) [m/s]. """ return self.estimated_object.get_velocity_error(self.ground_truth_object) @@ -272,7 +268,7 @@ def get_object_results( target_labels: Optional[List[LabelType]] = None, matching_policy: MatchingPolicy = MatchingPolicy(), ) -> List[PerceptionObjectResult]: - """Returns list of DynamicObjectWithPerceptionResult. + """Returns list of object results. For classification, matching objects their uuid. Otherwise, matching them depending on their center distance by default. @@ -281,14 +277,16 @@ def get_object_results( Otherwise, they all are FP. Args: + ----- evaluation_task (EvaluationTask): Evaluation task. - estimated_objects (List[ObjectType]): Estimated objects list. - ground_truth_objects (List[ObjectType]): Ground truth objects list. - matching_mode (MatchingMode): MatchingMode instance. - matchable_thresholds (Optional[List[float]]): Thresholds to be + estimated_objects (List[ObjectType]): List of estimated objects. + ground_truth_objects (List[ObjectType]): List of ground truth objects. + target_labels (Optional[List[LabelType]]): List of target labels to match. Defaults to None. + matching_policy (MatchingPolicy): Policy of matching. Returns: - object_results (List[DynamicObjectWithPerceptionResult]): Object results list. + -------- + object_results (List[PerceptionObjectResult]): List of object results. """ # There is no estimated object (= all FN) if not estimated_objects: @@ -346,16 +344,18 @@ def _get_object_results_with_id( estimated_objects: List[DynamicObject2D], ground_truth_objects: List[DynamicObject2D], ) -> List[PerceptionObjectResult]: - """Returns the list of DynamicObjectWithPerceptionResult considering their uuids. 
+    """Returns the list of object results considering their uuids.
 
     This function is used in 2D classification evaluation.
 
     Args:
+    -----
         estimated_objects (List[DynamicObject2D]): List of estimated objects.
         ground_truth_objects (List[DynamicObject2D]): List of ground truth objects.
 
     Returns:
+    --------
         object_results (List[PerceptionObjectResult]): List of object results.
     """
     object_results: List[PerceptionObjectResult] = []
     estimated_objects_ = estimated_objects.copy()
@@ -387,26 +387,18 @@ def _get_object_results_with_id(
     return object_results
 
 
-def _get_fp_object_results(
-    estimated_objects: List[ObjectType],
-) -> List[PerceptionObjectResult]:
-    """Returns the list of DynamicObjectWithPerceptionResult that have no ground truth.
+def _get_fp_object_results(estimated_objects: List[ObjectType]) -> List[PerceptionObjectResult]:
+    """Returns the list of FP object results that have no ground truth.
 
     Args:
-        estimated_objects (List[ObjectType]): Estimated objects list.
+    -----
+        estimated_objects (List[ObjectType]): List of estimated objects.
 
     Returns:
-        object_results (List[DynamicObjectWithPerceptionResult]): FP object results list.
+    --------
+        List[PerceptionObjectResult]: List of FP object results.
     """
-    object_results: List[PerceptionObjectResult] = []
-    for est_obj_ in estimated_objects:
-        object_result_ = PerceptionObjectResult(
-            estimated_object=est_obj_,
-            ground_truth_object=None,
-        )
-        object_results.append(object_result_)
-
-    return object_results
+    return [PerceptionObjectResult(est, None) for est in estimated_objects]
 
 
 def _get_score_table(
@@ -418,17 +410,15 @@ def _get_score_table(
     """Returns score table, in shape (num_estimation, num_ground_truth).
 
     Args:
+    -----
         estimated_objects (List[ObjectType]): Estimated objects list.
         ground_truth_objects (List[ObjectType]): Ground truth objects list.
-        allow_matching_unknown (bool): Indicates whether allow to match with unknown label.
-        matching_method_module (Callable): MatchingMethod instance.
-        target_labels (Optional[List[LabelType]]): Target labels to be evaluated.
-        matching_thresholds (Optional[List[float]]): List of thresholds
+        target_labels (List[LabelType]): Target labels to be evaluated.
+        matching_policy (MatchingPolicy): Matching policy.
 
     Returns:
-        score_table (numpy.ndarray): in shape (num_estimation, num_ground_truth).
+    --------
+        np.ndarray: Array in shape (num_estimation, num_ground_truth).
     """
-    # fill matching score table, in shape (NumEst, NumGT)
     num_row: int = len(estimated_objects)
     num_col: int = len(ground_truth_objects)
     score_table = np.full((num_row, num_col), np.nan)
diff --git a/perception_eval/perception_eval/result/perception/pass_fail_result.py b/perception_eval/perception_eval/result/perception/pass_fail_result.py
index 4e0b68f4..f1712be6 100644
--- a/perception_eval/perception_eval/result/perception/pass_fail_result.py
+++ b/perception_eval/perception_eval/result/perception/pass_fail_result.py
@@ -1,4 +1,4 @@
-# Copyright 2022 TIER IV, Inc.
+# Copyright 2022-2024 TIER IV, Inc.
 
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,6 @@ from typing import Optional from typing import Tuple from typing import TYPE_CHECKING -import warnings import numpy as np from perception_eval.matching import MatchingMode @@ -32,22 +31,14 @@ class PassFailResult: - """Class for keeping TP/FP/TN/FP object results and GT objects for critical GT objects. - - Attributes: - frame_config (PerceptionFrameConfig): Critical object filter config. - tn_objects (List[ObjectType]): TN ground truth objects list. - fn_objects (List[ObjectType]): FN ground truth objects list. - fp_object_results (List[PerceptionObjectResult]): FP object results list. - tp_object_results (List[PerceptionObjectResult]): TP object results list. + """Class to determine pass fail. Args: + ----- unix_time (int): UNIX timestamp. frame_number (int): The Number of frame. - critical_object_filter_config (CriticalObjectFilterConfig): Critical object filter config. - frame_id (str): `base_link` or `map`. - ego2map (Optional[numpy.ndarray]): Array of 4x4 matrix to transform coordinates from ego to map. - Defaults to None. + frame_config (PerceptionFrameConfig): Frame level configuration. + ego2map (Optional[np.ndarray]): 4x4 matrix to transform coordinates from ego to map. Defaults to None. """ def __init__( @@ -76,8 +67,9 @@ def evaluate( """Evaluate object results' pass fail. Args: - object_results (List[PerceptionObjectResult]): Object results list. - ros_critical_ground_truth_objects (List[ObjectType]): Critical ground truth objects + ----- + object_results (List[PerceptionObjectResult]): List of object results. + critical_ground_truth_objects (List[ObjectType]): Critical ground truth objects must be evaluated at current frame. """ critical_ground_truth_objects = objects_filter.filter_objects( @@ -105,6 +97,7 @@ def get_num_success(self) -> int: """Returns the number of success. Returns: + -------- int: Number of success. """ return len(self.tp_object_results) + len(self.tn_objects) @@ -113,28 +106,17 @@ def get_num_fail(self) -> int: """Returns the number of fail. Returns: + -------- int: Number of fail. """ return len(self.fp_object_results) + len(self.fn_objects) - def get_fail_object_num(self) -> int: - """Get the number of fail objects. - - Returns: - int: Number of fail objects. - """ - warnings.warn( - "`get_fail_object_num()` is removed in next minor update, please use `get_num_fail()`", - DeprecationWarning, - ) - return len(self.fn_objects) + len(self.fp_object_results) - def __get_positive_object_results( self, object_results: List[PerceptionObjectResult], critical_ground_truth_objects: List[ObjectType], ) -> Tuple[List[PerceptionObjectResult], List[PerceptionObjectResult]]: - """Get TP and FP object results list from `object_results`. + """Returns list of TP and FP results from object results. Args: object_results (List[PerceptionObjectResult]): Object results list. @@ -142,8 +124,8 @@ def __get_positive_object_results( must be evaluated at current frame. Returns: - List[PerceptionObjectResult]: TP object results. - List[PerceptionObjectResult]: FP object results. + -------- + Tuple[List[PerceptionObjectResult], List[PerceptionObjectResult]]: TP and FP object results. 
""" tp_object_results, fp_object_results = objects_filter.get_positive_objects( object_results=object_results, @@ -155,12 +137,12 @@ def __get_positive_object_results( ) # filter by critical_ground_truth_objects - tp_critical_results: List[PerceptionObjectResult] = [ + tp_critical_results = [ tp_result for tp_result in tp_object_results if tp_result.ground_truth_object in critical_ground_truth_objects ] - fp_critical_results: List[PerceptionObjectResult] = [ + fp_critical_results = [ fp_result for fp_result in fp_object_results if fp_result.ground_truth_object in critical_ground_truth_objects diff --git a/perception_eval/perception_eval/result/sensing/__init__.py b/perception_eval/perception_eval/result/sensing/__init__.py index 025f5eb8..5bc65556 100644 --- a/perception_eval/perception_eval/result/sensing/__init__.py +++ b/perception_eval/perception_eval/result/sensing/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/perception_eval/perception_eval/result/sensing/frame_config.py b/perception_eval/perception_eval/result/sensing/frame_config.py index 454a49e1..f150d093 100644 --- a/perception_eval/perception_eval/result/sensing/frame_config.py +++ b/perception_eval/perception_eval/result/sensing/frame_config.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,14 +17,10 @@ class SensingFrameConfig: - """Config class for sensing detection evaluation per frame. - - Attributes: - box_scale_0m (float): Scale factor for bounding box at 0m. - box_scale_100m (float): Scale factor for bounding box at 100m. - min_points_threshold (int): The minimum number of points should be detected in bounding box. + """Frame level configuration for sensing evaluation. Args: + ----- target_uuids (Optional[List[str]]): List of target uuids to be filtered. box_scale_0m (float): Scale factor for bounding box at 0m. box_scale_100m (float): Scale factor for bounding box at 100m. @@ -52,9 +48,11 @@ def get_scale_factor(self, distance: float) -> float: scale = ((box_scale_100m - box_scale_0m) / (100 - 0)) * (distance - 0) + box_scale_0m Args: + ----- distance (float): The distance from vehicle to target bounding box. Returns: + -------- float: Calculated scale factor. """ return self.scale_slope_ * distance + self.box_scale_0m diff --git a/perception_eval/perception_eval/result/sensing/frame_result.py b/perception_eval/perception_eval/result/sensing/frame_result.py index 1557af01..fc72e48b 100644 --- a/perception_eval/perception_eval/result/sensing/frame_result.py +++ b/perception_eval/perception_eval/result/sensing/frame_result.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -44,17 +44,13 @@ class SensingFrameResult: in non-detection area. Args: - sensing_frame_config (SensingFrameConfig): The configuration of sensing evaluation. + ----- + sensing_frame_config (SensingFrameConfig): Frame level configuration for sensing evaluation. unix_time (int): Unix time [us]. - frame_name (str): Frame name in string. + frame_number (int): Number of frame. 
""" - def __init__( - self, - sensing_frame_config: SensingFrameConfig, - unix_time: int, - frame_number: int, - ) -> None: + def __init__(self, sensing_frame_config: SensingFrameConfig, unix_time: int, frame_number: int) -> None: # Config self.sensing_frame_config = sensing_frame_config @@ -77,6 +73,7 @@ def evaluate_frame( """Evaluate each object at current frame. Args: + ----- ground_truth_objects (list[DynamicObject]): Ground truth objects list. pointcloud_for_detection (numpy.ndarray): Array of pointcloud in detection area. pointcloud_for_non_detection (List[numpy.ndarray]): List of pointcloud array in non-detection area. @@ -101,6 +98,7 @@ def _evaluate_pointcloud_for_detection( If the object is occluded, the result is appended to `self.detection_warning_results`. Args: + ----- ground_truth_objects (list[DynamicObject]): Ground truth objects list. pointcloud_for_detection (numpy.ndarray): Array of pointcloud in detection area. """ @@ -136,6 +134,7 @@ def _evaluate_pointcloud_for_non_detection( `self.pointcloud_failed_non_detection`. Args: + ----- ground_truth_objects (list[DynamicObject]): Ground truth objects list. pointcloud_for_non_detection (list[numpy.ndarray]): List of pointcloud array in non-detection area. """ diff --git a/perception_eval/perception_eval/result/sensing/object_result.py b/perception_eval/perception_eval/result/sensing/object_result.py index 86be57a8..de451383 100644 --- a/perception_eval/perception_eval/result/sensing/object_result.py +++ b/perception_eval/perception_eval/result/sensing/object_result.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,15 +20,13 @@ class SensingObjectResult: - """The class to evaluate sensing result for dynamic object. + """Object level result for sensing evaluation. - Attributes: - self.ground_truth_object (DynamicObject): The target DynamicObject. - self.inside_pointcloud (numpy.ndarray): The array of pointcloud in bounding box. - self.inside_pointcloud_num (int): The number of pointcloud in bounding box. - self.is_detected (bool): The boolean flag indicates whether pointcloud is in bounding box. - self.nearest_point (np.ndarray): The nearest point from base_link. - self.is_occluded (bool): Whether the object is occluded. + Args: + ground_truth_object (DynamicObject): Ground truth object. + pointcloud (numpy.ndarray): Array of pointcloud after removing ground. + scale_factor (float): Scale factor for bounding box. + min_points_threshold (int): The minimum number of points should be detected in bounding box. """ def __init__( @@ -38,13 +36,6 @@ def __init__( scale_factor: float, min_points_threshold: int, ) -> None: - """[summary] - Args: - ground_truth_object (DynamicObject): Ground truth object. - pointcloud (numpy.ndarray): Array of pointcloud after removing ground. - scale_factor (float): Scale factor for bounding box. - min_points_threshold (int): The minimum number of points should be detected in bounding box. - """ self.ground_truth_object: DynamicObject = ground_truth_object # Evaluate @@ -58,10 +49,11 @@ def __init__( self.is_occluded: bool = ground_truth_object.visibility == Visibility.NONE def _get_nearest_point(self) -> Optional[np.ndarray]: - """[summary] + """ Returns the nearest point from base_link. The pointcloud must be base_link coords. 
Returns: + -------- Optional[np.ndarray]: The nearest point included in the object's bbox, in shape (3,). If there is no point in bbox, returns None. """ From 772548c102e5149fb55e636be6262c3c49837df5 Mon Sep 17 00:00:00 2001 From: ktro2828 Date: Wed, 10 Jan 2024 22:38:58 +0900 Subject: [PATCH 2/3] docs: update documents for object Signed-off-by: ktro2828 --- .../perception_eval/object/__init__.py | 28 ++++++++----- .../perception_eval/object/object2d.py | 33 ++++----------- .../perception_eval/object/object3d.py | 41 ++++++++++++++----- 3 files changed, 56 insertions(+), 46 deletions(-) diff --git a/perception_eval/perception_eval/object/__init__.py b/perception_eval/perception_eval/object/__init__.py index 595542c0..641784f2 100644 --- a/perception_eval/perception_eval/object/__init__.py +++ b/perception_eval/perception_eval/object/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -36,13 +36,16 @@ def distance_objects(object_1: ObjectType, object_2: ObjectType) -> float: - """ - Calculate the 3D/2D center distance between two objects. + """Calculates the 3D/2D center distance between two objects. Args: - object_1 (ObjectType): An object - object_2 (ObjectType): An object - Returns: float: The center distance between object_1 and object_2. + ----- + object_1 (ObjectType): An object. + object_2 (ObjectType): An object. + + Returns: + -------- + float: The center distance between object_1 and object_2. """ if type(object_1) is not type(object_2): raise TypeError(f"objects' type must be same, but got {type(object_1) and {type(object_2)}}") @@ -53,13 +56,16 @@ def distance_objects(object_1: ObjectType, object_2: ObjectType) -> float: def distance_objects_bev(object_1: DynamicObject, object_2: DynamicObject) -> float: - """ - Calculate the BEV 2d center distance between two objects. + """Calculates the BEV 2d center distance between two objects. Args: - object_1 (DynamicObject): An object - object_2 (DynamicObject): An object - Returns: float: The 2d center distance from object_1 to object_2. + ----- + object_1 (DynamicObject): An object. + object_2 (DynamicObject): An object. + + Returns: + -------- + float: The 2d center distance from object_1 to object_2. """ assert isinstance(object_1, DynamicObject) and isinstance(object_2, DynamicObject) return distance_points_bev(object_1.state.position, object_2.state.position) diff --git a/perception_eval/perception_eval/object/object2d.py b/perception_eval/perception_eval/object/object2d.py index c211c7b2..3fe08619 100644 --- a/perception_eval/perception_eval/object/object2d.py +++ b/perception_eval/perception_eval/object/object2d.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -34,22 +34,12 @@ class Roi: TODO: - Support multi roi input for Blinker and BrakeLamp objects. - Attributes: - offset (Tuple[int, int]): Top-left pixels from (0, 0), (x, y) order. - size (Tuple[int, int]): Size of ROI, (width, height) order. - center (Tuple[int, int]): Center of ROI, (x, y) order. - height (int): Height of ROI. - width (int): Width of ROI. - area (int): Area of ROI. - Args: + ----- roi (Tuple[int, int, int, int]): (xmin, ymin, width, height) of ROI. 
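+
+    Example:
+        A minimal sketch:
+
+        >>> roi = Roi((100, 50, 200, 100))  # (xmin, ymin, width, height)
+        >>> roi.width, roi.height
+        (200, 100)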
""" - def __init__( - self, - roi: Tuple[int, int, int, int], - ) -> None: + def __init__(self, roi: Tuple[int, int, int, int]) -> None: if len(roi) != 4: raise ValueError("`roi` must be 4 length int array.") @@ -109,17 +99,9 @@ def corners(self) -> np.ndarray: class DynamicObject2D: """Dynamic object class for 2D object. - Attributes: - unix_time (int): Unix time[us]. - frame_id (FrameID): FrameID instance, where 2D objects are with respect, related to CAM_**. - semantic_score (float): Object's confidence [0, 1]. - semantic_label (Label): Object's Label. - roi (Optional[Roi]): ROI in image. For classification, None is OK. Defaults to None. - uuid (Optional[str]): Unique ID. For traffic light objects, set lane ID. Defaults to None. - visibility (Optional[Visibility]): Visibility status. Defaults to None. - Args: - unix_time (int): Unix time[us]. + ----- + unix_time (int): Unix time [us]. frame_id (FrameID): FrameID instance, where 2D objects are with respect, related to CAM_**. semantic_score (float): Object's confidence [0, 1]. semantic_label (Label): Object's Label. @@ -152,7 +134,8 @@ def get_corners(self) -> np.ndarray: """Returns the corners of bounding box in pixel. Returns: - numpy.ndarray: (top_left, top_right, bottom_right, bottom_left), in shape (4, 2). + -------- + np.ndarray: (top_left, top_right, bottom_right, bottom_left), in shape (4, 2). """ assert self.roi is not None, "self.roi is None." return self.roi.corners @@ -161,6 +144,7 @@ def get_area(self) -> int: """Returns the area of bounding box in pixel. Returns: + -------- int: Area of bounding box[px]. """ assert self.roi is not None, "self.roi is None." @@ -170,6 +154,7 @@ def get_polygon(self) -> Polygon: """Returns the corners as polygon. Returns: + -------- Polygon: Corners as Polygon. ((x0, y0), ..., (x0, y0)) """ assert self.roi is not None, "self.roi is None." diff --git a/perception_eval/perception_eval/object/object3d.py b/perception_eval/perception_eval/object/object3d.py index 5f43c2db..1f541151 100644 --- a/perception_eval/perception_eval/object/object3d.py +++ b/perception_eval/perception_eval/object/object3d.py @@ -38,17 +38,12 @@ class ObjectState: """Object state class. - Attributes: - position (Tuple[float, float, float]): (center_x, center_y, center_z)[m]. - orientation (Quaternion) : Quaternion instance. - size (Tuple[float, float, float]): Bounding box size, (wx, wy, wz)[m]. - velocity (Optional[Tuple[float, float, float]]): Velocity, (vx, vy, vz)[m/s]. - Args: - position (Tuple[float, float, float]): (center_x, center_y, center_z)[m]. + ----- + position (Tuple[float, float, float]): (center_x, center_y, center_z) [m]. orientation (Quaternion) : Quaternion instance. shape (Shape): Shape instance. - velocity (Optional[Tuple[float, float, float]]): Velocity, (vx, vy, vz)[m/s]. + velocity (Optional[Tuple[float, float, float]]): Velocity, (vx, vy, vz) [m/s]. """ def __init__( @@ -80,6 +75,7 @@ class DynamicObject: """Dynamic object class for 3D object. Attributes: + ----------- unix_time (int) : Unix time [us]. frame_id (FrameID): FrameID instance, where 3D objects are with respect, BASE_LINK or MAP. @@ -102,6 +98,7 @@ class DynamicObject: visibility (Optional[Visibility]): Visibility status. Defaults to None. Args: + ----- unix_time (int): Unix time [us]. frame_id (FrameID): FrameID instance, where 3D objects are with respect, BASE_LINK or MAP. position (Tuple[float, float, float]): (center_x, center_y, center_z)[m]. 
@@ -198,9 +195,11 @@ def __eq__(self, other: Optional[DynamicObject]) -> bool: it causes error. Args: + ----- other (Optional[DynamicObject]): Another object. Returns: + -------- bool """ if other is None: @@ -217,10 +216,12 @@ def get_distance(self, ego2map: Optional[np.ndarray] = None) -> float: """Get the 3d distance to the object from ego vehicle in bird eye view. Args: - ego2map (Optional[numpy.ndarray]):4x4 Transform matrix + ----- + ego2map (Optional[numpy.ndarray]): 4x4 Transform matrix from base_link coordinate system to map coordinate system. Returns: + -------- float: The 3d distance to the object from ego vehicle in bird eye view. """ if self.frame_id == FrameID.BASE_LINK: @@ -236,10 +237,12 @@ def get_distance_bev(self, ego2map: Optional[np.ndarray] = None) -> float: """Get the 2d distance to the object from ego vehicle in bird eye view. Args: + ----- ego2map (Optional[numpy.ndarray]):4x4 Transform matrix from base_link coordinate system to map coordinate system. Returns: + -------- float: The 2d distance to the object from ego vehicle in bird eye view. """ if self.frame_id == FrameID.BASE_LINK: @@ -255,10 +258,12 @@ def get_heading_bev(self, ego2map: Optional[np.ndarray] = None) -> float: """Get the object heading from ego vehicle in bird eye view. Args: + ----- ego2map (Optional[numpy.ndarray]):4x4 Transform matrix. from base_link coordinate system to map coordinate system. Returns: + -------- float: The heading (radian) """ if self.frame_id == FrameID.MAP: @@ -280,9 +285,11 @@ def get_corners(self, scale: float = 1.0) -> np.ndarray: """Get the bounding box corners. Args: + ----- scale (float): Scale factor to scale the corners. Defaults to 1.0. Returns: + -------- corners (numpy.ndarray): Objects corners array. """ # NOTE: This is with respect to base_link or map coordinate system. @@ -299,17 +306,18 @@ def get_corners(self, scale: float = 1.0) -> np.ndarray: return corners def get_footprint(self, scale: float = 1.0) -> Polygon: - """[summary] - Get footprint polygon from an object with respect ot base_link or map coordinate system. + """Get footprint polygon from an object with respect ot base_link or map coordinate system. Args: scale (float): Scale factor for footprint. Defaults to 1.0. Returns: + -------- Polygon: The footprint polygon of object with respect to base_link or map coordinate system. It consists of 4 corner 2d position of the object and start and end points are same point. ((x0, y0, 0), (x1, y1, 0), (x2, y2, 0), (x3, y3, 0), (x0, y0, 0)) Notes: + ------ center_position: (xc, yc) vector_center_to_corners[0]: (x0 - xc, y0 - yc) """ @@ -333,6 +341,7 @@ def get_position_error( """Get the position error between myself and other. If other is None, returns None. Returns: + -------- err_x (float): x-axis position error[m]. err_y (float): y-axis position error[m]. err_z (float): z-axis position error[m]. @@ -351,6 +360,7 @@ def get_heading_error( """Get the heading error between myself and other. If other is None, returns None. Returns: + -------- err_x (float): Roll error[rad], in [-pi, pi]. err_y (float): Pitch error[rad], in [-pi, pi]. err_z (float): Yaw error[rad], in [-pi, pi]. @@ -383,6 +393,7 @@ def get_velocity_error( If other is None, returns None. Also, velocity of myself or other is None, returns None too. Returns: + -------- err_vx (float): x-axis velocity error[m/s]. err_vy (float): y-axis velocity error[m/s]. err_vz (float): z-axis velocity error[m/s]. @@ -403,6 +414,7 @@ def get_area_bev(self) -> float: """Get area of object BEV. 
Returns: + -------- float: Area of footprint. """ return self.state.footprint.area @@ -411,6 +423,7 @@ def get_volume(self) -> float: """Get volume of bounding box. Returns: + -------- float: Box volume. """ return self.get_area_bev() * self.state.size[2] @@ -427,11 +440,13 @@ def crop_pointcloud( Otherwise, returns pointcloud array outside of box. Args: + ----- pointcloud (np.ndarray): The Array of pointcloud, in shape (N, 3). bbox_scale (float): Scale factor for bounding box. Defaults to 1.0. inside (bool): Whether crop inside pointcloud or outside. Defaults to True. Returns: + -------- numpy.ndarray: Pointcloud array inside or outside box. """ corners: np.ndarray = self.get_corners(scale=bbox_scale) @@ -445,10 +460,12 @@ def get_inside_pointcloud_num( """Calculate the number of pointcloud inside of own bounding box. Args: + ----- pointcloud (np.ndarray): The Array of pointcloud, in shape (N, 3). bbox_scale (float): Scale factor for bounding box. Defaults to 1.0. Returns: + -------- int: Number of points inside of the own box. """ inside_pointcloud: np.ndarray = self.crop_pointcloud(pointcloud, bbox_scale) @@ -477,12 +494,14 @@ def _set_states( """Set object state from positions, orientations, sizes, and twists. Args: + ----- positions (Optional[List[Tuple[float, float, float]]]): Sequence of positions. Defaults to None. orientations (Optional[List[Quaternion]], optional): Sequence of orientations. Defaults to None. sizes (Optional[List[Tuple[float, float, float]]]): Sequence of boxes sizes. Defaults to None. twists (Optional[List[Tuple[float, float, float]]]): Sequence of velocities. Defaults to None. Returns: + -------- Optional[List[ObjectState]]: The list of ObjectState """ if positions is None or orientations is None: From 3a6033fc790140c7691cf648f6f343c4ec38926f Mon Sep 17 00:00:00 2001 From: ktro2828 Date: Wed, 10 Jan 2024 23:32:22 +0900 Subject: [PATCH 3/3] docs: update documents for metrics Signed-off-by: ktro2828 --- .../perception_eval/metrics/__init__.py | 2 +- .../metrics/classification/__init__.py | 2 +- .../metrics/classification/accuracy.py | 26 +++--- .../classification_metrics_score.py | 5 +- .../metrics/config/__init__.py | 2 +- .../metrics/config/_metrics_config_base.py | 3 +- .../config/classification_metrics_config.py | 11 +-- .../config/detection_metrics_config.py | 10 +-- .../config/prediction_metrics_config.py | 11 +-- .../metrics/config/tracking_metrics_config.py | 11 +-- .../metrics/detection/__init__.py | 2 +- .../perception_eval/metrics/detection/ap.py | 83 ++++++++----------- .../perception_eval/metrics/detection/map.py | 9 +- .../metrics/detection/tp_metrics.py | 28 ++----- .../perception_eval/metrics/metrics.py | 34 ++++---- .../metrics/metrics_score_config.py | 19 ++--- .../metrics/tracking/__init__.py | 2 +- .../metrics/tracking/_metrics_base.py | 17 ++-- .../perception_eval/metrics/tracking/clear.py | 24 ++---- .../tracking/tracking_metrics_score.py | 9 +- 20 files changed, 115 insertions(+), 195 deletions(-) diff --git a/perception_eval/perception_eval/metrics/__init__.py b/perception_eval/perception_eval/metrics/__init__.py index 0310fa43..7b7fad70 100644 --- a/perception_eval/perception_eval/metrics/__init__.py +++ b/perception_eval/perception_eval/metrics/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/perception_eval/perception_eval/metrics/classification/__init__.py b/perception_eval/perception_eval/metrics/classification/__init__.py index a529cf2f..c3139590 100644 --- a/perception_eval/perception_eval/metrics/classification/__init__.py +++ b/perception_eval/perception_eval/metrics/classification/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/perception_eval/perception_eval/metrics/classification/accuracy.py b/perception_eval/perception_eval/metrics/classification/accuracy.py index e8846437..80905793 100644 --- a/perception_eval/perception_eval/metrics/classification/accuracy.py +++ b/perception_eval/perception_eval/metrics/classification/accuracy.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,22 +26,10 @@ class ClassificationAccuracy: - """[summary] - Class to calculate classification accuracy. - - Attributes: - target_labels (List[LabelType]): Target labels list. - num_ground_truth (int): Number of ground truths. - objects_results_num (int): Number of object results. - num_tp (int): Number of TP results. - num_fp (int): Number of FP results. - accuracy (float): Accuracy score. When `num_ground_truth+objects_results_num-num_tp=0`, this is float('inf'). - precision (float): Precision score. When `num_tp+num_fp=0`, this is float('inf'). - recall (float): Recall score. When `num_ground_truth=0`, this is float('inf'). - f1score (float): F1 score. When `precision+recall=0`, this is float('inf'). - results (Dict[str, float]): Dict that items are scores mapped by score names. + """Class to calculate classification accuracy. Args: + ----- object_results (List[PerceptionObjectResult]): Object results list. num_ground_truth (int): Number of ground truths. target_labels (List[LabelType]): Target labels list. @@ -83,9 +71,11 @@ def calculate_tp_fp(self, object_results: List[PerceptionObjectResult]) -> Tuple """Calculate accuracy score. Args: + ----- object_results (List[PerceptionObjectResult]): Object results list. Returns: + -------- num_tp (int): Number of TP results. num_fp (int): Number of FP results. """ @@ -104,9 +94,11 @@ def calculate_accuracy(self, num_tp: int) -> float: Accuracy = (TP + TN) / (TP + TN + FP + FN) Args: + ----- num_tp (int): Number of TP results. Returns: + -------- float: Accuracy score. When `objects_results_num+num_ground_truth-num_tp=0`, returns float('inf'). """ return ( @@ -122,9 +114,11 @@ def calculate_precision_recall(self, num_tp: int) -> Tuple[float, float]: Recall = TP / (TP + FN) Args: + ----- num_tp (int): Number of TP results. Returns: + -------- precision (float): Precision score. When `self.object_results_num=0`, returns float('inf'). recall (float): Recall score. When `self.num_ground_truth=0`, returns float('inf'). """ @@ -138,11 +132,13 @@ def calculate_f1score(self, precision: float, recall: float, beta: float = 1.0) F score = (1 + beta**2) * precision * recall / (beta**2 * precision + recall) Args: + ----- precision (float): Precision score. recall (float): Recall score. beta (float): Defaults 1.0. Returns: + -------- f1score (float): F1 score. When `precision+recall=0`, returns float('inf'). 
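+
+        Example:
+            An illustrative calculation with the default `beta=1.0`, assuming
+            `acc` is a `ClassificationAccuracy` instance:
+
+            >>> acc.calculate_f1score(precision=0.8, recall=0.6)  # 2 * 0.48 / 1.4 ≈ 0.686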
""" return ( diff --git a/perception_eval/perception_eval/metrics/classification/classification_metrics_score.py b/perception_eval/perception_eval/metrics/classification/classification_metrics_score.py index 99f4297e..7310b7ec 100644 --- a/perception_eval/perception_eval/metrics/classification/classification_metrics_score.py +++ b/perception_eval/perception_eval/metrics/classification/classification_metrics_score.py @@ -29,10 +29,8 @@ class ClassificationMetricsScore: """Metrics score class for classification evaluation. - Attributes: - self.accuracies (List[ClassificationAccuracy]): List of ClassificationAccuracy instances. - Args: + ----- object_results_dict (Dict[LabelType, List[List[PerceptionObjectResult]]]): Dict that are list of PerceptionObjectResult mapped by their labels. num_ground_truth_dict (Dict[LabelType, int]): Dict that are number of PerceptionObjectResult @@ -62,6 +60,7 @@ def _summarize(self) -> Tuple[float, float, float, float]: """Summarize all ClassificationAccuracy. Returns: + -------- accuracy (float): Accuracy score. When `num_est+num_gt-num_tp=0`, this is float('inf'). precision (float): Precision score. When `num_gt+num_fp=0`, this is float('inf'). recall (float): Recall score. When `num_gt=0`, this is float('inf'). diff --git a/perception_eval/perception_eval/metrics/config/__init__.py b/perception_eval/perception_eval/metrics/config/__init__.py index 9be33190..e0c0beaf 100644 --- a/perception_eval/perception_eval/metrics/config/__init__.py +++ b/perception_eval/perception_eval/metrics/config/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/perception_eval/perception_eval/metrics/config/_metrics_config_base.py b/perception_eval/perception_eval/metrics/config/_metrics_config_base.py index 7085b6b3..011bd509 100644 --- a/perception_eval/perception_eval/metrics/config/_metrics_config_base.py +++ b/perception_eval/perception_eval/metrics/config/_metrics_config_base.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -35,6 +35,7 @@ class _MetricsConfigBase(ABC): Then, threshold for car objects is 1.0. threshold for bike objects is 0.5. threshold for pedestrian object is 0.5. Args: + ----- target_labels (List[LabelType]): Target labels list. center_distance_thresholds (List[List[float]]): Thresholds List of center distance. Defaults to None. diff --git a/perception_eval/perception_eval/metrics/config/classification_metrics_config.py b/perception_eval/perception_eval/metrics/config/classification_metrics_config.py index 606adb9a..e9f399bd 100644 --- a/perception_eval/perception_eval/metrics/config/classification_metrics_config.py +++ b/perception_eval/perception_eval/metrics/config/classification_metrics_config.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -29,15 +29,8 @@ class ClassificationMetricsConfig(_MetricsConfigBase): """Configuration class for classification evaluation metrics. - Attributes: - evaluation_task (EvaluationTask.CLASSIFICATION) - target_labels (List[LabelType]): Target labels list. 
- center_distance_thresholds (List[float]): Thresholds list of center distance matching. - plane_distance_thresholds (List[float]): Threshold list of plane distance matching. - iou_2d_thresholds (List[float]): Thresholds list of 2d iou matching. - iou_3d_thresholds (List[float]): Thresholds list of 3d iou matching. - Args: + ----- target_labels (List[LabelType]): Target labels list. center_distance_thresholds (List[float]): Thresholds list of center distance matching. plane_distance_thresholds (List[float]): Threshold list of plane distance matching. diff --git a/perception_eval/perception_eval/metrics/config/detection_metrics_config.py b/perception_eval/perception_eval/metrics/config/detection_metrics_config.py index 50f123e9..71df912b 100644 --- a/perception_eval/perception_eval/metrics/config/detection_metrics_config.py +++ b/perception_eval/perception_eval/metrics/config/detection_metrics_config.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -29,14 +29,6 @@ class DetectionMetricsConfig(_MetricsConfigBase): """Configuration class for detection evaluation metrics. - Attributes: - evaluation_task (EvaluationTask.DETECTION) - target_labels (List[LabelType]): Target labels list. - center_distance_thresholds (List[float]): Thresholds list of center distance matching. - plane_distance_thresholds (List[float]): Threshold list of plane distance matching. - iou_2d_thresholds (List[float]): Thresholds list of 2d iou matching. - iou_3d_thresholds (List[float]): Thresholds list of 3d iou matching. - Args: target_labels (List[LabelType]): Target labels list. center_distance_thresholds (List[float]): Thresholds list of center distance matching. diff --git a/perception_eval/perception_eval/metrics/config/prediction_metrics_config.py b/perception_eval/perception_eval/metrics/config/prediction_metrics_config.py index 5af96f20..4702921f 100644 --- a/perception_eval/perception_eval/metrics/config/prediction_metrics_config.py +++ b/perception_eval/perception_eval/metrics/config/prediction_metrics_config.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -28,15 +28,8 @@ class PredictionMetricsConfig(_MetricsConfigBase): """Configuration class for prediction evaluation metrics. - Attributes: - evaluation_task (EvaluationTask.PREDICTION) - target_labels (List[LabelType]): Target labels list. - center_distance_thresholds (List[float]): Thresholds list of center distance matching. - plane_distance_thresholds (List[float]): Threshold list of plane distance matching. - iou_2d_thresholds (List[float]): Thresholds list of 2d iou matching. - iou_3d_thresholds (List[float]): Thresholds list of 3d iou matching. - Args: + ----- target_labels (List[LabelType]): Target labels list. center_distance_thresholds (List[float]): Thresholds list of center distance matching. plane_distance_thresholds (List[float]): Threshold list of plane distance matching. 
diff --git a/perception_eval/perception_eval/metrics/config/tracking_metrics_config.py b/perception_eval/perception_eval/metrics/config/tracking_metrics_config.py index e2e9bcea..5ced4ae2 100644 --- a/perception_eval/perception_eval/metrics/config/tracking_metrics_config.py +++ b/perception_eval/perception_eval/metrics/config/tracking_metrics_config.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -28,15 +28,8 @@ class TrackingMetricsConfig(_MetricsConfigBase): """Configuration class for tracking evaluation metrics. - Attributes: - evaluation_task (EvaluationTask.TRACKING) - target_labels (List[LabelType]): Target labels list. - center_distance_thresholds (List[float]): Thresholds list of center distance matching. - plane_distance_thresholds (List[float]): Threshold list of plane distance matching. - iou_2d_thresholds (List[float]): Thresholds list of 2d iou matching. - iou_3d_thresholds (List[float]): Thresholds list of 3d iou matching. - Args: + ----- target_labels (List[LabelType]): Target labels list. center_distance_thresholds (List[float]): Thresholds list of center distance matching. plane_distance_thresholds (List[float]): Threshold list of plane distance matching. diff --git a/perception_eval/perception_eval/metrics/detection/__init__.py b/perception_eval/perception_eval/metrics/detection/__init__.py index acaaa366..31acc2a8 100644 --- a/perception_eval/perception_eval/metrics/detection/__init__.py +++ b/perception_eval/perception_eval/metrics/detection/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/perception_eval/perception_eval/metrics/detection/ap.py b/perception_eval/perception_eval/metrics/detection/ap.py index 076d45b1..c6afb7cc 100644 --- a/perception_eval/perception_eval/metrics/detection/ap.py +++ b/perception_eval/perception_eval/metrics/detection/ap.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,21 +39,8 @@ class Ap: """AP class. - Attributes: - ap (float): AP (Average Precision) score. - matching_average (Optional[float]): Average of matching score. - If there are no object results, this variable is None. - matching_mode (MatchingMode): MatchingMode instance. - matching_threshold (List[float]): Thresholds list for matching. - matching_standard_deviation (Optional[float]): Standard deviation of matching score. - If there are no object results, this variable is None. - target_labels (List[LabelType]): Target labels list. - tp_metrics (TPMetrics): Mode of TP metrics. - ground_truth_objects_num (int): Number ground truths. - tp_list (List[float]): List of the number of TP objects ordered by their confidences. - fp_list (List[float]): List of the number of FP objects ordered by their confidences. - Args: + ----- tp_metrics (TPMetrics): Mode of TP (True positive) metrics. object_results (List[List[PerceptionObjectResult]]): Object results list. num_ground_truth (int): Number of ground truths. 
@@ -115,16 +102,13 @@ def __init__(
             matching_mode=self.matching_mode,
         )
 
-    def save_precision_recall_graph(
-        self,
-        result_directory: str,
-        frame_name: str,
-    ) -> None:
-        """[summary]
-        Save visualization image of precision and recall curve.
+    def save_precision_recall_graph(self, result_directory: str, frame_name: str) -> None:
+        """Save a visualization image of the precision-recall curve.
+
+        The circle points represent original values and the square points represent interpolated ones.
 
         Args:
+        -----
             result_directory (str): The directory path to save images.
             frame_name (str): The frame name.
         """
@@ -161,16 +145,15 @@ def save_precision_recall_graph(
         plt.ylabel("Precision")
         plt.savefig(file_path)
 
-    def get_precision_recall_list(
-        self,
-    ) -> Tuple[List[float], List[float]]:
-        """[summary]
-        Calculate precision recall.
+    def get_precision_recall_list(self) -> Tuple[List[float], List[float]]:
+        """Calculate precision and recall.
 
         Returns:
+        --------
-            Tuple[List[float], List[float]]: tp_list and fp_list
+            Tuple[List[float], List[float]]: precision_list and recall_list
 
         Example:
+        --------
             state
                 self.tp_list = [1, 1, 2, 3]
                 self.fp_list = [0, 1, 1, 1]
@@ -194,13 +177,15 @@ def interpolate_precision_recall_list(
         self,
         precision_list: List[float],
         recall_list: List[float],
-    ):
-        """[summary]
-        Interpolate precision and recall with maximum precision value per recall bins.
+    ) -> Tuple[List[float], List[float]]:
+        """Interpolates precision and recall with the maximum precision value per recall bin.
 
         Args:
-            precision_list (List[float])
-            recall_list (List[float])
+            precision_list (List[float]): List of precisions.
+            recall_list (List[float]): List of recalls.
+
+        Returns:
+            Tuple[List[float], List[float]]: Lists of max precisions and corresponding recalls.
         """
         max_precision_list: List[float] = [precision_list[-1]]
         max_precision_recall_list: List[float] = [recall_list[-1]]
@@ -221,20 +206,22 @@ def _calculate_tp_fp(
         self,
         tp_metrics: Union[TPMetricsAp, TPMetricsAph],
         object_results: List[PerceptionObjectResult],
     ) -> Tuple[List[float], List[float]]:
-        """
-        Calculate TP (true positive) and FP (false positive).
+        """Calculates TP (true positive) and FP (false positive).
 
         Args:
+        -----
             tp_metrics (TPMetrics): The mode of TP (True positive) metrics
             object_results (List[PerceptionObjectResult]): the list of objects with result
 
         Return:
+        -------
             Tuple[tp_list, fp_list]
 
             tp_list (List[float]): the list of TP ordered by object confidence
             fp_list (List[float]): the list of FP ordered by object confidence
 
         Example:
+        --------
             whether object label is correct [True, False, True, True]
             return
                 tp_list = [1, 1, 2, 3]
@@ -282,22 +269,20 @@ def _calculate_tp_fp(
 
         return tp_list, fp_list
 
-    def _calculate_ap(
-        self,
-        precision_list: List[float],
-        recall_list: List[float],
-    ) -> float:
-        """[summary]
-        Calculate AP (average precision)
+    def _calculate_ap(self, precision_list: List[float], recall_list: List[float]) -> float:
+        """Calculates AP (average precision).
 
         Args:
+        -----
             precision_list (List[float]): The list of precision
             recall_list (List[float]): The list of recall
 
         Returns:
+        --------
             float: AP
 
         Example:
+        --------
             precision_list = [1.0, 0.5, 0.67, 0.75]
             recall_list = [0.25, 0.25, 0.5, 0.75]
 
@@ -329,14 +314,15 @@ def _calculate_average_sd(
         object_results: List[PerceptionObjectResult],
         matching_mode: MatchingMode,
     ) -> Tuple[Optional[float], Optional[float]]:
-        """[summary]
-        Calculate average and standard deviation.
+        """Calculates average and standard deviation.
         Args:
+        -----
             object_results (List[PerceptionObjectResult]): The object results
-            matching_mode (MatchingMode): [description]
+            matching_mode (MatchingMode): Matching mode instance.
 
         Returns:
+        --------
-            Tuple[float, float]: [description]
+            Tuple[Optional[float], Optional[float]]: Average and standard deviation of matching scores. None if there are no object results.
         """
 
@@ -352,10 +338,13 @@ def _calculate_average_sd(
 
     @staticmethod
     def _get_flat_str(str_list: List[str]) -> str:
-        """
-        Example:
-            a = _get_flat_str([aaa, bbb, ccc])
-            print(a) # aaa_bbb_ccc
+        """Flattens a list of strings.
+
+        Args:
+            str_list (List[str]): List of strings.
+
+        Returns:
+            str: Flattened string, e.g. [aa, bb, cc] -> aa_bb_cc.
         """
         output = ""
         for one_str in str_list:
diff --git a/perception_eval/perception_eval/metrics/detection/map.py b/perception_eval/perception_eval/metrics/detection/map.py
index bc29e90e..9a74b91a 100644
--- a/perception_eval/perception_eval/metrics/detection/map.py
+++ b/perception_eval/perception_eval/metrics/detection/map.py
@@ -1,4 +1,4 @@
-# Copyright 2022 TIER IV, Inc.
+# Copyright 2022-2024 TIER IV, Inc.
 
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -31,12 +31,8 @@
 class Map:
     """mAP metrics score class.
 
-    Attributes:
-        map_config (MapConfig): The config for mAP calculation.
-        aps (List[Ap]): The list of AP (Average Precision) for each label.
-        map (float): mAP value.
-
     Args:
+    -----
         object_results (List[List[PerceptionObjectResult]]): The list of object results
         target_labels (List[LabelType]): Target labels to evaluate mAP
         matching_mode (MatchingMode): Matching mode like distance between the center of
@@ -100,6 +96,7 @@ def __str__(self) -> str:
         """__str__ method
 
         Returns:
+        --------
             str: Formatted string.
         """
diff --git a/perception_eval/perception_eval/metrics/detection/tp_metrics.py b/perception_eval/perception_eval/metrics/detection/tp_metrics.py
index 3ae6af1c..e6427d3f 100644
--- a/perception_eval/perception_eval/metrics/detection/tp_metrics.py
+++ b/perception_eval/perception_eval/metrics/detection/tp_metrics.py
@@ -1,4 +1,4 @@
-# Copyright 2022 TIER IV, Inc.
+# Copyright 2022-2024 TIER IV, Inc.
 
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -27,11 +27,7 @@
 
 
 class TPMetrics(metaclass=ABCMeta):
-    """Tp metrics meta class
-
-    Attributes:
-        mode (str): TP metrics name.
-    """
+    """TP metrics meta class."""
 
     mode: str
 
@@ -52,12 +48,7 @@ def get_value(self, object_result: PerceptionObjectResult) -> float:
 
 
 class TPMetricsAp(TPMetrics):
-    """
-    Ap metrics class
-
-    Attributes:
-        mode (str): TP metrics name that is TPMetricsAp.
-    """
+    """AP metrics class."""
 
     mode: str = "TPMetricsAp"
 
@@ -76,12 +67,7 @@ def get_value(self, object_result: PerceptionObjectResult) -> float:
 
 
 class TPMetricsAph(TPMetrics):
-    """
-    Aph metrics class
-
-    Attributes:
-        mode (str): TP metrics name that is TPMetricsAph.
-    """
+    """APH metrics class."""
 
     mode: str = "TPMetricsAph"
 
@@ -119,11 +105,7 @@ def get_value(self, object_result: PerceptionObjectResult) -> float:
 
 
 class TPMetricsConfidence(TPMetrics):
-    """Confidence TP class.
-
-    Attributes:
-        mode (str): TP metrics name that is TPMetricsConfidence.
-    """
+    """Confidence TP class."""
 
     mode: str = "TPMetricsConfidence"
 
diff --git a/perception_eval/perception_eval/metrics/metrics.py b/perception_eval/perception_eval/metrics/metrics.py
index de787f20..560b0a2e 100644
--- a/perception_eval/perception_eval/metrics/metrics.py
+++ b/perception_eval/perception_eval/metrics/metrics.py
@@ -1,4 +1,4 @@
-# Copyright 2022 TIER IV, Inc.
+# Copyright 2022-2024 TIER IV, Inc.
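The three TP metrics modes above differ only in the value a true positive contributes. A hedged sketch: `TPMetricsAp` counts 1.0 per TP, `TPMetricsAph` down-weights a TP by its heading error (shown here with a Waymo-style 1 - |Δθ|/π weighting; the exact formula in tp_metrics.py may differ), and `TPMetricsConfidence` uses the estimation's confidence:

import math


def tp_value_ap() -> float:
    # TPMetricsAp: every true positive contributes a flat 1.0.
    return 1.0


def tp_value_aph(est_yaw: float, gt_yaw: float) -> float:
    # TPMetricsAph-style: weight the TP by heading accuracy.
    diff = abs(est_yaw - gt_yaw) % (2.0 * math.pi)
    diff = min(diff, 2.0 * math.pi - diff)  # wrap the error into [0, pi]
    return max(0.0, 1.0 - diff / math.pi)


def tp_value_confidence(confidence: float) -> float:
    # TPMetricsConfidence: use the estimated object's confidence as the TP value.
    return confidence


print(tp_value_aph(0.0, math.pi / 2))  # 0.5: a 90 degree heading error halves the TP value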
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -33,19 +33,8 @@
 class MetricsScore:
     """Metrics score class.
 
-    Attributes:
-        detection_config (Optional[DetectionMetricsConfig]): Config for detection evaluation.
-        tracking_config (Optional[DetectionMetricsConfig]): Config for tracking evaluation.
-        prediction_config (Optional[PredictionMetricsConfig]): Config for prediction evaluation.
-        classification_config (Optional[ClassificationMetricsConfig]): Config for classification evaluation.
-        evaluation_task (EvaluationTask): EvaluationTask instance.
-        maps (List[Map]): List of mAP instances. Each mAP is different from threshold
-            for matching (ex. IoU 0.3).
-        tracking_scores (List[TrackingMetricsScore]): List of TrackingMetricsScore instances.
-        prediction_scores (List[TODO]): TBD
-        classification_score (List[ClassificationMetricsScore]): List of ClassificationMetricsScore instances.
-
     Args:
+    -----
         config (MetricsScoreConfig): MetricsScoreConfig instance.
-        used_frame: List[int]: List of frame numbers loaded to evaluate.
+        used_frame (List[int]): List of frame numbers loaded to evaluate.
     """
@@ -127,10 +116,10 @@ def evaluate_detection(
         object_results: Dict[LabelType, List[PerceptionObjectResult]],
         num_ground_truth: Dict[LabelType, int],
     ) -> None:
-        """[summary]
-        Calculate detection metrics
+        """Calculate detection metrics.
 
         Args:
+        -----
             object_results (Dict[LabelType, List[PerceptionObjectResult]]): The dict of object result
         """
         if self.tracking_config is None:
@@ -183,8 +172,7 @@ def evaluate_tracking(
         object_results: Dict[LabelType, List[List[PerceptionObjectResult]]],
         num_ground_truth: Dict[LabelType, int],
     ) -> None:
-        """[summary]
-        Calculate tracking metrics.
+        """Calculate tracking metrics.
 
         NOTE: object_results and ground_truth_objects must be nested list.
 
         In case of evaluating single frame, [[object_result, ...]]
         In case of evaluating multi frame, [[], [t1], [t2], ..., [tn]]
 
         Args:
+        -----
             object_results (List[List[PerceptionObjectResult]]): The list of object result for each frame.
         """
         self.__num_gt += sum(num_ground_truth.values())
@@ -240,10 +229,10 @@ def evaluate_prediction(
         object_results: Dict[LabelType, List[PerceptionObjectResult]],
         num_ground_truth: Dict[LabelType, int],
     ) -> None:
-        """[summary]
-        Calculate prediction metrics
+        """Calculate prediction metrics.
 
         Args:
+        -----
             object_results (List[PerceptionObjectResult]): The list of object result
         """
         pass
@@ -253,6 +242,13 @@ def evaluate_classification(
         object_results: Dict[LabelType, List[List[PerceptionObjectResult]]],
         num_ground_truth: Dict[LabelType, int],
     ) -> None:
+        """Calculates classification metrics score.
+
+        Args:
+        -----
+            object_results (Dict[LabelType, List[List[PerceptionObjectResult]]]): Object results.
+            num_ground_truth (Dict[LabelType, int]): The number of ground truths.
+        """
         self.__num_gt += sum(num_ground_truth.values())
         classification_score_ = ClassificationMetricsScore(
             object_results_dict=object_results,
diff --git a/perception_eval/perception_eval/metrics/metrics_score_config.py b/perception_eval/perception_eval/metrics/metrics_score_config.py
index 6260cbbc..39e0346b 100644
--- a/perception_eval/perception_eval/metrics/metrics_score_config.py
+++ b/perception_eval/perception_eval/metrics/metrics_score_config.py
@@ -1,4 +1,4 @@
-# Copyright 2022 TIER IV, Inc.
+# Copyright 2022-2024 TIER IV, Inc.
 
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
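The nested-list convention `evaluate_tracking()` documents can be sketched as below; `frame_results_for_tracking` is an illustrative helper, not library API:

from typing import Any, List

ObjectResult = Any  # stand-in for PerceptionObjectResult


def frame_results_for_tracking(frames: List[List[ObjectResult]]) -> List[List[ObjectResult]]:
    # Single frame: [[object_result, ...]].
    # Multi frame: prepend an empty frame, [[], [t1], [t2], ..., [tn]].
    return frames if len(frames) <= 1 else [[]] + frames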
@@ -36,16 +36,9 @@ class MetricsScoreConfig: """A configuration class for each evaluation task metrics. - Attributes: - detection_config (Optional[DetectionMetricsConfig]): Config for detection evaluation. - tracking_config (Optional[DetectionMetricsConfig]): Config for tracking evaluation. - prediction_config (Optional[PredictionMetricsConfig]): Config for prediction evaluation. - classification_config (Optional[ClassificationMetricsConfig]): Config for classification evaluation. - evaluation_task (EvaluationTask): EvaluationTask instance. - Args: - evaluation_task (EvaluationTask): EvaluationTask instance. - **cfg: Configuration data. + ----- + params (PerceptionMetricsParam): Parameters for metrics score. """ def __init__(self, params: PerceptionMetricsParam) -> None: @@ -81,10 +74,16 @@ def _extract_params(config: _MetricsConfigBase, params: PerceptionMetricsParam) """Check if input parameters are valid. Args: + ----- config (_MetricsConfigBase): Metrics score instance. params (PerceptionMetricsParam): Parameters for metrics. + Returns: + -------- + Dict[str, Any]: Extracted params. + Raises: + ------- KeyError: When got invalid parameter names. """ input_params_dict = params.as_dict() diff --git a/perception_eval/perception_eval/metrics/tracking/__init__.py b/perception_eval/perception_eval/metrics/tracking/__init__.py index 9fcce01e..8c2bdae9 100644 --- a/perception_eval/perception_eval/metrics/tracking/__init__.py +++ b/perception_eval/perception_eval/metrics/tracking/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/perception_eval/perception_eval/metrics/tracking/_metrics_base.py b/perception_eval/perception_eval/metrics/tracking/_metrics_base.py index 7f984a78..a38a71f8 100644 --- a/perception_eval/perception_eval/metrics/tracking/_metrics_base.py +++ b/perception_eval/perception_eval/metrics/tracking/_metrics_base.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -34,15 +34,8 @@ class _TrackingMetricsBase(ABC): """Abstract base class for tracking metrics. - Attributes: - num_ground_truth (int): Number of ground truths. - target_labels (List[LabelType]): Target labels list. - matching_mode (MatchingMode): MatchingMode instance. - matching_threshold_list (List[float]): Thresholds list for matching. - metrics_field (Optional[List[str]]): Filed name of target metrics. If it is not specified, default supported metrics are used. - support_metrics (List[str]): List of supported metrics names. - Args: + ----- num_ground_truth (int): Number of ground truths. target_labels (List[LabelType]): Target labels list. matching_mode (MatchingMode): MatchingMode instance. @@ -95,12 +88,15 @@ def _check_metrics(self, metrics_field: Optional[List[str]]) -> List[str]: If `metrics_field=None`, returns default supported metrics. Args: + ----- metrics_field (Optional[List[str]]): The list of executing metrics field. Returns: + -------- metrics_field (List[str]) Raises: + ------- ValueError: If input metrics field is unsupported. """ if metrics_field is None: @@ -143,13 +139,16 @@ def get_precision_recall_list( """Calculate precision recall. Args: + ----- tp_list (List[float]): TP results list. 
             ground_truth_objects_num (int): Number of ground truths.
 
         Returns:
+        --------
-            Tuple[List[float], List[float]]: tp_list and fp_list
+            Tuple[List[float], List[float]]: precision_list and recall_list
 
         Examples:
+        ---------
             >>> tp_list = [1, 1, 2, 3]
             >>> ground_truth_num = 4
-            >>> precision_list, recall_list = self.get_precision_recall(tp_list, ground_truth_num)
+            >>> precision_list, recall_list = self.get_precision_recall_list(tp_list, ground_truth_num)
diff --git a/perception_eval/perception_eval/metrics/tracking/clear.py b/perception_eval/perception_eval/metrics/tracking/clear.py
index 966def37..ed823675 100644
--- a/perception_eval/perception_eval/metrics/tracking/clear.py
+++ b/perception_eval/perception_eval/metrics/tracking/clear.py
@@ -1,4 +1,4 @@
-# Copyright 2022 TIER IV, Inc.
+# Copyright 2022-2024 TIER IV, Inc.
 
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -43,21 +43,8 @@ class CLEAR(_TrackingMetricsBase):
-    NOTE: MT, ML, PT is under construction.
+    NOTE: MT, ML, PT are under construction.
 
-    Attributes:
-        target_labels (List[LabelType]): The list of target label.
-        matching_mode (MatchingMode): The target matching mode.
-        metrics_field (Optional[List[str]]): The list of target metrics name. If not specified, set default supported metrics.
-        ground_truth_objects_num (int): The number of ground truth.
-        support_metrics (List[str]): The list of supported metrics name. (["MOTA", "MOTP"])
-        tp (float): The total value/number of TP.
-        fp (float): The total value/number of FP.
-        id_switch (int): The total number of ID switch.
-        tp_matching_score (float): The total value of matching score in TP.
-        mota (float): MOTA score.
-        motp (float): MOTP score.
-        results (OrderedDict[str, Any]): The dict to keep scores.
-
     Args:
+    -----
-        object_results (List[List[PerceptionObjectResult]]): The list of object results for each frames.
+        object_results (List[List[PerceptionObjectResult]]): The list of object results for each frame.
         num_ground_truth (int): The number of ground truth.
         target_labels (List[LabelType]): The list of target labels.
@@ -138,6 +125,7 @@ def _calculate_score(self):
         if the number of GT is 0, MOTA returns inf and if the TP score is 0, MOTP returns inf.
 
         Returns:
+        --------
             mota (float): MOTA score.
             motp (float): MOTP score.
         """
@@ -162,10 +150,12 @@ def _calculate_tp_fp(
         """Calculate matching compared with previous object results.
 
         Args:
+        -----
             cur_object_results (List[List[PerceptionObjectResult]]): Object results list at current frame.
             prev_object_results (List[List[PerceptionObjectResult]]): Object results list at previous frame.
 
         Returns:
+        --------
             tp (float): Total value of TP. If matching is True, num_tp += 1.0.
             fp (float): Total value of FP. If matching is False, num_fp += 1.0.
             num_id_switch (int): Total number of ID switch. If matching is switched compared with previous result, num_id_switch += 1.
@@ -241,10 +231,12 @@ def _is_id_switched(cur_object_result: PerceptionObjectResult, prev_object_resul
         GT ID is unique between the different labels.
 
         Args:
+        -----
             cur_object_result (PerceptionObjectResult): Object result at current frame.
             prev_object_result (PerceptionObjectResult): Object result at previous frame.
 
         Returns:
+        --------
             bool: Return True if ID is switched.
         """
         # current GT = None -> FP
@@ -282,10 +274,12 @@ def _is_same_match(
-        When previous or current GT is None(=FP), return False regardless the ID of estimated.
+        When previous or current GT is None (=FP), return False regardless of the estimated ID.
 
         Args:
+        -----
             cur_object_result (PerceptionObjectResult): Object result at current frame.
-            prev_object_result (PerceptionObjectResult):Object result at previous frame.
+            prev_object_result (PerceptionObjectResult): Object result at previous frame.
 
         Returns:
+        --------
             bool: Return True if both estimated and GT ID are same.
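A hedged sketch of the CLEAR scores described in these hunks: MOTA from the accumulated TP/FP/ID-switch totals and MOTP as the mean matching score over TPs, mirroring the documented convention that MOTA is inf when there are no GTs and MOTP is inf when the TP score is 0 (formula details may differ from clear.py):

from typing import Tuple


def clear_scores(tp: float, fp: float, id_switch: int, tp_matching_score: float, num_gt: int) -> Tuple[float, float]:
    # MOTA penalizes FPs and ID switches against the number of GTs (clamped at 0.0).
    mota = float("inf") if num_gt == 0 else max(0.0, (tp - fp - id_switch) / num_gt)
    # MOTP averages the accumulated matching score over the TP value.
    motp = float("inf") if tp == 0 else tp_matching_score / tp
    return mota, motp


print(clear_scores(tp=8.0, fp=1.0, id_switch=1, tp_matching_score=4.0, num_gt=10))  # (0.6, 0.5)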
""" if cur_object_result.ground_truth_object is None or prev_object_result.ground_truth_object is None: diff --git a/perception_eval/perception_eval/metrics/tracking/tracking_metrics_score.py b/perception_eval/perception_eval/metrics/tracking/tracking_metrics_score.py index e09829f6..39689a89 100644 --- a/perception_eval/perception_eval/metrics/tracking/tracking_metrics_score.py +++ b/perception_eval/perception_eval/metrics/tracking/tracking_metrics_score.py @@ -1,4 +1,4 @@ -# Copyright 2022 TIER IV, Inc. +# Copyright 2022-2024 TIER IV, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -33,12 +33,8 @@ class TrackingMetricsScore: Length of input `target_labels` and `matching_threshold_list` must be same. - Attributes: - target_labels: (List[LabelType]): Target labels list. - matching_mode (MatchingMode): MatchingMode instance. - clears (List[CLEAR]): List of CLEAR instances. - Args: + ----- object_results_dict (Dict[LabelType, List[List[PerceptionObjectResult]]): Dict that items are object results list mapped by their labels. num_ground_truth (int): Number of ground truths. @@ -78,6 +74,7 @@ def _sum_clear(self) -> Tuple[float, float, int]: """Summing up multi CLEAR result. Returns: + -------- mota (float): MOTA score. motp (float): MOTP score. num_id_switch (int): The number of ID switched.