From c5803bee0958ce0011046c935ebc9c5ba995ea58 Mon Sep 17 00:00:00 2001
From: ktro2828
Date: Fri, 29 Dec 2023 13:10:35 +0900
Subject: [PATCH] feat: rename `thresholds` into `success_thresholds`

Signed-off-by: ktro2828
---
 .../perception/perception_frame_config.py     | 20 +++++++++++++++-----
 .../perception/perception_pass_fail_result.py |  4 ++--
 .../test/perception_fp_validation_lsim.py     |  2 +-
 perception_eval/test/perception_lsim.py       |  2 +-
 perception_eval/test/perception_lsim2d.py     |  4 ++--
 5 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/perception_eval/perception_eval/result/perception/perception_frame_config.py b/perception_eval/perception_eval/result/perception/perception_frame_config.py
index ad229281..2e310d80 100644
--- a/perception_eval/perception_eval/result/perception/perception_frame_config.py
+++ b/perception_eval/perception_eval/result/perception/perception_frame_config.py
@@ -19,6 +19,7 @@
 from typing import TYPE_CHECKING
 from typing import Union
 
+from perception_eval.common.evaluation_task import EvaluationTask
 from perception_eval.common.label import set_target_lists
 from perception_eval.common.threshold import check_thresholds
 from perception_eval.config.params import PerceptionFilterParam
@@ -62,7 +63,7 @@ def __init__(
         confidence_threshold_list: Optional[List[float]] = None,
         target_uuids: Optional[List[str]] = None,
         ignore_attributes: Optional[List[str]] = None,
-        thresholds: Optional[List[float]] = None,
+        success_thresholds: Optional[List[float]] = None,
     ) -> None:
         """
         Args:
@@ -102,13 +103,22 @@ def __init__(
         )
 
         num_elements: int = len(self.target_labels)
-        if thresholds is None:
-            self.thresholds = None
+        if success_thresholds is None:
+            self.success_thresholds = None
         else:
-            self.thresholds = check_thresholds(thresholds, num_elements)
+            self.success_thresholds = check_thresholds(success_thresholds, num_elements)
 
     @classmethod
     def from_eval_cfg(cls, eval_cfg: PerceptionEvaluationConfig) -> PerceptionFrameConfig:
+        if eval_cfg.evaluation_task.is_3d():
+            success_thresholds = eval_cfg.metrics_param.plane_distance_thresholds
+        else:
+            success_thresholds = (
+                None
+                if eval_cfg.evaluation_task == EvaluationTask.CLASSIFICATION2D
+                else eval_cfg.metrics_param.iou_2d_thresholds
+            )
+
         return cls(
             evaluator_config=eval_cfg,
             target_labels=eval_cfg.target_labels,
@@ -120,5 +130,5 @@ def from_eval_cfg(cls, eval_cfg: PerceptionEvaluationConfig) -> PerceptionFrameC
             confidence_threshold_list=eval_cfg.filter_param.confidence_threshold_list,
             target_uuids=eval_cfg.filter_param.target_uuids,
             ignore_attributes=eval_cfg.filter_param.ignore_attributes,
-            thresholds=eval_cfg.metrics_param.plane_distance_thresholds,
+            success_thresholds=success_thresholds,
         )
diff --git a/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py b/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py
index e05bd828..4a0201fd 100644
--- a/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py
+++ b/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py
@@ -96,7 +96,7 @@ def evaluate(
             object_results,
             self.frame_config.target_labels,
             MatchingMode.IOU2D if self.frame_config.evaluation_task.is_2d() else MatchingMode.PLANEDISTANCE,
-            self.frame_config.thresholds,
+            self.frame_config.success_thresholds,
         )
         self.critical_ground_truth_objects = critical_ground_truth_objects
 
@@ -151,7 +151,7 @@ def __get_positive_object_results(
             matching_mode=MatchingMode.IOU2D
             if self.frame_config.evaluation_task.is_2d()
             else MatchingMode.PLANEDISTANCE,
-            matching_threshold_list=self.frame_config.thresholds,
+            matching_threshold_list=self.frame_config.success_thresholds,
         )
 
         # filter by critical_ground_truth_objects
diff --git a/perception_eval/test/perception_fp_validation_lsim.py b/perception_eval/test/perception_fp_validation_lsim.py
index 6bddbd46..1e0a7bd8 100644
--- a/perception_eval/test/perception_fp_validation_lsim.py
+++ b/perception_eval/test/perception_fp_validation_lsim.py
@@ -75,7 +75,7 @@ def callback(self, unix_time: int, estimated_objects: List[ObjectType]) -> None:
             target_labels=["car", "bicycle", "pedestrian", "motorbike"],
             max_x_position_list=[100.0, 100.0, 100.0, 100.0],
             max_y_position_list=[100.0, 100.0, 100.0, 100.0],
-            thresholds=[2.0, 2.0, 2.0, 2.0],
+            success_thresholds=[2.0, 2.0, 2.0, 2.0],
         )
 
         frame_result = self.evaluator.add_frame_result(
diff --git a/perception_eval/test/perception_lsim.py b/perception_eval/test/perception_lsim.py
index 63dcd874..2a987567 100644
--- a/perception_eval/test/perception_lsim.py
+++ b/perception_eval/test/perception_lsim.py
@@ -116,7 +116,7 @@ def callback(
             ignore_attributes=["cycle_state.without_rider"],
             max_x_position_list=[30.0, 30.0, 30.0, 30.0],
             max_y_position_list=[30.0, 30.0, 30.0, 30.0],
-            thresholds=[2.0, 2.0, 2.0, 2.0],
+            success_thresholds=[2.0, 2.0, 2.0, 2.0],
         )
 
         frame_result = self.evaluator.add_frame_result(
diff --git a/perception_eval/test/perception_lsim2d.py b/perception_eval/test/perception_lsim2d.py
index 9398eb2c..48b4b5a8 100644
--- a/perception_eval/test/perception_lsim2d.py
+++ b/perception_eval/test/perception_lsim2d.py
@@ -115,14 +115,14 @@ def callback(
             else ["car", "bicycle", "pedestrian", "motorbike"]
         )
         ignore_attributes = ["cycle_state.without_rider"] if self.label_prefix == "autoware" else None
-        thresholds = None if self.evaluation_task == "classification2d" else [0.5, 0.5, 0.5, 0.5]
+        success_thresholds = None if self.evaluation_task == "classification2d" else [0.5, 0.5, 0.5, 0.5]
         # 距離などでUC評価objectを選別するためのインターフェイス(PerceptionEvaluationManager初期化時にConfigを設定せず、関数受け渡しにすることで動的に変更可能なInterface)
         # どれを注目物体とするかのparam
         frame_config = PerceptionFrameConfig(
            evaluator_config=self.evaluator.config,
             target_labels=target_labels,
             ignore_attributes=ignore_attributes,
-            thresholds=thresholds,
+            success_thresholds=success_thresholds,
         )
 
         frame_result = self.evaluator.add_frame_result(
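Usage note: a minimal sketch of how a caller passes the renamed keyword argument, mirroring the updated call in perception_eval/test/perception_lsim2d.py. The import path and the pre-initialized `evaluator` are assumptions for illustration and are not part of this patch.

# Sketch only: `evaluator` is assumed to be an already-initialized
# PerceptionEvaluationManager; the import path is inferred from the file
# layout touched by this patch and may differ in the installed package.
from perception_eval.result.perception.perception_frame_config import PerceptionFrameConfig

# classification2d has no matching threshold, so success_thresholds stays None;
# other 2D tasks judge pass/fail against per-label IoU2D thresholds.
evaluation_task = "detection2d"
success_thresholds = None if evaluation_task == "classification2d" else [0.5, 0.5, 0.5, 0.5]

frame_config = PerceptionFrameConfig(
    evaluator_config=evaluator.config,
    target_labels=["car", "bicycle", "pedestrian", "motorbike"],
    success_thresholds=success_thresholds,  # formerly `thresholds`
)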