diff --git a/perception_eval/perception_eval/common/object.py b/perception_eval/perception_eval/common/object.py
index 67d269b9..c10f648c 100644
--- a/perception_eval/perception_eval/common/object.py
+++ b/perception_eval/perception_eval/common/object.py
@@ -405,6 +405,57 @@ def get_velocity_error(
         return err_vx, err_vy, err_vz
 
+    def get_path_error(
+        self,
+        other: Optional[DynamicObject],
+        top_k: Optional[int] = None,
+        num_path_frames: Optional[int] = None,
+    ) -> Optional[np.ndarray]:
+        """[summary]
+        Returns errors of predicted paths as numpy.ndarray.
+
+        Args:
+            other (Optional[DynamicObject]): DynamicObject instance.
+            top_k (Optional[int]): Number of the most confident paths to compare. If None, compare all paths.
+                It must be an integer greater than 0. Defaults to None.
+            num_path_frames (Optional[int]): Number of path frames to compare. If None, the length of
+                the shorter path is used. Defaults to None.
+
+        Returns:
+            Optional[numpy.ndarray]: Errors in shape (K, T, 3), where K is the number of compared path pairs, or None if other is None.
+        """
+        if other is None:
+            return None
+
+        self_paths: List[ObjectPath] = self.predicted_paths.copy()
+        other_paths: List[ObjectPath] = other.predicted_paths.copy()
+
+        if top_k is not None:
+            if top_k <= 0:
+                raise ValueError(f"top_k must be > 0, but got {top_k}")
+            self_paths.sort(key=lambda x: x.confidence, reverse=True)
+            other_paths.sort(key=lambda x: x.confidence, reverse=True)
+            self_paths = self_paths[:top_k]
+            other_paths = other_paths[:top_k]
+
+        displacements: List[List[List[float]]] = []
+        for self_path in self_paths:
+            for other_path in other_paths:
+                errors: List[List[float]] = []
+                num_path_frames_ = (
+                    num_path_frames if num_path_frames else min(len(self_path), len(other_path))
+                )
+                for self_state, other_state in zip(
+                    self_path[:num_path_frames_],
+                    other_path[:num_path_frames_],
+                ):
+                    err_x = other_state.position[0] - self_state.position[0]
+                    err_y = other_state.position[1] - self_state.position[1]
+                    err_z = other_state.position[2] - self_state.position[2]
+                    errors.append([err_x, err_y, err_z])
+                displacements.append(errors)
+        return np.array(displacements)
+
     def get_area_bev(self) -> float:
         """[summary]
         Get area of object BEV.
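For reference, a minimal standalone sketch (outside the patch) of the error tensor that get_path_error builds and of how the prediction metrics below consume it. The position arrays are made-up examples; only numpy is assumed:

import numpy as np

# Hypothetical predicted positions (T = 3 frames) for one estimated path and one ground-truth path.
est_path = np.array([[1.0, 0.0, 0.0], [2.0, 0.5, 0.0], [3.0, 1.0, 0.0]])
gt_path = np.array([[1.2, 0.1, 0.0], [2.4, 0.4, 0.0], [3.9, 0.8, 0.0]])

# Same convention as get_path_error: error = ground truth - estimation, per frame and axis.
errors = gt_path - est_path              # shape (T, 3)
displacements = errors[np.newaxis, ...]  # shape (K, T, 3), here K = 1 compared path pair

# BEV (x, y) displacement per frame, as used by the ADE/FDE metrics added below.
per_frame = np.linalg.norm(displacements[:, :, :2], axis=-1)
print(per_frame.mean())         # average displacement error over all frames
print(per_frame[:, -1].mean())  # final displacement error (last frame only)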
diff --git a/perception_eval/perception_eval/evaluation/metrics/metrics.py b/perception_eval/perception_eval/evaluation/metrics/metrics.py
index 183c6672..411ec889 100644
--- a/perception_eval/perception_eval/evaluation/metrics/metrics.py
+++ b/perception_eval/perception_eval/evaluation/metrics/metrics.py
@@ -19,6 +19,9 @@
 from perception_eval.evaluation.matching.object_matching import MatchingMode
 from perception_eval.evaluation.metrics.detection.map import Map
 from perception_eval.evaluation.metrics.metrics_score_config import MetricsScoreConfig
+from perception_eval.evaluation.metrics.prediction.prediction_metrics_score import (
+    PredictionMetricsScore,
+)
 from perception_eval.evaluation.metrics.tracking.tracking_metrics_score import TrackingMetricsScore
 from perception_eval.evaluation.result.object_result import DynamicObjectWithPerceptionResult
 
@@ -54,7 +57,7 @@ def __init__(
         self.maps: List[Map] = []
         # tracking metrics scores for each matching method
         self.tracking_scores: List[TrackingMetricsScore] = []
-        # TODO: prediction metrics scores for each matching method
+        # prediction metrics scores for each matching method
         self.prediction_scores: List = []
 
         self.__num_frame: int = len(used_frame)
@@ -86,7 +89,9 @@ def __str__(self) -> str:
 
         for track_score in self.tracking_scores:
             str_ += str(track_score)
 
-        # TODO: prediction
+        # prediction
+        for pred_score in self.prediction_scores:
+            str_ += str(pred_score)
 
         return str_
@@ -223,4 +228,41 @@ def evaluate_prediction(
         Args:
             object_results (List[DynamicObjectWithPerceptionResult]): The list of object result
         """
-        pass
+        self.__num_gt += sum(num_ground_truth.values())
+
+        for distance_threshold_ in self.prediction_config.center_distance_thresholds:
+            prediction_score_ = PredictionMetricsScore(
+                object_results_dict=object_results,
+                num_ground_truth_dict=num_ground_truth,
+                target_labels=self.prediction_config.target_labels,
+                matching_mode=MatchingMode.CENTERDISTANCE,
+                matching_threshold_list=distance_threshold_,
+            )
+            self.prediction_scores.append(prediction_score_)
+        for iou_threshold_bev_ in self.prediction_config.iou_bev_thresholds:
+            prediction_score_ = PredictionMetricsScore(
+                object_results_dict=object_results,
+                num_ground_truth_dict=num_ground_truth,
+                target_labels=self.prediction_config.target_labels,
+                matching_mode=MatchingMode.IOUBEV,
+                matching_threshold_list=iou_threshold_bev_,
+            )
+            self.prediction_scores.append(prediction_score_)
+        for iou_threshold_3d_ in self.prediction_config.iou_3d_thresholds:
+            prediction_score_ = PredictionMetricsScore(
+                object_results_dict=object_results,
+                num_ground_truth_dict=num_ground_truth,
+                target_labels=self.prediction_config.target_labels,
+                matching_mode=MatchingMode.IOU3D,
+                matching_threshold_list=iou_threshold_3d_,
+            )
+            self.prediction_scores.append(prediction_score_)
+        for plane_distance_threshold_ in self.prediction_config.plane_distance_thresholds:
+            prediction_score_ = PredictionMetricsScore(
+                object_results_dict=object_results,
+                num_ground_truth_dict=num_ground_truth,
+                target_labels=self.prediction_config.target_labels,
+                matching_mode=MatchingMode.PLANEDISTANCE,
+                matching_threshold_list=plane_distance_threshold_,
+            )
+            self.prediction_scores.append(prediction_score_)
diff --git a/perception_eval/perception_eval/evaluation/metrics/prediction/__init__.py b/perception_eval/perception_eval/evaluation/metrics/prediction/__init__.py
index 9be33190..89eb1d13 100644
--- a/perception_eval/perception_eval/evaluation/metrics/prediction/__init__.py
+++ b/perception_eval/perception_eval/evaluation/metrics/prediction/__init__.py
@@ -11,3 +11,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+from .path_displacement_error import PathDisplacementError
+from .soft_ap import SoftAp
+
+__all__ = ["PathDisplacementError", "SoftAp"]
diff --git a/perception_eval/perception_eval/evaluation/metrics/prediction/path_displacement_error.py b/perception_eval/perception_eval/evaluation/metrics/prediction/path_displacement_error.py
new file mode 100644
index 00000000..39030410
--- /dev/null
+++ b/perception_eval/perception_eval/evaluation/metrics/prediction/path_displacement_error.py
@@ -0,0 +1,154 @@
+# Copyright 2022 TIER IV, Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List
+from typing import Optional
+
+import numpy as np
+from perception_eval.common.label import AutowareLabel
+from perception_eval.common.threshold import get_label_threshold
+from perception_eval.evaluation.matching.object_matching import MatchingMode
+from perception_eval.evaluation.result.object_result import DynamicObjectWithPerceptionResult
+
+
+class PathDisplacementError:
+    """[summary]
+    A class to calculate path displacement errors for the motion prediction task.
+
+    Supported metrics:
+        ADE: Average Displacement Error
+        FDE: Final Displacement Error
+        Miss Rate
+
+    Attributes:
+        self.ade (float)
+        self.fde (float)
+        self.miss_rate (float)
+        self.num_ground_truth (int)
+        self.target_labels (List[AutowareLabel])
+        self.matching_mode (MatchingMode)
+        self.matching_threshold_list (List[float])
+        self.num_path_frames (int)
+        self.top_k (Optional[int])
+        self.miss_tolerance (float)
+    """
+
+    def __init__(
+        self,
+        object_results: List[DynamicObjectWithPerceptionResult],
+        num_ground_truth: int,
+        target_labels: List[AutowareLabel],
+        matching_mode: MatchingMode,
+        matching_threshold_list: List[float],
+        num_path_frames: int = 1,
+        top_k: Optional[int] = 1,
+        miss_tolerance: float = 2.0,
+    ) -> None:
+        """[summary]
+
+        Args:
+            object_results (List[DynamicObjectWithPerceptionResult]): List of matched object results.
+            num_ground_truth (int): Number of ground truth objects.
+            target_labels (List[AutowareLabel]): Target labels to evaluate.
+            matching_mode (MatchingMode): Matching mode used to judge whether a result is correct.
+            matching_threshold_list (List[float]): Matching threshold for each label.
+            num_path_frames (int): Number of predicted frames (time horizon). Defaults to 1[frame].
+            top_k (Optional[int]): Number of the most confident paths to evaluate. If None, evaluate all paths. Defaults to 1.
+            miss_tolerance (float): Tolerance value to determine miss. Defaults to 2.0[m].
+ """ + + self.num_ground_truth: int = num_ground_truth + + self.target_labels: List[AutowareLabel] = target_labels + self.matching_mode: MatchingMode = matching_mode + self.matching_threshold_list: List[float] = matching_threshold_list + + all_object_results: List[DynamicObjectWithPerceptionResult] = [] + if len(object_results) == 0 or not isinstance(object_results[0], list): + all_object_results = object_results + else: + for obj_results in object_results: + all_object_results += obj_results + self.objects_results_num: int = len(all_object_results) + self.num_path_frames: int = num_path_frames + self.top_k: Optional[int] = top_k + self.miss_tolerance: float = miss_tolerance + + displacements: np.ndarray = self.__get_displacements(all_object_results) + if len(displacements) > 0: + self.ade: float = np.linalg.norm(displacements[:, :, :, :2], axis=-1).mean() + self.fde: float = displacements[:, :, -1, :2].mean() + self.miss_rate: float = self.__get_miss_rate(displacements) + self.isnan: bool = False + else: + self.ade = np.nan + self.fde = np.nan + self.miss_rate = np.nan + self.isnan: bool = True + + def __get_displacements( + self, + object_results: List[DynamicObjectWithPerceptionResult], + ) -> np.ndarray: + """[summary] + Returns the displacement errors. + + Args: + object_results (List[DynamicObjectWithPerceptionResult]) + + Returns: + numpy.ndarray: Tensor of errors, in shape (N, K, T, 3). + N is number of objects, K is top k confidential paths, T is length of trajectory. + """ + displacements = [] + for obj_result in object_results: + matching_threshold: float = get_label_threshold( + semantic_label=obj_result.estimated_object.semantic_label, + target_labels=self.target_labels, + threshold_list=self.matching_threshold_list, + ) + is_result_correct: bool = obj_result.is_result_correct( + matching_mode=self.matching_mode, + matching_threshold=matching_threshold, + ) + if not is_result_correct: + continue + estimation = obj_result.estimated_object + ground_truth = obj_result.ground_truth_object + # (K, T, 3) + err: np.ndarray = estimation.get_path_error( + ground_truth, self.top_k, self.num_path_frames + ) + displacements.append(err.tolist()) + + # (N, K, T, 3) + return np.array(displacements) + + def __get_miss_rate(self, displacements: np.ndarray) -> float: + """[summary] + Returns miss rate. + + Args: + displacements (numpy.ndarray): in shape (N, K, T, 3). + + Returns: + float: Miss rate. + """ + # (N, K) + max_displacements: np.ndarray = np.linalg.norm( + displacements[:, :, :, :2], + axis=-1, + ).max(axis=-1) + is_miss: np.ndarray = max_displacements[max_displacements >= self.miss_tolerance] + return is_miss.size / max_displacements.size diff --git a/perception_eval/perception_eval/evaluation/metrics/prediction/prediction_metrics_score.py b/perception_eval/perception_eval/evaluation/metrics/prediction/prediction_metrics_score.py new file mode 100644 index 00000000..908b6895 --- /dev/null +++ b/perception_eval/perception_eval/evaluation/metrics/prediction/prediction_metrics_score.py @@ -0,0 +1,123 @@ +# Copyright 2022 TIER IV, Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict
+from typing import List
+
+import numpy as np
+from perception_eval.common.label import AutowareLabel
+from perception_eval.evaluation.matching.object_matching import MatchingMode
+from perception_eval.evaluation.metrics.prediction import PathDisplacementError
+
+# from perception_eval.evaluation.metrics.prediction import SoftAp
+from perception_eval.evaluation.result.object_result import DynamicObjectWithPerceptionResult
+
+
+class PredictionMetricsScore:
+    def __init__(
+        self,
+        object_results_dict: Dict[AutowareLabel, List[List[DynamicObjectWithPerceptionResult]]],
+        num_ground_truth_dict: Dict[AutowareLabel, int],
+        target_labels: List[AutowareLabel],
+        matching_mode: MatchingMode,
+        matching_threshold_list: List[float],
+    ) -> None:
+        """[summary]
+        Args:
+            object_results_dict (Dict[AutowareLabel, List[List[DynamicObjectWithPerceptionResult]]]):
+                Object results divided by label, for multiple frames.
+            num_ground_truth_dict (Dict[AutowareLabel, int]): The number of ground truths for each label.
+            target_labels (List[AutowareLabel]): e.g. ["car", "pedestrian", "bus"]
+            matching_mode (MatchingMode): The target matching mode.
+            matching_threshold_list (List[float]): The list of matching thresholds, one per label. (e.g. [0.5, 0.3, 0.5])
+        """
+        assert len(target_labels) == len(matching_threshold_list)
+        self.target_labels: List[AutowareLabel] = target_labels
+        self.matching_mode: MatchingMode = matching_mode
+
+        self.displacement_errors: List[PathDisplacementError] = []
+        for target_label, matching_threshold in zip(target_labels, matching_threshold_list):
+            object_results = object_results_dict[target_label]
+            num_ground_truth = num_ground_truth_dict[target_label]
+            displacement_err = PathDisplacementError(
+                object_results=object_results,
+                num_ground_truth=num_ground_truth,
+                target_labels=[target_label],
+                matching_mode=self.matching_mode,
+                matching_threshold_list=[matching_threshold],
+            )
+            self.displacement_errors.append(displacement_err)
+        self.__summarize_score()
+
+    def __summarize_score(self) -> None:
+        """[summary]
+        Summarize score
+        """
+        sum_ade: float = 0.0
+        sum_fde: float = 0.0
+        sum_miss_rate: float = 0.0
+        num_labels: int = len(self.target_labels)
+        for err in self.displacement_errors:
+            if err.isnan:
+                num_labels -= 1
+                continue
+            sum_ade += err.ade
+            sum_fde += err.fde
+            sum_miss_rate += err.miss_rate
+
+        if num_labels == 0:
+            self.ade: float = np.nan
+            self.fde: float = np.nan
+            self.miss_rate: float = np.nan
+        else:
+            self.ade: float = sum_ade / num_labels
+            self.fde: float = sum_fde / num_labels
+            self.miss_rate: float = sum_miss_rate / num_labels
+
+    def __str__(self) -> str:
+        """__str__ method"""
+
+        str_: str = "\n"
+        str_ += f"ADE: {self.ade:.3f}, FDE: {self.fde:.3f}, Miss Rate: {self.miss_rate:.3f}"
+        str_ += f" ({self.matching_mode.value})\n"
+        # Table
+        str_ += "\n"
+        # label
+        str_ += "| Label |"
+        for err in self.displacement_errors:
+            str_ += f" {err.target_labels[0].value}({err.matching_threshold_list[0]}) | "
+        str_ += "\n"
+        str_ += "| :--------: |"
+        for err in self.displacement_errors:
+            str_ += " :---: |"
+        str_ += "\n"
+        str_ += "| Predict_num |"
+        for err in self.displacement_errors:
+            str_ += f" {err.objects_results_num} | "
+        str_ += "\n"
+        str_ += "| ADE |"
+        for err in self.displacement_errors:
+            str_ += f" {err.ade:.3f} | "
+        str_ += "\n"
+        str_ += "| FDE |"
+        for err in self.displacement_errors:
+            str_ += f" {err.fde:.3f} | "
+        str_ += "\n"
+ str_ += "| Miss Rate |" + for err in self.displacement_errors: + str_ += f" {err.miss_rate:.3f} | " + str_ += "\n" + + return str_ diff --git a/perception_eval/perception_eval/evaluation/metrics/prediction/soft_ap.py b/perception_eval/perception_eval/evaluation/metrics/prediction/soft_ap.py new file mode 100644 index 00000000..c6bf6843 --- /dev/null +++ b/perception_eval/perception_eval/evaluation/metrics/prediction/soft_ap.py @@ -0,0 +1,18 @@ +# Copyright 2022 TIER IV, Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class SoftAp: + def __init__(self) -> None: + pass diff --git a/perception_eval/perception_eval/evaluation/metrics/tracking/tracking_metrics_score.py b/perception_eval/perception_eval/evaluation/metrics/tracking/tracking_metrics_score.py index f348016a..d6ee4687 100644 --- a/perception_eval/perception_eval/evaluation/metrics/tracking/tracking_metrics_score.py +++ b/perception_eval/perception_eval/evaluation/metrics/tracking/tracking_metrics_score.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ctypes import Union from typing import Dict from typing import List from typing import Tuple +from typing import Union from perception_eval.common.label import AutowareLabel from perception_eval.evaluation.matching.object_matching import MatchingMode diff --git a/perception_eval/perception_eval/evaluation/result/perception_frame_result.py b/perception_eval/perception_eval/evaluation/result/perception_frame_result.py index 3dc30ddf..a2423324 100644 --- a/perception_eval/perception_eval/evaluation/result/perception_frame_result.py +++ b/perception_eval/perception_eval/evaluation/result/perception_frame_result.py @@ -123,7 +123,7 @@ def evaluate_frame( tracking_results[label] = [prev_results, tracking_results[label]] self.metrics_score.evaluate_tracking(tracking_results, num_ground_truth_dict) if self.metrics_score.prediction_config is not None: - pass + self.metrics_score.evaluate_prediction(object_results_dict, num_ground_truth_dict) self.pass_fail_result.evaluate( object_results=self.object_results, diff --git a/perception_eval/perception_eval/manager/perception_evaluation_manager.py b/perception_eval/perception_eval/manager/perception_evaluation_manager.py index 89fd872e..4da7b16e 100644 --- a/perception_eval/perception_eval/manager/perception_evaluation_manager.py +++ b/perception_eval/perception_eval/manager/perception_evaluation_manager.py @@ -189,7 +189,7 @@ def get_scene_result(self) -> MetricsScore: if self.evaluator_config.metrics_config.tracking_config is not None: scene_metrics_score.evaluate_tracking(all_frame_results, all_num_gt) if self.evaluator_config.metrics_config.prediction_config is not None: - pass + scene_metrics_score.evaluate_prediction(all_frame_results, all_num_gt) return scene_metrics_score