From dec21ea17f6f8619df5bb9c6dc3c18bd2eeac2a2 Mon Sep 17 00:00:00 2001
From: ktro2828
Date: Tue, 15 Nov 2022 14:05:26 +0900
Subject: [PATCH] feat(PerceptionVisualizer): add support for prediction visualization

Signed-off-by: ktro2828
---
 .../perception_visualization_config.py      |  8 ++---
 .../visualization/perception_visualizer.py  | 31 ++++++++++++++++---
 perception_eval/test/perception_lsim.py     | 11 ++-----
 3 files changed, 33 insertions(+), 17 deletions(-)

diff --git a/perception_eval/perception_eval/visualization/perception_visualization_config.py b/perception_eval/perception_eval/visualization/perception_visualization_config.py
index 52cf420b..c95cd3f8 100644
--- a/perception_eval/perception_eval/visualization/perception_visualization_config.py
+++ b/perception_eval/perception_eval/visualization/perception_visualization_config.py
@@ -87,12 +87,12 @@ def __init__(
         self.confidence_threshold_list: Optional[List[float]] = confidence_threshold_list
         self.target_uuids: Optional[List[str]] = target_uuids
 
-        if max_x_position_list is None:
-            self.xlim: float = max(max_distance_list)
-            self.ylim: float = max(max_distance_list)
-        elif max_distance_list is None:
+        if max_x_position_list is not None:
             self.xlim: float = max(max_x_position_list)
             self.ylim: float = max(max_y_position_list)
+        elif max_distance_list is not None:
+            self.xlim: float = max(max_distance_list)
+            self.ylim: float = max(max_distance_list)
         else:
             self.xlim: float = 100.0
             self.ylim: float = 100.0
diff --git a/perception_eval/perception_eval/visualization/perception_visualizer.py b/perception_eval/perception_eval/visualization/perception_visualizer.py
index 0a9cee28..fc49007b 100644
--- a/perception_eval/perception_eval/visualization/perception_visualizer.py
+++ b/perception_eval/perception_eval/visualization/perception_visualizer.py
@@ -446,13 +446,20 @@ def plot_objects(
             # tracked path
             if self.config.evaluation_task == EvaluationTask.TRACKING:
                 axes, tracking_artists = self._plot_tracked_path(
-                    object_, is_ground_truth, axes=axes
+                    object_,
+                    is_ground_truth,
+                    axes=axes,
                 )
                 artists += tracking_artists
 
             # predicted path
             if self.config.evaluation_task == EvaluationTask.PREDICTION:
-                pass
+                axes, prediction_artists = self._plot_predicted_path(
+                    object_,
+                    is_ground_truth,
+                    axes=axes,
+                )
+                artists += prediction_artists
 
             box_center_x.append(box_center[0])
             box_center_y.append(box_center[1])
@@ -501,7 +508,7 @@ def _plot_tracked_path(
 
     def _plot_predicted_path(
         self,
-        dynamic_objects: List[DynamicObject],
+        dynamic_object: DynamicObject,
         is_ground_truth: bool,
         axes: Optional[Axes] = None,
     ) -> Axes:
@@ -509,14 +516,28 @@ def _plot_predicted_path(
         Plot predicted paths for one object.
 
         Args:
-            dynamic_objects (List[DynamicObject]): The list of object being visualized.
+            dynamic_object (DynamicObject): The object being visualized.
             is_ground_truth (bool): Whether the object is ground truth.
             axes (Axes): The Axes instance.
 
         Returns:
             axes (Axes): The Axes instance.
+            artists (List[plt.Artist]): The list of Artist instances.
""" - pass + if axes is None: + axes: Axes = plt.subplot() + + artists: List[plt.Artist] = [] + + for i, paths in enumerate(dynamic_object.predicted_paths): + path_arr: np.ndarray = np.array([p.position for p in paths]) + color_: np.ndarray = ( + self.__cmap.get_simple("red") if is_ground_truth else self.__cmap[i] / 255.0 + ) + plot_ = axes.plot(path_arr[:, 0], path_arr[:, 1], "o--", color=color_, markersize=1) + artists.append(plot_) + + return axes, artists def _save_animation(self, file_name: Optional[str] = None): """[summary] diff --git a/perception_eval/test/perception_lsim.py b/perception_eval/test/perception_lsim.py index 9d260df2..6a87a703 100644 --- a/perception_eval/test/perception_lsim.py +++ b/perception_eval/test/perception_lsim.py @@ -361,14 +361,9 @@ def visualize(self, frame_result: PerceptionFrameResult): f"{format_class_for_log(prediction_final_metric_score.prediction_scores[0], 100)}" ) - # # Visualize all frame results - # logging.info("Start visualizing prediction results") - # prediction_lsim.evaluator.visualize_all() - - # # Prediction performance report - # prediction_analyzer = PerceptionPerformanceAnalyzer(prediction_lsim.evaluator.evaluator_config) - # prediction_analyzer.add(prediction_lsim.evaluator.frame_results) - # score_df, error_df = prediction_analyzer.analyze() + # Visualize all frame results + logging.info("Start visualizing prediction results") + prediction_lsim.evaluator.visualize_all() # Clean up tmpdir if args.use_tmpdir: