From 56b15a2db9f19fb222ffc1ce22f09b45d9d59e75 Mon Sep 17 00:00:00 2001 From: Kotaro Uetake <60615504+ktro2828@users.noreply.github.com> Date: Tue, 19 Dec 2023 19:01:39 +0900 Subject: [PATCH 1/9] refactor: create object and dataset direction in common (#110) * refactor: create object and dataset direction in common Signed-off-by: ktro2828 * docs: update document links Signed-off-by: ktro2828 --------- Signed-off-by: ktro2828 --- .vscode/settings.json | 2 +- docs/en/common.md | 8 +-- docs/ja/common.md | 8 +-- .../perception_eval/common/__init__.py | 39 ----------- .../common/dataset/__init__.py | 19 ++++++ .../common/dataset/ground_truth.py | 47 ++++++++++++++ .../common/{dataset.py => dataset/load.py} | 48 +------------- .../{dataset_utils.py => dataset/utils.py} | 12 ++-- .../perception_eval/common/object/__init__.py | 65 +++++++++++++++++++ .../common/{ => object}/object2d.py | 0 .../common/{object.py => object/object3d.py} | 0 .../evaluation/matching/object_matching.py | 8 +-- .../evaluation/matching/objects_filter.py | 14 ++-- .../evaluation/result/object_result.py | 12 ++-- .../result/perception_frame_result.py | 2 +- .../result/perception_pass_fail_result.py | 2 +- .../manager/perception_evaluation_manager.py | 2 +- .../tool/perception_analyzer2d.py | 2 +- .../tool/perception_analyzer_base.py | 10 +-- perception_eval/perception_eval/util/debug.py | 2 +- .../visualization/perception_visualizer2d.py | 2 +- perception_eval/test/common/test_object.py | 4 +- .../evaluation/metrics/detection/test_ap.py | 2 +- .../evaluation/metrics/detection/test_map.py | 2 +- .../evaluation/metrics/tracking/test_clear.py | 2 +- .../tracking/test_tracking_metrics_score.py | 2 +- .../evaluation/result/test_object_result.py | 2 +- .../test/perception_fp_validation_lsim.py | 2 +- perception_eval/test/perception_lsim2d.py | 2 +- perception_eval/test/util/dummy_object.py | 2 +- 30 files changed, 187 insertions(+), 137 deletions(-) create mode 100644 perception_eval/perception_eval/common/dataset/__init__.py create mode 100644 perception_eval/perception_eval/common/dataset/ground_truth.py rename perception_eval/perception_eval/common/{dataset.py => dataset/load.py} (81%) rename perception_eval/perception_eval/common/{dataset_utils.py => dataset/utils.py} (98%) create mode 100644 perception_eval/perception_eval/common/object/__init__.py rename perception_eval/perception_eval/common/{ => object}/object2d.py (100%) rename perception_eval/perception_eval/common/{object.py => object/object3d.py} (100%) diff --git a/.vscode/settings.json b/.vscode/settings.json index d8f62c32..0250ecb9 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -12,7 +12,7 @@ "editor.formatOnPaste": true, "editor.formatOnSave": true, "editor.codeActionsOnSave": { - "source.organizeImports": true + "source.organizeImports": "explicit" }, "editor.defaultFormatter": "ms-python.black-formatter" }, diff --git a/docs/en/common.md b/docs/en/common.md index 5d4be59a..5bdaf6ae 100644 --- a/docs/en/common.md +++ b/docs/en/common.md @@ -1,6 +1,6 @@ # Common items in Perception and Sensing -## [` DynamicObject(...)`](../../perception_eval/perception_eval/common/object.py) +## [` DynamicObject(...)`](../../perception_eval/perception_eval/common/object/object3d.py) - Evaluation task: `DETECTION`, `TRACKING`, `PREDICTION`, `SENSING` @@ -27,7 +27,7 @@ | `predicted_confidence` | `float` | List of predicted confidence. | | `visibility` | `Optional[Visibility]` | Visibility status. 
| -## [` DynamicObject2D(...)`](../../perception_eval/perception_eval/common/object2d.py) +## [` DynamicObject2D(...)`](../../perception_eval/perception_eval/common/object/object2d.py) - Evaluation task: `DETECTION2D`, `TRACING2D`, `CLASSIFICATION2D` @@ -43,7 +43,7 @@ ## Ground truth -### [` FrameGroundTruth(...)`](../../perception_eval/perception_eval/common/dataset.py) +### [` FrameGroundTruth(...)`](../../perception_eval/perception_eval/common/dataset/ground_truth.py) | Argument | type | Description | | :----------- | :-----------------------: | :------------------------------------------------------------------------------------------------------ | @@ -53,7 +53,7 @@ | `ego2map` | `Optional[numpy.ndarray]` | 4x4 matrix to transform objects with respect to base_link coordinate system map one. (Defaults to None) | | `raw_data` | `Optional[numpy.ndarray]` | Array of pointcloud/image. (Defaults to None) | -### [` load_all_datasets(...) -> List[FrameGroundTruth]`](../../perception_eval/perception_eval/common/dataset.py) +### [` load_all_datasets(...) -> List[FrameGroundTruth]`](../../perception_eval/perception_eval/common/dataset/load.py) | Argument | type | Description | | :---------------- | :---------------------------------: | :------------------------------------------------ | diff --git a/docs/ja/common.md b/docs/ja/common.md index 6ef7e69a..d0d33a8d 100644 --- a/docs/ja/common.md +++ b/docs/ja/common.md @@ -1,6 +1,6 @@ # Common items in Perception and Sensing -## [` DynamicObject(...)`](../../perception_eval/perception_eval/common/object.py) +## [` DynamicObject(...)`](../../perception_eval/perception_eval/common/object/object3d.py) 3 次元オブジェクトのクラス. @@ -29,7 +29,7 @@ | `predicted_confidence` | `Optional[float]` | 予測状態の信頼度. (Default: None) | | `visibility` | `Optional[Visibility]` | 視認性のステータス. (Default: None) | -## [` DynamicObject2D(...)`](../../perception_eval/perception_eval/common/object2d.py) +## [` DynamicObject2D(...)`](../../perception_eval/perception_eval/common/object/object2d.py) 2 次元オブジェクトのクラス. @@ -47,7 +47,7 @@ ## Ground truth -### [` FrameGroundTruth(...)`](../../perception_eval/perception_eval/common/dataset.py) +### [` FrameGroundTruth(...)`](../../perception_eval/perception_eval/common/dataset/ground_truth.py) フレームごとの GT オブジェクトの集合のクラス. @@ -59,7 +59,7 @@ | `ego2map` | `Optional[numpy.ndarray]` | オブジェクトの座標系を base_link から map に変換する 4x4 行列. (Default: None) | | `raw_data` | `Optional[numpy.ndarray]` | 点群または画像. (Default: None) | -### [` load_all_datasets(...) -> List[FrameGroundTruth]`](../../perception_eval/perception_eval/common/dataset.py) +### [` load_all_datasets(...) -> List[FrameGroundTruth]`](../../perception_eval/perception_eval/common/dataset/load.py) データセットをロードする関数. diff --git a/perception_eval/perception_eval/common/__init__.py b/perception_eval/perception_eval/common/__init__.py index 6dc4be9c..9be33190 100644 --- a/perception_eval/perception_eval/common/__init__.py +++ b/perception_eval/perception_eval/common/__init__.py @@ -11,42 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- -from typing import Union - -import numpy as np -from perception_eval.common.object2d import DynamicObject2D -from perception_eval.common.object import DynamicObject -from perception_eval.common.point import distance_points -from perception_eval.common.point import distance_points_bev - -# Type aliases -ObjectType = Union[DynamicObject, DynamicObject2D] - - -def distance_objects(object_1: ObjectType, object_2: ObjectType) -> float: - """[summary] - Calculate the 3D/2D center distance between two objects. - Args: - object_1 (ObjectType): An object - object_2 (ObjectType): An object - Returns: float: The center distance between object_1 and object_2. - """ - if type(object_1) != type(object_2): - raise TypeError(f"objects' type must be same, but got {type(object_1) and {type(object_2)}}") - - if isinstance(object_1, DynamicObject): - return distance_points(object_1.state.position, object_2.state.position) - return np.linalg.norm(np.array(object_1.roi.center) - np.array(object_2.roi.center)) - - -def distance_objects_bev(object_1: DynamicObject, object_2: DynamicObject) -> float: - """[summary] - Calculate the BEV 2d center distance between two objects. - Args: - object_1 (DynamicObject): An object - object_2 (DynamicObject): An object - Returns: float: The 2d center distance from object_1 to object_2. - """ - assert isinstance(object_1, DynamicObject) and isinstance(object_2, DynamicObject) - return distance_points_bev(object_1.state.position, object_2.state.position) diff --git a/perception_eval/perception_eval/common/dataset/__init__.py b/perception_eval/perception_eval/common/dataset/__init__.py new file mode 100644 index 00000000..7f739a3c --- /dev/null +++ b/perception_eval/perception_eval/common/dataset/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2023 TIER IV, Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .ground_truth import FrameGroundTruth +from .load import get_now_frame +from .load import load_all_datasets + +__all__ = ("FrameGroundTruth", "get_now_frame", "load_all_datasets") diff --git a/perception_eval/perception_eval/common/dataset/ground_truth.py b/perception_eval/perception_eval/common/dataset/ground_truth.py new file mode 100644 index 00000000..9fd0e857 --- /dev/null +++ b/perception_eval/perception_eval/common/dataset/ground_truth.py @@ -0,0 +1,47 @@ +from typing import Dict +from typing import List +from typing import Optional + +import numpy as np + +from ..object import ObjectType + + +class FrameGroundTruth: + """ + Ground truth data per frame + + Attributes: + unix_time (float): The unix time for the frame [us]. + frame_name (str): The file name for the frame. + objects (List[DynamicObject]): Objects data. + pointcloud (Optional[numpy.ndarray], optional): + Pointcloud data. Defaults to None, but if you want to visualize dataset, + you should load pointcloud data. + transform_matrix (Optional[np.ndarray]): The numpy array to transform position. + objects (List[ObjectType]): Objects data. + ego2map (Optional[np.ndarray]): The numpy array to transform position. 
+ raw_data (Optional[Dict[str, numpy.ndarray]]): Raw data for each sensor modality. + + Args: + unix_time (int): The unix time for the frame [us] + frame_name (str): The file name for the frame + objects (List[DynamicObject]): Objects data. + ego2map (Optional[np.ndarray]): The array of 4x4 matrix. + Transform position with respect to vehicle coord system to map one. + raw_data (Optional[Dict[str, numpy.ndarray]]): Raw data for each sensor modality. + """ + + def __init__( + self, + unix_time: int, + frame_name: str, + objects: List[ObjectType], + ego2map: Optional[np.ndarray] = None, + raw_data: Optional[Dict[str, np.ndarray]] = None, + ) -> None: + self.unix_time: int = unix_time + self.frame_name: str = frame_name + self.objects: List[ObjectType] = objects + self.ego2map: Optional[np.ndarray] = ego2map + self.raw_data: Optional[Dict[str, np.ndarray]] = raw_data diff --git a/perception_eval/perception_eval/common/dataset.py b/perception_eval/perception_eval/common/dataset/load.py similarity index 81% rename from perception_eval/perception_eval/common/dataset.py rename to perception_eval/perception_eval/common/dataset/load.py index d4e8d6d7..16602822 100644 --- a/perception_eval/perception_eval/common/dataset.py +++ b/perception_eval/perception_eval/common/dataset/load.py @@ -14,64 +14,22 @@ import logging from typing import Any -from typing import Dict from typing import List from typing import Optional from typing import Sequence from typing import Union from nuimages import NuImages -import numpy as np from nuscenes.nuscenes import NuScenes from nuscenes.prediction.helper import PredictHelper -from perception_eval.common import ObjectType -from perception_eval.common.dataset_utils import _sample_to_frame -from perception_eval.common.dataset_utils import _sample_to_frame_2d from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import LabelConverter -from perception_eval.common.object import DynamicObject from perception_eval.common.schema import FrameID from tqdm import tqdm - -class FrameGroundTruth: - """ - Ground truth data per frame - - Attributes: - unix_time (float): The unix time for the frame [us]. - frame_name (str): The file name for the frame. - objects (List[DynamicObject]): Objects data. - pointcloud (Optional[numpy.ndarray], optional): - Pointcloud data. Defaults to None, but if you want to visualize dataset, - you should load pointcloud data. - transform_matrix (Optional[np.ndarray]): The numpy array to transform position. - objects (List[ObjectType]): Objects data. - ego2map (Optional[np.ndarray]): The numpy array to transform position. - raw_data (Optional[Dict[str, numpy.ndarray]]): Raw data for each sensor modality. - - Args: - unix_time (int): The unix time for the frame [us] - frame_name (str): The file name for the frame - objects (List[DynamicObject]): Objects data. - ego2map (Optional[np.ndarray]): The array of 4x4 matrix. - Transform position with respect to vehicle coord system to map one. - raw_data (Optional[Dict[str, numpy.ndarray]]): Raw data for each sensor modality. 
- """ - - def __init__( - self, - unix_time: int, - frame_name: str, - objects: List[DynamicObject], - ego2map: Optional[np.ndarray] = None, - raw_data: Optional[Dict[str, np.ndarray]] = None, - ) -> None: - self.unix_time: int = unix_time - self.frame_name: str = frame_name - self.objects: List[ObjectType] = objects - self.ego2map: Optional[np.ndarray] = ego2map - self.raw_data: Optional[Dict[str, np.ndarray]] = raw_data +from .ground_truth import FrameGroundTruth +from .utils import _sample_to_frame +from .utils import _sample_to_frame_2d def load_all_datasets( diff --git a/perception_eval/perception_eval/common/dataset_utils.py b/perception_eval/perception_eval/common/dataset/utils.py similarity index 98% rename from perception_eval/perception_eval/common/dataset_utils.py rename to perception_eval/perception_eval/common/dataset/utils.py index 75101852..d5b25aec 100644 --- a/perception_eval/perception_eval/common/dataset_utils.py +++ b/perception_eval/perception_eval/common/dataset/utils.py @@ -32,8 +32,8 @@ from perception_eval.common.label import LabelConverter from perception_eval.common.label import LabelType from perception_eval.common.label import TrafficLightLabel -from perception_eval.common.object2d import DynamicObject2D from perception_eval.common.object import DynamicObject +from perception_eval.common.object import DynamicObject2D from perception_eval.common.schema import FrameID from perception_eval.common.schema import Visibility from perception_eval.common.shape import Shape @@ -41,7 +41,7 @@ from PIL import Image from pyquaternion.quaternion import Quaternion -from . import dataset +from .load import FrameGroundTruth ################################# # Dataset 3D # @@ -57,7 +57,7 @@ def _sample_to_frame( frame_id: FrameID, frame_name: str, load_raw_data: bool, -) -> dataset.FrameGroundTruth: +) -> FrameGroundTruth: """Load FrameGroundTruth instance from sample record. Args: @@ -132,7 +132,7 @@ def _sample_to_frame( ) objects_.append(object_) - frame = dataset.FrameGroundTruth( + frame = FrameGroundTruth( unix_time=unix_time_, frame_name=frame_name, objects=objects_, @@ -433,7 +433,7 @@ def _sample_to_frame_2d( frame_ids: List[FrameID], frame_name: str, load_raw_data: bool, -) -> dataset.FrameGroundTruth: +) -> FrameGroundTruth: """Returns FrameGroundTruth constructed with DynamicObject2D. Args: @@ -513,7 +513,7 @@ def _sample_to_frame_2d( ) objects_.append(object_) - frame = dataset.FrameGroundTruth( + frame = FrameGroundTruth( unix_time=unix_time, frame_name=frame_name, objects=objects_, diff --git a/perception_eval/perception_eval/common/object/__init__.py b/perception_eval/perception_eval/common/object/__init__.py new file mode 100644 index 00000000..98947efa --- /dev/null +++ b/perception_eval/perception_eval/common/object/__init__.py @@ -0,0 +1,65 @@ +# Copyright 2023 TIER IV, Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Union + +import numpy as np + +from .object2d import DynamicObject2D +from .object2d import Roi +from .object3d import DynamicObject +from ..point import distance_points +from ..point import distance_points_bev + +ObjectType = Union[DynamicObject, DynamicObject2D] + + +__all__ = ( + "DynamicObject2D", + "Roi", + "DynamicObject", + "distance_objects", + "distance_objects_bev", + "ObjectType", +) + + +def distance_objects(object_1: ObjectType, object_2: ObjectType) -> float: + """ + Calculate the 3D/2D center distance between two objects. + + Args: + object_1 (ObjectType): An object + object_2 (ObjectType): An object + Returns: float: The center distance between object_1 and object_2. + """ + if type(object_1) is not type(object_2): + raise TypeError(f"objects' type must be same, but got {type(object_1) and {type(object_2)}}") + + if isinstance(object_1, DynamicObject): + return distance_points(object_1.state.position, object_2.state.position) + return np.linalg.norm(np.array(object_1.roi.center) - np.array(object_2.roi.center)) + + +def distance_objects_bev(object_1: DynamicObject, object_2: DynamicObject) -> float: + """ + Calculate the BEV 2d center distance between two objects. + + Args: + object_1 (DynamicObject): An object + object_2 (DynamicObject): An object + Returns: float: The 2d center distance from object_1 to object_2. + """ + assert isinstance(object_1, DynamicObject) and isinstance(object_2, DynamicObject) + return distance_points_bev(object_1.state.position, object_2.state.position) diff --git a/perception_eval/perception_eval/common/object2d.py b/perception_eval/perception_eval/common/object/object2d.py similarity index 100% rename from perception_eval/perception_eval/common/object2d.py rename to perception_eval/perception_eval/common/object/object2d.py diff --git a/perception_eval/perception_eval/common/object.py b/perception_eval/perception_eval/common/object/object3d.py similarity index 100% rename from perception_eval/perception_eval/common/object.py rename to perception_eval/perception_eval/common/object/object3d.py diff --git a/perception_eval/perception_eval/evaluation/matching/object_matching.py b/perception_eval/perception_eval/evaluation/matching/object_matching.py index 7fd5ee1f..0a57ceb5 100644 --- a/perception_eval/perception_eval/evaluation/matching/object_matching.py +++ b/perception_eval/perception_eval/evaluation/matching/object_matching.py @@ -21,10 +21,10 @@ from typing import Optional from typing import Tuple -from perception_eval.common import distance_objects -from perception_eval.common import distance_points_bev -from perception_eval.common import ObjectType +from perception_eval.common.object import distance_objects from perception_eval.common.object import DynamicObject +from perception_eval.common.object import ObjectType +from perception_eval.common.point import distance_points_bev from perception_eval.common.point import get_point_left_right from perception_eval.common.point import polygon_to_list from shapely.geometry import Polygon @@ -73,7 +73,7 @@ def __init__( ground_truth_object: Optional[ObjectType], ) -> None: if ground_truth_object is not None: - assert type(estimated_object) == type(ground_truth_object) + assert type(estimated_object) is type(ground_truth_object) self.value: Optional[float] = self._calculate_matching_score( estimated_object=estimated_object, ground_truth_object=ground_truth_object, diff --git a/perception_eval/perception_eval/evaluation/matching/objects_filter.py 
b/perception_eval/perception_eval/evaluation/matching/objects_filter.py index 14427628..594ba03c 100644 --- a/perception_eval/perception_eval/evaluation/matching/objects_filter.py +++ b/perception_eval/perception_eval/evaluation/matching/objects_filter.py @@ -20,11 +20,11 @@ import warnings import numpy as np -from perception_eval.common import ObjectType from perception_eval.common.label import CommonLabel from perception_eval.common.label import Label from perception_eval.common.label import LabelType from perception_eval.common.object import DynamicObject +from perception_eval.common.object import ObjectType from perception_eval.common.schema import FrameID from perception_eval.common.status import MatchingStatus from perception_eval.common.threshold import get_label_threshold @@ -260,12 +260,12 @@ def get_positive_objects( def get_negative_objects( - ground_truth_objects: List[DynamicObject], + ground_truth_objects: List[ObjectType], object_results: List[DynamicObjectWithPerceptionResult], target_labels: List[Label], matching_mode: Optional[MatchingMode] = None, matching_threshold_list: Optional[List[float]] = None, -) -> Tuple[List[DynamicObject], List[DynamicObject]]: +) -> Tuple[List[ObjectType], List[ObjectType]]: """Returns TN (True Negative) and FN (False Negative) objects as `tuple`. If a ground truth object is contained in object results, it is TP or FP. @@ -280,11 +280,11 @@ def get_negative_objects( each element corresponds to target label. Returns: - tn_objects (List[DynamicObject]): List of TN. - fn_objects (List[DynamicObject]): List of FN. + tn_objects (List[ObjectType]): List of TN. + fn_objects (List[ObjectType]): List of FN. """ - tn_objects: List[DynamicObject] = [] - fn_objects: List[DynamicObject] = [] + tn_objects: List[ObjectType] = [] + fn_objects: List[ObjectType] = [] non_candidates: List[ObjectType] = [] for object_result in object_results: diff --git a/perception_eval/perception_eval/evaluation/result/object_result.py b/perception_eval/perception_eval/evaluation/result/object_result.py index 4c86fb71..c8a4f4ff 100644 --- a/perception_eval/perception_eval/evaluation/result/object_result.py +++ b/perception_eval/perception_eval/evaluation/result/object_result.py @@ -20,13 +20,13 @@ from typing import Tuple import numpy as np -from perception_eval.common import distance_objects -from perception_eval.common import distance_objects_bev -from perception_eval.common import DynamicObject -from perception_eval.common import DynamicObject2D -from perception_eval.common import ObjectType from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import LabelType +from perception_eval.common.object import distance_objects +from perception_eval.common.object import distance_objects_bev +from perception_eval.common.object import DynamicObject +from perception_eval.common.object import DynamicObject2D +from perception_eval.common.object import ObjectType from perception_eval.common.status import MatchingStatus from perception_eval.common.threshold import get_label_threshold from perception_eval.evaluation.matching import CenterDistanceMatching @@ -64,7 +64,7 @@ def __init__( ground_truth_objects (Optional[ObjectType]): The list of Ground truth objects """ if ground_truth_object is not None: - assert type(estimated_object) == type( + assert type(estimated_object) is type( ground_truth_object ), f"Input objects type must be same, but got {type(estimated_object)} and {type(ground_truth_object)}" diff --git 
a/perception_eval/perception_eval/evaluation/result/perception_frame_result.py b/perception_eval/perception_eval/evaluation/result/perception_frame_result.py index 5f46d445..d582bd4a 100644 --- a/perception_eval/perception_eval/evaluation/result/perception_frame_result.py +++ b/perception_eval/perception_eval/evaluation/result/perception_frame_result.py @@ -18,9 +18,9 @@ from typing import List from typing import Optional -from perception_eval.common import ObjectType from perception_eval.common.dataset import FrameGroundTruth from perception_eval.common.label import LabelType +from perception_eval.common.object import ObjectType from perception_eval.common.status import GroundTruthStatus from perception_eval.common.status import MatchingStatus from perception_eval.evaluation import DynamicObjectWithPerceptionResult diff --git a/perception_eval/perception_eval/evaluation/result/perception_pass_fail_result.py b/perception_eval/perception_eval/evaluation/result/perception_pass_fail_result.py index faf09e15..5e64049b 100644 --- a/perception_eval/perception_eval/evaluation/result/perception_pass_fail_result.py +++ b/perception_eval/perception_eval/evaluation/result/perception_pass_fail_result.py @@ -18,7 +18,7 @@ import warnings import numpy as np -from perception_eval.common import ObjectType +from perception_eval.common.object import ObjectType from perception_eval.evaluation import DynamicObjectWithPerceptionResult from perception_eval.evaluation.matching import MatchingMode from perception_eval.evaluation.matching.objects_filter import filter_objects diff --git a/perception_eval/perception_eval/manager/perception_evaluation_manager.py b/perception_eval/perception_eval/manager/perception_evaluation_manager.py index a10ab63c..2bb70668 100644 --- a/perception_eval/perception_eval/manager/perception_evaluation_manager.py +++ b/perception_eval/perception_eval/manager/perception_evaluation_manager.py @@ -16,9 +16,9 @@ from typing import List from typing import Tuple -from perception_eval.common import ObjectType from perception_eval.common.dataset import FrameGroundTruth from perception_eval.common.label import LabelType +from perception_eval.common.object import ObjectType from perception_eval.config import PerceptionEvaluationConfig from perception_eval.evaluation import PerceptionFrameResult from perception_eval.evaluation.matching.objects_filter import divide_objects diff --git a/perception_eval/perception_eval/tool/perception_analyzer2d.py b/perception_eval/perception_eval/tool/perception_analyzer2d.py index d5370c13..748b7244 100644 --- a/perception_eval/perception_eval/tool/perception_analyzer2d.py +++ b/perception_eval/perception_eval/tool/perception_analyzer2d.py @@ -25,7 +25,7 @@ import numpy as np import pandas as pd from perception_eval.common.evaluation_task import EvaluationTask -from perception_eval.common.object2d import DynamicObject2D +from perception_eval.common.object import DynamicObject2D from perception_eval.common.status import MatchingStatus from perception_eval.config import PerceptionEvaluationConfig from perception_eval.evaluation import DynamicObjectWithPerceptionResult diff --git a/perception_eval/perception_eval/tool/perception_analyzer_base.py b/perception_eval/perception_eval/tool/perception_analyzer_base.py index 09efed8a..35b8ae4d 100644 --- a/perception_eval/perception_eval/tool/perception_analyzer_base.py +++ b/perception_eval/perception_eval/tool/perception_analyzer_base.py @@ -37,7 +37,7 @@ import numpy as np import pandas as pd from 
perception_eval.common.label import LabelType -from perception_eval.common.object import DynamicObject +from perception_eval.common.object import ObjectType from perception_eval.common.status import MatchingStatus from perception_eval.config import PerceptionEvaluationConfig from perception_eval.evaluation import DynamicObjectWithPerceptionResult @@ -563,7 +563,7 @@ def clear(self) -> None: def format2df( self, - object_results: List[Union[DynamicObject, DynamicObjectWithPerceptionResult]], + object_results: List[Union[ObjectType, DynamicObjectWithPerceptionResult]], status: MatchingStatus, frame_num: int, start: int = 0, @@ -572,7 +572,7 @@ def format2df( """Format objects to pandas.DataFrame. Args: - object_results (List[Union[DynamicObject, DynamicObjectWithPerceptionResult]]): List of objects or object results. + object_results (List[Union[ObjectType, DynamicObjectWithPerceptionResult]]): List of objects or object results. status (MatchingStatus): Object's status. frame_num (int): Number of frame. start (int): Number of the first index. Defaults to 0. @@ -595,7 +595,7 @@ def format2df( @abstractmethod def format2dict( self, - object_result: Union[DynamicObject, DynamicObjectWithPerceptionResult], + object_result: Union[ObjectType, DynamicObjectWithPerceptionResult], status: MatchingStatus, frame_num: int, ego2map: Optional[np.ndarray] = None, @@ -603,7 +603,7 @@ def format2dict( """Format objects to dict. Args: - object_results (List[Union[DynamicObject, DynamicObjectWithPerceptionResult]]): List of objects or object results. + object_results (List[Union[ObjectType, DynamicObjectWithPerceptionResult]]): List of objects or object results. status (MatchingStatus): Object's status. frame_num (int): Number of frame. ego2map (Optional[np.ndarray]): Matrix to transform from ego coords to map coords. Defaults to None. 
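The hunks above relocate `DynamicObject`/`DynamicObject2D` into `perception_eval.common.object` and split the dataset code into `perception_eval.common.dataset`. As a quick orientation, the following is a minimal smoke test of the new import paths; it is a sketch only, assuming this patch is applied and `perception_eval` (with its dependencies) is importable, and the empty `FrameGroundTruth` is purely illustrative, not part of the patch.

```python
# Sketch: exercise the package layout introduced by this patch.
# Assumes the patched perception_eval is installed; not part of the patch itself.
from perception_eval.common.dataset import FrameGroundTruth
from perception_eval.common.object import DynamicObject, DynamicObject2D, ObjectType, distance_objects

# FrameGroundTruth only stores what it is given, so an empty frame is enough
# to confirm that perception_eval.common.dataset resolves after the move.
frame = FrameGroundTruth(unix_time=0, frame_name="0", objects=[])
assert frame.objects == [] and frame.ego2map is None and frame.raw_data is None

# ObjectType is the Union of the 3D and 2D object classes; distance_objects()
# raises TypeError when its two arguments are not instances of the same class.
```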
diff --git a/perception_eval/perception_eval/util/debug.py b/perception_eval/perception_eval/util/debug.py index cb7e4208..34fa3c19 100644 --- a/perception_eval/perception_eval/util/debug.py +++ b/perception_eval/perception_eval/util/debug.py @@ -24,8 +24,8 @@ from perception_eval.common.label import Label from perception_eval.common.label import LabelType from perception_eval.common.label import TrafficLightLabel -from perception_eval.common.object2d import DynamicObject2D from perception_eval.common.object import DynamicObject +from perception_eval.common.object import DynamicObject2D from perception_eval.common.shape import Shape from pyquaternion.quaternion import Quaternion diff --git a/perception_eval/perception_eval/visualization/perception_visualizer2d.py b/perception_eval/perception_eval/visualization/perception_visualizer2d.py index 7785e7dd..4348e273 100644 --- a/perception_eval/perception_eval/visualization/perception_visualizer2d.py +++ b/perception_eval/perception_eval/visualization/perception_visualizer2d.py @@ -30,7 +30,7 @@ from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel from perception_eval.common.label import TrafficLightLabel -from perception_eval.common.object2d import DynamicObject2D +from perception_eval.common.object import DynamicObject2D from perception_eval.config import PerceptionEvaluationConfig from perception_eval.evaluation import DynamicObjectWithPerceptionResult from perception_eval.evaluation import PerceptionFrameResult diff --git a/perception_eval/test/common/test_object.py b/perception_eval/test/common/test_object.py index e3108a63..09d24473 100644 --- a/perception_eval/test/common/test_object.py +++ b/perception_eval/test/common/test_object.py @@ -19,8 +19,8 @@ import unittest import numpy as np -from perception_eval.common import distance_objects -from perception_eval.common import distance_objects_bev +from perception_eval.common.object import distance_objects +from perception_eval.common.object import distance_objects_bev from perception_eval.common.object import DynamicObject from perception_eval.util.debug import get_objects_with_difference from shapely.geometry import Polygon diff --git a/perception_eval/test/evaluation/metrics/detection/test_ap.py b/perception_eval/test/evaluation/metrics/detection/test_ap.py index 5f81263e..1001c763 100644 --- a/perception_eval/test/evaluation/metrics/detection/test_ap.py +++ b/perception_eval/test/evaluation/metrics/detection/test_ap.py @@ -24,9 +24,9 @@ import unittest import numpy as np -from perception_eval.common import DynamicObject from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel +from perception_eval.common.object import DynamicObject from perception_eval.evaluation.matching import MatchingMode from perception_eval.evaluation.matching.objects_filter import filter_objects from perception_eval.evaluation.metrics.detection.ap import Ap diff --git a/perception_eval/test/evaluation/metrics/detection/test_map.py b/perception_eval/test/evaluation/metrics/detection/test_map.py index bfb45461..d9158434 100644 --- a/perception_eval/test/evaluation/metrics/detection/test_map.py +++ b/perception_eval/test/evaluation/metrics/detection/test_map.py @@ -19,9 +19,9 @@ from typing import Tuple import unittest -from perception_eval.common import DynamicObject from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel 
+from perception_eval.common.object import DynamicObject from perception_eval.evaluation.matching.object_matching import MatchingMode from perception_eval.evaluation.matching.objects_filter import divide_objects from perception_eval.evaluation.matching.objects_filter import divide_objects_to_num diff --git a/perception_eval/test/evaluation/metrics/tracking/test_clear.py b/perception_eval/test/evaluation/metrics/tracking/test_clear.py index 8aeba83c..94bbfb6e 100644 --- a/perception_eval/test/evaluation/metrics/tracking/test_clear.py +++ b/perception_eval/test/evaluation/metrics/tracking/test_clear.py @@ -22,9 +22,9 @@ from typing import Tuple import unittest -from perception_eval.common import DynamicObject from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel +from perception_eval.common.object import DynamicObject from perception_eval.evaluation.matching.object_matching import MatchingMode from perception_eval.evaluation.matching.objects_filter import filter_objects from perception_eval.evaluation.metrics.tracking.clear import CLEAR diff --git a/perception_eval/test/evaluation/metrics/tracking/test_tracking_metrics_score.py b/perception_eval/test/evaluation/metrics/tracking/test_tracking_metrics_score.py index ea719e53..b9345a10 100644 --- a/perception_eval/test/evaluation/metrics/tracking/test_tracking_metrics_score.py +++ b/perception_eval/test/evaluation/metrics/tracking/test_tracking_metrics_score.py @@ -21,9 +21,9 @@ from typing import Tuple import unittest -from perception_eval.common import DynamicObject from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel +from perception_eval.common.object import DynamicObject from perception_eval.evaluation.matching.object_matching import MatchingMode from perception_eval.evaluation.matching.objects_filter import divide_objects from perception_eval.evaluation.matching.objects_filter import divide_objects_to_num diff --git a/perception_eval/test/evaluation/result/test_object_result.py b/perception_eval/test/evaluation/result/test_object_result.py index ca6f5d1b..34b02676 100644 --- a/perception_eval/test/evaluation/result/test_object_result.py +++ b/perception_eval/test/evaluation/result/test_object_result.py @@ -19,8 +19,8 @@ from typing import Tuple import unittest -from perception_eval.common import DynamicObject from perception_eval.common.evaluation_task import EvaluationTask +from perception_eval.common.object import DynamicObject from perception_eval.evaluation.result.object_result import DynamicObjectWithPerceptionResult from perception_eval.evaluation.result.object_result import get_object_results from perception_eval.util.debug import get_objects_with_difference diff --git a/perception_eval/test/perception_fp_validation_lsim.py b/perception_eval/test/perception_fp_validation_lsim.py index 4865151b..dd5cadcd 100644 --- a/perception_eval/test/perception_fp_validation_lsim.py +++ b/perception_eval/test/perception_fp_validation_lsim.py @@ -17,8 +17,8 @@ import tempfile from typing import List -from perception_eval.common import ObjectType from perception_eval.common.label import AutowareLabel +from perception_eval.common.object import ObjectType from perception_eval.common.status import get_scene_rates from perception_eval.config import PerceptionEvaluationConfig from perception_eval.evaluation import get_object_status diff --git a/perception_eval/test/perception_lsim2d.py 
b/perception_eval/test/perception_lsim2d.py index 1bfe0a92..6975cf41 100644 --- a/perception_eval/test/perception_lsim2d.py +++ b/perception_eval/test/perception_lsim2d.py @@ -18,7 +18,7 @@ from typing import List from typing import Union -from perception_eval.common.object2d import DynamicObject2D +from perception_eval.common.object import DynamicObject2D from perception_eval.config import PerceptionEvaluationConfig from perception_eval.evaluation import PerceptionFrameResult from perception_eval.evaluation.metrics import MetricsScore diff --git a/perception_eval/test/util/dummy_object.py b/perception_eval/test/util/dummy_object.py index 271947d6..251adc7f 100644 --- a/perception_eval/test/util/dummy_object.py +++ b/perception_eval/test/util/dummy_object.py @@ -18,8 +18,8 @@ from perception_eval.common.label import AutowareLabel from perception_eval.common.label import Label -from perception_eval.common.object2d import DynamicObject2D from perception_eval.common.object import DynamicObject +from perception_eval.common.object import DynamicObject2D from perception_eval.common.schema import FrameID from perception_eval.common.shape import Shape from perception_eval.common.shape import ShapeType From 859e6e90f0ac0eb21f32b1a758ac6c66a4c92885 Mon Sep 17 00:00:00 2001 From: Kotaro Uetake <60615504+ktro2828@users.noreply.github.com> Date: Tue, 19 Dec 2023 22:03:49 +0900 Subject: [PATCH 2/9] refactor: move module directory (#111) * refactor: create new directory `object/dataset/result` Signed-off-by: ktro2828 * refactor: move metrics and matching modules out of evaluation Signed-off-by: ktro2828 * docs: fix dead links Signed-off-by: ktro2828 --------- Signed-off-by: ktro2828 --- docs/en/common.md | 8 +-- docs/en/perception/data_structure.md | 4 +- docs/en/perception/design.md | 6 +-- docs/en/perception/metrics.md | 16 +++--- docs/en/sensing/data_structure.md | 4 +- docs/ja/common.md | 8 +-- docs/ja/perception/data_structure.md | 4 +- docs/ja/perception/design.md | 6 +-- docs/ja/perception/metrics.md | 14 +++--- docs/ja/sensing/data_structure.md | 4 +- docs/ja/sensing/design.md | 6 +-- .../config/_evaluation_config_base.py | 7 ++- .../config/perception_evaluation_config.py | 12 ++--- .../{common => }/dataset/__init__.py | 0 .../{common => }/dataset/ground_truth.py | 16 +++--- .../{common => }/dataset/load.py | 8 ++- .../{common => }/dataset/utils.py | 13 +++-- .../perception_eval/evaluation/__init__.py | 25 ---------- .../manager/_evaluation_manager_base.py | 23 ++++++--- .../manager/perception_evaluation_manager.py | 41 ++++++++------- .../manager/sensing_evaluation_manager.py | 11 ++-- .../{evaluation => }/matching/__init__.py | 12 ++--- .../matching/object_matching.py | 11 ++-- .../matching/objects_filter.py | 21 +++++--- .../metrics}/__init__.py | 5 ++ .../metrics/classification/__init__.py | 0 .../metrics/classification/accuracy.py | 8 ++- .../classification_metrics_score.py | 10 ++-- .../metrics/config/__init__.py | 0 .../metrics/config/_metrics_config_base.py | 9 +++- .../config/classification_metrics_config.py | 7 ++- .../config/detection_metrics_config.py | 7 ++- .../config/prediction_metrics_config.py | 7 ++- .../metrics/config/tracking_metrics_config.py | 7 ++- .../metrics/detection/__init__.py | 0 .../{evaluation => }/metrics/detection/ap.py | 20 +++++--- .../{evaluation => }/metrics/detection/map.py | 17 ++++--- .../metrics/detection/tp_metrics.py | 7 ++- .../{evaluation => }/metrics/metrics.py | 11 ++-- .../metrics/metrics_score_config.py | 0 .../metrics/prediction/__init__.py | 0 
.../metrics/tracking/__init__.py | 0 .../metrics/tracking/_metrics_base.py | 13 +++-- .../metrics/tracking/clear.py | 16 ++++-- .../{evaluation => }/metrics/tracking/hota.py | 17 ++++--- .../tracking/tracking_metrics_score.py | 16 +++--- .../{common => }/object/__init__.py | 4 +- .../{common => }/object/object2d.py | 0 .../{common => }/object/object3d.py | 0 .../result}/__init__.py | 9 ++++ .../result/perception/__init__.py | 30 +++++++++++ .../perception}/perception_frame_config.py | 10 ++-- .../perception}/perception_frame_result.py | 50 ++++++++++--------- .../perception_pass_fail_result.py | 36 +++++++------ .../perception/perception_result.py} | 49 +++++++++--------- .../metrics => result/sensing}/__init__.py | 7 +-- .../sensing/sensing_frame_config.py | 0 .../sensing/sensing_frame_result.py | 7 +-- .../sensing/sensing_result.py | 2 +- .../tool/perception_analyzer2d.py | 4 +- .../tool/perception_analyzer3d.py | 4 +- .../tool/perception_analyzer_base.py | 12 ++--- perception_eval/perception_eval/tool/utils.py | 8 +-- perception_eval/perception_eval/util/debug.py | 4 +- .../perception_eval/visualization/eda_tool.py | 12 ++--- .../visualization/perception_visualizer2d.py | 6 +-- .../visualization/perception_visualizer3d.py | 6 +-- .../visualization/sensing_visualizer.py | 6 +-- perception_eval/test/eda.py | 6 +-- .../evaluation/metrics/prediction/__init__.py | 13 ----- .../test/evaluation/result/__init__.py | 13 ----- .../test/evaluation/sensing/__init__.py | 13 ----- .../result => test/matching}/__init__.py | 0 .../matching/object_matching}/__init__.py | 0 .../object_matching/test_iou_2d_matching.py | 6 +-- .../object_matching/test_iou_3d_matching.py | 8 +-- .../test_plane_distance_matching.py | 4 +- .../matching/test_objects_filter.py | 24 ++++----- .../test/{evaluation => metrics}/__init__.py | 0 .../classification}/__init__.py | 0 .../metrics/classification/test_accuracy.py | 6 +-- .../test_classification_metrics_score.py | 10 ++-- .../detection}/__init__.py | 0 .../metrics/detection/test_ap.py | 16 +++--- .../metrics/detection/test_map.py | 16 +++--- .../prediction}/__init__.py | 0 .../metrics/test_metrics_score_config.py | 10 ++-- .../tracking}/__init__.py | 0 .../metrics/tracking/test_clear.py | 12 ++--- .../tracking/test_tracking_metrics_score.py | 18 +++---- perception_eval/test/object/__init__.py | 0 .../test/{common => object}/test_object.py | 6 +-- .../test/perception_field_analysis.py | 2 - .../test/perception_field_points_analysis.py | 2 +- .../test/perception_fp_validation_lsim.py | 15 ++++-- perception_eval/test/perception_lsim.py | 19 ++++--- perception_eval/test/perception_lsim2d.py | 15 ++++-- .../result/test_object_result.py | 6 +-- .../sensing => result}/test_sensing_result.py | 2 +- perception_eval/test/sensing_lsim.py | 6 +-- perception_eval/test/util/dummy_object.py | 4 +- .../test/visualization/test_eda_tool.py | 8 +-- 102 files changed, 546 insertions(+), 437 deletions(-) rename perception_eval/perception_eval/{common => }/dataset/__init__.py (100%) rename perception_eval/perception_eval/{common => }/dataset/ground_truth.py (83%) rename perception_eval/perception_eval/{common => }/dataset/load.py (97%) rename perception_eval/perception_eval/{common => }/dataset/utils.py (98%) delete mode 100644 perception_eval/perception_eval/evaluation/__init__.py rename perception_eval/perception_eval/{evaluation => }/matching/__init__.py (60%) rename perception_eval/perception_eval/{evaluation => }/matching/object_matching.py (98%) rename 
perception_eval/perception_eval/{evaluation => }/matching/objects_filter.py (98%) rename perception_eval/{test/evaluation/metrics/detection => perception_eval/metrics}/__init__.py (80%) rename perception_eval/perception_eval/{evaluation => }/metrics/classification/__init__.py (100%) rename perception_eval/perception_eval/{evaluation => }/metrics/classification/accuracy.py (96%) rename perception_eval/perception_eval/{evaluation => }/metrics/classification/classification_metrics_score.py (95%) rename perception_eval/perception_eval/{evaluation => }/metrics/config/__init__.py (100%) rename perception_eval/perception_eval/{evaluation => }/metrics/config/_metrics_config_base.py (93%) rename perception_eval/perception_eval/{evaluation => }/metrics/config/classification_metrics_config.py (94%) rename perception_eval/perception_eval/{evaluation => }/metrics/config/detection_metrics_config.py (94%) rename perception_eval/perception_eval/{evaluation => }/metrics/config/prediction_metrics_config.py (94%) rename perception_eval/perception_eval/{evaluation => }/metrics/config/tracking_metrics_config.py (94%) rename perception_eval/perception_eval/{evaluation => }/metrics/detection/__init__.py (100%) rename perception_eval/perception_eval/{evaluation => }/metrics/detection/ap.py (96%) rename perception_eval/perception_eval/{evaluation => }/metrics/detection/map.py (92%) rename perception_eval/perception_eval/{evaluation => }/metrics/detection/tp_metrics.py (96%) rename perception_eval/perception_eval/{evaluation => }/metrics/metrics.py (97%) rename perception_eval/perception_eval/{evaluation => }/metrics/metrics_score_config.py (100%) rename perception_eval/perception_eval/{evaluation => }/metrics/prediction/__init__.py (100%) rename perception_eval/perception_eval/{evaluation => }/metrics/tracking/__init__.py (100%) rename perception_eval/perception_eval/{evaluation => }/metrics/tracking/_metrics_base.py (95%) rename perception_eval/perception_eval/{evaluation => }/metrics/tracking/clear.py (97%) rename perception_eval/perception_eval/{evaluation => }/metrics/tracking/hota.py (91%) rename perception_eval/perception_eval/{evaluation => }/metrics/tracking/tracking_metrics_score.py (93%) rename perception_eval/perception_eval/{common => }/object/__init__.py (94%) rename perception_eval/perception_eval/{common => }/object/object2d.py (100%) rename perception_eval/perception_eval/{common => }/object/object3d.py (100%) rename perception_eval/{test/evaluation/metrics/tracking => perception_eval/result}/__init__.py (66%) create mode 100644 perception_eval/perception_eval/result/perception/__init__.py rename perception_eval/perception_eval/{evaluation/result => result/perception}/perception_frame_config.py (96%) rename perception_eval/perception_eval/{evaluation/result => result/perception}/perception_frame_result.py (82%) rename perception_eval/perception_eval/{evaluation/result => result/perception}/perception_pass_fail_result.py (84%) rename perception_eval/perception_eval/{evaluation/result/object_result.py => result/perception/perception_result.py} (91%) rename perception_eval/perception_eval/{evaluation/metrics => result/sensing}/__init__.py (69%) rename perception_eval/perception_eval/{evaluation => result}/sensing/sensing_frame_config.py (100%) rename perception_eval/perception_eval/{evaluation => result}/sensing/sensing_frame_result.py (96%) rename perception_eval/perception_eval/{evaluation => result}/sensing/sensing_result.py (98%) delete mode 100644 
perception_eval/test/evaluation/metrics/prediction/__init__.py delete mode 100644 perception_eval/test/evaluation/result/__init__.py delete mode 100644 perception_eval/test/evaluation/sensing/__init__.py rename perception_eval/{perception_eval/evaluation/result => test/matching}/__init__.py (100%) rename perception_eval/{perception_eval/evaluation/sensing => test/matching/object_matching}/__init__.py (100%) mode change 100644 => 100755 rename perception_eval/test/{evaluation => }/matching/object_matching/test_iou_2d_matching.py (96%) rename perception_eval/test/{evaluation => }/matching/object_matching/test_iou_3d_matching.py (94%) rename perception_eval/test/{evaluation => }/matching/object_matching/test_plane_distance_matching.py (97%) rename perception_eval/test/{evaluation => }/matching/test_objects_filter.py (97%) rename perception_eval/test/{evaluation => metrics}/__init__.py (100%) mode change 100644 => 100755 rename perception_eval/test/{evaluation/matching => metrics/classification}/__init__.py (100%) rename perception_eval/test/{evaluation => }/metrics/classification/test_accuracy.py (94%) rename perception_eval/test/{evaluation => }/metrics/classification/test_classification_metrics_score.py (88%) rename perception_eval/test/{evaluation/matching/object_matching => metrics/detection}/__init__.py (100%) mode change 100755 => 100644 rename perception_eval/test/{evaluation => }/metrics/detection/test_ap.py (98%) rename perception_eval/test/{evaluation => }/metrics/detection/test_map.py (98%) rename perception_eval/test/{evaluation/metrics => metrics/prediction}/__init__.py (100%) mode change 100755 => 100644 rename perception_eval/test/{evaluation => }/metrics/test_metrics_score_config.py (82%) rename perception_eval/test/{evaluation/metrics/classification => metrics/tracking}/__init__.py (100%) rename perception_eval/test/{evaluation => }/metrics/tracking/test_clear.py (98%) rename perception_eval/test/{evaluation => }/metrics/tracking/test_tracking_metrics_score.py (96%) create mode 100644 perception_eval/test/object/__init__.py rename perception_eval/test/{common => object}/test_object.py (98%) rename perception_eval/test/{evaluation => }/result/test_object_result.py (94%) rename perception_eval/test/{evaluation/sensing => result}/test_sensing_result.py (92%) diff --git a/docs/en/common.md b/docs/en/common.md index 5bdaf6ae..38c9501a 100644 --- a/docs/en/common.md +++ b/docs/en/common.md @@ -1,6 +1,6 @@ # Common items in Perception and Sensing -## [` DynamicObject(...)`](../../perception_eval/perception_eval/common/object/object3d.py) +## [` DynamicObject(...)`](../../perception_eval/perception_eval/object/object3d.py) - Evaluation task: `DETECTION`, `TRACKING`, `PREDICTION`, `SENSING` @@ -27,7 +27,7 @@ | `predicted_confidence` | `float` | List of predicted confidence. | | `visibility` | `Optional[Visibility]` | Visibility status. 
| -## [` DynamicObject2D(...)`](../../perception_eval/perception_eval/common/object/object2d.py) +## [` DynamicObject2D(...)`](../../perception_eval/perception_eval/object/object2d.py) - Evaluation task: `DETECTION2D`, `TRACING2D`, `CLASSIFICATION2D` @@ -43,7 +43,7 @@ ## Ground truth -### [` FrameGroundTruth(...)`](../../perception_eval/perception_eval/common/dataset/ground_truth.py) +### [` FrameGroundTruth(...)`](../../perception_eval/perception_eval/dataset/ground_truth.py) | Argument | type | Description | | :----------- | :-----------------------: | :------------------------------------------------------------------------------------------------------ | @@ -53,7 +53,7 @@ | `ego2map` | `Optional[numpy.ndarray]` | 4x4 matrix to transform objects with respect to base_link coordinate system map one. (Defaults to None) | | `raw_data` | `Optional[numpy.ndarray]` | Array of pointcloud/image. (Defaults to None) | -### [` load_all_datasets(...) -> List[FrameGroundTruth]`](../../perception_eval/perception_eval/common/dataset/load.py) +### [` load_all_datasets(...) -> List[FrameGroundTruth]`](../../perception_eval/perception_eval/dataset/load.py) | Argument | type | Description | | :---------------- | :---------------------------------: | :------------------------------------------------ | diff --git a/docs/en/perception/data_structure.md b/docs/en/perception/data_structure.md index ea2c956e..0f8b552d 100644 --- a/docs/en/perception/data_structure.md +++ b/docs/en/perception/data_structure.md @@ -32,7 +32,7 @@ ### ` PerceptionFrameResult(...)` -For the details, see [perception_eval/evaluation/result/perception_frame_result.py](../../../perception_eval/perception_eval/evaluation/result/perception_frame_result.py) +For the details, see [perception_eval/result/perception/perception_frame_result.py](../../../perception_eval/perception_eval/result/perception/perception_frame_result.py) - Initialization @@ -118,7 +118,7 @@ For the details, see [perception_eval/evaluation/result/perception_frame_result. Call ` get_object_results(...)` function to generate a set of matching pairs `List[DynamicObjectWithPerceptionResult]` from a set of Estimated objects `List[ObjectType]`and a set of GT objects `List[ObjectType]`. -For the details,see [perception_eval/evaluation/result/object_result.py](../../../perception_eval/perception_eval/evaluation/result/object_result.py) +For the details,see [perception_eval/result/perception/perception_result.py](../../../perception_eval/perception_eval/result/perception/perception_result.py) ```python from perception_eval.evaluation.result.object_results import get_object_results diff --git a/docs/en/perception/design.md b/docs/en/perception/design.md index e49a4a47..1c89b0ba 100644 --- a/docs/en/perception/design.md +++ b/docs/en/perception/design.md @@ -203,7 +203,7 @@ - **4. 
Specify parameters besides parameters of `MetricsConfig`** - - About each MetricsConfig,see [perception_eval/evaluation/metrics/config](../../../perception_eval/perception_eval/evaluation/metrics/config/) + - About each MetricsConfig,see [perception_eval/metrics/config](../../../perception_eval/perception_eval/metrics/config/) ```python evaluation_config_dict = { @@ -236,7 +236,7 @@ - An interface to determine target objects dynamically - Specify in every frame, not in initialization of `PerceptionEvaluationManger` -- See [perception_eval/evaluation/result/perception_frame_config](../../../perception_eval/perception_eval/evaluation/result/perception_frame_config.py) +- See [perception_eval/result/perception/perception_frame_config](../../../perception_eval/perception_eval/result/perception/perception_frame_config.py) | Arguments | type | Mandatory | Description | | :-------------------------- | :--------------------------: | :-------------: | :-------------------------------------------------------------------------------------------------------------- | @@ -257,7 +257,7 @@ - A class to decide Pass / Fail. For Pass/Fail decision, determine TP/FP by **Plane distance**. - Specify in every frame, not in initializing `PerceptionEvaluationManager`. -- For the details, see [perception_eval/evaluation/result/perception_frame_config](../../../perception_eval/perception_eval/evaluation/result/perception_frame_config.py). +- For the details, see [perception_eval/result/perception/perception_frame_config](../../../perception_eval/perception_eval/result/perception/perception_frame_config.py). | Arguments | type | Mandatory | Description | | :------------------------ | :--------------------------: | :-------: | :--------------------------------------------------------------------------------------------------------------------- | diff --git a/docs/en/perception/metrics.md b/docs/en/perception/metrics.md index 8a8db4f2..d4cb1c2f 100644 --- a/docs/en/perception/metrics.md +++ b/docs/en/perception/metrics.md @@ -1,6 +1,6 @@ # Perception Evaluation Metrics -## [` MetricsScore(...)`](../../../perception_eval/perception_eval/evaluation/metrics/metrics.py) +## [` MetricsScore(...)`](../../../perception_eval/perception_eval/metrics/metrics.py) - A class to evaluate each of detection/tracking/prediction task @@ -10,10 +10,10 @@ - Initialize `detection/tracking/prediction_config` from input MetricsConfig - - [`detection_config (DetectionMetricsConfig)`](../../../perception_eval/perception_eval/evaluation/metrics/config/detection_metrics_config.py) - - [`tracking_config (TrackingMetricsConfig)`](../../../perception_eval/perception_eval/evaluation/metrics/config/tracking_metrics_config.py) - - [`prediction_config (PredictionMetricsConfig)`](../../../perception_eval/perception_eval/evaluation/metrics/config/prediction_metrics_config.py) - - [`classification_config (ClassificationMetricsConfig)`](../../../perception_eval/perception_eval/evaluation/metrics/config/classification_metrics_config.py) + - [`detection_config (DetectionMetricsConfig)`](../../../perception_eval/perception_eval/metrics/config/detection_metrics_config.py) + - [`tracking_config (TrackingMetricsConfig)`](../../../perception_eval/perception_eval/metrics/config/tracking_metrics_config.py) + - [`prediction_config (PredictionMetricsConfig)`](../../../perception_eval/perception_eval/metrics/config/prediction_metrics_config.py) + - [`classification_config 
(ClassificationMetricsConfig)`](../../../perception_eval/perception_eval/metrics/config/classification_metrics_config.py) - Calculate each metrics based on each config @@ -77,7 +77,7 @@ ## Detection -### [` Map(...)`](../../../perception_eval/perception_eval/evaluation/metrics/detection/map.py) +### [` Map(...)`](../../../perception_eval/perception_eval/metrics/detection/map.py) - A class to calculate mAP (mean Average Prevision) @@ -252,7 +252,7 @@ ## Matching - A class of the way of matching estimation and GT - - For the details, see [perception_eval/evaluation/matching/object_matching.py](../../../perception_eval/perception_eval/evaluation/matching/object_matching.py) + - For the details, see [perception_eval/evaluation/matching/object_matching.py](../../../perception_eval/perception_eval/matching/object_matching.py) | Matching Method | Value | | ------------------ | --------------------------------------------------------------------- | @@ -296,7 +296,7 @@ ## TP Metrics - A class to return TP value - - For the details,see [perception_eval/evaluation/metrics/detection/tp_metrics.py](../../../perception_eval/perception_eval/evaluation/metrics/detection/tp_metrics.py) + - For the details,see [perception_eval/evaluation/metrics/detection/tp_metrics.py](../../../perception_eval/perception_eval/metrics/detection/tp_metrics.py) | TP Metrics | Value | | ------------------- | ------------------------------------------ | diff --git a/docs/en/sensing/data_structure.md b/docs/en/sensing/data_structure.md index b30db7af..c5fb551e 100644 --- a/docs/en/sensing/data_structure.md +++ b/docs/en/sensing/data_structure.md @@ -48,7 +48,7 @@ ### ` SensingFrameResult(...)` Sensing result for pointcloud in detection/non-detection area at one frame. -For the details, see [perception_eval/evaluation/sensing/sensing_frame_result.py](../../../perception_eval/perception_eval/evaluation/sensing/sensing_frame_result.py) +For the details, see [perception_eval/result/sensing/sensing_frame_result.py](../../../perception_eval/perception_eval/result/sensing/sensing_frame_result.py) | Argument | type | Description | | :--------------------- | :------------------: | :---------------------- | @@ -76,7 +76,7 @@ Evaluate false position pointcloud detection in detection/non-detection area. ### ` DynamicObjectWithSensingResult(...)` Sensing result for one GT. -For the details, see [perception_eval/evaluation/sensing/sensing_result.py](../../../perception_eval/perception_eval/evaluation/sensing/sensing_result.py) +For the details, see [perception_eval/result/sensing/sensing_result.py](../../../perception_eval/perception_eval/result/sensing/sensing_result.py) | Argument | type | Description | | :----------------------------- | :-------------------: | :-------------------------------------------- | diff --git a/docs/ja/common.md b/docs/ja/common.md index d0d33a8d..922665da 100644 --- a/docs/ja/common.md +++ b/docs/ja/common.md @@ -1,6 +1,6 @@ # Common items in Perception and Sensing -## [` DynamicObject(...)`](../../perception_eval/perception_eval/common/object/object3d.py) +## [` DynamicObject(...)`](../../perception_eval/perception_eval/object/object3d.py) 3 次元オブジェクトのクラス. @@ -29,7 +29,7 @@ | `predicted_confidence` | `Optional[float]` | 予測状態の信頼度. (Default: None) | | `visibility` | `Optional[Visibility]` | 視認性のステータス. (Default: None) | -## [` DynamicObject2D(...)`](../../perception_eval/perception_eval/common/object/object2d.py) +## [` DynamicObject2D(...)`](../../perception_eval/perception_eval/object/object2d.py) 2 次元オブジェクトのクラス. 
@@ -47,7 +47,7 @@ ## Ground truth -### [` FrameGroundTruth(...)`](../../perception_eval/perception_eval/common/dataset/ground_truth.py) +### [` FrameGroundTruth(...)`](../../perception_eval/perception_eval/dataset/ground_truth.py) フレームごとの GT オブジェクトの集合のクラス. @@ -59,7 +59,7 @@ | `ego2map` | `Optional[numpy.ndarray]` | オブジェクトの座標系を base_link から map に変換する 4x4 行列. (Default: None) | | `raw_data` | `Optional[numpy.ndarray]` | 点群または画像. (Default: None) | -### [` load_all_datasets(...) -> List[FrameGroundTruth]`](../../perception_eval/perception_eval/common/dataset/load.py) +### [` load_all_datasets(...) -> List[FrameGroundTruth]`](../../perception_eval/perception_eval/dataset/load.py) データセットをロードする関数. diff --git a/docs/ja/perception/data_structure.md b/docs/ja/perception/data_structure.md index 25be3eb7..c4f5cf47 100644 --- a/docs/ja/perception/data_structure.md +++ b/docs/ja/perception/data_structure.md @@ -33,7 +33,7 @@ ## Frame 単位でのデータ構造 -### [` PerceptionFrameResult(...)`](../../../perception_eval/perception_eval/evaluation/result/perception_frame_result.py) +### [` PerceptionFrameResult(...)`](../../../perception_eval/perception_eval/result/perception/perception_frame_result.py) - Initialization @@ -127,7 +127,7 @@ ## Object 単位でのデータ構造 -### [` DynamicObjectWithPerceptionResult(...)`](../../../perception_eval/perception_eval/evaluation/result/object_result.py) +### [` DynamicObjectWithPerceptionResult(...)`](../../../perception_eval/perception_eval/result/perception/perception_result.py) 推定オブジェクトの集合`List[ObjectType]`と GT オブジェクトの集合`List[ObjectType]`からマッチングペアの集合`List[DynamicObjectWithPerceptionResult]`を得るには,`get_object_results()`関数を使う. diff --git a/docs/ja/perception/design.md b/docs/ja/perception/design.md index f526c08d..74bda1c0 100644 --- a/docs/ja/perception/design.md +++ b/docs/ja/perception/design.md @@ -219,7 +219,7 @@ json_result = json.dump(dict_result) - **4. TP/FP/FN 判定用のパラメータに各 `MetricsConfig` の引数以外ものを設定した場合** - - 各 MetricsConfig は,[perception_eval/evaluation/metrics/config](../../../perception_eval/perception_eval/evaluation/metrics/config/)を参考 + - 各 MetricsConfig は,[perception_eval/metrics/config](../../../perception_eval/perception_eval/metrics/config/)を参考 ```python evaluation_config_dict = { @@ -248,7 +248,7 @@ json_result = json.dump(dict_result) Usage: {'plane_distance_thresholds', 'iou_3d_thresholds', 'center_distance_thresholds', 'target_labels', 'iou_bev_thresholds'} ``` -### [` CriticalObjectFilterConfig(...)`](../../../perception_eval/perception_eval/evaluation/result/perception_frame_config.py) +### [` CriticalObjectFilterConfig(...)`](../../../perception_eval/perception_eval/result/perception/perception_frame_config.py) - 注目物体を動的決定するためのインターフェイス. - `PerceptionEvaluationManger`の初期化時ではなく,各フレーム毎(=callback)に指定する. @@ -268,7 +268,7 @@ json_result = json.dump(dict_result) \* **max_x/y_position**,**max/min_distance**についてはどちらか片方のみ指定する必要がある. -### [` PerceptionPassFailConfig(...)`](../../../perception_eval/perception_eval/evaluation/result/perception_frame_config.py) +### [` PerceptionPassFailConfig(...)`](../../../perception_eval/perception_eval/result/perception/perception_frame_config.py) - Pass / Fail を決めるためのパラメータ. Pass/Fail の判定については,**Plane distance**によって TP/FP の判定を行う. - `PerceptionEvaluationManger`の初期化時ではなく,各フレーム毎(=callback)に指定する. 
diff --git a/docs/ja/perception/metrics.md b/docs/ja/perception/metrics.md index fc560f02..e945a854 100644 --- a/docs/ja/perception/metrics.md +++ b/docs/ja/perception/metrics.md @@ -1,6 +1,6 @@ # Perception Evaluation Metrics -## [` MetricsScore(...)`](../../../perception_eval/perception_eval/evaluation/metrics/metrics.py) +## [` MetricsScore(...)`](../../../perception_eval/perception_eval/metrics/metrics.py) - detection/tracking/prediction の各評価指標を実行する class @@ -10,9 +10,9 @@ - 入力された MetricsScoreConfig から,`detection/tracking/prediction_config`を生成 - - [`detection_config (DetectionMetricsConfig)`](../../../perception_eval/perception_eval/evaluation/metrics/config/detection_metrics_config.py) - - [`tracking_config (TrackingMetricsConfig)`](../../../perception_eval/perception_eval/evaluation/metrics/config/tracking_metrics_config.py) - - [`prediction_config (PredictionMetricsConfig)`](../../../perception_eval/perception_eval/evaluation/metrics/config/prediction_metrics_config.py) + - [`detection_config (DetectionMetricsConfig)`](../../../perception_eval/perception_eval/metrics/config/detection_metrics_config.py) + - [`tracking_config (TrackingMetricsConfig)`](../../../perception_eval/perception_eval/metrics/config/tracking_metrics_config.py) + - [`prediction_config (PredictionMetricsConfig)`](../../../perception_eval/perception_eval/metrics/config/prediction_metrics_config.py) - 各 config をもとにそれぞれの Metrics が計算される. @@ -76,7 +76,7 @@ ## Detection -### [` Map(...)`](../../../perception_eval/perception_eval/evaluation/metrics/detection/map.py) +### [` Map(...)`](../../../perception_eval/perception_eval/metrics/detection/map.py) - mAP (mean Average Precision) のスコア計算を行う class. @@ -249,7 +249,7 @@ ## Matching - 予測 object と Ground Truth のマッチング方式の class - - 詳細は,[perception_eval/evaluation/matching/object_matching.py](../../../perception_eval/perception_eval/evaluation/matching/object_matching.py)を参照 + - 詳細は,[perception_eval/evaluation/matching/object_matching.py](../../../perception_eval/perception_eval/matching/object_matching.py)を参照 | Matching Method | Value | | ------------------ | --------------------------------------------------------------------- | @@ -295,7 +295,7 @@ ## TP Metrics - True Positive 時の値を返す class - - 詳細は,[perception_eval/evaluation/metrics/detection/tp_metrics.py](../../../perception_eval/perception_eval/evaluation/metrics/detection/tp_metrics.py)を参照 + - 詳細は,[perception_eval/evaluation/metrics/detection/tp_metrics.py](../../../perception_eval/perception_eval/metrics/detection/tp_metrics.py)を参照 | TP Metrics | Value | | ------------------- | ----------------------------- | diff --git a/docs/ja/sensing/data_structure.md b/docs/ja/sensing/data_structure.md index a9abbddd..81e80fe1 100644 --- a/docs/ja/sensing/data_structure.md +++ b/docs/ja/sensing/data_structure.md @@ -50,7 +50,7 @@ ### SensingFrameResult -1 frame に対しての検出・非検出対象エリアに対する点群のセンシング結果[(参照)](../../../perception_eval/perception_eval/evaluation/sensing/sensing_frame_result.py) +1 frame に対しての検出・非検出対象エリアに対する点群のセンシング結果[(参照)](../../../perception_eval/perception_eval/result/sensing/sensing_frame_result.py) | Argument | type | Description | | :--------------------- | :------------------: | :------------- | @@ -76,7 +76,7 @@ ### DynamicObjectWithSensingResult -1 つの GT オブジェクト(アノテーションされた bounding box)に対しての結果[(参照)](../../../perception_eval/perception_eval/evaluation/sensing/sensing_result.py) +1 つの GT オブジェクト(アノテーションされた bounding box)に対しての結果[(参照)](../../../perception_eval/perception_eval/result/sensing/sensing_result.py) | Argument | type 
| Description | | :----------------------------- | :-------------------: | :----------------------------- | diff --git a/docs/ja/sensing/design.md b/docs/ja/sensing/design.md index a61c5984..c6ee4958 100644 --- a/docs/ja/sensing/design.md +++ b/docs/ja/sensing/design.md @@ -36,7 +36,7 @@ ### Frame 単位でのデータ構造 -- SensingFrameResult: 1 frame に対しての検出・非検出対象エリアに対する点群のセンシング結果[(参照)](../../../perception_eval/perception_eval/evaluation/sensing/sensing_frame_result.py) +- SensingFrameResult: 1 frame に対しての検出・非検出対象エリアに対する点群のセンシング結果[(参照)](../../../perception_eval/perception_eval/result/sensing/sensing_frame_result.py) ```txt Arguments: @@ -65,7 +65,7 @@ メソッドを呼ぶことで評価が実行される. -- SensingFrameConfig: Frame 単位での評価用の config[(参照)](../../../perception_eval/perception_eval/evaluation/sensing/sensing_frame_config.py) +- SensingFrameConfig: Frame 単位での評価用の config[(参照)](../../../perception_eval/perception_eval/result/sensing/sensing_frame_config.py) ```txt Arguments: @@ -83,7 +83,7 @@ ### Object 単位でのデータ構造 -- DynamicObjectWithSensingResult: 1 ground truth object(アノテーションされた bounding box)に対しての結果[(参照)](../../../perception_eval/perception_eval/evaluation/sensing/sensing_result.py) +- DynamicObjectWithSensingResult: 1 ground truth object(アノテーションされた bounding box)に対しての結果[(参照)](../../../perception_eval/perception_eval/result/sensing/sensing_result.py) ```txt Arguments: diff --git a/perception_eval/perception_eval/config/_evaluation_config_base.py b/perception_eval/perception_eval/config/_evaluation_config_base.py index 3fa67b96..7138eab0 100644 --- a/perception_eval/perception_eval/config/_evaluation_config_base.py +++ b/perception_eval/perception_eval/config/_evaluation_config_base.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations + from abc import ABC from abc import abstractmethod import datetime @@ -22,13 +24,16 @@ from typing import List from typing import Sequence from typing import Tuple +from typing import TYPE_CHECKING from typing import Union -from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.evaluation_task import set_task from perception_eval.common.label import LabelConverter from perception_eval.common.schema import FrameID +if TYPE_CHECKING: + from perception_eval.common.evaluation_task import EvaluationTask + class _EvaluationConfigBase(ABC): """Abstract base class for evaluation config diff --git a/perception_eval/perception_eval/config/perception_evaluation_config.py b/perception_eval/perception_eval/config/perception_evaluation_config.py index d43a69f0..ccc010d9 100644 --- a/perception_eval/perception_eval/config/perception_evaluation_config.py +++ b/perception_eval/perception_eval/config/perception_evaluation_config.py @@ -18,16 +18,19 @@ from typing import Optional from typing import Sequence from typing import Tuple +from typing import TYPE_CHECKING from typing import Union from perception_eval.common.evaluation_task import EvaluationTask -from perception_eval.common.label import LabelType from perception_eval.common.label import set_target_lists from perception_eval.common.threshold import set_thresholds -from perception_eval.evaluation.metrics import MetricsScoreConfig +from perception_eval.metrics import MetricsScoreConfig from ._evaluation_config_base import _EvaluationConfigBase +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + class PerceptionEvaluationConfig(_EvaluationConfigBase): """Configuration class for perception evaluation. @@ -87,10 +90,7 @@ def __init__( load_raw_data=load_raw_data, ) - self.metrics_config: MetricsScoreConfig = MetricsScoreConfig( - self.evaluation_task, - **self.metrics_params, - ) + self.metrics_config = MetricsScoreConfig(self.evaluation_task, **self.metrics_params) @staticmethod def _extract_label_params(evaluation_config_dict: Dict[str, Any]) -> Dict[str, Any]: diff --git a/perception_eval/perception_eval/common/dataset/__init__.py b/perception_eval/perception_eval/dataset/__init__.py similarity index 100% rename from perception_eval/perception_eval/common/dataset/__init__.py rename to perception_eval/perception_eval/dataset/__init__.py diff --git a/perception_eval/perception_eval/common/dataset/ground_truth.py b/perception_eval/perception_eval/dataset/ground_truth.py similarity index 83% rename from perception_eval/perception_eval/common/dataset/ground_truth.py rename to perception_eval/perception_eval/dataset/ground_truth.py index 9fd0e857..38d5a210 100644 --- a/perception_eval/perception_eval/common/dataset/ground_truth.py +++ b/perception_eval/perception_eval/dataset/ground_truth.py @@ -1,10 +1,14 @@ +from __future__ import annotations + from typing import Dict from typing import List from typing import Optional +from typing import TYPE_CHECKING import numpy as np -from ..object import ObjectType +if TYPE_CHECKING: + from perception_eval.object import ObjectType class FrameGroundTruth: @@ -40,8 +44,8 @@ def __init__( ego2map: Optional[np.ndarray] = None, raw_data: Optional[Dict[str, np.ndarray]] = None, ) -> None: - self.unix_time: int = unix_time - self.frame_name: str = frame_name - self.objects: List[ObjectType] = objects - self.ego2map: Optional[np.ndarray] = ego2map - self.raw_data: Optional[Dict[str, np.ndarray]] = raw_data + self.unix_time = 
unix_time + self.frame_name = frame_name + self.objects = objects + self.ego2map = ego2map + self.raw_data = raw_data diff --git a/perception_eval/perception_eval/common/dataset/load.py b/perception_eval/perception_eval/dataset/load.py similarity index 97% rename from perception_eval/perception_eval/common/dataset/load.py rename to perception_eval/perception_eval/dataset/load.py index 16602822..a60eac8a 100644 --- a/perception_eval/perception_eval/common/dataset/load.py +++ b/perception_eval/perception_eval/dataset/load.py @@ -11,19 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import logging from typing import Any from typing import List from typing import Optional from typing import Sequence +from typing import TYPE_CHECKING from typing import Union from nuimages import NuImages from nuscenes.nuscenes import NuScenes from nuscenes.prediction.helper import PredictHelper -from perception_eval.common.evaluation_task import EvaluationTask -from perception_eval.common.label import LabelConverter from perception_eval.common.schema import FrameID from tqdm import tqdm @@ -31,6 +31,10 @@ from .utils import _sample_to_frame from .utils import _sample_to_frame_2d +if TYPE_CHECKING: + from perception_eval.common.evaluation_task import EvaluationTask + from perception_eval.common.label import LabelConverter + def load_all_datasets( dataset_paths: List[str], diff --git a/perception_eval/perception_eval/common/dataset/utils.py b/perception_eval/perception_eval/dataset/utils.py similarity index 98% rename from perception_eval/perception_eval/common/dataset/utils.py rename to perception_eval/perception_eval/dataset/utils.py index d5b25aec..f8205d85 100644 --- a/perception_eval/perception_eval/common/dataset/utils.py +++ b/perception_eval/perception_eval/dataset/utils.py @@ -20,6 +20,7 @@ from typing import Optional from typing import Sequence from typing import Tuple +from typing import TYPE_CHECKING from typing import Union from nuimages import NuImages @@ -28,21 +29,23 @@ from nuscenes.prediction.helper import PredictHelper from nuscenes.utils.data_classes import Box from perception_eval.common.evaluation_task import EvaluationTask -from perception_eval.common.label import Label -from perception_eval.common.label import LabelConverter -from perception_eval.common.label import LabelType from perception_eval.common.label import TrafficLightLabel -from perception_eval.common.object import DynamicObject -from perception_eval.common.object import DynamicObject2D from perception_eval.common.schema import FrameID from perception_eval.common.schema import Visibility from perception_eval.common.shape import Shape from perception_eval.common.shape import ShapeType +from perception_eval.object import DynamicObject +from perception_eval.object import DynamicObject2D from PIL import Image from pyquaternion.quaternion import Quaternion from .load import FrameGroundTruth +if TYPE_CHECKING: + from perception_eval.common.label import Label + from perception_eval.common.label import LabelConverter + from perception_eval.common.label import LabelType + ################################# # Dataset 3D # ################################# diff --git a/perception_eval/perception_eval/evaluation/__init__.py b/perception_eval/perception_eval/evaluation/__init__.py deleted file mode 100644 index 97cb7e05..00000000 --- 
a/perception_eval/perception_eval/evaluation/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2022 TIER IV, Inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Union - -from perception_eval.evaluation.result.object_result import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.result.perception_frame_result import get_object_status # noqa -from perception_eval.evaluation.result.perception_frame_result import PerceptionFrameResult -from perception_eval.evaluation.sensing.sensing_frame_result import SensingFrameResult -from perception_eval.evaluation.sensing.sensing_result import DynamicObjectWithSensingResult - -# type aliases -ObjectResultType = Union[DynamicObjectWithPerceptionResult, DynamicObjectWithSensingResult] -FrameResultType = Union[PerceptionFrameResult, SensingFrameResult] diff --git a/perception_eval/perception_eval/manager/_evaluation_manager_base.py b/perception_eval/perception_eval/manager/_evaluation_manager_base.py index 8a16abee..040257a0 100644 --- a/perception_eval/perception_eval/manager/_evaluation_manager_base.py +++ b/perception_eval/perception_eval/manager/_evaluation_manager_base.py @@ -12,17 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from abc import ABC from abc import abstractmethod from typing import List from typing import Optional +from typing import TYPE_CHECKING + +from perception_eval.dataset import get_now_frame +from perception_eval.dataset import load_all_datasets -from perception_eval.common.dataset import FrameGroundTruth -from perception_eval.common.dataset import get_now_frame -from perception_eval.common.dataset import load_all_datasets -from perception_eval.config import EvaluationConfigType -from perception_eval.evaluation import FrameResultType -from perception_eval.visualization import VisualizerType +if TYPE_CHECKING: + from perception_eval.config import EvaluationConfigType + from perception_eval.dataset import FrameGroundTruth + from perception_eval.result import FrameResultType + from perception_eval.visualization import VisualizerType class _EvaluationMangerBase(ABC): @@ -44,7 +49,7 @@ def __init__( super().__init__() self.evaluator_config = evaluation_config - self.ground_truth_frames: List[FrameGroundTruth] = load_all_datasets( + self.ground_truth_frames = load_all_datasets( dataset_paths=self.evaluator_config.dataset_paths, evaluation_task=self.evaluator_config.evaluation_task, label_converter=self.evaluator_config.label_converter, @@ -52,6 +57,8 @@ def __init__( load_raw_data=self.evaluator_config.load_raw_data, ) + self.frame_results: List[FrameResultType] = [] + @property def evaluation_task(self): return self.evaluator_config.evaluation_task @@ -104,7 +111,7 @@ def get_ground_truth_now_frame( Optional[FrameGroundTruth]: FrameGroundTruth instance at current frame. If there is no corresponding ground truth, returns None. 
""" - ground_truth_now_frame: FrameGroundTruth = get_now_frame( + ground_truth_now_frame = get_now_frame( ground_truth_frames=self.ground_truth_frames, unix_time=unix_time, threshold_min_time=threshold_min_time, diff --git a/perception_eval/perception_eval/manager/perception_evaluation_manager.py b/perception_eval/perception_eval/manager/perception_evaluation_manager.py index 2bb70668..c43d576a 100644 --- a/perception_eval/perception_eval/manager/perception_evaluation_manager.py +++ b/perception_eval/perception_eval/manager/perception_evaluation_manager.py @@ -12,29 +12,33 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations from typing import List from typing import Tuple - -from perception_eval.common.dataset import FrameGroundTruth -from perception_eval.common.label import LabelType -from perception_eval.common.object import ObjectType -from perception_eval.config import PerceptionEvaluationConfig -from perception_eval.evaluation import PerceptionFrameResult -from perception_eval.evaluation.matching.objects_filter import divide_objects -from perception_eval.evaluation.matching.objects_filter import divide_objects_to_num -from perception_eval.evaluation.matching.objects_filter import filter_object_results -from perception_eval.evaluation.matching.objects_filter import filter_objects -from perception_eval.evaluation.metrics import MetricsScore -from perception_eval.evaluation.result.perception_frame_config import CriticalObjectFilterConfig -from perception_eval.evaluation.result.perception_frame_config import PerceptionPassFailConfig +from typing import TYPE_CHECKING + +from perception_eval.matching.objects_filter import divide_objects +from perception_eval.matching.objects_filter import divide_objects_to_num +from perception_eval.matching.objects_filter import filter_object_results +from perception_eval.matching.objects_filter import filter_objects +from perception_eval.metrics import MetricsScore +from perception_eval.result import get_object_results +from perception_eval.result import PerceptionFrameResult from perception_eval.visualization import PerceptionVisualizer2D from perception_eval.visualization import PerceptionVisualizer3D -from perception_eval.visualization import PerceptionVisualizerType from ._evaluation_manager_base import _EvaluationMangerBase -from ..evaluation.result.object_result import DynamicObjectWithPerceptionResult -from ..evaluation.result.object_result import get_object_results + +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + from perception_eval.config import PerceptionEvaluationConfig + from perception_eval.dataset import FrameGroundTruth + from perception_eval.object import ObjectType + from perception_eval.result import CriticalObjectFilterConfig + from perception_eval.result import DynamicObjectWithPerceptionResult + from perception_eval.result import PerceptionPassFailConfig + from perception_eval.visualization import PerceptionVisualizerType class PerceptionEvaluationManager(_EvaluationMangerBase): @@ -57,7 +61,6 @@ def __init__( evaluation_config: PerceptionEvaluationConfig, ) -> None: super().__init__(evaluation_config=evaluation_config) - self.frame_results: List[PerceptionFrameResult] = [] self.__visualizer = ( PerceptionVisualizer2D(self.evaluator_config) if self.evaluation_task.is_2d() @@ -167,7 +170,7 @@ def _filter_objects( **self.filtering_params, ) - object_results: List[DynamicObjectWithPerceptionResult] = get_object_results( + 
object_results = get_object_results( evaluation_task=self.evaluation_task, estimated_objects=estimated_objects, ground_truth_objects=frame_ground_truth.objects, @@ -204,7 +207,7 @@ def get_scene_result(self) -> MetricsScore: used_frame.append(int(frame.frame_name)) # Calculate score - scene_metrics_score: MetricsScore = MetricsScore( + scene_metrics_score = MetricsScore( config=self.metrics_config, used_frame=used_frame, ) diff --git a/perception_eval/perception_eval/manager/sensing_evaluation_manager.py b/perception_eval/perception_eval/manager/sensing_evaluation_manager.py index 88f5f529..0a4b8c39 100644 --- a/perception_eval/perception_eval/manager/sensing_evaluation_manager.py +++ b/perception_eval/perception_eval/manager/sensing_evaluation_manager.py @@ -17,13 +17,13 @@ from typing import Tuple import numpy as np -from perception_eval.common.dataset import FrameGroundTruth -from perception_eval.common.object import DynamicObject from perception_eval.common.point import crop_pointcloud from perception_eval.config import SensingEvaluationConfig -from perception_eval.evaluation import SensingFrameResult -from perception_eval.evaluation.matching.objects_filter import filter_objects -from perception_eval.evaluation.sensing.sensing_frame_config import SensingFrameConfig +from perception_eval.dataset import FrameGroundTruth +from perception_eval.matching.objects_filter import filter_objects +from perception_eval.object import DynamicObject +from perception_eval.result import SensingFrameConfig +from perception_eval.result import SensingFrameResult from perception_eval.util.math import get_bbox_scale from perception_eval.visualization import SensingVisualizer @@ -47,7 +47,6 @@ def __init__( evaluation_config: SensingEvaluationConfig, ) -> None: super().__init__(evaluation_config) - self.frame_results: List[SensingFrameResult] = [] self.__visualizer = SensingVisualizer(self.evaluator_config) @property diff --git a/perception_eval/perception_eval/evaluation/matching/__init__.py b/perception_eval/perception_eval/matching/__init__.py similarity index 60% rename from perception_eval/perception_eval/evaluation/matching/__init__.py rename to perception_eval/perception_eval/matching/__init__.py index fa081594..0044d8d6 100644 --- a/perception_eval/perception_eval/evaluation/matching/__init__.py +++ b/perception_eval/perception_eval/matching/__init__.py @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from perception_eval.evaluation.matching.object_matching import CenterDistanceMatching -from perception_eval.evaluation.matching.object_matching import IOU2dMatching -from perception_eval.evaluation.matching.object_matching import IOU3dMatching -from perception_eval.evaluation.matching.object_matching import MatchingMethod -from perception_eval.evaluation.matching.object_matching import MatchingMode -from perception_eval.evaluation.matching.object_matching import PlaneDistanceMatching +from .object_matching import CenterDistanceMatching +from .object_matching import IOU2dMatching +from .object_matching import IOU3dMatching +from .object_matching import MatchingMethod +from .object_matching import MatchingMode +from .object_matching import PlaneDistanceMatching __all__ = ( "CenterDistanceMatching", diff --git a/perception_eval/perception_eval/evaluation/matching/object_matching.py b/perception_eval/perception_eval/matching/object_matching.py similarity index 98% rename from perception_eval/perception_eval/evaluation/matching/object_matching.py rename to perception_eval/perception_eval/matching/object_matching.py index 0a57ceb5..fa3f7a7d 100644 --- a/perception_eval/perception_eval/evaluation/matching/object_matching.py +++ b/perception_eval/perception_eval/matching/object_matching.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from abc import ABC from abc import abstractmethod from enum import Enum @@ -20,15 +22,18 @@ from typing import List from typing import Optional from typing import Tuple +from typing import TYPE_CHECKING -from perception_eval.common.object import distance_objects -from perception_eval.common.object import DynamicObject -from perception_eval.common.object import ObjectType from perception_eval.common.point import distance_points_bev from perception_eval.common.point import get_point_left_right from perception_eval.common.point import polygon_to_list +from perception_eval.object import distance_objects +from perception_eval.object import DynamicObject from shapely.geometry import Polygon +if TYPE_CHECKING: + from perception_eval.object import ObjectType + class MatchingMode(Enum): """[summary] diff --git a/perception_eval/perception_eval/evaluation/matching/objects_filter.py b/perception_eval/perception_eval/matching/objects_filter.py similarity index 98% rename from perception_eval/perception_eval/evaluation/matching/objects_filter.py rename to perception_eval/perception_eval/matching/objects_filter.py index 594ba03c..23e4834d 100644 --- a/perception_eval/perception_eval/evaluation/matching/objects_filter.py +++ b/perception_eval/perception_eval/matching/objects_filter.py @@ -12,25 +12,30 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations + from typing import Dict from typing import List from typing import Optional from typing import Tuple +from typing import TYPE_CHECKING from typing import Union import warnings import numpy as np from perception_eval.common.label import CommonLabel -from perception_eval.common.label import Label -from perception_eval.common.label import LabelType -from perception_eval.common.object import DynamicObject -from perception_eval.common.object import ObjectType from perception_eval.common.schema import FrameID from perception_eval.common.status import MatchingStatus from perception_eval.common.threshold import get_label_threshold from perception_eval.common.threshold import LabelThreshold -from perception_eval.evaluation import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.matching import MatchingMode +from perception_eval.object import DynamicObject +from perception_eval.result.perception.perception_result import DynamicObjectWithPerceptionResult + +if TYPE_CHECKING: + from perception_eval.common.label import Label + from perception_eval.common.label import LabelType + from perception_eval.matching import MatchingMode + from perception_eval.object import ObjectType def filter_object_results( @@ -383,7 +388,7 @@ def divide_tp_fp_objects( ) # confidence threshold - confidence_threshold_: Optional[float] = get_label_threshold( + confidence_threshold_ = get_label_threshold( semantic_label=object_result.estimated_object.semantic_label, target_labels=target_labels, threshold_list=confidence_threshold_list, @@ -423,7 +428,7 @@ def get_fn_objects( fn_objects: List[ObjectType] = [] for ground_truth_object in ground_truth_objects: - is_fn_object: bool = _is_fn_object( + is_fn_object = _is_fn_object( ground_truth_object=ground_truth_object, object_results=object_results, tp_object_results=tp_object_results, diff --git a/perception_eval/test/evaluation/metrics/detection/__init__.py b/perception_eval/perception_eval/metrics/__init__.py similarity index 80% rename from perception_eval/test/evaluation/metrics/detection/__init__.py rename to perception_eval/perception_eval/metrics/__init__.py index 9be33190..0310fa43 100644 --- a/perception_eval/test/evaluation/metrics/detection/__init__.py +++ b/perception_eval/perception_eval/metrics/__init__.py @@ -11,3 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ +from .metrics import MetricsScore +from .metrics_score_config import MetricsScoreConfig + +__all__ = ("MetricsScore", "MetricsScoreConfig") diff --git a/perception_eval/perception_eval/evaluation/metrics/classification/__init__.py b/perception_eval/perception_eval/metrics/classification/__init__.py similarity index 100% rename from perception_eval/perception_eval/evaluation/metrics/classification/__init__.py rename to perception_eval/perception_eval/metrics/classification/__init__.py diff --git a/perception_eval/perception_eval/evaluation/metrics/classification/accuracy.py b/perception_eval/perception_eval/metrics/classification/accuracy.py similarity index 96% rename from perception_eval/perception_eval/evaluation/metrics/classification/accuracy.py rename to perception_eval/perception_eval/metrics/classification/accuracy.py index 522b5c29..7170597a 100644 --- a/perception_eval/perception_eval/evaluation/metrics/classification/accuracy.py +++ b/perception_eval/perception_eval/metrics/classification/accuracy.py @@ -12,13 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from collections import OrderedDict from typing import Dict from typing import List from typing import Tuple +from typing import TYPE_CHECKING -from perception_eval.common.label import LabelType -from perception_eval.evaluation import DynamicObjectWithPerceptionResult +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + from perception_eval.result import DynamicObjectWithPerceptionResult class ClassificationAccuracy: diff --git a/perception_eval/perception_eval/evaluation/metrics/classification/classification_metrics_score.py b/perception_eval/perception_eval/metrics/classification/classification_metrics_score.py similarity index 95% rename from perception_eval/perception_eval/evaluation/metrics/classification/classification_metrics_score.py rename to perception_eval/perception_eval/metrics/classification/classification_metrics_score.py index a4951cd2..f1b9ec57 100644 --- a/perception_eval/perception_eval/evaluation/metrics/classification/classification_metrics_score.py +++ b/perception_eval/perception_eval/metrics/classification/classification_metrics_score.py @@ -17,12 +17,14 @@ from typing import Dict from typing import List from typing import Tuple - -from perception_eval.common.label import LabelType -from perception_eval.evaluation import DynamicObjectWithPerceptionResult +from typing import TYPE_CHECKING from .accuracy import ClassificationAccuracy +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + from perception_eval.result import DynamicObjectWithPerceptionResult + class ClassificationMetricsScore: """Metrics score class for classification evaluation. 
@@ -49,7 +51,7 @@ def __init__( object_results = object_results_dict[target_label] num_ground_truth = num_ground_truth_dict[target_label] - acc_: ClassificationAccuracy = ClassificationAccuracy( + acc_ = ClassificationAccuracy( object_results=object_results, num_ground_truth=num_ground_truth, target_labels=[target_label], diff --git a/perception_eval/perception_eval/evaluation/metrics/config/__init__.py b/perception_eval/perception_eval/metrics/config/__init__.py similarity index 100% rename from perception_eval/perception_eval/evaluation/metrics/config/__init__.py rename to perception_eval/perception_eval/metrics/config/__init__.py diff --git a/perception_eval/perception_eval/evaluation/metrics/config/_metrics_config_base.py b/perception_eval/perception_eval/metrics/config/_metrics_config_base.py similarity index 93% rename from perception_eval/perception_eval/evaluation/metrics/config/_metrics_config_base.py rename to perception_eval/perception_eval/metrics/config/_metrics_config_base.py index 5f23b892..7085b6b3 100644 --- a/perception_eval/perception_eval/evaluation/metrics/config/_metrics_config_base.py +++ b/perception_eval/perception_eval/metrics/config/_metrics_config_base.py @@ -12,15 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from abc import ABC from abc import abstractmethod from typing import List from typing import Optional +from typing import TYPE_CHECKING -from perception_eval.common.evaluation_task import EvaluationTask -from perception_eval.common.label import LabelType from perception_eval.common.threshold import set_thresholds +if TYPE_CHECKING: + from perception_eval.common.evaluation_task import EvaluationTask + from perception_eval.common.label import LabelType + class _MetricsConfigBase(ABC): """Abstract base class of MetricsConfig for each evaluation task. diff --git a/perception_eval/perception_eval/evaluation/metrics/config/classification_metrics_config.py b/perception_eval/perception_eval/metrics/config/classification_metrics_config.py similarity index 94% rename from perception_eval/perception_eval/evaluation/metrics/config/classification_metrics_config.py rename to perception_eval/perception_eval/metrics/config/classification_metrics_config.py index 910abf5a..606adb9a 100644 --- a/perception_eval/perception_eval/evaluation/metrics/config/classification_metrics_config.py +++ b/perception_eval/perception_eval/metrics/config/classification_metrics_config.py @@ -12,14 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from typing import List from typing import Optional +from typing import TYPE_CHECKING from perception_eval.common.evaluation_task import EvaluationTask -from perception_eval.common.label import LabelType from ._metrics_config_base import _MetricsConfigBase +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + class ClassificationMetricsConfig(_MetricsConfigBase): """Configuration class for classification evaluation metrics. 
diff --git a/perception_eval/perception_eval/evaluation/metrics/config/detection_metrics_config.py b/perception_eval/perception_eval/metrics/config/detection_metrics_config.py similarity index 94% rename from perception_eval/perception_eval/evaluation/metrics/config/detection_metrics_config.py rename to perception_eval/perception_eval/metrics/config/detection_metrics_config.py index 1ec752f3..50f123e9 100644 --- a/perception_eval/perception_eval/evaluation/metrics/config/detection_metrics_config.py +++ b/perception_eval/perception_eval/metrics/config/detection_metrics_config.py @@ -12,14 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from typing import List from typing import Optional +from typing import TYPE_CHECKING from perception_eval.common.evaluation_task import EvaluationTask -from perception_eval.common.label import LabelType from ._metrics_config_base import _MetricsConfigBase +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + class DetectionMetricsConfig(_MetricsConfigBase): """Configuration class for detection evaluation metrics. diff --git a/perception_eval/perception_eval/evaluation/metrics/config/prediction_metrics_config.py b/perception_eval/perception_eval/metrics/config/prediction_metrics_config.py similarity index 94% rename from perception_eval/perception_eval/evaluation/metrics/config/prediction_metrics_config.py rename to perception_eval/perception_eval/metrics/config/prediction_metrics_config.py index 73b0f1f9..5af96f20 100644 --- a/perception_eval/perception_eval/evaluation/metrics/config/prediction_metrics_config.py +++ b/perception_eval/perception_eval/metrics/config/prediction_metrics_config.py @@ -12,13 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from typing import List +from typing import TYPE_CHECKING from perception_eval.common.evaluation_task import EvaluationTask -from perception_eval.common.label import LabelType from ._metrics_config_base import _MetricsConfigBase +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + class PredictionMetricsConfig(_MetricsConfigBase): """Configuration class for prediction evaluation metrics. diff --git a/perception_eval/perception_eval/evaluation/metrics/config/tracking_metrics_config.py b/perception_eval/perception_eval/metrics/config/tracking_metrics_config.py similarity index 94% rename from perception_eval/perception_eval/evaluation/metrics/config/tracking_metrics_config.py rename to perception_eval/perception_eval/metrics/config/tracking_metrics_config.py index 5a0280a2..e2e9bcea 100644 --- a/perception_eval/perception_eval/evaluation/metrics/config/tracking_metrics_config.py +++ b/perception_eval/perception_eval/metrics/config/tracking_metrics_config.py @@ -12,13 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from typing import List +from typing import TYPE_CHECKING from perception_eval.common.evaluation_task import EvaluationTask -from perception_eval.common.label import LabelType from ._metrics_config_base import _MetricsConfigBase +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + class TrackingMetricsConfig(_MetricsConfigBase): """Configuration class for tracking evaluation metrics. 
diff --git a/perception_eval/perception_eval/evaluation/metrics/detection/__init__.py b/perception_eval/perception_eval/metrics/detection/__init__.py similarity index 100% rename from perception_eval/perception_eval/evaluation/metrics/detection/__init__.py rename to perception_eval/perception_eval/metrics/detection/__init__.py diff --git a/perception_eval/perception_eval/evaluation/metrics/detection/ap.py b/perception_eval/perception_eval/metrics/detection/ap.py similarity index 96% rename from perception_eval/perception_eval/evaluation/metrics/detection/ap.py rename to perception_eval/perception_eval/metrics/detection/ap.py index 4ff5b68e..b864963c 100644 --- a/perception_eval/perception_eval/evaluation/metrics/detection/ap.py +++ b/perception_eval/perception_eval/metrics/detection/ap.py @@ -12,24 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. -from logging import getLogger +from __future__ import annotations + +import logging import os.path as osp from typing import Callable from typing import List from typing import Optional from typing import Tuple +from typing import TYPE_CHECKING from typing import Union import matplotlib.pyplot as plt import numpy as np -from perception_eval.common.label import LabelType from perception_eval.common.threshold import get_label_threshold -from perception_eval.evaluation import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.matching import MatchingMode -from perception_eval.evaluation.metrics.detection.tp_metrics import TPMetricsAp -from perception_eval.evaluation.metrics.detection.tp_metrics import TPMetricsAph -logger = getLogger(__name__) +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + from perception_eval.matching import MatchingMode + from perception_eval.result import DynamicObjectWithPerceptionResult + + from .tp_metrics import TPMetricsAp + from .tp_metrics import TPMetricsAph class Ap: @@ -240,7 +244,7 @@ def _calculate_tp_fp( # When result num is 0 if len(object_results) == 0: if self.num_ground_truth == 0: - logger.debug("The size of object_results is 0") + logging.debug("The size of object_results is 0") return [], [] else: tp_list: List[float] = [0.0] * self.num_ground_truth diff --git a/perception_eval/perception_eval/evaluation/metrics/detection/map.py b/perception_eval/perception_eval/metrics/detection/map.py similarity index 92% rename from perception_eval/perception_eval/evaluation/metrics/detection/map.py rename to perception_eval/perception_eval/metrics/detection/map.py index d1a20641..c2145d53 100644 --- a/perception_eval/perception_eval/evaluation/metrics/detection/map.py +++ b/perception_eval/perception_eval/metrics/detection/map.py @@ -12,15 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations + from typing import Dict from typing import List +from typing import TYPE_CHECKING + +from .ap import Ap +from .tp_metrics import TPMetricsAp +from .tp_metrics import TPMetricsAph -from perception_eval.common.label import LabelType -from perception_eval.evaluation import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.matching import MatchingMode -from perception_eval.evaluation.metrics.detection.ap import Ap -from perception_eval.evaluation.metrics.detection.tp_metrics import TPMetricsAp -from perception_eval.evaluation.metrics.detection.tp_metrics import TPMetricsAph +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + from perception_eval.matching import MatchingMode + from perception_eval.result import DynamicObjectWithPerceptionResult class Map: diff --git a/perception_eval/perception_eval/evaluation/metrics/detection/tp_metrics.py b/perception_eval/perception_eval/metrics/detection/tp_metrics.py similarity index 96% rename from perception_eval/perception_eval/evaluation/metrics/detection/tp_metrics.py rename to perception_eval/perception_eval/metrics/detection/tp_metrics.py index 545a59f0..af402bec 100644 --- a/perception_eval/perception_eval/evaluation/metrics/detection/tp_metrics.py +++ b/perception_eval/perception_eval/metrics/detection/tp_metrics.py @@ -12,13 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from abc import ABCMeta from abc import abstractmethod from math import pi +from typing import TYPE_CHECKING import numpy as np from perception_eval.common.schema import FrameID -from perception_eval.evaluation import DynamicObjectWithPerceptionResult + +if TYPE_CHECKING: + from perception_eval.result import DynamicObjectWithPerceptionResult class TPMetrics(metaclass=ABCMeta): diff --git a/perception_eval/perception_eval/evaluation/metrics/metrics.py b/perception_eval/perception_eval/metrics/metrics.py similarity index 97% rename from perception_eval/perception_eval/evaluation/metrics/metrics.py rename to perception_eval/perception_eval/metrics/metrics.py index c3c34f96..8ee35743 100644 --- a/perception_eval/perception_eval/evaluation/metrics/metrics.py +++ b/perception_eval/perception_eval/metrics/metrics.py @@ -12,18 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from typing import Dict from typing import List +from typing import TYPE_CHECKING -from perception_eval.common.label import LabelType -from perception_eval.evaluation import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.matching import MatchingMode +from perception_eval.matching import MatchingMode from .classification import ClassificationMetricsScore from .detection import Map from .metrics_score_config import MetricsScoreConfig from .tracking import TrackingMetricsScore +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + from perception_eval.result import DynamicObjectWithPerceptionResult + class MetricsScore: """Metrics score class. 
diff --git a/perception_eval/perception_eval/evaluation/metrics/metrics_score_config.py b/perception_eval/perception_eval/metrics/metrics_score_config.py similarity index 100% rename from perception_eval/perception_eval/evaluation/metrics/metrics_score_config.py rename to perception_eval/perception_eval/metrics/metrics_score_config.py diff --git a/perception_eval/perception_eval/evaluation/metrics/prediction/__init__.py b/perception_eval/perception_eval/metrics/prediction/__init__.py similarity index 100% rename from perception_eval/perception_eval/evaluation/metrics/prediction/__init__.py rename to perception_eval/perception_eval/metrics/prediction/__init__.py diff --git a/perception_eval/perception_eval/evaluation/metrics/tracking/__init__.py b/perception_eval/perception_eval/metrics/tracking/__init__.py similarity index 100% rename from perception_eval/perception_eval/evaluation/metrics/tracking/__init__.py rename to perception_eval/perception_eval/metrics/tracking/__init__.py diff --git a/perception_eval/perception_eval/evaluation/metrics/tracking/_metrics_base.py b/perception_eval/perception_eval/metrics/tracking/_metrics_base.py similarity index 95% rename from perception_eval/perception_eval/evaluation/metrics/tracking/_metrics_base.py rename to perception_eval/perception_eval/metrics/tracking/_metrics_base.py index 821c6865..c0088f22 100644 --- a/perception_eval/perception_eval/evaluation/metrics/tracking/_metrics_base.py +++ b/perception_eval/perception_eval/metrics/tracking/_metrics_base.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from abc import ABC from abc import abstractmethod from typing import Any @@ -19,11 +21,14 @@ from typing import List from typing import Optional from typing import Tuple +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + from perception_eval.matching import MatchingMode + from perception_eval.result import DynamicObjectWithPerceptionResult -from perception_eval.common.label import LabelType -from perception_eval.evaluation import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.matching import MatchingMode -from perception_eval.evaluation.metrics.detection.tp_metrics import TPMetrics + from ..detection.tp_metrics import TPMetrics class _TrackingMetricsBase(ABC): diff --git a/perception_eval/perception_eval/evaluation/metrics/tracking/clear.py b/perception_eval/perception_eval/metrics/tracking/clear.py similarity index 97% rename from perception_eval/perception_eval/evaluation/metrics/tracking/clear.py rename to perception_eval/perception_eval/metrics/tracking/clear.py index d2e2227e..9a00154b 100644 --- a/perception_eval/perception_eval/evaluation/metrics/tracking/clear.py +++ b/perception_eval/perception_eval/metrics/tracking/clear.py @@ -12,20 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations + from collections import OrderedDict from typing import Dict from typing import List from typing import Optional from typing import Tuple +from typing import TYPE_CHECKING -from perception_eval.common.label import LabelType from perception_eval.common.threshold import get_label_threshold -from perception_eval.evaluation import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.matching import MatchingMode -from perception_eval.evaluation.metrics.detection.tp_metrics import TPMetrics -from perception_eval.evaluation.metrics.detection.tp_metrics import TPMetricsAp from ._metrics_base import _TrackingMetricsBase +from ..detection.tp_metrics import TPMetricsAp + +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + from perception_eval.matching import MatchingMode + from perception_eval.result import DynamicObjectWithPerceptionResult + + from ..detection.tp_metrics import TPMetrics class CLEAR(_TrackingMetricsBase): diff --git a/perception_eval/perception_eval/evaluation/metrics/tracking/hota.py b/perception_eval/perception_eval/metrics/tracking/hota.py similarity index 91% rename from perception_eval/perception_eval/evaluation/metrics/tracking/hota.py rename to perception_eval/perception_eval/metrics/tracking/hota.py index ab6f61e2..244d2170 100644 --- a/perception_eval/perception_eval/evaluation/metrics/tracking/hota.py +++ b/perception_eval/perception_eval/metrics/tracking/hota.py @@ -12,18 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from typing import Any from typing import List from typing import Optional - -from perception_eval.common.label import LabelType -from perception_eval.evaluation import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.matching import MatchingMode -from perception_eval.evaluation.metrics.detection.tp_metrics import TPMetrics -from perception_eval.evaluation.metrics.detection.tp_metrics import TPMetricsAp +from typing import TYPE_CHECKING from ._metrics_base import _TrackingMetricsBase +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + from perception_eval.matching import MatchingMode + from perception_eval.result import DynamicObjectWithPerceptionResult + + from ..detection.tp_metrics import TPMetrics + from ..detection.tp_metrics import TPMetricsAp + class HOTA(_TrackingMetricsBase): """==== TODO ==== diff --git a/perception_eval/perception_eval/evaluation/metrics/tracking/tracking_metrics_score.py b/perception_eval/perception_eval/metrics/tracking/tracking_metrics_score.py similarity index 93% rename from perception_eval/perception_eval/evaluation/metrics/tracking/tracking_metrics_score.py rename to perception_eval/perception_eval/metrics/tracking/tracking_metrics_score.py index a9dad00d..f7f07fbb 100644 --- a/perception_eval/perception_eval/evaluation/metrics/tracking/tracking_metrics_score.py +++ b/perception_eval/perception_eval/metrics/tracking/tracking_metrics_score.py @@ -12,17 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations + from ctypes import Union from typing import Dict from typing import List from typing import Tuple - -from perception_eval.common.label import LabelType -from perception_eval.evaluation import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.matching import MatchingMode +from typing import TYPE_CHECKING from .clear import CLEAR +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + from perception_eval.matching import MatchingMode + from perception_eval.result import DynamicObjectWithPerceptionResult + class TrackingMetricsScore: """Metrics score class for tracking evaluation. @@ -53,7 +57,7 @@ def __init__( ) -> None: assert len(target_labels) == len(matching_threshold_list) self.target_labels: List[LabelType] = target_labels - self.matching_mode: MatchingMode = matching_mode + self.matching_mode = matching_mode # CLEAR results for each class self.clears: List[CLEAR] = [] @@ -61,7 +65,7 @@ def __init__( for target_label, matching_threshold in zip(target_labels, matching_threshold_list): object_results = object_results_dict[target_label] num_ground_truth = num_ground_truth_dict[target_label] - clear_: CLEAR = CLEAR( + clear_ = CLEAR( object_results=object_results, num_ground_truth=num_ground_truth, target_labels=[target_label], diff --git a/perception_eval/perception_eval/common/object/__init__.py b/perception_eval/perception_eval/object/__init__.py similarity index 94% rename from perception_eval/perception_eval/common/object/__init__.py rename to perception_eval/perception_eval/object/__init__.py index 98947efa..595542c0 100644 --- a/perception_eval/perception_eval/common/object/__init__.py +++ b/perception_eval/perception_eval/object/__init__.py @@ -15,12 +15,12 @@ from typing import Union import numpy as np +from perception_eval.common.point import distance_points +from perception_eval.common.point import distance_points_bev from .object2d import DynamicObject2D from .object2d import Roi from .object3d import DynamicObject -from ..point import distance_points -from ..point import distance_points_bev ObjectType = Union[DynamicObject, DynamicObject2D] diff --git a/perception_eval/perception_eval/common/object/object2d.py b/perception_eval/perception_eval/object/object2d.py similarity index 100% rename from perception_eval/perception_eval/common/object/object2d.py rename to perception_eval/perception_eval/object/object2d.py diff --git a/perception_eval/perception_eval/common/object/object3d.py b/perception_eval/perception_eval/object/object3d.py similarity index 100% rename from perception_eval/perception_eval/common/object/object3d.py rename to perception_eval/perception_eval/object/object3d.py diff --git a/perception_eval/test/evaluation/metrics/tracking/__init__.py b/perception_eval/perception_eval/result/__init__.py similarity index 66% rename from perception_eval/test/evaluation/metrics/tracking/__init__.py rename to perception_eval/perception_eval/result/__init__.py index 9be33190..37981d15 100644 --- a/perception_eval/test/evaluation/metrics/tracking/__init__.py +++ b/perception_eval/perception_eval/result/__init__.py @@ -11,3 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ +from typing import Union + +from .perception import * # noqa +from .sensing import * # noqa + +# type aliases +ObjectResultType = Union[DynamicObjectWithPerceptionResult, DynamicObjectWithSensingResult] # noqa +FrameResultType = Union[PerceptionFrameResult, SensingFrameResult] # noqa diff --git a/perception_eval/perception_eval/result/perception/__init__.py b/perception_eval/perception_eval/result/perception/__init__.py new file mode 100644 index 00000000..39ab9b32 --- /dev/null +++ b/perception_eval/perception_eval/result/perception/__init__.py @@ -0,0 +1,30 @@ +# Copyright 2023 TIER IV, Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from .perception_frame_config import CriticalObjectFilterConfig +from .perception_frame_config import PerceptionPassFailConfig +from .perception_frame_result import get_object_status +from .perception_frame_result import PerceptionFrameResult +from .perception_result import DynamicObjectWithPerceptionResult +from .perception_result import get_object_results + +__all__ = ( + "CriticalObjectFilterConfig", + "PerceptionPassFailConfig", + "get_object_status", + "PerceptionFrameResult", + "DynamicObjectWithPerceptionResult", + "get_object_results", +) diff --git a/perception_eval/perception_eval/evaluation/result/perception_frame_config.py b/perception_eval/perception_eval/result/perception/perception_frame_config.py similarity index 96% rename from perception_eval/perception_eval/evaluation/result/perception_frame_config.py rename to perception_eval/perception_eval/result/perception/perception_frame_config.py index 6c8bce35..593397ef 100644 --- a/perception_eval/perception_eval/evaluation/result/perception_frame_config.py +++ b/perception_eval/perception_eval/result/perception/perception_frame_config.py @@ -18,13 +18,15 @@ from typing import Dict from typing import List from typing import Optional +from typing import TYPE_CHECKING from perception_eval.common.evaluation_task import EvaluationTask -from perception_eval.common.label import LabelType from perception_eval.common.label import set_target_lists from perception_eval.common.threshold import check_thresholds -# from perception_eval.config import PerceptionEvaluationConfig +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + from perception_eval.config import PerceptionEvaluationConfig class CriticalObjectFilterConfig: @@ -51,7 +53,7 @@ class CriticalObjectFilterConfig: def __init__( self, - evaluator_config, #: PerceptionEvaluationConfig, + evaluator_config: PerceptionEvaluationConfig, target_labels: List[str], ignore_attributes: Optional[List[str]] = None, max_x_position_list: Optional[List[float]] = None, @@ -145,7 +147,7 @@ class PerceptionPassFailConfig: def __init__( self, - evaluator_config, #: PerceptionEvaluationConfig, + evaluator_config: PerceptionEvaluationConfig, target_labels: Optional[List[str]], matching_threshold_list: Optional[List[float]] = None, confidence_threshold_list: Optional[List[float]] = None, diff --git 
a/perception_eval/perception_eval/evaluation/result/perception_frame_result.py b/perception_eval/perception_eval/result/perception/perception_frame_result.py similarity index 82% rename from perception_eval/perception_eval/evaluation/result/perception_frame_result.py rename to perception_eval/perception_eval/result/perception/perception_frame_result.py index d582bd4a..ed7b928b 100644 --- a/perception_eval/perception_eval/evaluation/result/perception_frame_result.py +++ b/perception_eval/perception_eval/result/perception/perception_frame_result.py @@ -17,20 +17,24 @@ from typing import Dict from typing import List from typing import Optional +from typing import TYPE_CHECKING -from perception_eval.common.dataset import FrameGroundTruth -from perception_eval.common.label import LabelType -from perception_eval.common.object import ObjectType from perception_eval.common.status import GroundTruthStatus from perception_eval.common.status import MatchingStatus -from perception_eval.evaluation import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.matching.objects_filter import divide_objects -from perception_eval.evaluation.matching.objects_filter import divide_objects_to_num -from perception_eval.evaluation.metrics import MetricsScore -from perception_eval.evaluation.metrics import MetricsScoreConfig -from perception_eval.evaluation.result.perception_frame_config import CriticalObjectFilterConfig -from perception_eval.evaluation.result.perception_frame_config import PerceptionPassFailConfig -from perception_eval.evaluation.result.perception_pass_fail_result import PassFailResult +import perception_eval.matching.objects_filter as objects_filter +import perception_eval.metrics as metrics + +from .perception_pass_fail_result import PassFailResult + +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + from perception_eval.dataset import FrameGroundTruth + from perception_eval.metrics import MetricsScoreConfig + from perception_eval.object import ObjectType + + from .perception_frame_config import CriticalObjectFilterConfig + from .perception_frame_config import PerceptionPassFailConfig + from .perception_result import DynamicObjectWithPerceptionResult class PerceptionFrameResult: @@ -67,19 +71,19 @@ def __init__( ) -> None: # TODO(ktro2828): rename `frame_name` into `frame_number` # frame information - self.frame_name: str = frame_ground_truth.frame_name - self.unix_time: int = unix_time - self.target_labels: List[LabelType] = target_labels + self.frame_name = frame_ground_truth.frame_name + self.unix_time = unix_time + self.target_labels = target_labels - self.object_results: List[DynamicObjectWithPerceptionResult] = object_results - self.frame_ground_truth: FrameGroundTruth = frame_ground_truth + self.object_results = object_results + self.frame_ground_truth = frame_ground_truth # init evaluation - self.metrics_score: MetricsScore = MetricsScore( + self.metrics_score = metrics.MetricsScore( metrics_config, used_frame=[int(self.frame_name)], ) - self.pass_fail_result: PassFailResult = PassFailResult( + self.pass_fail_result = PassFailResult( unix_time=unix_time, frame_number=frame_ground_truth.frame_name, critical_object_filter_config=critical_object_filter_config, @@ -99,11 +103,9 @@ def evaluate_frame( previous_result (Optional[PerceptionFrameResult]): The previous frame result. If None, set it as empty list []. Defaults to None. 
""" # Divide objects by label to dict - object_results_dict: Dict[LabelType, List[DynamicObjectWithPerceptionResult]] = divide_objects( - self.object_results, self.target_labels - ) + object_results_dict = objects_filter.divide_objects(self.object_results, self.target_labels) - num_ground_truth_dict: Dict[LabelType, int] = divide_objects_to_num( + num_ground_truth_dict = objects_filter.divide_objects_to_num( self.frame_ground_truth.objects, self.target_labels ) @@ -114,7 +116,9 @@ def evaluate_frame( if previous_result is None: previous_results_dict = {label: [] for label in self.target_labels} else: - previous_results_dict = divide_objects(previous_result.object_results, self.target_labels) + previous_results_dict = objects_filter.divide_objects( + previous_result.object_results, self.target_labels + ) tracking_results: Dict[LabelType, List[DynamicObjectWithPerceptionResult]] = object_results_dict.copy() for label, prev_results in previous_results_dict.items(): tracking_results[label] = [prev_results, tracking_results[label]] diff --git a/perception_eval/perception_eval/evaluation/result/perception_pass_fail_result.py b/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py similarity index 84% rename from perception_eval/perception_eval/evaluation/result/perception_pass_fail_result.py rename to perception_eval/perception_eval/result/perception/perception_pass_fail_result.py index 5e64049b..ec2c25e3 100644 --- a/perception_eval/perception_eval/evaluation/result/perception_pass_fail_result.py +++ b/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py @@ -12,20 +12,24 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from typing import List from typing import Optional from typing import Tuple +from typing import TYPE_CHECKING import warnings import numpy as np -from perception_eval.common.object import ObjectType -from perception_eval.evaluation import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.matching import MatchingMode -from perception_eval.evaluation.matching.objects_filter import filter_objects -from perception_eval.evaluation.matching.objects_filter import get_negative_objects -from perception_eval.evaluation.matching.objects_filter import get_positive_objects -from perception_eval.evaluation.result.perception_frame_config import CriticalObjectFilterConfig -from perception_eval.evaluation.result.perception_frame_config import PerceptionPassFailConfig +from perception_eval.matching import MatchingMode +import perception_eval.matching.objects_filter as objects_filter + +if TYPE_CHECKING: + from perception_eval.object import ObjectType + + from .perception_frame_config import CriticalObjectFilterConfig + from .perception_frame_config import PerceptionPassFailConfig + from .perception_result import DynamicObjectWithPerceptionResult class PassFailResult: @@ -59,12 +63,12 @@ def __init__( frame_pass_fail_config: PerceptionPassFailConfig, ego2map: Optional[np.ndarray] = None, ) -> None: - self.unix_time: int = unix_time - self.frame_number: int = frame_number + self.unix_time = unix_time + self.frame_number = frame_number # TODO(ktro2828): merge CriticalObjectFilterConfig and FramePassFailConfig into one - self.critical_object_filter_config: CriticalObjectFilterConfig = critical_object_filter_config - self.frame_pass_fail_config: PerceptionPassFailConfig = frame_pass_fail_config - self.ego2map: Optional[np.ndarray] = 
ego2map + self.critical_object_filter_config = critical_object_filter_config + self.frame_pass_fail_config = frame_pass_fail_config + self.ego2map = ego2map self.critical_ground_truth_objects: List[ObjectType] = [] self.tn_objects: List[ObjectType] = [] @@ -84,7 +88,7 @@ def evaluate( ros_critical_ground_truth_objects (List[ObjectType]): Critical ground truth objects must be evaluated at current frame. """ - self.critical_ground_truth_objects = filter_objects( + self.critical_ground_truth_objects = objects_filter.filter_objects( objects=ros_critical_ground_truth_objects, is_gt=True, ego2map=self.ego2map, @@ -95,7 +99,7 @@ def evaluate( critical_ground_truth_objects=self.critical_ground_truth_objects, ) - self.tn_objects, self.fn_objects = get_negative_objects( + self.tn_objects, self.fn_objects = objects_filter.get_negative_objects( self.critical_ground_truth_objects, object_results, self.frame_pass_fail_config.target_labels, @@ -147,7 +151,7 @@ def __get_positive_object_results( List[DynamicObjectWithPerceptionResult]: TP object results. List[DynamicObjectWithPerceptionResult]: FP object results. """ - tp_object_results, fp_object_results = get_positive_objects( + tp_object_results, fp_object_results = objects_filter.get_positive_objects( object_results=object_results, target_labels=self.frame_pass_fail_config.target_labels, matching_mode=MatchingMode.IOU2D diff --git a/perception_eval/perception_eval/evaluation/result/object_result.py b/perception_eval/perception_eval/result/perception/perception_result.py similarity index 91% rename from perception_eval/perception_eval/evaluation/result/object_result.py rename to perception_eval/perception_eval/result/perception/perception_result.py index c8a4f4ff..9e1440e5 100644 --- a/perception_eval/perception_eval/evaluation/result/object_result.py +++ b/perception_eval/perception_eval/result/perception/perception_result.py @@ -22,19 +22,19 @@ import numpy as np from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import LabelType -from perception_eval.common.object import distance_objects -from perception_eval.common.object import distance_objects_bev -from perception_eval.common.object import DynamicObject -from perception_eval.common.object import DynamicObject2D -from perception_eval.common.object import ObjectType from perception_eval.common.status import MatchingStatus from perception_eval.common.threshold import get_label_threshold -from perception_eval.evaluation.matching import CenterDistanceMatching -from perception_eval.evaluation.matching import IOU2dMatching -from perception_eval.evaluation.matching import IOU3dMatching -from perception_eval.evaluation.matching import MatchingMethod -from perception_eval.evaluation.matching import MatchingMode -from perception_eval.evaluation.matching import PlaneDistanceMatching +from perception_eval.matching import CenterDistanceMatching +from perception_eval.matching import IOU2dMatching +from perception_eval.matching import IOU3dMatching +from perception_eval.matching import MatchingMethod +from perception_eval.matching import MatchingMode +from perception_eval.matching import PlaneDistanceMatching +from perception_eval.object import distance_objects +from perception_eval.object import distance_objects_bev +from perception_eval.object import DynamicObject +from perception_eval.object import DynamicObject2D +from perception_eval.object import ObjectType class DynamicObjectWithPerceptionResult: @@ -68,28 +68,28 @@ def __init__( ground_truth_object ), 
f"Input objects type must be same, but got {type(estimated_object)} and {type(ground_truth_object)}" - self.estimated_object: ObjectType = estimated_object - self.ground_truth_object: Optional[ObjectType] = ground_truth_object + self.estimated_object = estimated_object + self.ground_truth_object = ground_truth_object if isinstance(self.estimated_object, DynamicObject2D) and self.estimated_object.roi is None: self.center_distance = None self.iou_2d = None else: - self.center_distance: CenterDistanceMatching = CenterDistanceMatching( + self.center_distance = CenterDistanceMatching( self.estimated_object, self.ground_truth_object, ) - self.iou_2d: IOU2dMatching = IOU2dMatching( + self.iou_2d = IOU2dMatching( self.estimated_object, self.ground_truth_object, ) if isinstance(estimated_object, DynamicObject): - self.iou_3d: IOU3dMatching = IOU3dMatching( + self.iou_3d = IOU3dMatching( self.estimated_object, self.ground_truth_object, ) - self.plane_distance: PlaneDistanceMatching = PlaneDistanceMatching( + self.plane_distance = PlaneDistanceMatching( self.estimated_object, self.ground_truth_object, ) @@ -154,7 +154,7 @@ def is_result_correct( return self.is_label_correct # Whether is matching to ground truth - matching: Optional[MatchingMethod] = self.get_matching(matching_mode) + matching = self.get_matching(matching_mode) if matching is None: return self.is_label_correct @@ -307,7 +307,7 @@ def get_object_results( return _get_object_results_with_id(estimated_objects, ground_truth_objects) matching_method_module, maximize = _get_matching_module(matching_mode) - score_table: np.ndarray = _get_score_table( + score_table = _get_score_table( estimated_objects, ground_truth_objects, allow_matching_unknown, @@ -318,8 +318,8 @@ def get_object_results( # assign correspond GT to estimated objects object_results: List[DynamicObjectWithPerceptionResult] = [] - estimated_objects_: List[ObjectType] = estimated_objects.copy() - ground_truth_objects_: List[ObjectType] = ground_truth_objects.copy() + estimated_objects_ = estimated_objects.copy() + ground_truth_objects_ = ground_truth_objects.copy() num_estimation: int = score_table.shape[0] for _ in range(num_estimation): if np.isnan(score_table).all(): @@ -409,7 +409,7 @@ def _get_fp_object_results( """ object_results: List[DynamicObjectWithPerceptionResult] = [] for est_obj_ in estimated_objects: - object_result_: DynamicObjectWithPerceptionResult = DynamicObjectWithPerceptionResult( + object_result_ = DynamicObjectWithPerceptionResult( estimated_object=est_obj_, ground_truth_object=None, ) @@ -470,10 +470,9 @@ def _get_score_table( # fill matching score table, in shape (NumEst, NumGT) num_row: int = len(estimated_objects) num_col: int = len(ground_truth_objects) - score_table: np.ndarray = np.full((num_row, num_col), np.nan) + score_table = np.full((num_row, num_col), np.nan) for i, est_obj in enumerate(estimated_objects): for j, gt_obj in enumerate(ground_truth_objects): - is_label_ok: bool = False if gt_obj.semantic_label.is_fp(): is_label_ok = True elif allow_matching_unknown: @@ -481,7 +480,7 @@ def _get_score_table( else: is_label_ok = est_obj.semantic_label == gt_obj.semantic_label - is_same_frame_id: bool = est_obj.frame_id == gt_obj.frame_id + is_same_frame_id = est_obj.frame_id == gt_obj.frame_id if is_label_ok and is_same_frame_id: threshold: Optional[float] = get_label_threshold( diff --git a/perception_eval/perception_eval/evaluation/metrics/__init__.py b/perception_eval/perception_eval/result/sensing/__init__.py similarity index 69% rename from 
perception_eval/perception_eval/evaluation/metrics/__init__.py rename to perception_eval/perception_eval/result/sensing/__init__.py index 4109a90d..62ca1989 100644 --- a/perception_eval/perception_eval/evaluation/metrics/__init__.py +++ b/perception_eval/perception_eval/result/sensing/__init__.py @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from perception_eval.evaluation.metrics.metrics import MetricsScore -from perception_eval.evaluation.metrics.metrics_score_config import MetricsScoreConfig +from .sensing_frame_config import SensingFrameConfig +from .sensing_frame_result import SensingFrameResult +from .sensing_result import DynamicObjectWithSensingResult -__all__ = ("MetricsScore", "MetricsScoreConfig") +__all__ = ("SensingFrameConfig", "SensingFrameResult", "DynamicObjectWithSensingResult") diff --git a/perception_eval/perception_eval/evaluation/sensing/sensing_frame_config.py b/perception_eval/perception_eval/result/sensing/sensing_frame_config.py similarity index 100% rename from perception_eval/perception_eval/evaluation/sensing/sensing_frame_config.py rename to perception_eval/perception_eval/result/sensing/sensing_frame_config.py diff --git a/perception_eval/perception_eval/evaluation/sensing/sensing_frame_result.py b/perception_eval/perception_eval/result/sensing/sensing_frame_result.py similarity index 96% rename from perception_eval/perception_eval/evaluation/sensing/sensing_frame_result.py rename to perception_eval/perception_eval/result/sensing/sensing_frame_result.py index b9eb971a..b832ab4f 100644 --- a/perception_eval/perception_eval/evaluation/sensing/sensing_frame_result.py +++ b/perception_eval/perception_eval/result/sensing/sensing_frame_result.py @@ -17,10 +17,11 @@ from typing import Tuple import numpy as np -from perception_eval.common.object import DynamicObject from perception_eval.common.point import crop_pointcloud -from perception_eval.evaluation.sensing.sensing_frame_config import SensingFrameConfig -from perception_eval.evaluation.sensing.sensing_result import DynamicObjectWithSensingResult +from perception_eval.object import DynamicObject + +from .sensing_frame_config import SensingFrameConfig +from .sensing_result import DynamicObjectWithSensingResult class SensingFrameResult: diff --git a/perception_eval/perception_eval/evaluation/sensing/sensing_result.py b/perception_eval/perception_eval/result/sensing/sensing_result.py similarity index 98% rename from perception_eval/perception_eval/evaluation/sensing/sensing_result.py rename to perception_eval/perception_eval/result/sensing/sensing_result.py index 89905271..fd4cfd21 100644 --- a/perception_eval/perception_eval/evaluation/sensing/sensing_result.py +++ b/perception_eval/perception_eval/result/sensing/sensing_result.py @@ -15,8 +15,8 @@ from typing import Optional import numpy as np -from perception_eval.common.object import DynamicObject from perception_eval.common.schema import Visibility +from perception_eval.object import DynamicObject class DynamicObjectWithSensingResult: diff --git a/perception_eval/perception_eval/tool/perception_analyzer2d.py b/perception_eval/perception_eval/tool/perception_analyzer2d.py index 748b7244..0261d318 100644 --- a/perception_eval/perception_eval/tool/perception_analyzer2d.py +++ b/perception_eval/perception_eval/tool/perception_analyzer2d.py @@ -25,10 +25,10 @@ import numpy as np import pandas as pd from perception_eval.common.evaluation_task import EvaluationTask -from 
perception_eval.common.object import DynamicObject2D from perception_eval.common.status import MatchingStatus from perception_eval.config import PerceptionEvaluationConfig -from perception_eval.evaluation import DynamicObjectWithPerceptionResult +from perception_eval.object import DynamicObject2D +from perception_eval.result import DynamicObjectWithPerceptionResult import yaml from .perception_analyzer_base import PerceptionAnalyzerBase diff --git a/perception_eval/perception_eval/tool/perception_analyzer3d.py b/perception_eval/perception_eval/tool/perception_analyzer3d.py index 2937bd02..823557a9 100644 --- a/perception_eval/perception_eval/tool/perception_analyzer3d.py +++ b/perception_eval/perception_eval/tool/perception_analyzer3d.py @@ -23,10 +23,10 @@ import numpy as np import pandas as pd -from perception_eval.common.object import DynamicObject from perception_eval.common.status import MatchingStatus from perception_eval.config import PerceptionEvaluationConfig -from perception_eval.evaluation import DynamicObjectWithPerceptionResult +from perception_eval.object import DynamicObject +from perception_eval.result import DynamicObjectWithPerceptionResult from perception_eval.util.math import get_pose_transform_matrix from perception_eval.util.math import rotation_matrix_to_euler import yaml diff --git a/perception_eval/perception_eval/tool/perception_analyzer_base.py b/perception_eval/perception_eval/tool/perception_analyzer_base.py index 35b8ae4d..ce6e0d3d 100644 --- a/perception_eval/perception_eval/tool/perception_analyzer_base.py +++ b/perception_eval/perception_eval/tool/perception_analyzer_base.py @@ -37,14 +37,14 @@ import numpy as np import pandas as pd from perception_eval.common.label import LabelType -from perception_eval.common.object import ObjectType from perception_eval.common.status import MatchingStatus from perception_eval.config import PerceptionEvaluationConfig -from perception_eval.evaluation import DynamicObjectWithPerceptionResult -from perception_eval.evaluation import PerceptionFrameResult -from perception_eval.evaluation.matching.objects_filter import divide_objects -from perception_eval.evaluation.matching.objects_filter import divide_objects_to_num -from perception_eval.evaluation.metrics.metrics import MetricsScore +from perception_eval.matching.objects_filter import divide_objects +from perception_eval.matching.objects_filter import divide_objects_to_num +from perception_eval.metrics.metrics import MetricsScore +from perception_eval.object import ObjectType +from perception_eval.result import DynamicObjectWithPerceptionResult +from perception_eval.result import PerceptionFrameResult from tqdm import tqdm from .utils import filter_df diff --git a/perception_eval/perception_eval/tool/utils.py b/perception_eval/perception_eval/tool/utils.py index 16b9839d..93ea18c0 100644 --- a/perception_eval/perception_eval/tool/utils.py +++ b/perception_eval/perception_eval/tool/utils.py @@ -27,11 +27,11 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd -from perception_eval.common.object import DynamicObject from perception_eval.common.schema import FrameID -from perception_eval.evaluation.metrics.metrics import MetricsScore -from perception_eval.evaluation.result.object_result import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.result.perception_frame_result import PerceptionFrameResult +from perception_eval.metrics import MetricsScore +from perception_eval.object import DynamicObject +from perception_eval.result import 
DynamicObjectWithPerceptionResult +from perception_eval.result import PerceptionFrameResult class PlotAxes(Enum): diff --git a/perception_eval/perception_eval/util/debug.py b/perception_eval/perception_eval/util/debug.py index 34fa3c19..f8bb1378 100644 --- a/perception_eval/perception_eval/util/debug.py +++ b/perception_eval/perception_eval/util/debug.py @@ -24,9 +24,9 @@ from perception_eval.common.label import Label from perception_eval.common.label import LabelType from perception_eval.common.label import TrafficLightLabel -from perception_eval.common.object import DynamicObject -from perception_eval.common.object import DynamicObject2D from perception_eval.common.shape import Shape +from perception_eval.object import DynamicObject +from perception_eval.object import DynamicObject2D from pyquaternion.quaternion import Quaternion diff --git a/perception_eval/perception_eval/visualization/eda_tool.py b/perception_eval/perception_eval/visualization/eda_tool.py index 8bd65410..8ab7685d 100644 --- a/perception_eval/perception_eval/visualization/eda_tool.py +++ b/perception_eval/perception_eval/visualization/eda_tool.py @@ -26,12 +26,12 @@ from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import LabelConverter from perception_eval.common.label import LabelType -from perception_eval.common.object import DynamicObject -from perception_eval.evaluation import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.matching import MatchingMode -from perception_eval.evaluation.matching.objects_filter import divide_tp_fp_objects -from perception_eval.evaluation.matching.objects_filter import filter_object_results -from perception_eval.evaluation.matching.objects_filter import get_fn_objects +from perception_eval.matching import MatchingMode +from perception_eval.matching.objects_filter import divide_tp_fp_objects +from perception_eval.matching.objects_filter import filter_object_results +from perception_eval.matching.objects_filter import get_fn_objects +from perception_eval.object import DynamicObject +from perception_eval.result import DynamicObjectWithPerceptionResult from plotly import graph_objects as go from plotly.graph_objs import Figure from plotly.subplots import make_subplots diff --git a/perception_eval/perception_eval/visualization/perception_visualizer2d.py b/perception_eval/perception_eval/visualization/perception_visualizer2d.py index 4348e273..f092bdf8 100644 --- a/perception_eval/perception_eval/visualization/perception_visualizer2d.py +++ b/perception_eval/perception_eval/visualization/perception_visualizer2d.py @@ -30,10 +30,10 @@ from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel from perception_eval.common.label import TrafficLightLabel -from perception_eval.common.object import DynamicObject2D from perception_eval.config import PerceptionEvaluationConfig -from perception_eval.evaluation import DynamicObjectWithPerceptionResult -from perception_eval.evaluation import PerceptionFrameResult +from perception_eval.object import DynamicObject2D +from perception_eval.result import DynamicObjectWithPerceptionResult +from perception_eval.result import PerceptionFrameResult from perception_eval.visualization.color import ColorMap from PIL import Image from PIL.Image import Image as PILImage diff --git a/perception_eval/perception_eval/visualization/perception_visualizer3d.py b/perception_eval/perception_eval/visualization/perception_visualizer3d.py index 
ada77b43..118373bb 100644 --- a/perception_eval/perception_eval/visualization/perception_visualizer3d.py +++ b/perception_eval/perception_eval/visualization/perception_visualizer3d.py @@ -29,11 +29,11 @@ from matplotlib.transforms import Affine2D import numpy as np from perception_eval.common.evaluation_task import EvaluationTask -from perception_eval.common.object import DynamicObject from perception_eval.common.schema import FrameID from perception_eval.config import PerceptionEvaluationConfig -from perception_eval.evaluation import DynamicObjectWithPerceptionResult -from perception_eval.evaluation import PerceptionFrameResult +from perception_eval.object import DynamicObject +from perception_eval.result import DynamicObjectWithPerceptionResult +from perception_eval.result import PerceptionFrameResult from perception_eval.util.math import rotation_matrix_to_euler from perception_eval.visualization.color import ColorMap from PIL import Image diff --git a/perception_eval/perception_eval/visualization/sensing_visualizer.py b/perception_eval/perception_eval/visualization/sensing_visualizer.py index 2f0b2a12..0cdd5023 100644 --- a/perception_eval/perception_eval/visualization/sensing_visualizer.py +++ b/perception_eval/perception_eval/visualization/sensing_visualizer.py @@ -28,10 +28,10 @@ import matplotlib.pyplot as plt from matplotlib.transforms import Affine2D import numpy as np -from perception_eval.common.object import DynamicObject from perception_eval.config import SensingEvaluationConfig -from perception_eval.evaluation import DynamicObjectWithSensingResult -from perception_eval.evaluation import SensingFrameResult +from perception_eval.object import DynamicObject +from perception_eval.result import DynamicObjectWithSensingResult +from perception_eval.result import SensingFrameResult from perception_eval.visualization.color import ColorMap from PIL import Image from PIL.Image import Image as PILImage diff --git a/perception_eval/test/eda.py b/perception_eval/test/eda.py index 9a0f8dcd..9edb9f76 100644 --- a/perception_eval/test/eda.py +++ b/perception_eval/test/eda.py @@ -18,12 +18,12 @@ from typing import List from typing import Union -from perception_eval.common.dataset import FrameGroundTruth -from perception_eval.common.dataset import load_all_datasets from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import LabelConverter -from perception_eval.common.object import DynamicObject from perception_eval.common.schema import FrameID +from perception_eval.dataset import FrameGroundTruth +from perception_eval.dataset import load_all_datasets +from perception_eval.object import DynamicObject from perception_eval.visualization.eda_tool import EDAVisualizer diff --git a/perception_eval/test/evaluation/metrics/prediction/__init__.py b/perception_eval/test/evaluation/metrics/prediction/__init__.py deleted file mode 100644 index 9be33190..00000000 --- a/perception_eval/test/evaluation/metrics/prediction/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2022 TIER IV, Inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/perception_eval/test/evaluation/result/__init__.py b/perception_eval/test/evaluation/result/__init__.py deleted file mode 100644 index 9be33190..00000000 --- a/perception_eval/test/evaluation/result/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2022 TIER IV, Inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/perception_eval/test/evaluation/sensing/__init__.py b/perception_eval/test/evaluation/sensing/__init__.py deleted file mode 100644 index 9be33190..00000000 --- a/perception_eval/test/evaluation/sensing/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2022 TIER IV, Inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
diff --git a/perception_eval/perception_eval/evaluation/result/__init__.py b/perception_eval/test/matching/__init__.py similarity index 100% rename from perception_eval/perception_eval/evaluation/result/__init__.py rename to perception_eval/test/matching/__init__.py diff --git a/perception_eval/perception_eval/evaluation/sensing/__init__.py b/perception_eval/test/matching/object_matching/__init__.py old mode 100644 new mode 100755 similarity index 100% rename from perception_eval/perception_eval/evaluation/sensing/__init__.py rename to perception_eval/test/matching/object_matching/__init__.py diff --git a/perception_eval/test/evaluation/matching/object_matching/test_iou_2d_matching.py b/perception_eval/test/matching/object_matching/test_iou_2d_matching.py similarity index 96% rename from perception_eval/test/evaluation/matching/object_matching/test_iou_2d_matching.py rename to perception_eval/test/matching/object_matching/test_iou_2d_matching.py index b1f9858a..dc4e30a0 100644 --- a/perception_eval/test/evaluation/matching/object_matching/test_iou_2d_matching.py +++ b/perception_eval/test/matching/object_matching/test_iou_2d_matching.py @@ -18,9 +18,9 @@ from typing import Tuple import unittest -from perception_eval.common.object import DynamicObject -from perception_eval.evaluation.matching.object_matching import _get_area_intersection -from perception_eval.evaluation.matching.object_matching import IOU2dMatching +from perception_eval.matching import IOU2dMatching +from perception_eval.matching.object_matching import _get_area_intersection +from perception_eval.object import DynamicObject from perception_eval.util.debug import get_objects_with_difference diff --git a/perception_eval/test/evaluation/matching/object_matching/test_iou_3d_matching.py b/perception_eval/test/matching/object_matching/test_iou_3d_matching.py similarity index 94% rename from perception_eval/test/evaluation/matching/object_matching/test_iou_3d_matching.py rename to perception_eval/test/matching/object_matching/test_iou_3d_matching.py index 17abed16..fdae8f18 100644 --- a/perception_eval/test/evaluation/matching/object_matching/test_iou_3d_matching.py +++ b/perception_eval/test/matching/object_matching/test_iou_3d_matching.py @@ -17,10 +17,10 @@ from typing import Tuple import unittest -from perception_eval.common.object import DynamicObject -from perception_eval.evaluation.matching.object_matching import _get_height_intersection -from perception_eval.evaluation.matching.object_matching import _get_volume_intersection -from perception_eval.evaluation.matching.object_matching import IOU3dMatching +from perception_eval.matching import IOU3dMatching +from perception_eval.matching.object_matching import _get_height_intersection +from perception_eval.matching.object_matching import _get_volume_intersection +from perception_eval.object import DynamicObject from perception_eval.util.debug import get_objects_with_difference diff --git a/perception_eval/test/evaluation/matching/object_matching/test_plane_distance_matching.py b/perception_eval/test/matching/object_matching/test_plane_distance_matching.py similarity index 97% rename from perception_eval/test/evaluation/matching/object_matching/test_plane_distance_matching.py rename to perception_eval/test/matching/object_matching/test_plane_distance_matching.py index 9f77236a..1db0e657 100644 --- a/perception_eval/test/evaluation/matching/object_matching/test_plane_distance_matching.py +++ b/perception_eval/test/matching/object_matching/test_plane_distance_matching.py @@ -18,8 
+18,8 @@ from typing import Tuple import unittest -from perception_eval.common.object import DynamicObject -from perception_eval.evaluation.matching.object_matching import PlaneDistanceMatching +from perception_eval.matching import PlaneDistanceMatching +from perception_eval.object import DynamicObject from perception_eval.util.debug import get_objects_with_difference diff --git a/perception_eval/test/evaluation/matching/test_objects_filter.py b/perception_eval/test/matching/test_objects_filter.py similarity index 97% rename from perception_eval/test/evaluation/matching/test_objects_filter.py rename to perception_eval/test/matching/test_objects_filter.py index fd686471..f1af3493 100644 --- a/perception_eval/test/evaluation/matching/test_objects_filter.py +++ b/perception_eval/test/matching/test_objects_filter.py @@ -23,18 +23,18 @@ from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel from perception_eval.common.label import Label -from perception_eval.common.object import DynamicObject -from perception_eval.evaluation.matching.object_matching import MatchingMode -from perception_eval.evaluation.matching.objects_filter import divide_objects -from perception_eval.evaluation.matching.objects_filter import divide_objects_to_num -from perception_eval.evaluation.matching.objects_filter import divide_tp_fp_objects -from perception_eval.evaluation.matching.objects_filter import filter_object_results -from perception_eval.evaluation.matching.objects_filter import filter_objects -from perception_eval.evaluation.matching.objects_filter import get_fn_objects -from perception_eval.evaluation.matching.objects_filter import get_negative_objects -from perception_eval.evaluation.matching.objects_filter import get_positive_objects -from perception_eval.evaluation.result.object_result import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.result.object_result import get_object_results +from perception_eval.matching import MatchingMode +from perception_eval.matching.objects_filter import divide_objects +from perception_eval.matching.objects_filter import divide_objects_to_num +from perception_eval.matching.objects_filter import divide_tp_fp_objects +from perception_eval.matching.objects_filter import filter_object_results +from perception_eval.matching.objects_filter import filter_objects +from perception_eval.matching.objects_filter import get_fn_objects +from perception_eval.matching.objects_filter import get_negative_objects +from perception_eval.matching.objects_filter import get_positive_objects +from perception_eval.object import DynamicObject +from perception_eval.result import DynamicObjectWithPerceptionResult +from perception_eval.result import get_object_results from perception_eval.util.debug import get_objects_with_difference diff --git a/perception_eval/test/evaluation/__init__.py b/perception_eval/test/metrics/__init__.py old mode 100644 new mode 100755 similarity index 100% rename from perception_eval/test/evaluation/__init__.py rename to perception_eval/test/metrics/__init__.py diff --git a/perception_eval/test/evaluation/matching/__init__.py b/perception_eval/test/metrics/classification/__init__.py similarity index 100% rename from perception_eval/test/evaluation/matching/__init__.py rename to perception_eval/test/metrics/classification/__init__.py diff --git a/perception_eval/test/evaluation/metrics/classification/test_accuracy.py b/perception_eval/test/metrics/classification/test_accuracy.py similarity 
index 94% rename from perception_eval/test/evaluation/metrics/classification/test_accuracy.py rename to perception_eval/test/metrics/classification/test_accuracy.py index 0606a764..c0b4ddea 100644 --- a/perception_eval/test/evaluation/metrics/classification/test_accuracy.py +++ b/perception_eval/test/metrics/classification/test_accuracy.py @@ -21,9 +21,9 @@ from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel -from perception_eval.evaluation.matching.objects_filter import filter_objects -from perception_eval.evaluation.metrics.classification.accuracy import ClassificationAccuracy -from perception_eval.evaluation.result.object_result import get_object_results +from perception_eval.matching.objects_filter import filter_objects +from perception_eval.metrics.classification.accuracy import ClassificationAccuracy +from perception_eval.result import get_object_results class AnswerAccuracy: diff --git a/perception_eval/test/evaluation/metrics/classification/test_classification_metrics_score.py b/perception_eval/test/metrics/classification/test_classification_metrics_score.py similarity index 88% rename from perception_eval/test/evaluation/metrics/classification/test_classification_metrics_score.py rename to perception_eval/test/metrics/classification/test_classification_metrics_score.py index 39baa833..f64798c2 100644 --- a/perception_eval/test/evaluation/metrics/classification/test_classification_metrics_score.py +++ b/perception_eval/test/metrics/classification/test_classification_metrics_score.py @@ -18,11 +18,11 @@ from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel -from perception_eval.evaluation.matching.objects_filter import divide_objects -from perception_eval.evaluation.matching.objects_filter import divide_objects_to_num -from perception_eval.evaluation.matching.objects_filter import filter_objects -from perception_eval.evaluation.metrics.classification import ClassificationMetricsScore -from perception_eval.evaluation.result.object_result import get_object_results +from perception_eval.matching.objects_filter import divide_objects +from perception_eval.matching.objects_filter import divide_objects_to_num +from perception_eval.matching.objects_filter import filter_objects +from perception_eval.metrics.classification import ClassificationMetricsScore +from perception_eval.result import get_object_results class TestClassificationMetricsScore(unittest.TestCase): diff --git a/perception_eval/test/evaluation/matching/object_matching/__init__.py b/perception_eval/test/metrics/detection/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from perception_eval/test/evaluation/matching/object_matching/__init__.py rename to perception_eval/test/metrics/detection/__init__.py diff --git a/perception_eval/test/evaluation/metrics/detection/test_ap.py b/perception_eval/test/metrics/detection/test_ap.py similarity index 98% rename from perception_eval/test/evaluation/metrics/detection/test_ap.py rename to perception_eval/test/metrics/detection/test_ap.py index 1001c763..0044a497 100644 --- a/perception_eval/test/evaluation/metrics/detection/test_ap.py +++ b/perception_eval/test/metrics/detection/test_ap.py @@ -26,14 +26,14 @@ import numpy as np from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel -from perception_eval.common.object import DynamicObject -from 
perception_eval.evaluation.matching import MatchingMode -from perception_eval.evaluation.matching.objects_filter import filter_objects -from perception_eval.evaluation.metrics.detection.ap import Ap -from perception_eval.evaluation.metrics.detection.tp_metrics import TPMetricsAp -from perception_eval.evaluation.metrics.detection.tp_metrics import TPMetricsAph -from perception_eval.evaluation.result.object_result import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.result.object_result import get_object_results +from perception_eval.matching import MatchingMode +from perception_eval.matching.objects_filter import filter_objects +from perception_eval.metrics.detection.ap import Ap +from perception_eval.metrics.detection.tp_metrics import TPMetricsAp +from perception_eval.metrics.detection.tp_metrics import TPMetricsAph +from perception_eval.object import DynamicObject +from perception_eval.result import DynamicObjectWithPerceptionResult +from perception_eval.result import get_object_results from perception_eval.util.debug import get_objects_with_difference diff --git a/perception_eval/test/evaluation/metrics/detection/test_map.py b/perception_eval/test/metrics/detection/test_map.py similarity index 98% rename from perception_eval/test/evaluation/metrics/detection/test_map.py rename to perception_eval/test/metrics/detection/test_map.py index d9158434..3e1c4767 100644 --- a/perception_eval/test/evaluation/metrics/detection/test_map.py +++ b/perception_eval/test/metrics/detection/test_map.py @@ -21,14 +21,14 @@ from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel -from perception_eval.common.object import DynamicObject -from perception_eval.evaluation.matching.object_matching import MatchingMode -from perception_eval.evaluation.matching.objects_filter import divide_objects -from perception_eval.evaluation.matching.objects_filter import divide_objects_to_num -from perception_eval.evaluation.matching.objects_filter import filter_objects -from perception_eval.evaluation.metrics.detection.map import Map -from perception_eval.evaluation.result.object_result import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.result.object_result import get_object_results +from perception_eval.matching.object_matching import MatchingMode +from perception_eval.matching.objects_filter import divide_objects +from perception_eval.matching.objects_filter import divide_objects_to_num +from perception_eval.matching.objects_filter import filter_objects +from perception_eval.metrics.detection.map import Map +from perception_eval.object import DynamicObject +from perception_eval.result import DynamicObjectWithPerceptionResult +from perception_eval.result import get_object_results from perception_eval.util.debug import get_objects_with_difference diff --git a/perception_eval/test/evaluation/metrics/__init__.py b/perception_eval/test/metrics/prediction/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from perception_eval/test/evaluation/metrics/__init__.py rename to perception_eval/test/metrics/prediction/__init__.py diff --git a/perception_eval/test/evaluation/metrics/test_metrics_score_config.py b/perception_eval/test/metrics/test_metrics_score_config.py similarity index 82% rename from perception_eval/test/evaluation/metrics/test_metrics_score_config.py rename to perception_eval/test/metrics/test_metrics_score_config.py index 9cbdf94b..ff9162f0 100644 --- 
a/perception_eval/test/evaluation/metrics/test_metrics_score_config.py +++ b/perception_eval/test/metrics/test_metrics_score_config.py @@ -18,11 +18,11 @@ from typing import Tuple import unittest -from perception_eval.evaluation.metrics.config._metrics_config_base import _MetricsConfigBase -from perception_eval.evaluation.metrics.config.detection_metrics_config import DetectionMetricsConfig -from perception_eval.evaluation.metrics.config.tracking_metrics_config import TrackingMetricsConfig -from perception_eval.evaluation.metrics.metrics_score_config import MetricsParameterError -from perception_eval.evaluation.metrics.metrics_score_config import MetricsScoreConfig +from perception_eval.metrics.config._metrics_config_base import _MetricsConfigBase +from perception_eval.metrics.config.detection_metrics_config import DetectionMetricsConfig +from perception_eval.metrics.config.tracking_metrics_config import TrackingMetricsConfig +from perception_eval.metrics.metrics_score_config import MetricsParameterError +from perception_eval.metrics.metrics_score_config import MetricsScoreConfig class TestMetricsScoreConfig(unittest.TestCase): diff --git a/perception_eval/test/evaluation/metrics/classification/__init__.py b/perception_eval/test/metrics/tracking/__init__.py similarity index 100% rename from perception_eval/test/evaluation/metrics/classification/__init__.py rename to perception_eval/test/metrics/tracking/__init__.py diff --git a/perception_eval/test/evaluation/metrics/tracking/test_clear.py b/perception_eval/test/metrics/tracking/test_clear.py similarity index 98% rename from perception_eval/test/evaluation/metrics/tracking/test_clear.py rename to perception_eval/test/metrics/tracking/test_clear.py index 94bbfb6e..70b103a2 100644 --- a/perception_eval/test/evaluation/metrics/tracking/test_clear.py +++ b/perception_eval/test/metrics/tracking/test_clear.py @@ -24,12 +24,12 @@ from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel -from perception_eval.common.object import DynamicObject -from perception_eval.evaluation.matching.object_matching import MatchingMode -from perception_eval.evaluation.matching.objects_filter import filter_objects -from perception_eval.evaluation.metrics.tracking.clear import CLEAR -from perception_eval.evaluation.result.object_result import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.result.object_result import get_object_results +from perception_eval.matching import MatchingMode +from perception_eval.matching.objects_filter import filter_objects +from perception_eval.metrics.tracking.clear import CLEAR +from perception_eval.object import DynamicObject +from perception_eval.result import DynamicObjectWithPerceptionResult +from perception_eval.result import get_object_results from perception_eval.util.debug import get_objects_with_difference diff --git a/perception_eval/test/evaluation/metrics/tracking/test_tracking_metrics_score.py b/perception_eval/test/metrics/tracking/test_tracking_metrics_score.py similarity index 96% rename from perception_eval/test/evaluation/metrics/tracking/test_tracking_metrics_score.py rename to perception_eval/test/metrics/tracking/test_tracking_metrics_score.py index b9345a10..45a375e5 100644 --- a/perception_eval/test/evaluation/metrics/tracking/test_tracking_metrics_score.py +++ b/perception_eval/test/metrics/tracking/test_tracking_metrics_score.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations 
under the License. -from test.evaluation.metrics.tracking.test_clear import AnswerCLEAR +from test.metrics.tracking.test_clear import AnswerCLEAR from test.util.dummy_object import make_dummy_data from test.util.object_diff import DiffTranslation from test.util.object_diff import DiffYaw @@ -23,14 +23,14 @@ from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel -from perception_eval.common.object import DynamicObject -from perception_eval.evaluation.matching.object_matching import MatchingMode -from perception_eval.evaluation.matching.objects_filter import divide_objects -from perception_eval.evaluation.matching.objects_filter import divide_objects_to_num -from perception_eval.evaluation.matching.objects_filter import filter_objects -from perception_eval.evaluation.metrics.tracking.tracking_metrics_score import TrackingMetricsScore -from perception_eval.evaluation.result.object_result import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.result.object_result import get_object_results +from perception_eval.matching import MatchingMode +from perception_eval.matching.objects_filter import divide_objects +from perception_eval.matching.objects_filter import divide_objects_to_num +from perception_eval.matching.objects_filter import filter_objects +from perception_eval.metrics.tracking.tracking_metrics_score import TrackingMetricsScore +from perception_eval.object import DynamicObject +from perception_eval.result import DynamicObjectWithPerceptionResult +from perception_eval.result import get_object_results from perception_eval.util.debug import get_objects_with_difference diff --git a/perception_eval/test/object/__init__.py b/perception_eval/test/object/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/perception_eval/test/common/test_object.py b/perception_eval/test/object/test_object.py similarity index 98% rename from perception_eval/test/common/test_object.py rename to perception_eval/test/object/test_object.py index 09d24473..809ffae4 100644 --- a/perception_eval/test/common/test_object.py +++ b/perception_eval/test/object/test_object.py @@ -19,9 +19,9 @@ import unittest import numpy as np -from perception_eval.common.object import distance_objects -from perception_eval.common.object import distance_objects_bev -from perception_eval.common.object import DynamicObject +from perception_eval.object import distance_objects +from perception_eval.object import distance_objects_bev +from perception_eval.object import DynamicObject from perception_eval.util.debug import get_objects_with_difference from shapely.geometry import Polygon diff --git a/perception_eval/test/perception_field_analysis.py b/perception_eval/test/perception_field_analysis.py index a13bba33..93ef5c45 100644 --- a/perception_eval/test/perception_field_analysis.py +++ b/perception_eval/test/perception_field_analysis.py @@ -22,8 +22,6 @@ import numpy as np from perception_eval.tool import PerceptionAnalyzer3DField from perception_eval.tool import PerceptionFieldAxis -from perception_eval.tool import PerceptionFieldXY -from perception_eval.visualization.perception_visualizer3dfield import PerceptionFieldPlot from perception_eval.visualization.perception_visualizer3dfield import PerceptionFieldPlots diff --git a/perception_eval/test/perception_field_points_analysis.py b/perception_eval/test/perception_field_points_analysis.py index b95a86a6..2fec1de8 100644 --- a/perception_eval/test/perception_field_points_analysis.py +++ 
b/perception_eval/test/perception_field_points_analysis.py @@ -23,7 +23,7 @@ import numpy as np from perception_eval.tool import DataTableIdx from perception_eval.tool import PerceptionAnalyzer3DField -from perception_eval.visualization.perception_visualizer3dfield import PerceptionFieldPlot +from perception_eval.visualization import PerceptionFieldPlot class PerceptionLoadDatabaseResult: diff --git a/perception_eval/test/perception_fp_validation_lsim.py b/perception_eval/test/perception_fp_validation_lsim.py index dd5cadcd..6c69ab8a 100644 --- a/perception_eval/test/perception_fp_validation_lsim.py +++ b/perception_eval/test/perception_fp_validation_lsim.py @@ -12,23 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + import argparse import logging import tempfile from typing import List +from typing import TYPE_CHECKING from perception_eval.common.label import AutowareLabel -from perception_eval.common.object import ObjectType from perception_eval.common.status import get_scene_rates from perception_eval.config import PerceptionEvaluationConfig -from perception_eval.evaluation import get_object_status -from perception_eval.evaluation import PerceptionFrameResult -from perception_eval.evaluation.result.perception_frame_config import CriticalObjectFilterConfig -from perception_eval.evaluation.result.perception_frame_config import PerceptionPassFailConfig from perception_eval.manager import PerceptionEvaluationManager +from perception_eval.result import CriticalObjectFilterConfig +from perception_eval.result import get_object_status +from perception_eval.result import PerceptionPassFailConfig from perception_eval.util.debug import get_objects_with_difference from perception_eval.util.logger_config import configure_logger +if TYPE_CHECKING: + from perception_eval.object import ObjectType + from perception_eval.result import PerceptionFrameResult + class FPValidationLsimMoc: def __init__(self, dataset_paths: List[int], result_root_directory: str) -> None: diff --git a/perception_eval/test/perception_lsim.py b/perception_eval/test/perception_lsim.py index a316f05b..8808eb05 100644 --- a/perception_eval/test/perception_lsim.py +++ b/perception_eval/test/perception_lsim.py @@ -12,23 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations + import argparse import logging import tempfile from typing import List +from typing import TYPE_CHECKING -from perception_eval.common.object import DynamicObject from perception_eval.config import PerceptionEvaluationConfig -from perception_eval.evaluation import PerceptionFrameResult -from perception_eval.evaluation.metrics import MetricsScore -from perception_eval.evaluation.result.perception_frame_config import CriticalObjectFilterConfig -from perception_eval.evaluation.result.perception_frame_config import PerceptionPassFailConfig from perception_eval.manager import PerceptionEvaluationManager +from perception_eval.result import CriticalObjectFilterConfig +from perception_eval.result import PerceptionFrameResult +from perception_eval.result import PerceptionPassFailConfig from perception_eval.tool import PerceptionAnalyzer3D from perception_eval.util.debug import format_class_for_log from perception_eval.util.debug import get_objects_with_difference from perception_eval.util.logger_config import configure_logger +if TYPE_CHECKING: + from perception_eval.metrics import MetricsScore + from perception_eval.object import DynamicObject + class PerceptionLSimMoc: def __init__( @@ -106,7 +111,7 @@ def callback( # 1 frameの評価 # 距離などでUC評価objectを選別するためのインターフェイス(PerceptionEvaluationManager初期化時にConfigを設定せず、関数受け渡しにすることで動的に変更可能なInterface) # どれを注目物体とするかのparam - critical_object_filter_config: CriticalObjectFilterConfig = CriticalObjectFilterConfig( + critical_object_filter_config = CriticalObjectFilterConfig( evaluator_config=self.evaluator.evaluator_config, target_labels=["car", "bicycle", "pedestrian", "motorbike"], ignore_attributes=["cycle_state.without_rider"], @@ -114,7 +119,7 @@ def callback( max_y_position_list=[30.0, 30.0, 30.0, 30.0], ) # Pass fail を決めるパラメータ - frame_pass_fail_config: PerceptionPassFailConfig = PerceptionPassFailConfig( + frame_pass_fail_config = PerceptionPassFailConfig( evaluator_config=self.evaluator.evaluator_config, target_labels=["car", "bicycle", "pedestrian", "motorbike"], matching_threshold_list=[2.0, 2.0, 2.0, 2.0], diff --git a/perception_eval/test/perception_lsim2d.py b/perception_eval/test/perception_lsim2d.py index 6975cf41..dba83e3e 100644 --- a/perception_eval/test/perception_lsim2d.py +++ b/perception_eval/test/perception_lsim2d.py @@ -12,23 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations + import argparse import logging import tempfile from typing import List +from typing import TYPE_CHECKING from typing import Union -from perception_eval.common.object import DynamicObject2D from perception_eval.config import PerceptionEvaluationConfig -from perception_eval.evaluation import PerceptionFrameResult -from perception_eval.evaluation.metrics import MetricsScore -from perception_eval.evaluation.result.perception_frame_config import CriticalObjectFilterConfig -from perception_eval.evaluation.result.perception_frame_config import PerceptionPassFailConfig from perception_eval.manager import PerceptionEvaluationManager +from perception_eval.result import CriticalObjectFilterConfig +from perception_eval.result import PerceptionPassFailConfig from perception_eval.tool import PerceptionAnalyzer2D from perception_eval.util.debug import get_objects_with_difference2d from perception_eval.util.logger_config import configure_logger +if TYPE_CHECKING: + from perception_eval.metrics import MetricsScore + from perception_eval.object import DynamicObject2D + from perception_eval.result import PerceptionFrameResult + class PerceptionLSimMoc: def __init__( diff --git a/perception_eval/test/evaluation/result/test_object_result.py b/perception_eval/test/result/test_object_result.py similarity index 94% rename from perception_eval/test/evaluation/result/test_object_result.py rename to perception_eval/test/result/test_object_result.py index 34b02676..c4faaf68 100644 --- a/perception_eval/test/evaluation/result/test_object_result.py +++ b/perception_eval/test/result/test_object_result.py @@ -20,9 +20,9 @@ import unittest from perception_eval.common.evaluation_task import EvaluationTask -from perception_eval.common.object import DynamicObject -from perception_eval.evaluation.result.object_result import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.result.object_result import get_object_results +from perception_eval.object import DynamicObject +from perception_eval.result import DynamicObjectWithPerceptionResult +from perception_eval.result import get_object_results from perception_eval.util.debug import get_objects_with_difference diff --git a/perception_eval/test/evaluation/sensing/test_sensing_result.py b/perception_eval/test/result/test_sensing_result.py similarity index 92% rename from perception_eval/test/evaluation/sensing/test_sensing_result.py rename to perception_eval/test/result/test_sensing_result.py index 67f7d0ff..05c412bf 100644 --- a/perception_eval/test/evaluation/sensing/test_sensing_result.py +++ b/perception_eval/test/result/test_sensing_result.py @@ -15,7 +15,7 @@ from test.util.dummy_object import make_dummy_data import numpy as np -from perception_eval.evaluation.sensing.sensing_result import DynamicObjectWithSensingResult +from perception_eval.result import DynamicObjectWithSensingResult def test_get_nearest_point(): diff --git a/perception_eval/test/sensing_lsim.py b/perception_eval/test/sensing_lsim.py index 0d420916..b957b89d 100644 --- a/perception_eval/test/sensing_lsim.py +++ b/perception_eval/test/sensing_lsim.py @@ -20,11 +20,11 @@ from typing import Tuple import numpy as np -from perception_eval.common.dataset import FrameGroundTruth from perception_eval.config import SensingEvaluationConfig -from perception_eval.evaluation.sensing.sensing_frame_config import SensingFrameConfig -from perception_eval.evaluation.sensing.sensing_frame_result import SensingFrameResult +from perception_eval.dataset import 
FrameGroundTruth from perception_eval.manager import SensingEvaluationManager +from perception_eval.result import SensingFrameConfig +from perception_eval.result import SensingFrameResult from perception_eval.util.logger_config import configure_logger diff --git a/perception_eval/test/util/dummy_object.py b/perception_eval/test/util/dummy_object.py index 251adc7f..064bf41e 100644 --- a/perception_eval/test/util/dummy_object.py +++ b/perception_eval/test/util/dummy_object.py @@ -18,11 +18,11 @@ from perception_eval.common.label import AutowareLabel from perception_eval.common.label import Label -from perception_eval.common.object import DynamicObject -from perception_eval.common.object import DynamicObject2D from perception_eval.common.schema import FrameID from perception_eval.common.shape import Shape from perception_eval.common.shape import ShapeType +from perception_eval.object import DynamicObject +from perception_eval.object import DynamicObject2D from pyquaternion.quaternion import Quaternion diff --git a/perception_eval/test/visualization/test_eda_tool.py b/perception_eval/test/visualization/test_eda_tool.py index d35fb920..cfebf71f 100644 --- a/perception_eval/test/visualization/test_eda_tool.py +++ b/perception_eval/test/visualization/test_eda_tool.py @@ -23,13 +23,13 @@ from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel from perception_eval.common.label import Label -from perception_eval.common.object import DynamicObject from perception_eval.common.schema import FrameID from perception_eval.common.shape import Shape from perception_eval.common.shape import ShapeType -from perception_eval.evaluation.matching.object_matching import MatchingMode -from perception_eval.evaluation.result.object_result import DynamicObjectWithPerceptionResult -from perception_eval.evaluation.result.object_result import get_object_results +from perception_eval.matching import MatchingMode +from perception_eval.object import DynamicObject +from perception_eval.result import DynamicObjectWithPerceptionResult +from perception_eval.result import get_object_results from perception_eval.util.debug import get_objects_with_difference from perception_eval.visualization.eda_tool import EDAManager from perception_eval.visualization.eda_tool import EDAVisualizer From 5aa5fb1ddf93c1b254ef21ac75e83e67ced0925b Mon Sep 17 00:00:00 2001 From: Kotaro Uetake <60615504+ktro2828@users.noreply.github.com> Date: Tue, 19 Dec 2023 22:51:11 +0900 Subject: [PATCH 3/9] =?UTF-8?q?refactor:=20create=20new=20`label`=20direct?= =?UTF-8?q?ory=20and=20separate=20modules=20and=20renam=E2=80=A6=20(#112)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor: create new `label` directory and separate modules and rename `Label` into `SemanticLabel` Signed-off-by: ktro2828 * docs: update dead links Signed-off-by: ktro2828 --------- Signed-off-by: ktro2828 --- docs/en/perception/label.md | 2 +- docs/ja/perception/label.md | 2 +- .../perception_eval/common/label/__init__.py | 17 ++ .../common/{label.py => label/converter.py} | 162 ++---------------- .../perception_eval/common/label/types.py | 141 +++++++++++++++ .../perception_eval/common/label/utils.py | 34 ++++ .../perception_eval/common/threshold.py | 16 +- .../perception_eval/dataset/utils.py | 4 +- .../matching/objects_filter.py | 10 +- .../perception_eval/object/object2d.py | 34 ++-- .../perception_eval/object/object3d.py | 37 ++-- 
perception_eval/perception_eval/util/debug.py | 12 +- perception_eval/test/common/test_threshold.py | 8 +- .../test/matching/test_objects_filter.py | 24 +-- perception_eval/test/util/dummy_object.py | 30 ++-- .../test/visualization/test_eda_tool.py | 18 +- 16 files changed, 305 insertions(+), 246 deletions(-) create mode 100644 perception_eval/perception_eval/common/label/__init__.py rename perception_eval/perception_eval/common/{label.py => label/converter.py} (68%) create mode 100644 perception_eval/perception_eval/common/label/types.py create mode 100644 perception_eval/perception_eval/common/label/utils.py diff --git a/docs/en/perception/label.md b/docs/en/perception/label.md index 34abf44e..00f70e4d 100644 --- a/docs/en/perception/label.md +++ b/docs/en/perception/label.md @@ -1,6 +1,6 @@ # Object Labels -- For the details, see [perception_eval.common.label.py](../../../perception_eval/perception_eval/common/label.py) +- For the details, see [perception_eval.common.label.types.py](../../../perception_eval/perception_eval/common/label/types.py) ## `AutowareLabel` diff --git a/docs/ja/perception/label.md b/docs/ja/perception/label.md index 6a89d2af..c2d8dfe9 100644 --- a/docs/ja/perception/label.md +++ b/docs/ja/perception/label.md @@ -1,6 +1,6 @@ # オブジェクト ラベル -- 詳細は[perception_eval.common.label.py](../../../perception_eval/perception_eval/common/label.py)を参照 +- 詳細は[perception_eval.common.label.types.py](../../../perception_eval/perception_eval/common/label/types.py)を参照 ## Autoware Label diff --git a/perception_eval/perception_eval/common/label/__init__.py b/perception_eval/perception_eval/common/label/__init__.py new file mode 100644 index 00000000..7c46f87a --- /dev/null +++ b/perception_eval/perception_eval/common/label/__init__.py @@ -0,0 +1,17 @@ +from .converter import LabelConverter +from .types import AutowareLabel +from .types import CommonLabel +from .types import LabelType +from .types import SemanticLabel +from .types import TrafficLightLabel +from .utils import set_target_lists + +__all__ = ( + "LabelConverter", + "AutowareLabel", + "CommonLabel", + "LabelType", + "SemanticLabel", + "TrafficLightLabel", + "set_target_lists", +) diff --git a/perception_eval/perception_eval/common/label.py b/perception_eval/perception_eval/common/label/converter.py similarity index 68% rename from perception_eval/perception_eval/common/label.py rename to perception_eval/perception_eval/common/label/converter.py index cc42c942..f2fa2748 100644 --- a/perception_eval/perception_eval/common/label.py +++ b/perception_eval/perception_eval/common/label/converter.py @@ -15,82 +15,21 @@ from __future__ import annotations from dataclasses import dataclass -from enum import Enum import logging from typing import List from typing import Optional from typing import Tuple +from typing import TYPE_CHECKING from typing import Union from perception_eval.common.evaluation_task import EvaluationTask +from .types import AutowareLabel +from .types import SemanticLabel +from .types import TrafficLightLabel -class AutowareLabel(Enum): - """[summary] - Autoware label enum. 
- See https://github.com/tier4/autoware_iv_msgs/blob/main/autoware_perception_msgs/msg/object_recognition/Semantic.msg - """ - - UNKNOWN = "unknown" - CAR = "car" - TRUCK = "truck" - BUS = "bus" - BICYCLE = "bicycle" - MOTORBIKE = "motorbike" - PEDESTRIAN = "pedestrian" - ANIMAL = "animal" - - # for FP validation - FP = "false_positive" - - def __str__(self) -> str: - return self.value - - -class TrafficLightLabel(Enum): - # except of classification - TRAFFIC_LIGHT = "traffic_light" - - # classification - GREEN = "green" - RED = "red" - YELLOW = "yellow" - RED_STRAIGHT = "red_straight" - RED_LEFT = "red_left" - RED_LEFT_STRAIGHT = "red_left_straight" - RED_LEFT_DIAGONAL = "red_left_diagonal" - RED_RIGHT = "red_right" - RED_RIGHT_STRAIGHT = "red_right_straight" - RED_RIGHT_DIAGONAL = "red_right_diagonal" - YELLOW_RIGHT = "yellow_right" - - # unknown is used in both detection and classification - UNKNOWN = "unknown" - - # for FP validation - FP = "false_positive" - - def __str__(self) -> str: - return self.value - - -class CommonLabel(Enum): - UNKNOWN = (AutowareLabel.UNKNOWN, TrafficLightLabel.UNKNOWN) - FP = (AutowareLabel.FP, TrafficLightLabel.FP) - - def __eq__(self, label: LabelType) -> bool: - return label in self.value - - def __str__(self) -> str: - if self == CommonLabel.UNKNOWN: - return "unknown" - elif self == CommonLabel.FP: - return "false_positive" - else: - raise ValueError(f"Unexpected element: {self}") - - -LabelType = Union[AutowareLabel, TrafficLightLabel] +if TYPE_CHECKING: + from .types import LabelType @dataclass @@ -108,60 +47,6 @@ class LabelInfo: num: int = 0 -class Label: - """ - Attributes: - label (LabelType): Corresponding label. - name (str): Label before converted. - attributes (List[str]): List of attributes. Defaults to []. - - Args: - label (LabelType): LabelType instance. - name (str): Original label name. - attributes (List[str]): List of attributes. Defaults to []. - """ - - def __init__(self, label: LabelType, name: str, attributes: List[str] = []) -> None: - self.label: LabelType = label - self.name: str = name - self.attributes: List[str] = attributes - - def contains(self, key: str) -> bool: - """Check whether self.name contains input attribute. - - Args: - key (str): Target name or attribute. - - Returns: - bool: Indicates whether input self.name contains input attribute. - """ - assert isinstance(key, str), f"Expected type is str, but got {type(key)}" - return key in self.name or key in self.attributes - - def contains_any(self, keys: List[str]) -> bool: - assert isinstance(keys, (list, tuple)), f"Expected type is sequence, but got {type(keys)}" - return any([self.contains(key) for key in keys]) - - def is_fp(self) -> bool: - """Returns `True`, if myself is `false_positive` label. - - Returns: - bool: Whether myself is `false_positive`. - """ - return self.label == CommonLabel.FP - - def is_unknown(self) -> bool: - """Returns `True`, if myself is `unknown` label. - - Returns: - bool: Whether myself is `unknown`. - """ - return self.label == CommonLabel.UNKNOWN - - def __eq__(self, other: Label) -> bool: - return self.label == other.label - - class LabelConverter: """A class to convert string label name to LabelType instance. 
@@ -208,7 +93,7 @@ def __init__( else: raise ValueError(f"Unexpected `label_prefix`: {label_prefix}") - self.label_infos: List[LabelInfo] = [LabelInfo(label, name) for label, name in pair_list] + self.label_infos = [LabelInfo(label, name) for label, name in pair_list] logging.debug(f"label {self.label_infos}") @@ -216,7 +101,7 @@ def convert_label( self, name: str, attributes: List[str] = [], - ) -> Label: + ) -> SemanticLabel: """Convert label name and attributes to Label instance. Args: @@ -227,18 +112,18 @@ def convert_label( Returns: Label: Converted label. """ - return_label: Optional[Label] = None + return_label: Optional[SemanticLabel] = None for label_info in self.label_infos: if name.lower() == label_info.name: if self.count_label_number: label_info.num += 1 if return_label is not None: logging.error(f"Label {name} is already converted to {return_label}.") - return_label = Label(label_info.label, name, attributes) + return_label = SemanticLabel(label_info.label, name, attributes) break if return_label is None: logging.warning(f"Label {name} is not registered.") - return_label = Label(self.label_type.UNKNOWN, name, attributes) + return_label = SemanticLabel(self.label_type.UNKNOWN, name, attributes) return return_label def convert_name(self, name: str) -> LabelType: @@ -372,28 +257,3 @@ def _get_traffic_light_paris( (TrafficLightLabel.FP, "false_positive"), ] return pair_list - - -def set_target_lists( - target_labels: Optional[List[str]], - label_converter: LabelConverter, -) -> List[LabelType]: - """Returns a LabelType list from a list of label names in string. - - If no label is specified, returns all LabelType elements. - - Args: - target_labels (List[str]): The target class to evaluate - label_converter (LabelConverter): Label Converter class - - Returns: - List[LabelType]: LabelType instance list. - - Examples: - >>> converter = LabelConverter(False, "autoware") - >>> set_target_lists(["car", "pedestrian"], converter) - [, ] - """ - if target_labels is None or len(target_labels) == 0: - return [label for label in label_converter.label_type] - return [label_converter.convert_name(name) for name in target_labels] diff --git a/perception_eval/perception_eval/common/label/types.py b/perception_eval/perception_eval/common/label/types.py new file mode 100644 index 00000000..0b439fca --- /dev/null +++ b/perception_eval/perception_eval/common/label/types.py @@ -0,0 +1,141 @@ +# Copyright 2022 TIER IV, Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from enum import Enum +from typing import List +from typing import Union + + +class AutowareLabel(Enum): + """[summary] + Autoware label enum. 
+ See https://github.com/tier4/autoware_iv_msgs/blob/main/autoware_perception_msgs/msg/object_recognition/Semantic.msg + """ + + UNKNOWN = "unknown" + CAR = "car" + TRUCK = "truck" + BUS = "bus" + BICYCLE = "bicycle" + MOTORBIKE = "motorbike" + PEDESTRIAN = "pedestrian" + ANIMAL = "animal" + + # for FP validation + FP = "false_positive" + + def __str__(self) -> str: + return self.value + + +class TrafficLightLabel(Enum): + # except of classification + TRAFFIC_LIGHT = "traffic_light" + + # classification + GREEN = "green" + RED = "red" + YELLOW = "yellow" + RED_STRAIGHT = "red_straight" + RED_LEFT = "red_left" + RED_LEFT_STRAIGHT = "red_left_straight" + RED_LEFT_DIAGONAL = "red_left_diagonal" + RED_RIGHT = "red_right" + RED_RIGHT_STRAIGHT = "red_right_straight" + RED_RIGHT_DIAGONAL = "red_right_diagonal" + YELLOW_RIGHT = "yellow_right" + + # unknown is used in both detection and classification + UNKNOWN = "unknown" + + # for FP validation + FP = "false_positive" + + def __str__(self) -> str: + return self.value + + +class CommonLabel(Enum): + UNKNOWN = (AutowareLabel.UNKNOWN, TrafficLightLabel.UNKNOWN) + FP = (AutowareLabel.FP, TrafficLightLabel.FP) + + def __eq__(self, label: LabelType) -> bool: + return label in self.value + + def __str__(self) -> str: + if self == CommonLabel.UNKNOWN: + return "unknown" + elif self == CommonLabel.FP: + return "false_positive" + else: + raise ValueError(f"Unexpected element: {self}") + + +LabelType = Union[AutowareLabel, TrafficLightLabel] + + +class SemanticLabel: + """ + Attributes: + label (LabelType): Corresponding label. + name (str): Label before converted. + attributes (List[str]): List of attributes. Defaults to []. + + Args: + label (LabelType): LabelType instance. + name (str): Original label name. + attributes (List[str]): List of attributes. Defaults to []. + """ + + def __init__(self, label: LabelType, name: str, attributes: List[str] = []) -> None: + self.label = label + self.name = name + self.attributes = attributes + + def contains(self, key: str) -> bool: + """Check whether self.name contains input attribute. + + Args: + key (str): Target name or attribute. + + Returns: + bool: Indicates whether input self.name contains input attribute. + """ + assert isinstance(key, str), f"Expected type is str, but got {type(key)}" + return key in self.name or key in self.attributes + + def contains_any(self, keys: List[str]) -> bool: + assert isinstance(keys, (list, tuple)), f"Expected type is sequence, but got {type(keys)}" + return any([self.contains(key) for key in keys]) + + def is_fp(self) -> bool: + """Returns `True`, if myself is `false_positive` label. + + Returns: + bool: Whether myself is `false_positive`. + """ + return self.label == CommonLabel.FP + + def is_unknown(self) -> bool: + """Returns `True`, if myself is `unknown` label. + + Returns: + bool: Whether myself is `unknown`. 
+ """ + return self.label == CommonLabel.UNKNOWN + + def __eq__(self, other: SemanticLabel) -> bool: + return self.label == other.label diff --git a/perception_eval/perception_eval/common/label/utils.py b/perception_eval/perception_eval/common/label/utils.py new file mode 100644 index 00000000..7f58d6ec --- /dev/null +++ b/perception_eval/perception_eval/common/label/utils.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from typing import List +from typing import Optional +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from .converter import LabelConverter + from .types import LabelType + + +def set_target_lists( + target_labels: Optional[List[str]], + label_converter: LabelConverter, +) -> List[LabelType]: + """Returns a LabelType list from a list of label names in string. + + If no label is specified, returns all LabelType elements. + + Args: + target_labels (List[str]): The target class to evaluate + label_converter (LabelConverter): Label Converter class + + Returns: + List[LabelType]: LabelType instance list. + + Examples: + >>> converter = LabelConverter(False, "autoware") + >>> set_target_lists(["car", "pedestrian"], converter) + [, ] + """ + if target_labels is None or len(target_labels) == 0: + return [label for label in label_converter.label_type] + return [label_converter.convert_name(name) for name in target_labels] diff --git a/perception_eval/perception_eval/common/threshold.py b/perception_eval/perception_eval/common/threshold.py index 9bca17fe..2b4677de 100644 --- a/perception_eval/perception_eval/common/threshold.py +++ b/perception_eval/perception_eval/common/threshold.py @@ -12,13 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from numbers import Real from typing import List from typing import Optional +from typing import TYPE_CHECKING from typing import Union -from perception_eval.common.label import Label -from perception_eval.common.label import LabelType +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + from perception_eval.common.label import SemanticLabel class LabelThreshold: @@ -35,11 +39,11 @@ class LabelThreshold: def __init__( self, - semantic_label: Label, + semantic_label: SemanticLabel, target_labels: Optional[List[LabelType]], ) -> None: - self.semantic_label: Label = semantic_label - self.target_labels: Optional[List[LabelType]] = target_labels + self.semantic_label = semantic_label + self.target_labels = target_labels def get_label_threshold( self, @@ -69,7 +73,7 @@ def get_label_threshold( def get_label_threshold( - semantic_label: Label, + semantic_label: SemanticLabel, target_labels: Optional[List[LabelType]], threshold_list: Optional[List[float]], ) -> Optional[float]: diff --git a/perception_eval/perception_eval/dataset/utils.py b/perception_eval/perception_eval/dataset/utils.py index f8205d85..fdf19586 100644 --- a/perception_eval/perception_eval/dataset/utils.py +++ b/perception_eval/perception_eval/dataset/utils.py @@ -42,9 +42,9 @@ from .load import FrameGroundTruth if TYPE_CHECKING: - from perception_eval.common.label import Label from perception_eval.common.label import LabelConverter from perception_eval.common.label import LabelType + from perception_eval.common.label import SemanticLabel ################################# # Dataset 3D # @@ -152,7 +152,7 @@ def _convert_nuscenes_box_to_dynamic_object( object_box: Box, unix_time: int, evaluation_task: EvaluationTask, - semantic_label: Label, + 
semantic_label: SemanticLabel, instance_token: str, sample_token: str, visibility: Optional[Visibility] = None, diff --git a/perception_eval/perception_eval/matching/objects_filter.py b/perception_eval/perception_eval/matching/objects_filter.py index 23e4834d..e765d7e6 100644 --- a/perception_eval/perception_eval/matching/objects_filter.py +++ b/perception_eval/perception_eval/matching/objects_filter.py @@ -32,8 +32,8 @@ from perception_eval.result.perception.perception_result import DynamicObjectWithPerceptionResult if TYPE_CHECKING: - from perception_eval.common.label import Label from perception_eval.common.label import LabelType + from perception_eval.common.label import SemanticLabel from perception_eval.matching import MatchingMode from perception_eval.object import ObjectType @@ -137,7 +137,7 @@ def filter_object_results( def filter_objects( objects: List[ObjectType], is_gt: bool, - target_labels: Optional[List[Label]] = None, + target_labels: Optional[List[SemanticLabel]] = None, ignore_attributes: Optional[List[str]] = None, max_x_position_list: Optional[List[float]] = None, max_y_position_list: Optional[List[float]] = None, @@ -212,7 +212,7 @@ def filter_objects( def get_positive_objects( object_results: List[DynamicObjectWithPerceptionResult], - target_labels: List[Label], + target_labels: List[SemanticLabel], matching_mode: Optional[MatchingMode] = None, matching_threshold_list: Optional[List[float]] = None, ) -> Tuple[List[DynamicObjectWithPerceptionResult], List[DynamicObjectWithPerceptionResult]]: @@ -267,7 +267,7 @@ def get_positive_objects( def get_negative_objects( ground_truth_objects: List[ObjectType], object_results: List[DynamicObjectWithPerceptionResult], - target_labels: List[Label], + target_labels: List[LabelType], matching_mode: Optional[MatchingMode] = None, matching_threshold_list: Optional[List[float]] = None, ) -> Tuple[List[ObjectType], List[ObjectType]]: @@ -394,7 +394,7 @@ def divide_tp_fp_objects( threshold_list=confidence_threshold_list, ) if confidence_threshold_ is not None: - is_confidence: bool = object_result.estimated_object.semantic_score > confidence_threshold_ + is_confidence = object_result.estimated_object.semantic_score > confidence_threshold_ is_correct = is_correct and is_confidence if is_correct: diff --git a/perception_eval/perception_eval/object/object2d.py b/perception_eval/perception_eval/object/object2d.py index 678ec203..c211c7b2 100644 --- a/perception_eval/perception_eval/object/object2d.py +++ b/perception_eval/perception_eval/object/object2d.py @@ -17,13 +17,16 @@ from typing import List from typing import Optional from typing import Tuple +from typing import TYPE_CHECKING import numpy as np -from perception_eval.common.label import Label -from perception_eval.common.schema import FrameID -from perception_eval.common.schema import Visibility from shapely.geometry import Polygon +if TYPE_CHECKING: + from perception_eval.common.label import SemanticLabel + from perception_eval.common.schema import FrameID + from perception_eval.common.schema import Visibility + class Roi: """Region of Interest; ROI class. 
@@ -131,19 +134,19 @@ def __init__( unix_time: int, frame_id: FrameID, semantic_score: float, - semantic_label: Label, + semantic_label: SemanticLabel, roi: Optional[Tuple[int, int, int, int]] = None, uuid: Optional[str] = None, visibility: Optional[Visibility] = None, ) -> None: super().__init__() - self.unix_time: int = unix_time - self.frame_id: FrameID = frame_id - self.semantic_score: float = semantic_score - self.semantic_label: Label = semantic_label - self.roi: Optional[Roi] = Roi(roi) if roi is not None else None - self.uuid: Optional[str] = uuid - self.visibility: Optional[Visibility] = visibility + self.unix_time = unix_time + self.frame_id = frame_id + self.semantic_score = semantic_score + self.semantic_label = semantic_label + self.roi = Roi(roi) if roi is not None else None + self.uuid = uuid + self.visibility = visibility def get_corners(self) -> np.ndarray: """Returns the corners of bounding box in pixel. @@ -151,8 +154,7 @@ def get_corners(self) -> np.ndarray: Returns: numpy.ndarray: (top_left, top_right, bottom_right, bottom_left), in shape (4, 2). """ - if self.roi is None: - raise RuntimeError("self.roi is None.") + assert self.roi is not None, "self.roi is None." return self.roi.corners def get_area(self) -> int: @@ -161,8 +163,7 @@ def get_area(self) -> int: Returns: int: Area of bounding box[px]. """ - if self.roi is None: - raise RuntimeError("self.roi is None.") + assert self.roi is not None, "self.roi is None." return self.roi.area def get_polygon(self) -> Polygon: @@ -171,8 +172,7 @@ def get_polygon(self) -> Polygon: Returns: Polygon: Corners as Polygon. ((x0, y0), ..., (x0, y0)) """ - if self.roi is None: - raise RuntimeError("self.roi is None.") + assert self.roi is not None, "self.roi is None." corners: List[List[float]] = self.get_corners().tolist() corners.append(corners[0]) return Polygon(corners) diff --git a/perception_eval/perception_eval/object/object3d.py b/perception_eval/perception_eval/object/object3d.py index ddfb8c5a..5f43c2db 100644 --- a/perception_eval/perception_eval/object/object3d.py +++ b/perception_eval/perception_eval/object/object3d.py @@ -18,19 +18,22 @@ from typing import List from typing import Optional from typing import Tuple +from typing import TYPE_CHECKING import numpy as np -from perception_eval.common.label import Label from perception_eval.common.point import crop_pointcloud from perception_eval.common.point import polygon_to_list from perception_eval.common.schema import FrameID -from perception_eval.common.schema import Visibility -from perception_eval.common.shape import Shape -from perception_eval.common.shape import ShapeType from perception_eval.util.math import rotation_matrix_to_euler from pyquaternion import Quaternion from shapely.geometry import Polygon +if TYPE_CHECKING: + from perception_eval.common.label import SemanticLabel + from perception_eval.common.schema import Visibility + from perception_eval.common.shape import Shape + from perception_eval.common.shape import ShapeType + class ObjectState: """Object state class. 
@@ -57,7 +60,7 @@ def __init__( ) -> None: self.position: Tuple[float, float, float] = position self.orientation: Quaternion = orientation - self.shape: Shape = shape + self.shape = shape self.velocity: Optional[Tuple[float, float, float]] = velocity @property @@ -138,7 +141,7 @@ def __init__( shape: Shape, velocity: Optional[Tuple[float, float, float]], semantic_score: float, - semantic_label: Label, + semantic_label: SemanticLabel, pointcloud_num: Optional[int] = None, uuid: Optional[str] = None, tracked_positions: Optional[List[Tuple[float, float, float]]] = None, @@ -153,24 +156,24 @@ def __init__( visibility: Optional[Visibility] = None, ) -> None: # detection - self.unix_time: int = unix_time - self.frame_id: FrameID = frame_id - self.state: ObjectState = ObjectState( + self.unix_time = unix_time + self.frame_id = frame_id + self.state = ObjectState( position=position, orientation=orientation, shape=shape, velocity=velocity, ) - self.semantic_score: float = semantic_score - self.semantic_label: Label = semantic_label + self.semantic_score = semantic_score + self.semantic_label = semantic_label # for detection label for case evaluation # pointcloud number inside bounding box - self.pointcloud_num: Optional[int] = pointcloud_num + self.pointcloud_num = pointcloud_num # tracking - self.uuid: Optional[str] = uuid - self.tracked_path: Optional[List[ObjectState]] = self._set_states( + self.uuid = uuid + self.tracked_path = self._set_states( positions=tracked_positions, orientations=tracked_orientations, shapes=tracked_shapes, @@ -178,15 +181,15 @@ def __init__( ) # prediction - self.predicted_confidence: Optional[float] = predicted_confidence - self.predicted_path: Optional[List[ObjectState]] = self._set_states( + self.predicted_confidence = predicted_confidence + self.predicted_path = self._set_states( positions=predicted_positions, orientations=predicted_orientations, shapes=predicted_shapes, twists=predicted_twists, ) - self.visibility: Optional[Visibility] = visibility + self.visibility = visibility def __eq__(self, other: Optional[DynamicObject]) -> bool: """Check if other equals this object. 
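After the refactors in these patches, the former `Label` class is exposed as `SemanticLabel` under `perception_eval.common.label`, and the 2D/3D object classes live under `perception_eval.object`. Below is a minimal, illustrative construction sketch using only the import paths, constructor keywords, and `SemanticLabel` helpers visible in the diffs above; `FrameID.BASE_LINK` is an assumed enum member that does not appear in this patch.

    from perception_eval.common.label import AutowareLabel
    from perception_eval.common.label import SemanticLabel
    from perception_eval.common.schema import FrameID
    from perception_eval.common.shape import Shape
    from perception_eval.common.shape import ShapeType
    from perception_eval.object import DynamicObject
    from pyquaternion.quaternion import Quaternion

    # SemanticLabel carries the converted LabelType, the original name, and optional attributes.
    car_label = SemanticLabel(AutowareLabel.CAR, "car", [])
    assert not car_label.is_fp() and not car_label.is_unknown()

    # A 3D object built with the relocated DynamicObject class.
    car = DynamicObject(
        unix_time=100,
        frame_id=FrameID.BASE_LINK,  # assumed member; use whichever FrameID your data provides
        position=(1.0, 0.0, 0.0),
        orientation=Quaternion([0.0, 0.0, 0.0, 1.0]),
        shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(1.5, 1.5, 1.5)),
        velocity=(1.0, 0.0, 0.0),
        semantic_score=0.9,
        semantic_label=car_label,
    )

2D objects follow the same pattern through `DynamicObject2D`, which takes a pixel `roi` tuple instead of a pose, as shown in the object2d.py diff above.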
diff --git a/perception_eval/perception_eval/util/debug.py b/perception_eval/perception_eval/util/debug.py index f8bb1378..f49f9b6a 100644 --- a/perception_eval/perception_eval/util/debug.py +++ b/perception_eval/perception_eval/util/debug.py @@ -21,8 +21,8 @@ import numpy as np from perception_eval.common.label import AutowareLabel -from perception_eval.common.label import Label from perception_eval.common.label import LabelType +from perception_eval.common.label import SemanticLabel from perception_eval.common.label import TrafficLightLabel from perception_eval.common.shape import Shape from perception_eval.object import DynamicObject @@ -187,10 +187,10 @@ def get_objects_with_difference( label = AutowareLabel.UNKNOWN elif isinstance(object_.semantic_label.label, TrafficLightLabel): label = TrafficLightLabel.UNKNOWN - semantic_label = Label(label, "unknown") + semantic_label = SemanticLabel(label, "unknown") if label_candidates is not None: - label: LabelType = random.choice(label_candidates) - semantic_label = Label(label, label.value) + label = random.choice(label_candidates) + semantic_label = SemanticLabel(label, label.value) else: semantic_label = object_.semantic_label @@ -243,13 +243,13 @@ def get_objects_with_difference2d( label = AutowareLabel.UNKNOWN elif isinstance(object_.semantic_label.label, TrafficLightLabel): label = TrafficLightLabel.UNKNOWN - semantic_label = Label(label, "unknown") + semantic_label = SemanticLabel(label, "unknown") else: semantic_label = object_.semantic_label if label_candidates is not None: label: LabelType = random.choice(label_candidates) - semantic_label = Label(label, label.value) + semantic_label = SemanticLabel(label, label.value) else: semantic_label = object_.semantic_label diff --git a/perception_eval/test/common/test_threshold.py b/perception_eval/test/common/test_threshold.py index 87465b00..d6c7091a 100644 --- a/perception_eval/test/common/test_threshold.py +++ b/perception_eval/test/common/test_threshold.py @@ -17,7 +17,7 @@ import unittest from perception_eval.common.label import AutowareLabel -from perception_eval.common.label import Label +from perception_eval.common.label import SemanticLabel from perception_eval.common.threshold import check_nested_thresholds from perception_eval.common.threshold import check_thresholds from perception_eval.common.threshold import get_label_threshold @@ -45,9 +45,9 @@ def test_get_label_threshold(self): # patterns: (semantic_label, ans_threshold) patterns: List[Tuple[AutowareLabel, float]] = [ - (Label(AutowareLabel.CAR, "car", []), 0.1), - (Label(AutowareLabel.BUS, "bus", []), 0.2), - (Label(AutowareLabel.PEDESTRIAN, "pedestrian", []), 0.3), + (SemanticLabel(AutowareLabel.CAR, "car", []), 0.1), + (SemanticLabel(AutowareLabel.BUS, "bus", []), 0.2), + (SemanticLabel(AutowareLabel.PEDESTRIAN, "pedestrian", []), 0.3), ] for semantic_label, ans_threshold in patterns: with self.subTest("Test get_label_thresholds"): diff --git a/perception_eval/test/matching/test_objects_filter.py b/perception_eval/test/matching/test_objects_filter.py index f1af3493..1358ebef 100644 --- a/perception_eval/test/matching/test_objects_filter.py +++ b/perception_eval/test/matching/test_objects_filter.py @@ -22,7 +22,7 @@ import numpy as np from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel -from perception_eval.common.label import Label +from perception_eval.common.label import SemanticLabel from perception_eval.matching import MatchingMode from 
perception_eval.matching.objects_filter import divide_objects from perception_eval.matching.objects_filter import divide_objects_to_num @@ -204,8 +204,8 @@ def test_get_positive_objects(self) -> None: np.array([(0, 0), (1, 1), (2, 2)]), np.array([(3, None)]), { - 0: Label(AutowareLabel.UNKNOWN, "unknown", []), - 3: Label(AutowareLabel.ANIMAL, "animal", []), + 0: SemanticLabel(AutowareLabel.UNKNOWN, "unknown", []), + 3: SemanticLabel(AutowareLabel.ANIMAL, "animal", []), }, ), # (3) @@ -226,7 +226,7 @@ def test_get_positive_objects(self) -> None: 1.5, np.array([(2, 0), (1, 1), (3, 3)]), np.array([(0, None)]), - {2: Label(AutowareLabel.UNKNOWN, "unknown", [])}, + {2: SemanticLabel(AutowareLabel.UNKNOWN, "unknown", [])}, ), # (5) # TP: (Est[2], GT[0]), (Est[1], GT[1]), (Est[3], GT[3]) @@ -236,7 +236,7 @@ def test_get_positive_objects(self) -> None: 1.5, np.array([(2, 0), (1, 1), (3, 3)]), np.array([(0, None)]), - {2: Label(AutowareLabel.CAR, "car", [])}, + {2: SemanticLabel(AutowareLabel.CAR, "car", [])}, ), # (6) # TP: @@ -405,8 +405,8 @@ def test_divide_tp_fp_objects(self): np.array([(0, 0), (1, 1), (2, 2)]), np.array([(3, None)]), { - "0": Label(AutowareLabel.UNKNOWN, "unknown", []), - "3": Label(AutowareLabel.ANIMAL, "animal", []), + "0": SemanticLabel(AutowareLabel.UNKNOWN, "unknown", []), + "3": SemanticLabel(AutowareLabel.ANIMAL, "animal", []), }, ), # (3) @@ -427,7 +427,7 @@ def test_divide_tp_fp_objects(self): 1.5, np.array([(2, 0), (1, 1), (3, 3)]), np.array([(0, None)]), - {"2": Label(AutowareLabel.UNKNOWN, "unknown", [])}, + {"2": SemanticLabel(AutowareLabel.UNKNOWN, "unknown", [])}, ), # (5) # TP: (Est[2], GT[0]), (Est[1], GT[1]), (Est[3], GT[3]) @@ -437,7 +437,7 @@ def test_divide_tp_fp_objects(self): 1.5, np.array([(2, 0), (1, 1), (3, 3)]), np.array([(0, None)]), - {"2": Label(AutowareLabel.CAR, "car", [])}, + {"2": SemanticLabel(AutowareLabel.CAR, "car", [])}, ), # (6) # TP: @@ -607,8 +607,8 @@ def test_get_fn_objects_for_different_label(self): 0.0, [3], { - "0": Label(AutowareLabel.UNKNOWN, "unknown", []), - "3": Label(AutowareLabel.ANIMAL, "animal", []), + "0": SemanticLabel(AutowareLabel.UNKNOWN, "unknown", []), + "3": SemanticLabel(AutowareLabel.ANIMAL, "animal", []), }, ), # TODO(Shin-kyoto): 以下のtestも通る必要あり.現在,ground truth一つに対しestimated objectが複数紐づくため通らなくなっている @@ -621,7 +621,7 @@ def test_get_fn_objects_for_different_label(self): # Given difference (1.5, 0.0), there are no fn. (1.5, 0.0, [], {}), # Given difference (1.5, 0.0) and 1 labels changed, 1 estimated_objects are fp. 
- (1.5, 0.0, [], {"1": Label(AutowareLabel.UNKNOWN, "unknown", [])}), + (1.5, 0.0, [], {"1": SemanticLabel(AutowareLabel.UNKNOWN, "unknown", [])}), # Given difference (2.5, 0.0), all ground_truth_objects are fn (2.5, 0.0, [0, 1, 2, 3], {}), # Given difference (2.5, 2.5), all ground_truth_objects are fn diff --git a/perception_eval/test/util/dummy_object.py b/perception_eval/test/util/dummy_object.py index 064bf41e..e0423be3 100644 --- a/perception_eval/test/util/dummy_object.py +++ b/perception_eval/test/util/dummy_object.py @@ -17,7 +17,7 @@ from typing import Tuple from perception_eval.common.label import AutowareLabel -from perception_eval.common.label import Label +from perception_eval.common.label import SemanticLabel from perception_eval.common.schema import FrameID from perception_eval.common.shape import Shape from perception_eval.common.shape import ShapeType @@ -52,7 +52,7 @@ def make_dummy_data( orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(1.5, 1.5, 1.5)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.CAR, "car", []), + semantic_label=SemanticLabel(AutowareLabel.CAR, "car", []), velocity=(1.0, 1.0, 1.0), uuid=token_hex(16) if use_unique_id else "0", ), @@ -63,7 +63,7 @@ def make_dummy_data( orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(0.5, 0.5, 0.5)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.BICYCLE, "bicycle", []), + semantic_label=SemanticLabel(AutowareLabel.BICYCLE, "bicycle", []), velocity=(1.0, 1.0, 1.0), uuid=token_hex(16) if use_unique_id else "0", ), @@ -74,7 +74,7 @@ def make_dummy_data( orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(1.0, 1.0, 1.0)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.CAR, "car", []), + semantic_label=SemanticLabel(AutowareLabel.CAR, "car", []), velocity=(1.0, 1.0, 1.0), uuid=token_hex(16) if use_unique_id else "1", ), @@ -87,7 +87,7 @@ def make_dummy_data( orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(1.0, 1.0, 1.0)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.CAR, "car", []), + semantic_label=SemanticLabel(AutowareLabel.CAR, "car", []), velocity=(1.0, 1.0, 1.0), uuid=token_hex(16), pointcloud_num=10, @@ -99,7 +99,7 @@ def make_dummy_data( orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(1.0, 1.0, 1.0)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.BICYCLE, "bicycle", []), + semantic_label=SemanticLabel(AutowareLabel.BICYCLE, "bicycle", []), velocity=(1.0, 1.0, 1.0), uuid=token_hex(16), pointcloud_num=10, @@ -111,7 +111,7 @@ def make_dummy_data( orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(1.0, 1.0, 1.0)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.PEDESTRIAN, "pedestrian", []), + semantic_label=SemanticLabel(AutowareLabel.PEDESTRIAN, "pedestrian", []), velocity=(1.0, 1.0, 1.0), uuid=token_hex(16), pointcloud_num=10, @@ -123,7 +123,7 @@ def make_dummy_data( orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(1.0, 1.0, 1.0)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.MOTORBIKE, "motorbike", []), + semantic_label=SemanticLabel(AutowareLabel.MOTORBIKE, "motorbike", []), velocity=(1.0, 1.0, 1.0), uuid=token_hex(16), pointcloud_num=10, @@ -148,7 +148,7 @@ def make_dummy_data2d(use_roi: bool = True) 
-> Tuple[List[DynamicObject2D], List unix_time=100, frame_id=frame_id, semantic_score=0.9, - semantic_label=Label(AutowareLabel.CAR, "car", []), + semantic_label=SemanticLabel(AutowareLabel.CAR, "car", []), roi=(100, 100, 200, 100) if use_roi else None, uuid=token_hex(16) if use_roi else "0", ), @@ -156,7 +156,7 @@ def make_dummy_data2d(use_roi: bool = True) -> Tuple[List[DynamicObject2D], List unix_time=100, frame_id=frame_id, semantic_score=0.9, - semantic_label=Label(AutowareLabel.BICYCLE, "bicycle", []), + semantic_label=SemanticLabel(AutowareLabel.BICYCLE, "bicycle", []), roi=(0, 0, 50, 50) if use_roi else None, uuid=token_hex(16) if use_roi else "1", ), @@ -164,7 +164,7 @@ def make_dummy_data2d(use_roi: bool = True) -> Tuple[List[DynamicObject2D], List unix_time=100, frame_id=frame_id, semantic_score=0.9, - semantic_label=Label(AutowareLabel.CAR, "car", []), + semantic_label=SemanticLabel(AutowareLabel.CAR, "car", []), roi=(200, 200, 200, 100) if use_roi else None, uuid=token_hex(16) if use_roi else "2", ), @@ -174,7 +174,7 @@ def make_dummy_data2d(use_roi: bool = True) -> Tuple[List[DynamicObject2D], List unix_time=100, frame_id=frame_id, semantic_score=1.0, - semantic_label=Label(AutowareLabel.CAR, "car", []), + semantic_label=SemanticLabel(AutowareLabel.CAR, "car", []), roi=(100, 100, 200, 100) if use_roi else None, uuid=token_hex(16) if use_roi else "0", ), @@ -182,7 +182,7 @@ def make_dummy_data2d(use_roi: bool = True) -> Tuple[List[DynamicObject2D], List unix_time=100, frame_id=frame_id, semantic_score=1.0, - semantic_label=Label(AutowareLabel.BICYCLE, "bicycle", []), + semantic_label=SemanticLabel(AutowareLabel.BICYCLE, "bicycle", []), roi=(0, 0, 50, 50) if use_roi else None, uuid=token_hex(16) if use_roi else "1", ), @@ -190,7 +190,7 @@ def make_dummy_data2d(use_roi: bool = True) -> Tuple[List[DynamicObject2D], List unix_time=100, frame_id=frame_id, semantic_score=1.0, - semantic_label=Label(AutowareLabel.PEDESTRIAN, "pedestrian", []), + semantic_label=SemanticLabel(AutowareLabel.PEDESTRIAN, "pedestrian", []), roi=(200, 200, 200, 100) if use_roi else None, uuid=token_hex(16) if use_roi else "2", ), @@ -198,7 +198,7 @@ def make_dummy_data2d(use_roi: bool = True) -> Tuple[List[DynamicObject2D], List unix_time=100, frame_id=frame_id, semantic_score=1.0, - semantic_label=Label(AutowareLabel.MOTORBIKE, "motorbike", []), + semantic_label=SemanticLabel(AutowareLabel.MOTORBIKE, "motorbike", []), roi=(300, 100, 50, 50) if use_roi else None, uuid=token_hex(16) if use_roi else "3", ), diff --git a/perception_eval/test/visualization/test_eda_tool.py b/perception_eval/test/visualization/test_eda_tool.py index cfebf71f..49995b0b 100644 --- a/perception_eval/test/visualization/test_eda_tool.py +++ b/perception_eval/test/visualization/test_eda_tool.py @@ -22,7 +22,7 @@ from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import AutowareLabel -from perception_eval.common.label import Label +from perception_eval.common.label import SemanticLabel from perception_eval.common.schema import FrameID from perception_eval.common.shape import Shape from perception_eval.common.shape import ShapeType @@ -62,7 +62,7 @@ class TestEDAVisualizer: orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(1.0, 1.0, 1.0)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.PEDESTRIAN, "pedestrian", []), + semantic_label=SemanticLabel(AutowareLabel.PEDESTRIAN, "pedestrian", []), velocity=(1.0, 1.0, 1.0), 
uuid=token_hex(16), ), @@ -308,7 +308,7 @@ class TestEDAManager: orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(1.0, 1.0, 1.0)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.PEDESTRIAN, "pedestrian", []), + semantic_label=SemanticLabel(AutowareLabel.PEDESTRIAN, "pedestrian", []), velocity=(1.0, 1.0, 1.0), uuid=token_hex(16), ), @@ -329,7 +329,7 @@ class TestEDAManager: orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(2.0, 4.0, 2.0)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.CAR, "car", []), + semantic_label=SemanticLabel(AutowareLabel.CAR, "car", []), velocity=(1.0, 1.0, 1.0), pointcloud_num=10, uuid=token_hex(16), @@ -341,7 +341,7 @@ class TestEDAManager: orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(2.0, 4.0, 2.0)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.CAR, "car", []), + semantic_label=SemanticLabel(AutowareLabel.CAR, "car", []), velocity=(1.0, 1.0, 1.0), pointcloud_num=10, uuid=token_hex(16), @@ -353,7 +353,7 @@ class TestEDAManager: orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(1.0, 2.0, 1.0)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.BICYCLE, "bicycle", []), + semantic_label=SemanticLabel(AutowareLabel.BICYCLE, "bicycle", []), velocity=(1.0, 1.0, 1.0), pointcloud_num=10, uuid=token_hex(16), @@ -365,7 +365,7 @@ class TestEDAManager: orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(1.0, 2.0, 1.0)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.BICYCLE, "bicycle", []), + semantic_label=SemanticLabel(AutowareLabel.BICYCLE, "bicycle", []), velocity=(1.0, 1.0, 1.0), pointcloud_num=10, uuid=token_hex(16), @@ -377,7 +377,7 @@ class TestEDAManager: orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(1.0, 1.0, 1.0)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.PEDESTRIAN, "pedestrian", []), + semantic_label=SemanticLabel(AutowareLabel.PEDESTRIAN, "pedestrian", []), velocity=(1.0, 1.0, 1.0), pointcloud_num=10, uuid=token_hex(16), @@ -389,7 +389,7 @@ class TestEDAManager: orientation=Quaternion([0.0, 0.0, 0.0, 1.0]), shape=Shape(shape_type=ShapeType.BOUNDING_BOX, size=(1.0, 1.0, 1.0)), semantic_score=0.9, - semantic_label=Label(AutowareLabel.PEDESTRIAN, "pedestrian", []), + semantic_label=SemanticLabel(AutowareLabel.PEDESTRIAN, "pedestrian", []), velocity=(1.0, 1.0, 1.0), pointcloud_num=10, uuid=token_hex(16), From e7290a4c6193d692979d93567466d93065fd83d0 Mon Sep 17 00:00:00 2001 From: ktro2828 Date: Tue, 19 Dec 2023 23:28:41 +0900 Subject: [PATCH 4/9] refactor: rename CriticalFrameConfig to PerceptionFrameConfig Signed-off-by: ktro2828 --- .../perception_eval/dataset/ground_truth.py | 4 ++-- .../perception_eval/dataset/load.py | 4 ++-- .../perception_eval/dataset/utils.py | 10 ++++----- .../manager/perception_evaluation_manager.py | 16 ++++++-------- .../manager/sensing_evaluation_manager.py | 2 +- .../result/perception/__init__.py | 4 ++-- .../perception/perception_frame_config.py | 18 ++++++--------- .../perception/perception_frame_result.py | 22 +++++++++---------- .../perception/perception_pass_fail_result.py | 18 +++++++-------- .../result/sensing/sensing_frame_result.py | 4 ++-- .../tool/perception_analyzer_base.py | 12 +++++----- .../visualization/perception_visualizer2d.py | 2 +- 
.../visualization/perception_visualizer3d.py | 2 +- .../visualization/sensing_visualizer.py | 2 +- .../test/perception_fp_validation_lsim.py | 12 +++++----- perception_eval/test/perception_lsim.py | 12 +++++----- perception_eval/test/perception_lsim2d.py | 16 +++++++------- 17 files changed, 76 insertions(+), 84 deletions(-) diff --git a/perception_eval/perception_eval/dataset/ground_truth.py b/perception_eval/perception_eval/dataset/ground_truth.py index 38d5a210..22026da4 100644 --- a/perception_eval/perception_eval/dataset/ground_truth.py +++ b/perception_eval/perception_eval/dataset/ground_truth.py @@ -39,13 +39,13 @@ class FrameGroundTruth: def __init__( self, unix_time: int, - frame_name: str, + frame_number: int, objects: List[ObjectType], ego2map: Optional[np.ndarray] = None, raw_data: Optional[Dict[str, np.ndarray]] = None, ) -> None: self.unix_time = unix_time - self.frame_name = frame_name + self.frame_number = frame_number self.objects = objects self.ego2map = ego2map self.raw_data = raw_data diff --git a/perception_eval/perception_eval/dataset/load.py b/perception_eval/perception_eval/dataset/load.py index a60eac8a..545ec1fe 100644 --- a/perception_eval/perception_eval/dataset/load.py +++ b/perception_eval/perception_eval/dataset/load.py @@ -138,7 +138,7 @@ def _load_dataset( evaluation_task=evaluation_task, label_converter=label_converter, frame_ids=frame_ids, - frame_name=str(n), + frame_number=n, load_raw_data=load_raw_data, ) else: @@ -150,7 +150,7 @@ def _load_dataset( evaluation_task=evaluation_task, label_converter=label_converter, frame_id=frame_ids[0], - frame_name=str(n), + frame_number=n, load_raw_data=load_raw_data, ) dataset.append(frame) diff --git a/perception_eval/perception_eval/dataset/utils.py b/perception_eval/perception_eval/dataset/utils.py index fdf19586..5b2c21ff 100644 --- a/perception_eval/perception_eval/dataset/utils.py +++ b/perception_eval/perception_eval/dataset/utils.py @@ -58,7 +58,7 @@ def _sample_to_frame( evaluation_task: EvaluationTask, label_converter: LabelConverter, frame_id: FrameID, - frame_name: str, + frame_number: int, load_raw_data: bool, ) -> FrameGroundTruth: """Load FrameGroundTruth instance from sample record. @@ -70,7 +70,7 @@ def _sample_to_frame( evaluation_tasks (EvaluationTask): The evaluation task. label_converter (LabelConverter): LabelConvertor instance. frame_id (FrameID): FrameID instance. - frame_name (str): Name of frame, number of frame is used. + frame_number (int): The number of frame, number of frame is used. load_raw_data (bool): Whether load pointcloud/image data. Raises: @@ -137,7 +137,7 @@ def _sample_to_frame( frame = FrameGroundTruth( unix_time=unix_time_, - frame_name=frame_name, + frame_number=frame_number, objects=objects_, ego2map=ego2map, raw_data=raw_data, @@ -434,7 +434,7 @@ def _sample_to_frame_2d( evaluation_task: EvaluationTask, label_converter: LabelConverter, frame_ids: List[FrameID], - frame_name: str, + frame_number: int, load_raw_data: bool, ) -> FrameGroundTruth: """Returns FrameGroundTruth constructed with DynamicObject2D. 
@@ -518,7 +518,7 @@ def _sample_to_frame_2d( frame = FrameGroundTruth( unix_time=unix_time, - frame_name=frame_name, + frame_number=frame_number, objects=objects_, raw_data=raw_data, ) diff --git a/perception_eval/perception_eval/manager/perception_evaluation_manager.py b/perception_eval/perception_eval/manager/perception_evaluation_manager.py index c43d576a..9dfa1d26 100644 --- a/perception_eval/perception_eval/manager/perception_evaluation_manager.py +++ b/perception_eval/perception_eval/manager/perception_evaluation_manager.py @@ -35,8 +35,8 @@ from perception_eval.config import PerceptionEvaluationConfig from perception_eval.dataset import FrameGroundTruth from perception_eval.object import ObjectType - from perception_eval.result import CriticalObjectFilterConfig from perception_eval.result import DynamicObjectWithPerceptionResult + from perception_eval.result import PerceptionFrameConfig from perception_eval.result import PerceptionPassFailConfig from perception_eval.visualization import PerceptionVisualizerType @@ -84,8 +84,8 @@ def add_frame_result( unix_time: int, ground_truth_now_frame: FrameGroundTruth, estimated_objects: List[ObjectType], - ros_critical_ground_truth_objects: List[ObjectType], - critical_object_filter_config: CriticalObjectFilterConfig, + critical_ground_truth_objects: List[ObjectType], + frame_config: PerceptionFrameConfig, frame_pass_fail_config: PerceptionPassFailConfig, ) -> PerceptionFrameResult: """Get perception result at current frame. @@ -118,7 +118,7 @@ def add_frame_result( object_results=object_results, frame_ground_truth=ground_truth_now_frame, metrics_config=self.metrics_config, - critical_object_filter_config=critical_object_filter_config, + frame_config=frame_config, frame_pass_fail_config=frame_pass_fail_config, unix_time=unix_time, target_labels=self.target_labels, @@ -126,13 +126,11 @@ def add_frame_result( if len(self.frame_results) > 0: result.evaluate_frame( - ros_critical_ground_truth_objects=ros_critical_ground_truth_objects, + critical_ground_truth_objects=critical_ground_truth_objects, previous_result=self.frame_results[-1], ) else: - result.evaluate_frame( - ros_critical_ground_truth_objects=ros_critical_ground_truth_objects, - ) + result.evaluate_frame(critical_ground_truth_objects=critical_ground_truth_objects) self.frame_results.append(result) return result @@ -204,7 +202,7 @@ def get_scene_result(self) -> MetricsScore: for label in target_labels: all_frame_results[label].append(obj_result_dict[label]) all_num_gt[label] += num_gt_dict[label] - used_frame.append(int(frame.frame_name)) + used_frame.append(frame.frame_number) # Calculate score scene_metrics_score = MetricsScore( diff --git a/perception_eval/perception_eval/manager/sensing_evaluation_manager.py b/perception_eval/perception_eval/manager/sensing_evaluation_manager.py index 0a4b8c39..e15eaa48 100644 --- a/perception_eval/perception_eval/manager/sensing_evaluation_manager.py +++ b/perception_eval/perception_eval/manager/sensing_evaluation_manager.py @@ -99,7 +99,7 @@ def add_frame_result( result = SensingFrameResult( sensing_frame_config=sensing_frame_config, unix_time=unix_time, - frame_name=ground_truth_now_frame.frame_name, + frame_number=ground_truth_now_frame.frame_number, ) result.evaluate_frame( diff --git a/perception_eval/perception_eval/result/perception/__init__.py b/perception_eval/perception_eval/result/perception/__init__.py index 39ab9b32..9664edb0 100644 --- a/perception_eval/perception_eval/result/perception/__init__.py +++ 
b/perception_eval/perception_eval/result/perception/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. -from .perception_frame_config import CriticalObjectFilterConfig +from .perception_frame_config import PerceptionFrameConfig from .perception_frame_config import PerceptionPassFailConfig from .perception_frame_result import get_object_status from .perception_frame_result import PerceptionFrameResult @@ -21,7 +21,7 @@ from .perception_result import get_object_results __all__ = ( - "CriticalObjectFilterConfig", + "PerceptionFrameConfig", "PerceptionPassFailConfig", "get_object_status", "PerceptionFrameResult", diff --git a/perception_eval/perception_eval/result/perception/perception_frame_config.py b/perception_eval/perception_eval/result/perception/perception_frame_config.py index 593397ef..08f4fd24 100644 --- a/perception_eval/perception_eval/result/perception/perception_frame_config.py +++ b/perception_eval/perception_eval/result/perception/perception_frame_config.py @@ -29,7 +29,7 @@ from perception_eval.config import PerceptionEvaluationConfig -class CriticalObjectFilterConfig: +class PerceptionFrameConfig: """[summary] Config class for critical object filter @@ -149,10 +149,9 @@ def __init__( self, evaluator_config: PerceptionEvaluationConfig, target_labels: Optional[List[str]], - matching_threshold_list: Optional[List[float]] = None, - confidence_threshold_list: Optional[List[float]] = None, + thresholds: Optional[List[float]] = None, ) -> None: - """[summary] + """ Args: evaluator_config (PerceptionEvaluationConfig): Evaluation config target_labels (List[str]): Target list. If None or empty list is specified, all labels will be evaluated. @@ -167,14 +166,11 @@ def __init__( ) num_elements: int = len(self.target_labels) - if matching_threshold_list is None: - self.matching_threshold_list = None - else: - self.matching_threshold_list: List[float] = check_thresholds(matching_threshold_list, num_elements) - if confidence_threshold_list is None: - self.confidence_threshold_list = None + + if thresholds is None: + self.thresholds = None else: - self.confidence_threshold_list: List[float] = check_thresholds(confidence_threshold_list, num_elements) + self.thresholds = check_thresholds(thresholds, num_elements) class UseCaseThresholdsError(Exception): diff --git a/perception_eval/perception_eval/result/perception/perception_frame_result.py b/perception_eval/perception_eval/result/perception/perception_frame_result.py index ed7b928b..182d64a4 100644 --- a/perception_eval/perception_eval/result/perception/perception_frame_result.py +++ b/perception_eval/perception_eval/result/perception/perception_frame_result.py @@ -32,7 +32,7 @@ from perception_eval.metrics import MetricsScoreConfig from perception_eval.object import ObjectType - from .perception_frame_config import CriticalObjectFilterConfig + from .perception_frame_config import PerceptionFrameConfig from .perception_frame_config import PerceptionPassFailConfig from .perception_result import DynamicObjectWithPerceptionResult @@ -43,7 +43,7 @@ class PerceptionFrameResult: Attributes: object_results (List[DynamicObjectWithPerceptionResult]): Filtered object results to each estimated object. frame_ground_truth (FrameGroundTruth): Filtered ground truth of frame. - frame_name (str): The file name of frame in the datasets. + frame_number (int): The file name of frame in the datasets. unix_time (int): The unix time for frame [us]. target_labels (List[AutowareLabel]): The list of target label. 
metrics_score (MetricsScore): Metrics score results. @@ -64,14 +64,12 @@ def __init__( object_results: List[DynamicObjectWithPerceptionResult], frame_ground_truth: FrameGroundTruth, metrics_config: MetricsScoreConfig, - critical_object_filter_config: CriticalObjectFilterConfig, + frame_config: PerceptionFrameConfig, frame_pass_fail_config: PerceptionPassFailConfig, unix_time: int, target_labels: List[LabelType], ) -> None: - # TODO(ktro2828): rename `frame_name` into `frame_number` - # frame information - self.frame_name = frame_ground_truth.frame_name + self.frame_number = frame_ground_truth.frame_number self.unix_time = unix_time self.target_labels = target_labels @@ -81,19 +79,19 @@ def __init__( # init evaluation self.metrics_score = metrics.MetricsScore( metrics_config, - used_frame=[int(self.frame_name)], + used_frame=[int(self.frame_number)], ) self.pass_fail_result = PassFailResult( unix_time=unix_time, - frame_number=frame_ground_truth.frame_name, - critical_object_filter_config=critical_object_filter_config, + frame_number=self.frame_number, + frame_config=frame_config, frame_pass_fail_config=frame_pass_fail_config, ego2map=frame_ground_truth.ego2map, ) def evaluate_frame( self, - ros_critical_ground_truth_objects: List[ObjectType], + critical_ground_truth_objects: List[ObjectType], previous_result: Optional[PerceptionFrameResult] = None, ) -> None: """[summary] @@ -130,7 +128,7 @@ def evaluate_frame( self.pass_fail_result.evaluate( object_results=self.object_results, - ros_critical_ground_truth_objects=ros_critical_ground_truth_objects, + critical_ground_truth_objects=critical_ground_truth_objects, ) @@ -145,7 +143,7 @@ def get_object_status(frame_results: List[PerceptionFrameResult]) -> List[Ground """ status_infos: List[GroundTruthStatus] = [] for frame_result in frame_results: - frame_num: int = int(frame_result.frame_name) + frame_num = int(frame_result.frame_number) # TP for tp_object_result in frame_result.pass_fail_result.tp_object_results: if tp_object_result.ground_truth_object.uuid not in status_infos: diff --git a/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py b/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py index ec2c25e3..6f7b2a3f 100644 --- a/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py +++ b/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py @@ -27,7 +27,7 @@ if TYPE_CHECKING: from perception_eval.object import ObjectType - from .perception_frame_config import CriticalObjectFilterConfig + from .perception_frame_config import PerceptionFrameConfig from .perception_frame_config import PerceptionPassFailConfig from .perception_result import DynamicObjectWithPerceptionResult @@ -59,14 +59,14 @@ def __init__( self, unix_time: int, frame_number: int, - critical_object_filter_config: CriticalObjectFilterConfig, + frame_config: PerceptionFrameConfig, frame_pass_fail_config: PerceptionPassFailConfig, ego2map: Optional[np.ndarray] = None, ) -> None: self.unix_time = unix_time self.frame_number = frame_number # TODO(ktro2828): merge CriticalObjectFilterConfig and FramePassFailConfig into one - self.critical_object_filter_config = critical_object_filter_config + self.frame_config = frame_config self.frame_pass_fail_config = frame_pass_fail_config self.ego2map = ego2map @@ -79,7 +79,7 @@ def __init__( def evaluate( self, object_results: List[DynamicObjectWithPerceptionResult], - ros_critical_ground_truth_objects: List[ObjectType], + 
critical_ground_truth_objects: List[ObjectType], ) -> None: """Evaluate object results' pass fail. @@ -88,11 +88,11 @@ def evaluate( ros_critical_ground_truth_objects (List[ObjectType]): Critical ground truth objects must be evaluated at current frame. """ - self.critical_ground_truth_objects = objects_filter.filter_objects( - objects=ros_critical_ground_truth_objects, + self.critical_ground_truths = objects_filter.filter_objects( + objects=critical_ground_truth_objects, is_gt=True, ego2map=self.ego2map, - **self.critical_object_filter_config.filtering_params, + **self.frame_config.filtering_params, ) self.tp_object_results, self.fp_object_results = self.__get_positive_object_results( object_results=object_results, @@ -104,7 +104,7 @@ def evaluate( object_results, self.frame_pass_fail_config.target_labels, MatchingMode.IOU2D if self.frame_pass_fail_config.evaluation_task.is_2d() else MatchingMode.PLANEDISTANCE, - self.frame_pass_fail_config.matching_threshold_list, + self.frame_pass_fail_config.thresholds, ) def get_num_success(self) -> int: @@ -157,7 +157,7 @@ def __get_positive_object_results( matching_mode=MatchingMode.IOU2D if self.frame_pass_fail_config.evaluation_task.is_2d() else MatchingMode.PLANEDISTANCE, - matching_threshold_list=self.frame_pass_fail_config.matching_threshold_list, + matching_threshold_list=self.frame_pass_fail_config.thresholds, ) # filter by critical_ground_truth_objects diff --git a/perception_eval/perception_eval/result/sensing/sensing_frame_result.py b/perception_eval/perception_eval/result/sensing/sensing_frame_result.py index b832ab4f..32bb4bc6 100644 --- a/perception_eval/perception_eval/result/sensing/sensing_frame_result.py +++ b/perception_eval/perception_eval/result/sensing/sensing_frame_result.py @@ -53,14 +53,14 @@ def __init__( self, sensing_frame_config: SensingFrameConfig, unix_time: int, - frame_name: str, + frame_number: int, ) -> None: # Config self.sensing_frame_config = sensing_frame_config # Frame information self.unix_time: int = unix_time - self.frame_name: str = frame_name + self.frame_number: int = frame_number # Containers for results self.detection_success_results: List[DynamicObjectWithSensingResult] = [] diff --git a/perception_eval/perception_eval/tool/perception_analyzer_base.py b/perception_eval/perception_eval/tool/perception_analyzer_base.py index ce6e0d3d..417a991f 100644 --- a/perception_eval/perception_eval/tool/perception_analyzer_base.py +++ b/perception_eval/perception_eval/tool/perception_analyzer_base.py @@ -432,7 +432,7 @@ def get_metrics_score(self, frame_results: List[PerceptionFrameResult]) -> Metri for label in target_labels: scene_results[label].append(obj_results_dict[label]) scene_num_gt[label] += num_gt_dict[label] - used_frame.append(int(frame.frame_name)) + used_frame.append(frame.frame_number) metrics_score: MetricsScore = MetricsScore( config=self.config.metrics_config, @@ -487,13 +487,13 @@ def add(self, frame_results: List[PerceptionFrameResult]) -> pd.DataFrame: if len(self) > 0: concat.append(self.df) - self.__ego2maps[str(self.num_scene)][str(frame.frame_name)] = frame.frame_ground_truth.ego2map + self.__ego2maps[str(self.num_scene)][str(frame.frame_number)] = frame.frame_ground_truth.ego2map tp_df = self.format2df( frame.pass_fail_result.tp_object_results, status=MatchingStatus.TP, start=start, - frame_num=int(frame.frame_name), + frame_num=frame.frame_number, ego2map=frame.frame_ground_truth.ego2map, ) if len(tp_df) > 0: @@ -504,7 +504,7 @@ def add(self, frame_results: List[PerceptionFrameResult]) -> 
pd.DataFrame: frame.pass_fail_result.fp_object_results, status=MatchingStatus.FP, start=start, - frame_num=int(frame.frame_name), + frame_num=frame.frame_number, ego2map=frame.frame_ground_truth.ego2map, ) if len(fp_df) > 0: @@ -515,7 +515,7 @@ def add(self, frame_results: List[PerceptionFrameResult]) -> pd.DataFrame: frame.pass_fail_result.tn_objects, status=MatchingStatus.TN, start=start, - frame_num=int(frame.frame_name), + frame_num=frame.frame_number, ego2map=frame.frame_ground_truth.ego2map, ) if len(tn_df) > 0: @@ -526,7 +526,7 @@ def add(self, frame_results: List[PerceptionFrameResult]) -> pd.DataFrame: frame.pass_fail_result.fn_objects, status=MatchingStatus.FN, start=start, - frame_num=int(frame.frame_name), + frame_num=frame.frame_number, ego2map=frame.frame_ground_truth.ego2map, ) if len(fn_df) > 0: diff --git a/perception_eval/perception_eval/visualization/perception_visualizer2d.py b/perception_eval/perception_eval/visualization/perception_visualizer2d.py index f092bdf8..82fc8f2e 100644 --- a/perception_eval/perception_eval/visualization/perception_visualizer2d.py +++ b/perception_eval/perception_eval/visualization/perception_visualizer2d.py @@ -237,7 +237,7 @@ def visualize_frame( axes[row, col].set_axis_off() axes[row, col].set_title(f"{camera_name}") - frame_number: str = frame_result.frame_ground_truth.frame_name + frame_number: int = frame_result.frame_number self.__figure.suptitle(f"Frame: {frame_number}") # Plot objects diff --git a/perception_eval/perception_eval/visualization/perception_visualizer3d.py b/perception_eval/perception_eval/visualization/perception_visualizer3d.py index 118373bb..6de69750 100644 --- a/perception_eval/perception_eval/visualization/perception_visualizer3d.py +++ b/perception_eval/perception_eval/visualization/perception_visualizer3d.py @@ -207,7 +207,7 @@ def visualize_frame( if axes is None: axes: Axes = plt.subplot() - frame_number: str = frame_result.frame_ground_truth.frame_name + frame_number: int = frame_result.frame_number axes.set_title(f"Frame: {frame_number} ({self.config.frame_ids[0].value})") axes.set_xlabel("x [m]") axes.set_ylabel("y [m]") diff --git a/perception_eval/perception_eval/visualization/sensing_visualizer.py b/perception_eval/perception_eval/visualization/sensing_visualizer.py index 0cdd5023..de1faefd 100644 --- a/perception_eval/perception_eval/visualization/sensing_visualizer.py +++ b/perception_eval/perception_eval/visualization/sensing_visualizer.py @@ -182,7 +182,7 @@ def visualize_frame( if axes is None: axes: Axes = plt.subplot() - frame_number: str = frame_result.frame_name + frame_number: int = frame_result.frame_number axes.set_title(f"Frame: {frame_number} ({self.config.frame_ids[0].value})") axes.set_xlabel("x [m]") axes.set_ylabel("y [m]") diff --git a/perception_eval/test/perception_fp_validation_lsim.py b/perception_eval/test/perception_fp_validation_lsim.py index 6c69ab8a..916ca538 100644 --- a/perception_eval/test/perception_fp_validation_lsim.py +++ b/perception_eval/test/perception_fp_validation_lsim.py @@ -24,8 +24,8 @@ from perception_eval.common.status import get_scene_rates from perception_eval.config import PerceptionEvaluationConfig from perception_eval.manager import PerceptionEvaluationManager -from perception_eval.result import CriticalObjectFilterConfig from perception_eval.result import get_object_status +from perception_eval.result import PerceptionFrameConfig from perception_eval.result import PerceptionPassFailConfig from perception_eval.util.debug import get_objects_with_difference 
from perception_eval.util.logger_config import configure_logger @@ -69,9 +69,9 @@ def callback(self, unix_time: int, estimated_objects: List[ObjectType]) -> None: # Ideally, critical GT should be obtained in each frame. # In this mock, set it as a copy of `ground_truth_now_frame`. - ros_critical_ground_truth_objects = ground_truth_now_frame.objects + critical_ground_truth_objects = ground_truth_now_frame.objects - critical_object_filter_config = CriticalObjectFilterConfig( + frame_config = PerceptionFrameConfig( evaluator_config=self.evaluator.evaluator_config, target_labels=["car", "bicycle", "pedestrian", "motorbike"], max_x_position_list=[100.0, 100.0, 100.0, 100.0], @@ -81,15 +81,15 @@ def callback(self, unix_time: int, estimated_objects: List[ObjectType]) -> None: frame_pass_fail_config = PerceptionPassFailConfig( evaluator_config=self.evaluator.evaluator_config, target_labels=["car", "bicycle", "pedestrian", "motorbike"], - matching_threshold_list=[2.0, 2.0, 2.0, 2.0], + thresholds=[2.0, 2.0, 2.0, 2.0], ) frame_result = self.evaluator.add_frame_result( unix_time=unix_time, ground_truth_now_frame=ground_truth_now_frame, estimated_objects=estimated_objects, - ros_critical_ground_truth_objects=ros_critical_ground_truth_objects, - critical_object_filter_config=critical_object_filter_config, + critical_ground_truth_objects=critical_ground_truth_objects, + frame_config=frame_config, frame_pass_fail_config=frame_pass_fail_config, ) self.display(frame_result) diff --git a/perception_eval/test/perception_lsim.py b/perception_eval/test/perception_lsim.py index 8808eb05..0880b013 100644 --- a/perception_eval/test/perception_lsim.py +++ b/perception_eval/test/perception_lsim.py @@ -22,7 +22,7 @@ from perception_eval.config import PerceptionEvaluationConfig from perception_eval.manager import PerceptionEvaluationManager -from perception_eval.result import CriticalObjectFilterConfig +from perception_eval.result import PerceptionFrameConfig from perception_eval.result import PerceptionFrameResult from perception_eval.result import PerceptionPassFailConfig from perception_eval.tool import PerceptionAnalyzer3D @@ -106,12 +106,12 @@ def callback( # ros_critical_ground_truth_objects : List[DynamicObject] = custom_critical_object_filter( # ground_truth_now_frame.objects # ) - ros_critical_ground_truth_objects = ground_truth_now_frame.objects + critical_ground_truth_objects = ground_truth_now_frame.objects # 1 frameの評価 # 距離などでUC評価objectを選別するためのインターフェイス(PerceptionEvaluationManager初期化時にConfigを設定せず、関数受け渡しにすることで動的に変更可能なInterface) # どれを注目物体とするかのparam - critical_object_filter_config = CriticalObjectFilterConfig( + frame_config = PerceptionFrameConfig( evaluator_config=self.evaluator.evaluator_config, target_labels=["car", "bicycle", "pedestrian", "motorbike"], ignore_attributes=["cycle_state.without_rider"], @@ -122,15 +122,15 @@ def callback( frame_pass_fail_config = PerceptionPassFailConfig( evaluator_config=self.evaluator.evaluator_config, target_labels=["car", "bicycle", "pedestrian", "motorbike"], - matching_threshold_list=[2.0, 2.0, 2.0, 2.0], + thresholds=[2.0, 2.0, 2.0, 2.0], ) frame_result = self.evaluator.add_frame_result( unix_time=unix_time, ground_truth_now_frame=ground_truth_now_frame, estimated_objects=estimated_objects, - ros_critical_ground_truth_objects=ros_critical_ground_truth_objects, - critical_object_filter_config=critical_object_filter_config, + critical_ground_truth_objects=critical_ground_truth_objects, + frame_config=frame_config, frame_pass_fail_config=frame_pass_fail_config, ) 
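# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the patch): after this commit the
# former CriticalObjectFilterConfig is built as PerceptionFrameConfig, and the
# pass/fail matching thresholds are passed as `thresholds`. The helper below
# only mirrors the updated test code above; `evaluator_config` is assumed to be
# an already constructed PerceptionEvaluationConfig, and the package is assumed
# to be installed at this commit of the series.
from typing import List

from perception_eval.result import PerceptionFrameConfig
from perception_eval.result import PerceptionPassFailConfig


def build_frame_configs(evaluator_config, target_labels: List[str]):
    # Which objects count for this frame (filtering parameters).
    frame_config = PerceptionFrameConfig(
        evaluator_config=evaluator_config,
        target_labels=target_labels,
        max_x_position_list=[100.0] * len(target_labels),
        max_y_position_list=[100.0] * len(target_labels),
    )
    # Pass/fail judgement: one matching threshold per target label.
    frame_pass_fail_config = PerceptionPassFailConfig(
        evaluator_config=evaluator_config,
        target_labels=target_labels,
        thresholds=[2.0] * len(target_labels),
    )
    return frame_config, frame_pass_fail_config
# ---------------------------------------------------------------------------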
self.visualize(frame_result) diff --git a/perception_eval/test/perception_lsim2d.py b/perception_eval/test/perception_lsim2d.py index dba83e3e..5d7fb984 100644 --- a/perception_eval/test/perception_lsim2d.py +++ b/perception_eval/test/perception_lsim2d.py @@ -23,7 +23,7 @@ from perception_eval.config import PerceptionEvaluationConfig from perception_eval.manager import PerceptionEvaluationManager -from perception_eval.result import CriticalObjectFilterConfig +from perception_eval.result import PerceptionFrameConfig from perception_eval.result import PerceptionPassFailConfig from perception_eval.tool import PerceptionAnalyzer2D from perception_eval.util.debug import get_objects_with_difference2d @@ -107,7 +107,7 @@ def callback( # ros_critical_ground_truth_objects : List[DynamicObject] = custom_critical_object_filter( # ground_truth_now_frame.objects # ) - ros_critical_ground_truth_objects = ground_truth_now_frame.objects + critical_ground_truth_objects = ground_truth_now_frame.objects # 1 frameの評価 target_labels = ( @@ -116,27 +116,27 @@ def callback( else ["car", "bicycle", "pedestrian", "motorbike"] ) ignore_attributes = ["cycle_state.without_rider"] if self.label_prefix == "autoware" else None - matching_threshold_list = None if self.evaluation_task == "classification2d" else [0.5, 0.5, 0.5, 0.5] + thresholds = None if self.evaluation_task == "classification2d" else [0.5, 0.5, 0.5, 0.5] # 距離などでUC評価objectを選別するためのインターフェイス(PerceptionEvaluationManager初期化時にConfigを設定せず、関数受け渡しにすることで動的に変更可能なInterface) # どれを注目物体とするかのparam - critical_object_filter_config: CriticalObjectFilterConfig = CriticalObjectFilterConfig( + frame_config = PerceptionFrameConfig( evaluator_config=self.evaluator.evaluator_config, target_labels=target_labels, ignore_attributes=ignore_attributes, ) # Pass fail を決めるパラメータ - frame_pass_fail_config: PerceptionPassFailConfig = PerceptionPassFailConfig( + frame_pass_fail_config = PerceptionPassFailConfig( evaluator_config=self.evaluator.evaluator_config, target_labels=target_labels, - matching_threshold_list=matching_threshold_list, + thresholds=thresholds, ) frame_result = self.evaluator.add_frame_result( unix_time=unix_time, ground_truth_now_frame=ground_truth_now_frame, estimated_objects=estimated_objects, - ros_critical_ground_truth_objects=ros_critical_ground_truth_objects, - critical_object_filter_config=critical_object_filter_config, + critical_ground_truth_objects=critical_ground_truth_objects, + frame_config=frame_config, frame_pass_fail_config=frame_pass_fail_config, ) self.visualize(frame_result) From 32c8cdc370f6a214b62a9d288c060df58d2d621d Mon Sep 17 00:00:00 2001 From: ktro2828 Date: Tue, 19 Dec 2023 23:42:58 +0900 Subject: [PATCH 5/9] refactor: remove `PerceptionPassFailConfig` Signed-off-by: ktro2828 --- .../manager/perception_evaluation_manager.py | 8 +-- .../result/perception/__init__.py | 2 - .../perception/perception_frame_config.py | 55 +++---------------- .../perception/perception_frame_result.py | 3 - .../perception/perception_pass_fail_result.py | 30 ++++------ .../test/perception_fp_validation_lsim.py | 7 --- perception_eval/test/perception_lsim.py | 7 --- perception_eval/test/perception_lsim2d.py | 7 --- 8 files changed, 21 insertions(+), 98 deletions(-) diff --git a/perception_eval/perception_eval/manager/perception_evaluation_manager.py b/perception_eval/perception_eval/manager/perception_evaluation_manager.py index 9dfa1d26..91408795 100644 --- a/perception_eval/perception_eval/manager/perception_evaluation_manager.py +++ 
b/perception_eval/perception_eval/manager/perception_evaluation_manager.py @@ -37,7 +37,6 @@ from perception_eval.object import ObjectType from perception_eval.result import DynamicObjectWithPerceptionResult from perception_eval.result import PerceptionFrameConfig - from perception_eval.result import PerceptionPassFailConfig from perception_eval.visualization import PerceptionVisualizerType @@ -86,7 +85,6 @@ def add_frame_result( estimated_objects: List[ObjectType], critical_ground_truth_objects: List[ObjectType], frame_config: PerceptionFrameConfig, - frame_pass_fail_config: PerceptionPassFailConfig, ) -> PerceptionFrameResult: """Get perception result at current frame. @@ -101,10 +99,9 @@ def add_frame_result( ground_truth_now_frame (FrameGroundTruth): FrameGroundTruth instance that has the closest timestamp with `unix_time`. estimated_objects (List[ObjectType]): Estimated objects list. - ros_critical_ground_truth_objects (List[ObjectType]): Critical ground truth objects filtered by ROS + critical_ground_truth_objects (List[ObjectType]): Critical ground truth objects filtered by ROS node to evaluate pass fail result. - critical_object_filter_config (CriticalObjectFilterConfig): Parameter config to filter objects. - frame_pass_fail_config (PerceptionPassFailConfig):Parameter config to evaluate pass/fail. + frame_config (PerceptionFrameConfig): Parameter config for frame. Returns: PerceptionFrameResult: Evaluation result. @@ -119,7 +116,6 @@ def add_frame_result( frame_ground_truth=ground_truth_now_frame, metrics_config=self.metrics_config, frame_config=frame_config, - frame_pass_fail_config=frame_pass_fail_config, unix_time=unix_time, target_labels=self.target_labels, ) diff --git a/perception_eval/perception_eval/result/perception/__init__.py b/perception_eval/perception_eval/result/perception/__init__.py index 9664edb0..199dab14 100644 --- a/perception_eval/perception_eval/result/perception/__init__.py +++ b/perception_eval/perception_eval/result/perception/__init__.py @@ -14,7 +14,6 @@ from .perception_frame_config import PerceptionFrameConfig -from .perception_frame_config import PerceptionPassFailConfig from .perception_frame_result import get_object_status from .perception_frame_result import PerceptionFrameResult from .perception_result import DynamicObjectWithPerceptionResult @@ -22,7 +21,6 @@ __all__ = ( "PerceptionFrameConfig", - "PerceptionPassFailConfig", "get_object_status", "PerceptionFrameResult", "DynamicObjectWithPerceptionResult", diff --git a/perception_eval/perception_eval/result/perception/perception_frame_config.py b/perception_eval/perception_eval/result/perception/perception_frame_config.py index 08f4fd24..5b5ea011 100644 --- a/perception_eval/perception_eval/result/perception/perception_frame_config.py +++ b/perception_eval/perception_eval/result/perception/perception_frame_config.py @@ -20,17 +20,15 @@ from typing import Optional from typing import TYPE_CHECKING -from perception_eval.common.evaluation_task import EvaluationTask from perception_eval.common.label import set_target_lists from perception_eval.common.threshold import check_thresholds if TYPE_CHECKING: - from perception_eval.common.label import LabelType from perception_eval.config import PerceptionEvaluationConfig class PerceptionFrameConfig: - """[summary] + """ Config class for critical object filter Attributes: @@ -63,9 +61,9 @@ def __init__( min_point_numbers: Optional[List[int]] = None, confidence_threshold_list: Optional[List[float]] = None, target_uuids: Optional[List[str]] = None, + 
thresholds: Optional[List[float]] = None, ) -> None: - """[summary] - + """ Args: evaluator_config (PerceptionEvaluationConfig): Evaluation config target_labels (List[str]): The list of target label. @@ -83,11 +81,10 @@ def __init__( The list of confidence threshold for each label. Defaults to None. target_uuids (Optional[List[str]]): The list of target uuid. Defaults to None. """ - self.target_labels: List[LabelType] = set_target_lists( - target_labels, - evaluator_config.label_converter, - ) - self.ignore_attributes: Optional[List[str]] = ignore_attributes + self.evaluation_task = evaluator_config.evaluation_task + + self.target_labels = set_target_lists(target_labels, evaluator_config.label_converter) + self.ignore_attributes = ignore_attributes num_elements: int = len(self.target_labels) if max_x_position_list and max_y_position_list: @@ -132,47 +129,9 @@ def __init__( "target_uuids": self.target_uuids, } - -class PerceptionPassFailConfig: - """[summary] - Config filter for pass fail to frame result - - Attributes: - self.evaluation_task (EvaluationTask): Evaluation task. - self.target_labels (List[str]): The list of target label. - self.matching_distance_list (Optional[List[float]]): The threshold list for Pass/Fail. - For 2D evaluation, IOU2D, for 3D evaluation, PLANEDISTANCE will be used. - self.confidence_threshold_list (Optional[List[float]]): The list of confidence threshold. - """ - - def __init__( - self, - evaluator_config: PerceptionEvaluationConfig, - target_labels: Optional[List[str]], - thresholds: Optional[List[float]] = None, - ) -> None: - """ - Args: - evaluator_config (PerceptionEvaluationConfig): Evaluation config - target_labels (List[str]): Target list. If None or empty list is specified, all labels will be evaluated. - matching_threshold_list (List[float]): The threshold list for Pass/Fail. - For 2D evaluation, IOU2D, for 3D evaluation, PLANEDISTANCE will be used. Defaults to None. - confidence_threshold_list (Optional[List[float]]): The list of confidence threshold. Defaults to None. 
- """ - self.evaluation_task: EvaluationTask = evaluator_config.evaluation_task - self.target_labels: List[LabelType] = set_target_lists( - target_labels, - evaluator_config.label_converter, - ) - num_elements: int = len(self.target_labels) if thresholds is None: self.thresholds = None else: self.thresholds = check_thresholds(thresholds, num_elements) - - -class UseCaseThresholdsError(Exception): - def __init__(self, message) -> None: - super().__init__(message) diff --git a/perception_eval/perception_eval/result/perception/perception_frame_result.py b/perception_eval/perception_eval/result/perception/perception_frame_result.py index 182d64a4..ce862f80 100644 --- a/perception_eval/perception_eval/result/perception/perception_frame_result.py +++ b/perception_eval/perception_eval/result/perception/perception_frame_result.py @@ -33,7 +33,6 @@ from perception_eval.object import ObjectType from .perception_frame_config import PerceptionFrameConfig - from .perception_frame_config import PerceptionPassFailConfig from .perception_result import DynamicObjectWithPerceptionResult @@ -65,7 +64,6 @@ def __init__( frame_ground_truth: FrameGroundTruth, metrics_config: MetricsScoreConfig, frame_config: PerceptionFrameConfig, - frame_pass_fail_config: PerceptionPassFailConfig, unix_time: int, target_labels: List[LabelType], ) -> None: @@ -85,7 +83,6 @@ def __init__( unix_time=unix_time, frame_number=self.frame_number, frame_config=frame_config, - frame_pass_fail_config=frame_pass_fail_config, ego2map=frame_ground_truth.ego2map, ) diff --git a/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py b/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py index 6f7b2a3f..f19bad5b 100644 --- a/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py +++ b/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py @@ -28,7 +28,6 @@ from perception_eval.object import ObjectType from .perception_frame_config import PerceptionFrameConfig - from .perception_frame_config import PerceptionPassFailConfig from .perception_result import DynamicObjectWithPerceptionResult @@ -36,10 +35,7 @@ class PassFailResult: """Class for keeping TP/FP/TN/FP object results and GT objects for critical GT objects. Attributes: - critical_object_filter_config (CriticalObjectFilterConfig): Critical object filter config. - frame_pass_fail_config (PerceptionPassFailConfig): Frame pass fail config. - critical_ground_truth_objects (Optional[List[DynamicObject]]): Critical ground truth objects - must be evaluated at current frame. + frame_config (PerceptionFrameConfig): Critical object filter config. tn_objects (List[ObjectType]): TN ground truth objects list. fn_objects (List[ObjectType]): FN ground truth objects list. fp_object_results (List[DynamicObjectWithPerceptionResult]): FP object results list. @@ -49,7 +45,6 @@ class PassFailResult: unix_time (int): UNIX timestamp. frame_number (int): The Number of frame. critical_object_filter_config (CriticalObjectFilterConfig): Critical object filter config. - frame_pass_fail_config (PerceptionPassFailConfig): Frame pass fail config. frame_id (str): `base_link` or `map`. ego2map (Optional[numpy.ndarray]): Array of 4x4 matrix to transform coordinates from ego to map. Defaults to None. 
@@ -60,14 +55,11 @@ def __init__( unix_time: int, frame_number: int, frame_config: PerceptionFrameConfig, - frame_pass_fail_config: PerceptionPassFailConfig, ego2map: Optional[np.ndarray] = None, ) -> None: self.unix_time = unix_time self.frame_number = frame_number - # TODO(ktro2828): merge CriticalObjectFilterConfig and FramePassFailConfig into one self.frame_config = frame_config - self.frame_pass_fail_config = frame_pass_fail_config self.ego2map = ego2map self.critical_ground_truth_objects: List[ObjectType] = [] @@ -88,7 +80,7 @@ def evaluate( ros_critical_ground_truth_objects (List[ObjectType]): Critical ground truth objects must be evaluated at current frame. """ - self.critical_ground_truths = objects_filter.filter_objects( + critical_ground_truth_objects = objects_filter.filter_objects( objects=critical_ground_truth_objects, is_gt=True, ego2map=self.ego2map, @@ -96,17 +88,19 @@ def evaluate( ) self.tp_object_results, self.fp_object_results = self.__get_positive_object_results( object_results=object_results, - critical_ground_truth_objects=self.critical_ground_truth_objects, + critical_ground_truth_objects=critical_ground_truth_objects, ) self.tn_objects, self.fn_objects = objects_filter.get_negative_objects( - self.critical_ground_truth_objects, + critical_ground_truth_objects, object_results, - self.frame_pass_fail_config.target_labels, - MatchingMode.IOU2D if self.frame_pass_fail_config.evaluation_task.is_2d() else MatchingMode.PLANEDISTANCE, - self.frame_pass_fail_config.thresholds, + self.frame_config.target_labels, + MatchingMode.IOU2D if self.frame_config.evaluation_task.is_2d() else MatchingMode.PLANEDISTANCE, + self.frame_config.thresholds, ) + self.critical_ground_truth_objects = critical_ground_truth_objects + def get_num_success(self) -> int: """Returns the number of success. 
@@ -153,11 +147,11 @@ def __get_positive_object_results( """ tp_object_results, fp_object_results = objects_filter.get_positive_objects( object_results=object_results, - target_labels=self.frame_pass_fail_config.target_labels, + target_labels=self.frame_config.target_labels, matching_mode=MatchingMode.IOU2D - if self.frame_pass_fail_config.evaluation_task.is_2d() + if self.frame_config.evaluation_task.is_2d() else MatchingMode.PLANEDISTANCE, - matching_threshold_list=self.frame_pass_fail_config.thresholds, + matching_threshold_list=self.frame_config.thresholds, ) # filter by critical_ground_truth_objects diff --git a/perception_eval/test/perception_fp_validation_lsim.py b/perception_eval/test/perception_fp_validation_lsim.py index 916ca538..02680c8d 100644 --- a/perception_eval/test/perception_fp_validation_lsim.py +++ b/perception_eval/test/perception_fp_validation_lsim.py @@ -26,7 +26,6 @@ from perception_eval.manager import PerceptionEvaluationManager from perception_eval.result import get_object_status from perception_eval.result import PerceptionFrameConfig -from perception_eval.result import PerceptionPassFailConfig from perception_eval.util.debug import get_objects_with_difference from perception_eval.util.logger_config import configure_logger @@ -76,11 +75,6 @@ def callback(self, unix_time: int, estimated_objects: List[ObjectType]) -> None: target_labels=["car", "bicycle", "pedestrian", "motorbike"], max_x_position_list=[100.0, 100.0, 100.0, 100.0], max_y_position_list=[100.0, 100.0, 100.0, 100.0], - ) - - frame_pass_fail_config = PerceptionPassFailConfig( - evaluator_config=self.evaluator.evaluator_config, - target_labels=["car", "bicycle", "pedestrian", "motorbike"], thresholds=[2.0, 2.0, 2.0, 2.0], ) @@ -90,7 +84,6 @@ def callback(self, unix_time: int, estimated_objects: List[ObjectType]) -> None: estimated_objects=estimated_objects, critical_ground_truth_objects=critical_ground_truth_objects, frame_config=frame_config, - frame_pass_fail_config=frame_pass_fail_config, ) self.display(frame_result) diff --git a/perception_eval/test/perception_lsim.py b/perception_eval/test/perception_lsim.py index 0880b013..d9c408d0 100644 --- a/perception_eval/test/perception_lsim.py +++ b/perception_eval/test/perception_lsim.py @@ -24,7 +24,6 @@ from perception_eval.manager import PerceptionEvaluationManager from perception_eval.result import PerceptionFrameConfig from perception_eval.result import PerceptionFrameResult -from perception_eval.result import PerceptionPassFailConfig from perception_eval.tool import PerceptionAnalyzer3D from perception_eval.util.debug import format_class_for_log from perception_eval.util.debug import get_objects_with_difference @@ -117,11 +116,6 @@ def callback( ignore_attributes=["cycle_state.without_rider"], max_x_position_list=[30.0, 30.0, 30.0, 30.0], max_y_position_list=[30.0, 30.0, 30.0, 30.0], - ) - # Pass fail を決めるパラメータ - frame_pass_fail_config = PerceptionPassFailConfig( - evaluator_config=self.evaluator.evaluator_config, - target_labels=["car", "bicycle", "pedestrian", "motorbike"], thresholds=[2.0, 2.0, 2.0, 2.0], ) @@ -131,7 +125,6 @@ def callback( estimated_objects=estimated_objects, critical_ground_truth_objects=critical_ground_truth_objects, frame_config=frame_config, - frame_pass_fail_config=frame_pass_fail_config, ) self.visualize(frame_result) diff --git a/perception_eval/test/perception_lsim2d.py b/perception_eval/test/perception_lsim2d.py index 5d7fb984..5c8c8885 100644 --- a/perception_eval/test/perception_lsim2d.py +++ 
b/perception_eval/test/perception_lsim2d.py @@ -24,7 +24,6 @@ from perception_eval.config import PerceptionEvaluationConfig from perception_eval.manager import PerceptionEvaluationManager from perception_eval.result import PerceptionFrameConfig -from perception_eval.result import PerceptionPassFailConfig from perception_eval.tool import PerceptionAnalyzer2D from perception_eval.util.debug import get_objects_with_difference2d from perception_eval.util.logger_config import configure_logger @@ -123,11 +122,6 @@ def callback( evaluator_config=self.evaluator.evaluator_config, target_labels=target_labels, ignore_attributes=ignore_attributes, - ) - # Pass fail を決めるパラメータ - frame_pass_fail_config = PerceptionPassFailConfig( - evaluator_config=self.evaluator.evaluator_config, - target_labels=target_labels, thresholds=thresholds, ) @@ -137,7 +131,6 @@ def callback( estimated_objects=estimated_objects, critical_ground_truth_objects=critical_ground_truth_objects, frame_config=frame_config, - frame_pass_fail_config=frame_pass_fail_config, ) self.visualize(frame_result) From 03ec74719df5e90bc3509a615c99c5bb2f138a88 Mon Sep 17 00:00:00 2001 From: ktro2828 Date: Thu, 28 Dec 2023 14:22:19 +0900 Subject: [PATCH 6/9] feat: define classes for config parameters Signed-off-by: ktro2828 --- ...nfig_base.py => evaluation_config_base.py} | 55 ++++---- .../perception_eval/config/params/__init__.py | 8 ++ .../config/params/filter_param.py | 113 ++++++++++++++++ .../config/params/label_param.py | 25 ++++ .../config/params/metrics_param.py | 78 +++++++++++ .../config/perception_evaluation_config.py | 126 ++---------------- .../config/sensing_evaluation_config.py | 47 ++----- ...ger_base.py => evaluation_manager_base.py} | 38 +++--- .../manager/perception_evaluation_manager.py | 44 +++--- .../manager/sensing_evaluation_manager.py | 23 ++-- .../metrics/metrics_score_config.py | 46 ++++--- .../tool/perception_analyzer2d.py | 4 +- .../tool/perception_analyzer3d.py | 20 +-- .../tool/perception_analyzer3dfield.py | 6 +- .../tool/perception_analyzer_base.py | 4 +- .../visualization/perception_visualizer3d.py | 6 +- .../test_perception_evaluation_config.py | 6 +- .../config/test_sensing_evaluation_config.py | 6 +- .../test/metrics/test_metrics_score_config.py | 62 ++++++--- .../test/perception_fp_validation_lsim.py | 6 +- perception_eval/test/perception_lsim.py | 16 +-- perception_eval/test/perception_lsim2d.py | 24 ++-- perception_eval/test/sensing_lsim.py | 8 +- 23 files changed, 441 insertions(+), 330 deletions(-) rename perception_eval/perception_eval/config/{_evaluation_config_base.py => evaluation_config_base.py} (77%) create mode 100644 perception_eval/perception_eval/config/params/__init__.py create mode 100644 perception_eval/perception_eval/config/params/filter_param.py create mode 100644 perception_eval/perception_eval/config/params/label_param.py create mode 100644 perception_eval/perception_eval/config/params/metrics_param.py rename perception_eval/perception_eval/manager/{_evaluation_manager_base.py => evaluation_manager_base.py} (79%) diff --git a/perception_eval/perception_eval/config/_evaluation_config_base.py b/perception_eval/perception_eval/config/evaluation_config_base.py similarity index 77% rename from perception_eval/perception_eval/config/_evaluation_config_base.py rename to perception_eval/perception_eval/config/evaluation_config_base.py index 7138eab0..70154b04 100644 --- a/perception_eval/perception_eval/config/_evaluation_config_base.py +++ 
b/perception_eval/perception_eval/config/evaluation_config_base.py @@ -29,13 +29,19 @@ from perception_eval.common.evaluation_task import set_task from perception_eval.common.label import LabelConverter +from perception_eval.common.label import set_target_lists from perception_eval.common.schema import FrameID +from .params import LabelParam + if TYPE_CHECKING: from perception_eval.common.evaluation_task import EvaluationTask + from .params import FilterParamType + from .params import MetricsParamType + -class _EvaluationConfigBase(ABC): +class EvaluationConfigBase(ABC): """Abstract base class for evaluation config Directory structure to save log and visualization result is following @@ -82,24 +88,23 @@ def __init__( dataset_paths: List[str], frame_id: Union[str, Sequence[str]], result_root_directory: str, - evaluation_config_dict: Dict[str, Any], + config_dict: Dict[str, Any], load_raw_data: bool = False, ) -> None: super().__init__() # Check tasks are supported - self.evaluation_task: EvaluationTask = self._check_tasks(evaluation_config_dict) - self.evaluation_config_dict: Dict[str, Any] = evaluation_config_dict + self.evaluation_task: EvaluationTask = self._check_tasks(config_dict) # Labels - self.label_params = self._extract_label_params(evaluation_config_dict) + self.label_param = LabelParam.from_dict(config_dict) self.label_converter = LabelConverter( self.evaluation_task, - self.label_params["merge_similar_labels"], - self.label_params["label_prefix"], - self.label_params["count_label_number"], + merge_similar_labels=self.label_param.merge_similar_labels, + label_prefix=self.label_param.label_prefix, + count_label_number=self.label_param.count_label_number, ) - - self.filtering_params, self.metrics_params = self._extract_params(evaluation_config_dict) + self.target_labels = set_target_lists(config_dict.get("target_labels"), self.label_converter) + self.filter_param, self.metrics_param = self._extract_params(config_dict) # dataset self.dataset_paths: List[str] = dataset_paths @@ -126,7 +131,7 @@ def __init__( def support_tasks(self) -> List[str]: return self._support_tasks - def _check_tasks(self, evaluation_config_dict: Dict[str, Any]) -> EvaluationTask: + def _check_tasks(self, cfg: Dict[str, Any]) -> EvaluationTask: """Check if specified tasks are supported. Args: @@ -138,32 +143,20 @@ def _check_tasks(self, evaluation_config_dict: Dict[str, Any]) -> EvaluationTask Raises: ValueError: If the keys of input config are unsupported. """ - task: str = evaluation_config_dict["evaluation_task"] - if task not in self.support_tasks: - raise ValueError(f"Unsupported task: {task}\nSupported tasks: {self.support_tasks}") + task_name: str = cfg["evaluation_task"] + if task_name not in self.support_tasks: + raise ValueError(f"Unsupported task: {task_name}\nSupported tasks: {self.support_tasks}") + return set_task(task_name) - # evaluation task - evaluation_task: EvaluationTask = set_task(task) - return evaluation_task - - @staticmethod @abstractmethod - def _extract_label_params(evaluation_config_dict: Dict[str, Any]) -> Dict[str, Any]: - pass - - @abstractmethod - def _extract_params( - self, - evaluation_config_dict: Dict[str, Any], - ) -> Tuple[Dict[str, Any], Dict[str, Any]]: - """Extract filtering and metrics parameters from evaluation config. 
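# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the patch): the parameter classes added in
# this commit follow a small `from_dict()` / `as_dict()` convention, so a raw evaluation
# config dict is turned into typed parameters and back. LabelParam (defined later in this
# commit) is the smallest example; missing keys fall back to their defaults.
from perception_eval.config.params import LabelParam

label_param = LabelParam.from_dict({"label_prefix": "autoware", "merge_similar_labels": False})
assert label_param.count_label_number is True  # default used when the key is absent
assert label_param.as_dict() == {
    "label_prefix": "autoware",
    "merge_similar_labels": False,
    "count_label_number": True,
}
# ---------------------------------------------------------------------------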
+ def _extract_params(self, cfg: Dict[str, Any]) -> Tuple[FilterParamType, MetricsParamType]: + """ Args: - evaluation_config_dict (Dict[str, Any]) + evaluation_config_dict (Dict[str, Any]): _description_ Returns: - filter_params (Dict[str, Any]): filtering parameters. - metrics_params (Dict[str, Any]): metrics parameters. + Tuple[FilterParamType, MetricsParamType]: _description_ """ pass diff --git a/perception_eval/perception_eval/config/params/__init__.py b/perception_eval/perception_eval/config/params/__init__.py new file mode 100644 index 00000000..20bc855e --- /dev/null +++ b/perception_eval/perception_eval/config/params/__init__.py @@ -0,0 +1,8 @@ +from typing import Union + +from .filter_param import * # noqa +from .label_param import * # noqa +from .metrics_param import * # noqa + +FilterParamType = Union[PerceptionFilterParam, SensingFilterParam] # noqa +MetricsParamType = Union[PerceptionMetricsParam, SensingMetricsParam] # noqa diff --git a/perception_eval/perception_eval/config/params/filter_param.py b/perception_eval/perception_eval/config/params/filter_param.py new file mode 100644 index 00000000..e0056f5f --- /dev/null +++ b/perception_eval/perception_eval/config/params/filter_param.py @@ -0,0 +1,113 @@ +from __future__ import annotations + +from abc import ABC +from abc import abstractmethod +from dataclasses import asdict +from dataclasses import dataclass +from typing import Any +from typing import Dict +from typing import List +from typing import Optional +from typing import TYPE_CHECKING + +from perception_eval.common.evaluation_task import EvaluationTask +from perception_eval.common.threshold import set_thresholds + +if TYPE_CHECKING: + from perception_eval.common.label import LabelType + +__all__ = ("PerceptionFilterParam", "SensingFilterParam") + + +class FilterParamBase(ABC): + @classmethod + @abstractmethod + def from_dict( + cls, + cfg: Dict[str, Any], + evaluation_task: EvaluationTask, + target_labels: List[LabelType], + ) -> FilterParamBase: + pass + + def as_dict(self) -> Dict[str, Any]: + return asdict(self) + + +@dataclass +class PerceptionFilterParam(FilterParamBase): + evaluation_task: EvaluationTask + target_labels: List[LabelType] + max_x_position_list: Optional[List[float]] = None + max_y_position_list: Optional[List[float]] = None + min_distance_list: Optional[List[float]] = None + max_distance_list: Optional[List[float]] = None + min_point_numbers: Optional[List[float]] = None + confidence_threshold_list: Optional[List[float]] = None + target_uuids: Optional[List[str]] = None + ignore_attributes: Optional[List[str]] = None + + @classmethod + def from_dict( + cls, + cfg: Dict[str, Any], + evaluation_task: EvaluationTask, + target_labels: List[LabelType], + ) -> PerceptionFilterParam: + max_x_position: Optional[float] = cfg.get("max_x_position") + max_y_position: Optional[float] = cfg.get("max_y_position") + max_distance: Optional[float] = cfg.get("max_distance") + min_distance: Optional[float] = cfg.get("min_distance") + + num_elements: int = len(target_labels) + max_x_position_list = None + max_y_position_list = None + min_distance_list = None + max_distance_list = None + if max_x_position and max_y_position: + max_x_position_list: List[float] = set_thresholds(max_x_position, num_elements, False) + max_y_position_list: List[float] = set_thresholds(max_y_position, num_elements, False) + elif max_distance and min_distance: + max_distance_list: List[float] = set_thresholds(max_distance, num_elements, False) + min_distance_list: List[float] = 
[min_distance] * len(target_labels) + elif not evaluation_task.is_2d(): + raise RuntimeError("Either max x/y position or max/min distance should be specified") + + min_point_numbers: Optional[List[int]] = cfg.get("min_point_numbers") + if min_point_numbers is not None: + min_point_numbers = set_thresholds(min_point_numbers, num_elements, False) + + if evaluation_task == EvaluationTask.DETECTION and min_point_numbers is None: + raise RuntimeError("In detection task, min point numbers must be specified") + + confidence_threshold: Optional[float] = cfg.get("confidence_threshold") + if confidence_threshold is not None: + confidence_threshold_list: List[float] = set_thresholds(confidence_threshold, num_elements, False) + else: + confidence_threshold_list = None + + target_uuids: Optional[List[str]] = cfg.get("target_uuids") + ignore_attributes: Optional[List[str]] = cfg.get("ignore_attributes") + + return cls( + evaluation_task, + target_labels, + max_x_position_list, + max_y_position_list, + min_distance_list, + max_distance_list, + min_point_numbers, + confidence_threshold_list, + target_uuids, + ignore_attributes, + ) + + +@dataclass +class SensingFilterParam(FilterParamBase): + target_uuids: Optional[List[str]] = None + + @classmethod + def from_dict(cls, cfg: Dict[str, Any], **kwargs) -> SensingFilterParam: + target_uuids: Optional[List[str]] = cfg.get("target_uuids") + return cls(target_uuids) diff --git a/perception_eval/perception_eval/config/params/label_param.py b/perception_eval/perception_eval/config/params/label_param.py new file mode 100644 index 00000000..58fa601c --- /dev/null +++ b/perception_eval/perception_eval/config/params/label_param.py @@ -0,0 +1,25 @@ +from __future__ import annotations + +from dataclasses import asdict +from dataclasses import dataclass +from typing import Any +from typing import Dict + +__all__ = ("LabelParam",) + + +@dataclass +class LabelParam: + label_prefix: str + merge_similar_labels: bool = False + count_label_number: bool = True + + @classmethod + def from_dict(cls, cfg: Dict[str, Any]) -> LabelParam: + label_prefix: str = cfg.get("label_prefix", "autoware") + merge_similar_labels: bool = cfg.get("merge_similar_labels", False) + count_label_number: bool = cfg.get("count_label_number", True) + return cls(label_prefix, merge_similar_labels, count_label_number) + + def as_dict(self) -> Dict[str, Any]: + return asdict(self) diff --git a/perception_eval/perception_eval/config/params/metrics_param.py b/perception_eval/perception_eval/config/params/metrics_param.py new file mode 100644 index 00000000..617b43d7 --- /dev/null +++ b/perception_eval/perception_eval/config/params/metrics_param.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +from abc import ABC +from abc import abstractmethod +from dataclasses import asdict +from dataclasses import dataclass +from typing import Any +from typing import Dict +from typing import List +from typing import Optional +from typing import TYPE_CHECKING +from typing import Union + +if TYPE_CHECKING: + from perception_eval.common.evaluation_task import EvaluationTask + from perception_eval.common.label import LabelType + + +__all__ = ("PerceptionMetricsParam", "SensingMetricsParam") + + +class MetricsParamBase(ABC): + @classmethod + @abstractmethod + def from_dict( + cls, + cfg: Dict[str, Any], + evaluation_task: EvaluationTask, + target_labels: List[LabelType], + ) -> MetricsParamBase: + pass + + def as_dict(self) -> Dict[str, Any]: + return asdict(self) + + +@dataclass +class 
PerceptionMetricsParam(MetricsParamBase): + evaluation_task: EvaluationTask + target_labels: List[LabelType] + center_distance_thresholds: Optional[Union[List[float], List[List[float]]]] = None + plane_distance_thresholds: Optional[Union[List[float], List[List[float]]]] = None + iou_2d_thresholds: Optional[Union[List[float], List[List[float]]]] = None + iou_3d_thresholds: Optional[Union[List[float], List[List[float]]]] = None + + @classmethod + def from_dict( + cls, + cfg: Dict[str, Any], + evaluation_task: EvaluationTask, + target_labels: List[LabelType], + ) -> MetricsParamBase: + center_distance_thresholds = cfg.get("center_distance_thresholds") + plane_distance_thresholds = cfg.get("plane_distance_thresholds") + iou_2d_thresholds = cfg.get("iou_2d_thresholds") + iou_3d_thresholds = cfg.get("iou_3d_thresholds") + return cls( + evaluation_task, + target_labels, + center_distance_thresholds, + plane_distance_thresholds, + iou_2d_thresholds, + iou_3d_thresholds, + ) + + +@dataclass +class SensingMetricsParam(MetricsParamBase): + box_scale_0m: float = 1.0 + box_scale_100m: float = 1.0 + min_points_threshold: int = 1 + + @classmethod + def from_dict(cls, cfg: Dict[str, Any], **kwargs) -> MetricsParamBase: + box_scale_0m = cfg.get("box_scale_0m", 1.0) + box_scale_100m = cfg.get("box_scale_100m", 1.0) + min_point_numbers = cfg.get("min_points_threshold", 1) + return cls(box_scale_0m, box_scale_100m, min_point_numbers) diff --git a/perception_eval/perception_eval/config/perception_evaluation_config.py b/perception_eval/perception_eval/config/perception_evaluation_config.py index ccc010d9..d6ea1688 100644 --- a/perception_eval/perception_eval/config/perception_evaluation_config.py +++ b/perception_eval/perception_eval/config/perception_evaluation_config.py @@ -15,24 +15,18 @@ from typing import Any from typing import Dict from typing import List -from typing import Optional from typing import Sequence from typing import Tuple -from typing import TYPE_CHECKING from typing import Union -from perception_eval.common.evaluation_task import EvaluationTask -from perception_eval.common.label import set_target_lists -from perception_eval.common.threshold import set_thresholds from perception_eval.metrics import MetricsScoreConfig -from ._evaluation_config_base import _EvaluationConfigBase +from .evaluation_config_base import EvaluationConfigBase +from .params import PerceptionFilterParam +from .params import PerceptionMetricsParam -if TYPE_CHECKING: - from perception_eval.common.label import LabelType - -class PerceptionEvaluationConfig(_EvaluationConfigBase): +class PerceptionEvaluationConfig(EvaluationConfigBase): """Configuration class for perception evaluation. Directory structure to save log and visualization result is following @@ -59,7 +53,7 @@ class PerceptionEvaluationConfig(_EvaluationConfigBase): dataset_paths (List[str]): Dataset paths list. frame_id (Union[str, Sequence[str]]): FrameID(s) in string, where objects are with respect. result_root_directory (str): Directory path to save result. - evaluation_config_dict (Dict[str, Dict[str, Any]]): Dict that items are evaluation config for each task. + config_dict (Dict[str, Dict[str, Any]]): Dict that items are evaluation config for each task. load_raw_data (bool): Whether load pointcloud/image data. Defaults to False. 
""" @@ -79,116 +73,20 @@ def __init__( dataset_paths: List[str], frame_id: Union[str, Sequence[str]], result_root_directory: str, - evaluation_config_dict: Dict[str, Any], + config_dict: Dict[str, Any], load_raw_data: bool = False, ) -> None: super().__init__( dataset_paths=dataset_paths, frame_id=frame_id, result_root_directory=result_root_directory, - evaluation_config_dict=evaluation_config_dict, + config_dict=config_dict, load_raw_data=load_raw_data, ) - self.metrics_config = MetricsScoreConfig(self.evaluation_task, **self.metrics_params) - - @staticmethod - def _extract_label_params(evaluation_config_dict: Dict[str, Any]) -> Dict[str, Any]: - e_cfg = evaluation_config_dict.copy() - l_params: Dict[str, Any] = { - "label_prefix": e_cfg["label_prefix"], - "merge_similar_labels": e_cfg.get("merge_similar_labels", False), - "allow_matching_unknown": e_cfg.get("allow_matching_unknown", False), - "count_label_number": e_cfg.get("count_label_number", True), - } - return l_params - - def _extract_params( - self, - evaluation_config_dict: Dict[str, Any], - ) -> Tuple[Dict[str, Any], Dict[str, Any]]: - """Extract and divide parameters from evaluation_config_dict into filtering and metrics parameters. - - Args: - evaluation_config_dict (Dict[str, Any]): Dict that items are evaluation config for each task. - - Returns: - f_params (Dict[str, Any]): Parameters for filtering. - m_params (Dict[str, Any]): Parameters for metrics. - l_params (Dict[str, Any]): Parameters for label. - """ - e_cfg = evaluation_config_dict.copy() - - # Covert labels to autoware labels for Metrics - target_labels: List[LabelType] = set_target_lists( - e_cfg.get("target_labels"), - self.label_converter, - ) - self.target_labels: List[LabelType] = target_labels - - max_x_position: Optional[float] = e_cfg.get("max_x_position") - max_y_position: Optional[float] = e_cfg.get("max_y_position") - max_distance: Optional[float] = e_cfg.get("max_distance") - min_distance: Optional[float] = e_cfg.get("min_distance") - - num_elements: int = len(target_labels) - if max_x_position and max_y_position: - max_x_position_list: List[float] = set_thresholds(max_x_position, num_elements, False) - max_y_position_list: List[float] = set_thresholds(max_y_position, num_elements, False) - max_distance_list = None - min_distance_list = None - elif max_distance and min_distance: - max_distance_list: List[float] = set_thresholds(max_distance, num_elements, False) - min_distance_list: List[float] = [min_distance] * len(target_labels) - max_x_position_list = None - max_y_position_list = None - elif self.evaluation_task.is_2d(): - max_x_position_list = None - max_y_position_list = None - max_distance_list = None - min_distance_list = None - else: - raise RuntimeError("Either max x/y position or max/min distance should be specified") - - max_matchable_radii: Optional[Union[float, List[float]]] = e_cfg.get("max_matchable_radii") - if max_matchable_radii is not None: - max_matchable_radii: List[float] = set_thresholds(max_matchable_radii, num_elements, False) - - min_point_numbers: Optional[List[int]] = e_cfg.get("min_point_numbers") - if min_point_numbers is not None: - min_point_numbers: List[int] = set_thresholds(min_point_numbers, num_elements, False) - - if self.evaluation_task == EvaluationTask.DETECTION and min_point_numbers is None: - raise RuntimeError("In detection task, min point numbers must be specified") - - conf_thresh: Optional[float] = e_cfg.get("confidence_threshold") - if conf_thresh is not None: - confidence_threshold_list: List[float] = 
set_thresholds(conf_thresh, num_elements, False) - else: - confidence_threshold_list = None - - target_uuids: Optional[List[str]] = e_cfg.get("target_uuids") - ignore_attributes: Optional[List[str]] = e_cfg.get("ignore_attributes") - - f_params: Dict[str, Any] = { - "target_labels": target_labels, - "ignore_attributes": ignore_attributes, - "max_x_position_list": max_x_position_list, - "max_y_position_list": max_y_position_list, - "max_distance_list": max_distance_list, - "min_distance_list": min_distance_list, - "max_matchable_radii": max_matchable_radii, - "min_point_numbers": min_point_numbers, - "confidence_threshold_list": confidence_threshold_list, - "target_uuids": target_uuids, - } - - m_params: Dict[str, Any] = { - "target_labels": target_labels, - "center_distance_thresholds": e_cfg.get("center_distance_thresholds"), - "plane_distance_thresholds": e_cfg.get("plane_distance_thresholds"), - "iou_2d_thresholds": e_cfg.get("iou_2d_thresholds"), - "iou_3d_thresholds": e_cfg.get("iou_3d_thresholds"), - } + self.metrics_config = MetricsScoreConfig(self.metrics_param) - return f_params, m_params + def _extract_params(self, cfg: Dict[str, Any]) -> Tuple[PerceptionFilterParam, PerceptionMetricsParam]: + filter_param = PerceptionFilterParam.from_dict(cfg, self.evaluation_task, self.target_labels) + metrics_param = PerceptionMetricsParam.from_dict(cfg, self.evaluation_task, self.target_labels) + return filter_param, metrics_param diff --git a/perception_eval/perception_eval/config/sensing_evaluation_config.py b/perception_eval/perception_eval/config/sensing_evaluation_config.py index 06537259..4c1f76e7 100644 --- a/perception_eval/perception_eval/config/sensing_evaluation_config.py +++ b/perception_eval/perception_eval/config/sensing_evaluation_config.py @@ -19,10 +19,12 @@ from typing import Tuple from typing import Union -from ._evaluation_config_base import _EvaluationConfigBase +from .evaluation_config_base import EvaluationConfigBase +from .params import SensingFilterParam +from .params import SensingMetricsParam -class SensingEvaluationConfig(_EvaluationConfigBase): +class SensingEvaluationConfig(EvaluationConfigBase): """The class of config for sensing evaluation. Directory structure to save log and visualization result is following @@ -48,7 +50,7 @@ class SensingEvaluationConfig(_EvaluationConfigBase): dataset_paths (List[str]): Dataset paths list. frame_id (Union[str, Sequence[str]]): FrameID(s) in string, where objects are with respect. result_root_directory (str): Directory path to save result. - evaluation_config_dict (Dict[str, Dict[str, Any]]): Dict that items are evaluation config for each task. + config_dict (Dict[str, Dict[str, Any]]): Dict that items are evaluation config for each task. load_raw_data (bool): Whether load pointcloud/image data. Defaults to False. 
""" @@ -59,45 +61,18 @@ def __init__( dataset_paths: List[str], frame_id: Union[str, Sequence[str]], result_root_directory: str, - evaluation_config_dict: Dict[str, Dict[str, Any]], + config_dict: Dict[str, Dict[str, Any]], load_raw_data: bool = False, ) -> None: super().__init__( dataset_paths=dataset_paths, frame_id=frame_id, result_root_directory=result_root_directory, - evaluation_config_dict=evaluation_config_dict, + config_dict=config_dict, load_raw_data=load_raw_data, ) - @staticmethod - def _extract_label_params(evaluation_config_dict: Dict[str, Any]) -> Dict[str, Any]: - e_cfg: Dict[str, Any] = evaluation_config_dict.copy() - l_params: Dict[str, Any] = { - "label_prefix": e_cfg.get("label_prefix", "autoware"), - "merge_similar_labels": e_cfg.get("merge_similar_labels", False), - "allow_matching_unknown": True, - "count_label_number": e_cfg.get("count_label_number", True), - } - return l_params - - def _extract_params( - self, - evaluation_config_dict: Dict[str, Any], - ) -> Tuple[Dict[str, Any], Dict[str, Any]]: - """Extract parameters. - Args: - evaluation_config_dict (Dict[str, Any]): Configuration as dict. - Returns: - f_params (Dict[str, Any]): Parameters for filtering. - m_params (Dict[str, Any]): Parameters for metrics. - """ - e_cfg: Dict[str, Any] = evaluation_config_dict.copy() - f_params: Dict[str, Any] = {"target_uuids": e_cfg.get("target_uuids", None)} - m_params: Dict[str, Any] = { - "box_scale_0m": e_cfg.get("box_scale_0m", 1.0), - "box_scale_100m": e_cfg.get("box_scale_100m", 1.0), - "min_points_threshold": e_cfg.get("min_points_threshold", 1), - } - - return f_params, m_params + def _extract_params(self, cfg: Dict[str, Any]) -> Tuple[SensingFilterParam, SensingMetricsParam]: + filter_param = SensingFilterParam.from_dict(cfg) + metrics_param = SensingMetricsParam.from_dict(cfg) + return filter_param, metrics_param diff --git a/perception_eval/perception_eval/manager/_evaluation_manager_base.py b/perception_eval/perception_eval/manager/evaluation_manager_base.py similarity index 79% rename from perception_eval/perception_eval/manager/_evaluation_manager_base.py rename to perception_eval/perception_eval/manager/evaluation_manager_base.py index 040257a0..ae0ff112 100644 --- a/perception_eval/perception_eval/manager/_evaluation_manager_base.py +++ b/perception_eval/perception_eval/manager/evaluation_manager_base.py @@ -25,12 +25,15 @@ if TYPE_CHECKING: from perception_eval.config import EvaluationConfigType + from perception_eval.config.params import FilterParamType + from perception_eval.config.params import LabelParam + from perception_eval.config.params import MetricsParamType from perception_eval.dataset import FrameGroundTruth from perception_eval.result import FrameResultType from perception_eval.visualization import VisualizerType -class _EvaluationMangerBase(ABC): +class EvaluationMangerBase(ABC): """Abstract base class for EvaluationManager. 
Attributes: @@ -42,38 +45,39 @@ class _EvaluationMangerBase(ABC): """ @abstractmethod - def __init__( - self, - evaluation_config: EvaluationConfigType, - ) -> None: + def __init__(self, config: EvaluationConfigType) -> None: super().__init__() - self.evaluator_config = evaluation_config + self.config = config self.ground_truth_frames = load_all_datasets( - dataset_paths=self.evaluator_config.dataset_paths, - evaluation_task=self.evaluator_config.evaluation_task, - label_converter=self.evaluator_config.label_converter, - frame_id=self.evaluator_config.frame_ids, - load_raw_data=self.evaluator_config.load_raw_data, + dataset_paths=self.config.dataset_paths, + evaluation_task=self.config.evaluation_task, + label_converter=self.config.label_converter, + frame_id=self.config.frame_ids, + load_raw_data=self.config.load_raw_data, ) self.frame_results: List[FrameResultType] = [] @property def evaluation_task(self): - return self.evaluator_config.evaluation_task + return self.config.evaluation_task @property def frame_ids(self): - return self.evaluator_config.frame_ids + return self.config.frame_ids + + @property + def label_param(self) -> LabelParam: + return self.config.label_param @property - def filtering_params(self): - return self.evaluator_config.filtering_params + def filter_param(self) -> FilterParamType: + return self.config.filter_param @property - def metrics_params(self): - return self.evaluator_config.metrics_params + def metrics_param(self) -> MetricsParamType: + return self.config.metrics_param @property @abstractmethod diff --git a/perception_eval/perception_eval/manager/perception_evaluation_manager.py b/perception_eval/perception_eval/manager/perception_evaluation_manager.py index 91408795..10652a2c 100644 --- a/perception_eval/perception_eval/manager/perception_evaluation_manager.py +++ b/perception_eval/perception_eval/manager/perception_evaluation_manager.py @@ -28,7 +28,7 @@ from perception_eval.visualization import PerceptionVisualizer2D from perception_eval.visualization import PerceptionVisualizer3D -from ._evaluation_manager_base import _EvaluationMangerBase +from .evaluation_manager_base import EvaluationMangerBase if TYPE_CHECKING: from perception_eval.common.label import LabelType @@ -40,7 +40,7 @@ from perception_eval.visualization import PerceptionVisualizerType -class PerceptionEvaluationManager(_EvaluationMangerBase): +class PerceptionEvaluationManager(EvaluationMangerBase): """A manager class to evaluate perception task. Attributes: @@ -55,24 +55,19 @@ class PerceptionEvaluationManager(_EvaluationMangerBase): evaluator_config (PerceptionEvaluatorConfig): Configuration for perception evaluation. 
""" - def __init__( - self, - evaluation_config: PerceptionEvaluationConfig, - ) -> None: - super().__init__(evaluation_config=evaluation_config) + def __init__(self, config: PerceptionEvaluationConfig) -> None: + super().__init__(config=config) self.__visualizer = ( - PerceptionVisualizer2D(self.evaluator_config) - if self.evaluation_task.is_2d() - else PerceptionVisualizer3D(self.evaluator_config) + PerceptionVisualizer2D(self.config) if self.evaluation_task.is_2d() else PerceptionVisualizer3D(self.config) ) @property def target_labels(self) -> List[LabelType]: - return self.evaluator_config.target_labels + return self.config.target_labels @property def metrics_config(self): - return self.evaluator_config.metrics_config + return self.config.metrics_config @property def visualizer(self) -> PerceptionVisualizerType: @@ -154,14 +149,14 @@ def _filter_objects( objects=estimated_objects, is_gt=False, ego2map=frame_ground_truth.ego2map, - **self.filtering_params, + **self.filter_param.as_dict(), ) frame_ground_truth.objects = filter_objects( objects=frame_ground_truth.objects, is_gt=True, ego2map=frame_ground_truth.ego2map, - **self.filtering_params, + **self.filter_param.as_dict(), ) object_results = get_object_results( @@ -169,15 +164,15 @@ def _filter_objects( estimated_objects=estimated_objects, ground_truth_objects=frame_ground_truth.objects, target_labels=self.target_labels, - allow_matching_unknown=self.evaluator_config.label_params["allow_matching_unknown"], - matchable_thresholds=self.filtering_params["max_matchable_radii"], + # allow_matching_unknown=self.label_param["allow_matching_unknown"], TODO + # matchable_thresholds=self.filtering_params["max_matchable_radii"], ) - if self.evaluator_config.filtering_params.get("target_uuids"): + if self.filter_param.target_uuids is not None: object_results = filter_object_results( object_results=object_results, ego2map=frame_ground_truth.ego2map, - target_uuids=self.filtering_params["target_uuids"], + target_uuids=self.filter_param.target_uuids, ) return object_results, frame_ground_truth @@ -201,17 +196,14 @@ def get_scene_result(self) -> MetricsScore: used_frame.append(frame.frame_number) # Calculate score - scene_metrics_score = MetricsScore( - config=self.metrics_config, - used_frame=used_frame, - ) - if self.evaluator_config.metrics_config.detection_config is not None: + scene_metrics_score = MetricsScore(config=self.metrics_config, used_frame=used_frame) + if self.config.metrics_config.detection_config is not None: scene_metrics_score.evaluate_detection(all_frame_results, all_num_gt) - if self.evaluator_config.metrics_config.tracking_config is not None: + if self.config.metrics_config.tracking_config is not None: scene_metrics_score.evaluate_tracking(all_frame_results, all_num_gt) - if self.evaluator_config.metrics_config.prediction_config is not None: + if self.config.metrics_config.prediction_config is not None: pass - if self.evaluator_config.metrics_config.classification_config is not None: + if self.config.metrics_config.classification_config is not None: scene_metrics_score.evaluate_classification(all_frame_results, all_num_gt) return scene_metrics_score diff --git a/perception_eval/perception_eval/manager/sensing_evaluation_manager.py b/perception_eval/perception_eval/manager/sensing_evaluation_manager.py index e15eaa48..cd040b73 100644 --- a/perception_eval/perception_eval/manager/sensing_evaluation_manager.py +++ b/perception_eval/perception_eval/manager/sensing_evaluation_manager.py @@ -27,10 +27,10 @@ from 
perception_eval.util.math import get_bbox_scale from perception_eval.visualization import SensingVisualizer -from ._evaluation_manager_base import _EvaluationMangerBase +from .evaluation_manager_base import EvaluationMangerBase -class SensingEvaluationManager(_EvaluationMangerBase): +class SensingEvaluationManager(EvaluationMangerBase): """A manager class to evaluate sensing task. Attributes: @@ -42,12 +42,9 @@ class SensingEvaluationManager(_EvaluationMangerBase): evaluation_config (SensingEvaluationConfig): Configuration for sensing evaluation. """ - def __init__( - self, - evaluation_config: SensingEvaluationConfig, - ) -> None: - super().__init__(evaluation_config) - self.__visualizer = SensingVisualizer(self.evaluator_config) + def __init__(self, config: SensingEvaluationConfig) -> None: + super().__init__(config) + self.__visualizer = SensingVisualizer(self.config) @property def visualizer(self) -> SensingVisualizer: @@ -79,8 +76,8 @@ def add_frame_result( """ if sensing_frame_config is None: sensing_frame_config = SensingFrameConfig( - **self.filtering_params, - **self.metrics_params, + **self.filter_param.as_dict(), + **self.metrics_param.as_dict(), ) # Crop pointcloud for non-detection area @@ -160,15 +157,13 @@ def crop_pointcloud( ) # Crop pointcloud for non-detection outside of objects' bbox - box_scale_0m: float = self.evaluator_config.metrics_params["box_scale_0m"] - box_scale_100m: float = self.evaluator_config.metrics_params["box_scale_100m"] for i, points in enumerate(cropped_pointcloud): outside_points: np.ndarray = points.copy() for ground_truth in ground_truth_objects: bbox_scale: float = get_bbox_scale( distance=ground_truth.get_distance(ego2map=ego2map), - box_scale_0m=box_scale_0m, - box_scale_100m=box_scale_100m, + box_scale_0m=self.metrics_param.box_scale_0m, + box_scale_100m=self.metrics_param.box_scale_100m, ) outside_points: np.ndarray = ground_truth.crop_pointcloud( pointcloud=outside_points, diff --git a/perception_eval/perception_eval/metrics/metrics_score_config.py b/perception_eval/perception_eval/metrics/metrics_score_config.py index bcb16ee5..6260cbbc 100644 --- a/perception_eval/perception_eval/metrics/metrics_score_config.py +++ b/perception_eval/perception_eval/metrics/metrics_score_config.py @@ -12,15 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from inspect import signature from typing import Any from typing import Dict -from typing import List from typing import Optional from typing import Set +from typing import TYPE_CHECKING from perception_eval.common.evaluation_task import EvaluationTask -from perception_eval.common.label import LabelType from .config._metrics_config_base import _MetricsConfigBase from .config.classification_metrics_config import ClassificationMetricsConfig @@ -28,6 +29,9 @@ from .config.prediction_metrics_config import PredictionMetricsConfig from .config.tracking_metrics_config import TrackingMetricsConfig +if TYPE_CHECKING: + from perception_eval.config.params import PerceptionMetricsParam + class MetricsScoreConfig: """A configuration class for each evaluation task metrics. @@ -44,7 +48,7 @@ class MetricsScoreConfig: **cfg: Configuration data. 
""" - def __init__(self, evaluation_task: EvaluationTask, **cfg) -> None: + def __init__(self, params: PerceptionMetricsParam) -> None: self.detection_config: Optional[DetectionMetricsConfig] = None self.tracking_config: Optional[TrackingMetricsConfig] = None self.classification_config: Optional[ClassificationMetricsConfig] = None @@ -52,45 +56,47 @@ def __init__(self, evaluation_task: EvaluationTask, **cfg) -> None: # NOTE: prediction_config is under construction self.prediction_config = None - self.evaluation_task: EvaluationTask = evaluation_task - self.target_labels: List[LabelType] = cfg["target_labels"] + self.evaluation_task = params.evaluation_task + self.target_labels = params.target_labels if self.evaluation_task in (EvaluationTask.DETECTION2D, EvaluationTask.DETECTION): - self._check_parameters(DetectionMetricsConfig, cfg) - self.detection_config = DetectionMetricsConfig(**cfg) + inputs = self._extract_params(DetectionMetricsConfig, params) + self.detection_config = DetectionMetricsConfig(**inputs) elif self.evaluation_task in (EvaluationTask.TRACKING2D, EvaluationTask.TRACKING): - self._check_parameters(TrackingMetricsConfig, cfg) - self.tracking_config = TrackingMetricsConfig(**cfg) + inputs = self._extract_params(TrackingMetricsConfig, params) + self.tracking_config = TrackingMetricsConfig(**inputs) # NOTE: In tracking, evaluate mAP too # TODO: Check and extract parameters for detection from parameters for tracking - self.detection_config = DetectionMetricsConfig(**cfg) + self.detection_config = DetectionMetricsConfig(**inputs) elif self.evaluation_task == EvaluationTask.PREDICTION: - self._check_parameters(PredictionMetricsConfig, cfg) + inputs = self._extract_params(PredictionMetricsConfig, params) raise NotImplementedError("Prediction config is under construction") # TODO # self.evaluation_tasks.append(task) elif self.evaluation_task == EvaluationTask.CLASSIFICATION2D: - self._check_parameters(ClassificationMetricsConfig, cfg) - self.classification_config = ClassificationMetricsConfig(**cfg) + inputs = self._extract_params(ClassificationMetricsConfig, params) + self.classification_config = ClassificationMetricsConfig(**inputs) @staticmethod - def _check_parameters(config: _MetricsConfigBase, params: Dict[str, Any]): + def _extract_params(config: _MetricsConfigBase, params: PerceptionMetricsParam) -> Dict[str, Any]: """Check if input parameters are valid. Args: config (_MetricsConfigBase): Metrics score instance. - params (Dict[str, any]): Parameters for metrics. + params (PerceptionMetricsParam): Parameters for metrics. Raises: KeyError: When got invalid parameter names. 
""" - valid_parameters: Set = set(signature(config).parameters) - input_params: Set = set(params.keys()) - if not input_params <= valid_parameters: + input_params_dict = params.as_dict() + valid_params: Set = set(signature(config).parameters) + input_params: Set = set(input_params_dict.keys()) + if valid_params > input_params: raise MetricsParameterError( f"MetricsConfig for '{config.evaluation_task}'\n" - f"Unexpected parameters: {input_params - valid_parameters} \n" - f"Usage: {valid_parameters} \n" + f"Unexpected parameters: {input_params - valid_params} \n" + f"Usage: {valid_params} \n" ) + return {key: input_params_dict[key] for key in valid_params} class MetricsParameterError(Exception): diff --git a/perception_eval/perception_eval/tool/perception_analyzer2d.py b/perception_eval/perception_eval/tool/perception_analyzer2d.py index 0261d318..3633d812 100644 --- a/perception_eval/perception_eval/tool/perception_analyzer2d.py +++ b/perception_eval/perception_eval/tool/perception_analyzer2d.py @@ -61,8 +61,8 @@ class PerceptionAnalyzer2D(PerceptionAnalyzerBase): evaluation_config (PerceptionEvaluationConfig): Config used in evaluation. """ - def __init__(self, evaluation_config: PerceptionEvaluationConfig) -> None: - super().__init__(evaluation_config=evaluation_config) + def __init__(self, config: PerceptionEvaluationConfig) -> None: + super().__init__(config=config) if not self.config.evaluation_task.is_2d(): raise RuntimeError("Evaluation task must be 2D.") diff --git a/perception_eval/perception_eval/tool/perception_analyzer3d.py b/perception_eval/perception_eval/tool/perception_analyzer3d.py index 823557a9..6b19ae9f 100644 --- a/perception_eval/perception_eval/tool/perception_analyzer3d.py +++ b/perception_eval/perception_eval/tool/perception_analyzer3d.py @@ -74,19 +74,23 @@ class PerceptionAnalyzer3D(PerceptionAnalyzerBase): num_area_division (int): Number to divide area. Defaults to 1. 
""" - def __init__( - self, - evaluation_config: PerceptionEvaluationConfig, - num_area_division: int = 1, - ) -> None: - super().__init__(evaluation_config=evaluation_config) + def __init__(self, config: PerceptionEvaluationConfig, num_area_division: int = 1) -> None: + super().__init__(config=config) if not self.config.evaluation_task.is_3d(): raise RuntimeError("Evaluation task must be 3D.") self.__num_area_division: int = num_area_division - max_x: float = self.config.evaluation_config_dict.get("max_x_position", 100.0) - max_y: float = self.config.evaluation_config_dict.get("max_y_position", 100.0) + max_x_position_list = self.config.filter_param.max_x_position_list + max_y_position_list = self.config.filter_param.max_y_position_list + max_distance_list = self.config.filter_param.max_distance_list + if max_x_position_list is not None and max_y_position_list is not None: + max_x = max(max_x_position_list) + max_y = max(max_y_position_list) + elif max_distance_list is not None: + max_x = max_y = max(max_distance_list) + else: + max_x = max_y = 100.0 self.__upper_rights, self.__bottom_lefts = generate_area_points( self.num_area_division, max_x=max_x, max_y=max_y ) diff --git a/perception_eval/perception_eval/tool/perception_analyzer3dfield.py b/perception_eval/perception_eval/tool/perception_analyzer3dfield.py index 3fcd4b6a..233a5b92 100644 --- a/perception_eval/perception_eval/tool/perception_analyzer3dfield.py +++ b/perception_eval/perception_eval/tool/perception_analyzer3dfield.py @@ -167,7 +167,7 @@ class DataTableIdx(IntEnum): class PerceptionFieldXY: def __init__( self, - evaluation_config: PerceptionEvaluationConfig, + config: PerceptionEvaluationConfig, axis_x: PerceptionFieldAxis, axis_y: PerceptionFieldAxis, ) -> None: @@ -175,11 +175,11 @@ def __init__( Initializes a PerceptionFieldXY object. Args: - evaluation_config (PerceptionEvaluationConfig): The configuration for perception evaluation. + config (PerceptionEvaluationConfig): The configuration for perception evaluation. axis_x (PerceptionFieldAxis): The x-axis configuration for the perception field. axis_y (PerceptionFieldAxis): The y-axis configuration for the perception field. """ - self.__config: PerceptionEvaluationConfig = evaluation_config + self.__config: PerceptionEvaluationConfig = config # Set statistics parameters self.config_statistics_min_numb: int = STATISTICS_MIN_NUMB diff --git a/perception_eval/perception_eval/tool/perception_analyzer_base.py b/perception_eval/perception_eval/tool/perception_analyzer_base.py index 417a991f..b2c860bd 100644 --- a/perception_eval/perception_eval/tool/perception_analyzer_base.py +++ b/perception_eval/perception_eval/tool/perception_analyzer_base.py @@ -77,8 +77,8 @@ class PerceptionAnalyzerBase(ABC): evaluation_config (PerceptionEvaluationConfig): Config used in evaluation. 
""" - def __init__(self, evaluation_config: PerceptionEvaluationConfig) -> None: - self.__config = evaluation_config + def __init__(self, config: PerceptionEvaluationConfig) -> None: + self.__config = config self.__plot_dir: str = os.path.join(self.__config.result_root_directory, "plot") if not os.path.exists(self.__plot_dir): diff --git a/perception_eval/perception_eval/visualization/perception_visualizer3d.py b/perception_eval/perception_eval/visualization/perception_visualizer3d.py index 6de69750..a1c44cea 100644 --- a/perception_eval/perception_eval/visualization/perception_visualizer3d.py +++ b/perception_eval/perception_eval/visualization/perception_visualizer3d.py @@ -67,9 +67,9 @@ def __init__( self.__figure, self.__axes = plt.subplots(figsize=self.__figsize) self.__animation_frames: List[PILImage] = [] - max_x_position_list = config.filtering_params.get("max_x_position_list") - max_y_position_list = config.filtering_params.get("max_y_position_list") - max_distance_list = config.filtering_params.get("max_distance_list") + max_x_position_list = config.filter_param.max_distance_list + max_y_position_list = config.filter_param.max_y_position_list + max_distance_list = config.filter_param.max_distance_list if max_x_position_list is not None and max_y_position_list is not None: self.__xlim: float = max(max_x_position_list) self.__ylim: float = max(max_y_position_list) diff --git a/perception_eval/test/config/test_perception_evaluation_config.py b/perception_eval/test/config/test_perception_evaluation_config.py index e2b0020d..e7a1507e 100644 --- a/perception_eval/test/config/test_perception_evaluation_config.py +++ b/perception_eval/test/config/test_perception_evaluation_config.py @@ -27,7 +27,7 @@ def setUp(self) -> None: def test_check_tasks(self): """Test if it can detect the exception.""" - evaluation_config_dict = { + config_dict = { "target_labels": ["car", "bicycle", "pedestrian", "motorbike"], "max_x_position": 102.4, "max_y_position": 102.4, @@ -53,10 +53,10 @@ def test_check_tasks(self): for n, (frame_id, evaluation_task) in enumerate(patterns): with self.subTest(f"Test if it can detect the exception of task keys: {n + 1}"): with self.assertRaises(ValueError): - evaluation_config_dict.update(evaluation_task) + config_dict.update(evaluation_task) _ = PerceptionEvaluationConfig( dataset_paths="/tmp", frame_id=frame_id, result_root_directory="/tmp", - evaluation_config_dict=evaluation_config_dict, + config_dict=config_dict, ) diff --git a/perception_eval/test/config/test_sensing_evaluation_config.py b/perception_eval/test/config/test_sensing_evaluation_config.py index da74bde6..c9ba5536 100644 --- a/perception_eval/test/config/test_sensing_evaluation_config.py +++ b/perception_eval/test/config/test_sensing_evaluation_config.py @@ -24,7 +24,7 @@ class TestSensingEvaluationConfig(unittest.TestCase): def test_check_tasks(self): """Test if it can detect the exception.""" - evaluation_config_dict = { + config_dict = { "evaluation_task": "sensing", "target_uuids": ["1b40c0876c746f96ac679a534e1037a2"], "box_scale_0m": 1.0, @@ -41,10 +41,10 @@ def test_check_tasks(self): for n, (frame_id, evaluation_task) in enumerate(patterns): with self.subTest(f"Test if it can detect the exception of task keys: {n + 1}"): with self.assertRaises(ValueError): - evaluation_config_dict.update(evaluation_task) + config_dict.update(evaluation_task) _ = SensingEvaluationConfig( dataset_paths="/tmp/path", frame_id=frame_id, result_root_directory="/tmp/path", - evaluation_config_dict=evaluation_config_dict, + 
config_dict=config_dict, ) diff --git a/perception_eval/test/metrics/test_metrics_score_config.py b/perception_eval/test/metrics/test_metrics_score_config.py index ff9162f0..c0659cdc 100644 --- a/perception_eval/test/metrics/test_metrics_score_config.py +++ b/perception_eval/test/metrics/test_metrics_score_config.py @@ -18,12 +18,16 @@ from typing import Tuple import unittest +from perception_eval.common.evaluation_task import EvaluationTask +from perception_eval.common.label import AutowareLabel +from perception_eval.config.params import PerceptionMetricsParam from perception_eval.metrics.config._metrics_config_base import _MetricsConfigBase from perception_eval.metrics.config.detection_metrics_config import DetectionMetricsConfig from perception_eval.metrics.config.tracking_metrics_config import TrackingMetricsConfig -from perception_eval.metrics.metrics_score_config import MetricsParameterError from perception_eval.metrics.metrics_score_config import MetricsScoreConfig +# from perception_eval.metrics.metrics_score_config import MetricsParameterError + class TestMetricsScoreConfig(unittest.TestCase): def test_check_parameters(self): @@ -34,33 +38,49 @@ def test_check_parameters(self): Check if the exception is raised when wrong key is specified. """ patterns: List[Tuple[_MetricsConfigBase, Dict[str, Any]]] = [ - (DetectionMetricsConfig, {"foo": 0.0}), ( DetectionMetricsConfig, - { - "target_labels": ["car"], - "center_distance_thresholds": [[1.0]], - "plane_distance_thresholds": [[1.0]], - "iou_bev_thresholds": [[1.0]], - "iou_3d_thresholds": [[1.0]], - "foo": 1.0, - }, + PerceptionMetricsParam.from_dict( + cfg={"foo": 1.0}, + evaluation_task=EvaluationTask.DETECTION, + target_labels=[AutowareLabel.CAR], + ), + ), + ( + DetectionMetricsConfig, + PerceptionMetricsParam.from_dict( + cfg={ + "center_distance_thresholds": [[1.0]], + "plane_distance_thresholds": [[1.0]], + "iou_bev_thresholds": [[1.0]], + }, + evaluation_task=EvaluationTask.DETECTION, + target_labels=[AutowareLabel.CAR], + ), + ), + ( + TrackingMetricsConfig, + PerceptionMetricsParam.from_dict( + cfg={"foo": 1.0}, + evaluation_task=EvaluationTask.TRACKING, + target_labels=[AutowareLabel.CAR], + ), ), - (TrackingMetricsConfig, {"foo": 0.0}), ( TrackingMetricsConfig, - { - "target_labels": ["car"], - "center_distance_thresholds": [[1.0]], - "plane_distance_thresholds": [[1.0]], - "iou_bev_thresholds": [[1.0]], - "iou_3d_thresholds": [[1.0]], - "foo": 1.0, - }, + PerceptionMetricsParam.from_dict( + cfg={ + "center_distance_thresholds": [[1.0]], + "plane_distance_thresholds": [[1.0]], + "iou_bev_thresholds": [[1.0]], + }, + evaluation_task=EvaluationTask.TRACKING, + target_labels=[AutowareLabel.CAR], + ), ), ] for n, (config, params) in enumerate(patterns): with self.subTest(f"Test if it can detect the exception of parameters: {n + 1}"): - with self.assertRaises(MetricsParameterError): - MetricsScoreConfig._check_parameters(config, params) + # with self.assertRaises(MetricsParameterError, msg=f"{n + 1}"): + MetricsScoreConfig._extract_params(config, params) diff --git a/perception_eval/test/perception_fp_validation_lsim.py b/perception_eval/test/perception_fp_validation_lsim.py index 02680c8d..781a5673 100644 --- a/perception_eval/test/perception_fp_validation_lsim.py +++ b/perception_eval/test/perception_fp_validation_lsim.py @@ -36,7 +36,7 @@ class FPValidationLsimMoc: def __init__(self, dataset_paths: List[int], result_root_directory: str) -> None: - evaluation_config_dict = { + config_dict = { "evaluation_task": 
"fp_validation", "target_labels": ["car", "bicycle", "pedestrian", "motorbike"], "max_x_position": 102.4, @@ -51,7 +51,7 @@ def __init__(self, dataset_paths: List[int], result_root_directory: str) -> None dataset_paths=dataset_paths, frame_id="base_link", result_root_directory=result_root_directory, - evaluation_config_dict=evaluation_config_dict, + config_dict=config_dict, load_raw_data=True, ) @@ -71,7 +71,7 @@ def callback(self, unix_time: int, estimated_objects: List[ObjectType]) -> None: critical_ground_truth_objects = ground_truth_now_frame.objects frame_config = PerceptionFrameConfig( - evaluator_config=self.evaluator.evaluator_config, + evaluator_config=self.evaluator.config, target_labels=["car", "bicycle", "pedestrian", "motorbike"], max_x_position_list=[100.0, 100.0, 100.0, 100.0], max_y_position_list=[100.0, 100.0, 100.0, 100.0], diff --git a/perception_eval/test/perception_lsim.py b/perception_eval/test/perception_lsim.py index d9c408d0..c59e9b52 100644 --- a/perception_eval/test/perception_lsim.py +++ b/perception_eval/test/perception_lsim.py @@ -41,7 +41,7 @@ def __init__( evaluation_task: str, result_root_directory: str, ): - evaluation_config_dict = { + config_dict = { "evaluation_task": evaluation_task, # ラベル,max x/y,マッチング閾値 (detection/tracking/predictionで共通) "target_labels": ["car", "bicycle", "pedestrian", "motorbike"], @@ -81,7 +81,7 @@ def __init__( dataset_paths=dataset_paths, frame_id="base_link" if evaluation_task == "detection" else "map", result_root_directory=result_root_directory, - evaluation_config_dict=evaluation_config_dict, + config_dict=config_dict, load_raw_data=True, ) @@ -91,7 +91,7 @@ def __init__( file_log_level=logging.INFO, ) - self.evaluator = PerceptionEvaluationManager(evaluation_config=evaluation_config) + self.evaluator = PerceptionEvaluationManager(evaluation_config) def callback( self, @@ -111,7 +111,7 @@ def callback( # 距離などでUC評価objectを選別するためのインターフェイス(PerceptionEvaluationManager初期化時にConfigを設定せず、関数受け渡しにすることで動的に変更可能なInterface) # どれを注目物体とするかのparam frame_config = PerceptionFrameConfig( - evaluator_config=self.evaluator.evaluator_config, + evaluator_config=self.evaluator.config, target_labels=["car", "bicycle", "pedestrian", "motorbike"], ignore_attributes=["cycle_state.without_rider"], max_x_position_list=[30.0, 30.0, 30.0, 30.0], @@ -231,13 +231,13 @@ def visualize(self, frame_result: PerceptionFrameResult): f"{format_class_for_log(detection_final_metric_score.maps[0], 100)}", ) - if detection_lsim.evaluator.evaluator_config.load_raw_data: + if detection_lsim.evaluator.config.load_raw_data: # Visualize all frame results. 
logging.info("Start visualizing detection results") detection_lsim.evaluator.visualize_all() # Detection performance report - detection_analyzer = PerceptionAnalyzer3D(detection_lsim.evaluator.evaluator_config) + detection_analyzer = PerceptionAnalyzer3D(detection_lsim.evaluator.config) detection_analyzer.add(detection_lsim.evaluator.frame_results) score_df, error_df = detection_analyzer.analyze() if score_df is not None: @@ -310,13 +310,13 @@ def visualize(self, frame_result: PerceptionFrameResult): f"{format_class_for_log(tracking_final_metric_score.tracking_scores[0], 100)}" ) - if tracking_lsim.evaluator.evaluator_config.load_raw_data: + if tracking_lsim.evaluator.config.load_raw_data: # Visualize all frame results logging.info("Start visualizing tracking results") tracking_lsim.evaluator.visualize_all() # Tracking performance report - tracking_analyzer = PerceptionAnalyzer3D(tracking_lsim.evaluator.evaluator_config) + tracking_analyzer = PerceptionAnalyzer3D(tracking_lsim.evaluator.config) tracking_analyzer.add(tracking_lsim.evaluator.frame_results) score_df, error_df = tracking_analyzer.analyze() if score_df is not None: diff --git a/perception_eval/test/perception_lsim2d.py b/perception_eval/test/perception_lsim2d.py index 5c8c8885..ddeb5c2f 100644 --- a/perception_eval/test/perception_lsim2d.py +++ b/perception_eval/test/perception_lsim2d.py @@ -47,7 +47,7 @@ def __init__( self.label_prefix = label_prefix if evaluation_task in ("detection2d", "tracking2d"): - evaluation_config_dict = { + config_dict = { "evaluation_task": evaluation_task, "center_distance_thresholds": [ 100, @@ -56,12 +56,12 @@ def __init__( "iou_2d_thresholds": [0.5], # = [[0.5, 0.5, 0.5, 0.5]] } elif evaluation_task == "classification2d": - evaluation_config_dict = {"evaluation_task": evaluation_task} + config_dict = {"evaluation_task": evaluation_task} else: raise ValueError(f"Unexpected evaluation task: {evaluation_task}") # If target_labels = None, all labels will be evaluated. - evaluation_config_dict.update( + config_dict.update( dict( target_labels=["green", "red", "yellow", "unknown"] if label_prefix == "traffic_light" @@ -69,7 +69,7 @@ def __init__( ignore_attributes=["cycle_state.without_rider"] if label_prefix == "autoware" else None, ) ) - evaluation_config_dict.update( + config_dict.update( dict( allow_matching_unknown=True, merge_similar_labels=False, @@ -82,7 +82,7 @@ def __init__( dataset_paths=dataset_paths, frame_id=camera_type, result_root_directory=result_root_directory, - evaluation_config_dict=evaluation_config_dict, + config_dict=config_dict, load_raw_data=True, ) @@ -92,7 +92,7 @@ def __init__( file_log_level=logging.INFO, ) - self.evaluator = PerceptionEvaluationManager(evaluation_config=evaluation_config) + self.evaluator = PerceptionEvaluationManager(evaluation_config) def callback( self, @@ -119,7 +119,7 @@ def callback( # 距離などでUC評価objectを選別するためのインターフェイス(PerceptionEvaluationManager初期化時にConfigを設定せず、関数受け渡しにすることで動的に変更可能なInterface) # どれを注目物体とするかのparam frame_config = PerceptionFrameConfig( - evaluator_config=self.evaluator.evaluator_config, + evaluator_config=self.evaluator.config, target_labels=target_labels, ignore_attributes=ignore_attributes, thresholds=thresholds, @@ -234,11 +234,11 @@ def visualize(self, frame_result: PerceptionFrameResult): # Visualize all frame results. 
logging.info("Start visualizing detection results") - if detection_lsim.evaluator.evaluator_config.load_raw_data: + if detection_lsim.evaluator.config.load_raw_data: detection_lsim.evaluator.visualize_all() # Detection performance report - detection_analyzer = PerceptionAnalyzer2D(detection_lsim.evaluator.evaluator_config) + detection_analyzer = PerceptionAnalyzer2D(detection_lsim.evaluator.config) detection_analyzer.add(detection_lsim.evaluator.frame_results) score_df, conf_mat_df = detection_analyzer.analyze() if score_df is not None: @@ -270,11 +270,11 @@ def visualize(self, frame_result: PerceptionFrameResult): # final result tracking_final_metric_score = tracking_lsim.get_final_result() - if tracking_lsim.evaluator.evaluator_config.load_raw_data: + if tracking_lsim.evaluator.config.load_raw_data: tracking_lsim.evaluator.visualize_all() # Tracking performance report - tracking_analyzer = PerceptionAnalyzer2D(tracking_lsim.evaluator.evaluator_config) + tracking_analyzer = PerceptionAnalyzer2D(tracking_lsim.evaluator.config) tracking_analyzer.add(tracking_lsim.evaluator.frame_results) score_df, conf_mat_df = tracking_analyzer.analyze() if score_df is not None: @@ -306,7 +306,7 @@ def visualize(self, frame_result: PerceptionFrameResult): classification_final_metric_score = classification_lsim.get_final_result() # Classification performance report - classification_analyzer = PerceptionAnalyzer2D(classification_lsim.evaluator.evaluator_config) + classification_analyzer = PerceptionAnalyzer2D(classification_lsim.evaluator.config) classification_analyzer.add(classification_lsim.evaluator.frame_results) score_df, conf_mat_df = classification_analyzer.analyze() if score_df is not None: diff --git a/perception_eval/test/sensing_lsim.py b/perception_eval/test/sensing_lsim.py index b957b89d..78fa8c9f 100644 --- a/perception_eval/test/sensing_lsim.py +++ b/perception_eval/test/sensing_lsim.py @@ -38,7 +38,7 @@ class SensingLSimMoc: def __init__(self, dataset_paths: List[str], result_root_directory: str): # sensing - evaluation_config_dict = { + config_dict = { "evaluation_task": "sensing", # object uuids to be detected # "target_uuids": ["1b40c0876c746f96ac679a534e1037a2"], @@ -49,11 +49,11 @@ def __init__(self, dataset_paths: List[str], result_root_directory: str): "min_points_threshold": 1, } - evaluation_config: SensingEvaluationConfig = SensingEvaluationConfig( + evaluation_config = SensingEvaluationConfig( dataset_paths=dataset_paths, frame_id="base_link", result_root_directory=result_root_directory, - evaluation_config_dict=evaluation_config_dict, + config_dict=config_dict, load_raw_data=True, ) @@ -63,7 +63,7 @@ def __init__(self, dataset_paths: List[str], result_root_directory: str): file_log_level=logging.INFO, ) - self.evaluator = SensingEvaluationManager(evaluation_config=evaluation_config) + self.evaluator = SensingEvaluationManager(evaluation_config) def callback( self, From a72959c6f94da9ca63b2ded9827c98fb6d4f5383 Mon Sep 17 00:00:00 2001 From: ktro2828 Date: Thu, 28 Dec 2023 16:19:47 +0900 Subject: [PATCH 7/9] refactor: update constructor of `PerceptionFrameResult` Signed-off-by: ktro2828 --- .../manager/perception_evaluation_manager.py | 18 ++++++++---- .../perception/perception_frame_result.py | 29 ++++++++++++------- 2 files changed, 30 insertions(+), 17 deletions(-) diff --git a/perception_eval/perception_eval/manager/perception_evaluation_manager.py b/perception_eval/perception_eval/manager/perception_evaluation_manager.py index 10652a2c..0c9dcd92 100644 --- 
a/perception_eval/perception_eval/manager/perception_evaluation_manager.py +++ b/perception_eval/perception_eval/manager/perception_evaluation_manager.py @@ -15,6 +15,7 @@ from __future__ import annotations from typing import List +from typing import Optional from typing import Tuple from typing import TYPE_CHECKING @@ -78,8 +79,8 @@ def add_frame_result( unix_time: int, ground_truth_now_frame: FrameGroundTruth, estimated_objects: List[ObjectType], - critical_ground_truth_objects: List[ObjectType], - frame_config: PerceptionFrameConfig, + critical_ground_truth_objects: Optional[List[ObjectType]] = None, + frame_config: Optional[PerceptionFrameConfig] = None, ) -> PerceptionFrameResult: """Get perception result at current frame. @@ -106,13 +107,18 @@ def add_frame_result( ground_truth_now_frame, ) + if critical_ground_truth_objects is None: + critical_ground_truth_objects = ground_truth_now_frame.objects.copy() + + if frame_config is None: + frame_config = PerceptionFrameConfig(self.config) + result = PerceptionFrameResult( + unix_time=unix_time, + frame_config=frame_config, + metrics_config=self.metrics_config, object_results=object_results, frame_ground_truth=ground_truth_now_frame, - metrics_config=self.metrics_config, - frame_config=frame_config, - unix_time=unix_time, - target_labels=self.target_labels, ) if len(self.frame_results) > 0: diff --git a/perception_eval/perception_eval/result/perception/perception_frame_result.py b/perception_eval/perception_eval/result/perception/perception_frame_result.py index ce862f80..8f752e6c 100644 --- a/perception_eval/perception_eval/result/perception/perception_frame_result.py +++ b/perception_eval/perception_eval/result/perception/perception_frame_result.py @@ -60,38 +60,45 @@ class PerceptionFrameResult: def __init__( self, + unix_time: int, + frame_config: PerceptionFrameConfig, + metrics_config: MetricsScoreConfig, object_results: List[DynamicObjectWithPerceptionResult], frame_ground_truth: FrameGroundTruth, - metrics_config: MetricsScoreConfig, - frame_config: PerceptionFrameConfig, - unix_time: int, - target_labels: List[LabelType], ) -> None: - self.frame_number = frame_ground_truth.frame_number self.unix_time = unix_time - self.target_labels = target_labels + self.frame_config = frame_config + self.metrics_config = metrics_config self.object_results = object_results self.frame_ground_truth = frame_ground_truth # init evaluation self.metrics_score = metrics.MetricsScore( - metrics_config, + self.metrics_config, used_frame=[int(self.frame_number)], ) self.pass_fail_result = PassFailResult( - unix_time=unix_time, + unix_time=self.unix_time, frame_number=self.frame_number, - frame_config=frame_config, - ego2map=frame_ground_truth.ego2map, + frame_config=self.frame_config, + ego2map=self.frame_ground_truth.ego2map, ) + @property + def target_labels(self) -> List[LabelType]: + return self.frame_config.target_labels + + @property + def frame_number(self) -> int: + return self.frame_ground_truth.frame_number + def evaluate_frame( self, critical_ground_truth_objects: List[ObjectType], previous_result: Optional[PerceptionFrameResult] = None, ) -> None: - """[summary] + """ Evaluate a frame from the pair of estimated objects and ground truth objects Args: ros_critical_ground_truth_objects (List[ObjectType]): The list of Ground truth objects filtered by ROS node. 
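With the constructor refactor above, `critical_ground_truth_objects` and `frame_config` become optional in `PerceptionEvaluationManager.add_frame_result()`: when omitted, the manager copies the frame's ground-truth objects as the critical set and builds a default `PerceptionFrameConfig` from its own evaluation config. The following minimal sketch illustrates a caller relying on those defaults; the `perception_eval.manager` import path and the `on_frame` callback are assumptions for illustration, not part of this patch series.

    # Minimal sketch relying on the new add_frame_result() defaults (import path assumed).
    from perception_eval.manager import PerceptionEvaluationManager


    def on_frame(evaluator: PerceptionEvaluationManager, unix_time, ground_truth_now_frame, estimated_objects):
        # frame_config and critical_ground_truth_objects are deliberately omitted;
        # the manager falls back to a frame config derived from evaluator.config and
        # treats every ground truth in the frame as critical.
        return evaluator.add_frame_result(
            unix_time=unix_time,
            ground_truth_now_frame=ground_truth_now_frame,
            estimated_objects=estimated_objects,
        )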
From 903601c61aa63faf8b7c0b1f563d85b5ec4e6daa Mon Sep 17 00:00:00 2001 From: ktro2828 Date: Thu, 28 Dec 2023 16:40:52 +0900 Subject: [PATCH 8/9] feat: update `PerceptionFrameConfig` Signed-off-by: ktro2828 --- .../manager/perception_evaluation_manager.py | 4 +- .../perception/perception_frame_config.py | 89 ++++++++----------- .../perception/perception_pass_fail_result.py | 2 +- .../test/perception_fp_validation_lsim.py | 2 +- perception_eval/test/perception_lsim.py | 2 +- perception_eval/test/perception_lsim2d.py | 2 +- 6 files changed, 44 insertions(+), 57 deletions(-) diff --git a/perception_eval/perception_eval/manager/perception_evaluation_manager.py b/perception_eval/perception_eval/manager/perception_evaluation_manager.py index 0c9dcd92..14cc76fe 100644 --- a/perception_eval/perception_eval/manager/perception_evaluation_manager.py +++ b/perception_eval/perception_eval/manager/perception_evaluation_manager.py @@ -79,8 +79,8 @@ def add_frame_result( unix_time: int, ground_truth_now_frame: FrameGroundTruth, estimated_objects: List[ObjectType], - critical_ground_truth_objects: Optional[List[ObjectType]] = None, frame_config: Optional[PerceptionFrameConfig] = None, + critical_ground_truth_objects: Optional[List[ObjectType]] = None, ) -> PerceptionFrameResult: """Get perception result at current frame. @@ -111,7 +111,7 @@ def add_frame_result( critical_ground_truth_objects = ground_truth_now_frame.objects.copy() if frame_config is None: - frame_config = PerceptionFrameConfig(self.config) + frame_config = PerceptionFrameConfig.from_eval_cfg(self.config) result = PerceptionFrameResult( unix_time=unix_time, diff --git a/perception_eval/perception_eval/result/perception/perception_frame_config.py b/perception_eval/perception_eval/result/perception/perception_frame_config.py index 5b5ea011..ad229281 100644 --- a/perception_eval/perception_eval/result/perception/perception_frame_config.py +++ b/perception_eval/perception_eval/result/perception/perception_frame_config.py @@ -14,16 +14,17 @@ from __future__ import annotations -from typing import Any -from typing import Dict from typing import List from typing import Optional from typing import TYPE_CHECKING +from typing import Union from perception_eval.common.label import set_target_lists from perception_eval.common.threshold import check_thresholds +from perception_eval.config.params import PerceptionFilterParam if TYPE_CHECKING: + from perception_eval.common.label import LabelType from perception_eval.config import PerceptionEvaluationConfig @@ -52,15 +53,15 @@ class PerceptionFrameConfig: def __init__( self, evaluator_config: PerceptionEvaluationConfig, - target_labels: List[str], - ignore_attributes: Optional[List[str]] = None, + target_labels: List[Union[str, LabelType]], max_x_position_list: Optional[List[float]] = None, max_y_position_list: Optional[List[float]] = None, - max_distance_list: Optional[List[float]] = None, min_distance_list: Optional[List[float]] = None, + max_distance_list: Optional[List[float]] = None, min_point_numbers: Optional[List[int]] = None, confidence_threshold_list: Optional[List[float]] = None, target_uuids: Optional[List[str]] = None, + ignore_attributes: Optional[List[str]] = None, thresholds: Optional[List[float]] = None, ) -> None: """ @@ -82,56 +83,42 @@ def __init__( target_uuids (Optional[List[str]]): The list of target uuid. Defaults to None. 
""" self.evaluation_task = evaluator_config.evaluation_task - - self.target_labels = set_target_lists(target_labels, evaluator_config.label_converter) - self.ignore_attributes = ignore_attributes - - num_elements: int = len(self.target_labels) - if max_x_position_list and max_y_position_list: - self.max_x_position_list: List[float] = check_thresholds(max_x_position_list, num_elements) - self.max_y_position_list: List[float] = check_thresholds(max_y_position_list, num_elements) - self.max_distance_list = None - self.min_distance_list = None - elif max_distance_list and min_distance_list: - self.max_distance_list: List[float] = check_thresholds(max_distance_list, num_elements) - self.min_distance_list: List[float] = check_thresholds(min_distance_list, num_elements) - self.max_x_position_list = None - self.max_y_position_list = None - elif evaluator_config.evaluation_task.is_2d(): - self.max_x_position_list = None - self.max_y_position_list = None - self.max_distance_list = None - self.min_distance_list = None + if all([isinstance(label, str) for label in target_labels]): + self.target_labels = set_target_lists(target_labels, evaluator_config.label_converter) else: - raise RuntimeError("Either max x/y position or max/min distance should be specified") - - if min_point_numbers is None: - self.min_point_numbers = None - else: - self.min_point_numbers: List[int] = check_thresholds(min_point_numbers, num_elements) - - if confidence_threshold_list is None: - self.confidence_threshold_list = None - else: - self.confidence_threshold_list: List[float] = check_thresholds(confidence_threshold_list, num_elements) - - self.target_uuids: Optional[List[str]] = target_uuids - - self.filtering_params: Dict[str, Any] = { - "target_labels": self.target_labels, - "ignore_attributes": self.ignore_attributes, - "max_x_position_list": self.max_x_position_list, - "max_y_position_list": self.max_y_position_list, - "max_distance_list": self.max_distance_list, - "min_distance_list": self.min_distance_list, - "min_point_numbers": self.min_point_numbers, - "confidence_threshold_list": self.confidence_threshold_list, - "target_uuids": self.target_uuids, - } + self.target_labels = target_labels + + self.filter_param = PerceptionFilterParam( + evaluation_task=self.evaluation_task, + target_labels=self.target_labels, + max_x_position_list=max_x_position_list, + max_y_position_list=max_y_position_list, + min_distance_list=min_distance_list, + max_distance_list=max_distance_list, + min_point_numbers=min_point_numbers, + confidence_threshold_list=confidence_threshold_list, + target_uuids=target_uuids, + ignore_attributes=ignore_attributes, + ) num_elements: int = len(self.target_labels) - if thresholds is None: self.thresholds = None else: self.thresholds = check_thresholds(thresholds, num_elements) + + @classmethod + def from_eval_cfg(cls, eval_cfg: PerceptionEvaluationConfig) -> PerceptionFrameConfig: + return cls( + evaluator_config=eval_cfg, + target_labels=eval_cfg.target_labels, + max_x_position_list=eval_cfg.filter_param.max_x_position_list, + max_y_position_list=eval_cfg.filter_param.max_y_position_list, + min_distance_list=eval_cfg.filter_param.min_distance_list, + max_distance_list=eval_cfg.filter_param.max_distance_list, + min_point_numbers=eval_cfg.filter_param.min_point_numbers, + confidence_threshold_list=eval_cfg.filter_param.confidence_threshold_list, + target_uuids=eval_cfg.filter_param.target_uuids, + ignore_attributes=eval_cfg.filter_param.ignore_attributes, + 
thresholds=eval_cfg.metrics_param.plane_distance_thresholds, + ) diff --git a/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py b/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py index f19bad5b..e05bd828 100644 --- a/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py +++ b/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py @@ -84,7 +84,7 @@ def evaluate( objects=critical_ground_truth_objects, is_gt=True, ego2map=self.ego2map, - **self.frame_config.filtering_params, + **self.frame_config.filter_param.as_dict(), ) self.tp_object_results, self.fp_object_results = self.__get_positive_object_results( object_results=object_results, diff --git a/perception_eval/test/perception_fp_validation_lsim.py b/perception_eval/test/perception_fp_validation_lsim.py index 781a5673..6bddbd46 100644 --- a/perception_eval/test/perception_fp_validation_lsim.py +++ b/perception_eval/test/perception_fp_validation_lsim.py @@ -82,8 +82,8 @@ def callback(self, unix_time: int, estimated_objects: List[ObjectType]) -> None: unix_time=unix_time, ground_truth_now_frame=ground_truth_now_frame, estimated_objects=estimated_objects, - critical_ground_truth_objects=critical_ground_truth_objects, frame_config=frame_config, + critical_ground_truth_objects=critical_ground_truth_objects, ) self.display(frame_result) diff --git a/perception_eval/test/perception_lsim.py b/perception_eval/test/perception_lsim.py index c59e9b52..63dcd874 100644 --- a/perception_eval/test/perception_lsim.py +++ b/perception_eval/test/perception_lsim.py @@ -123,8 +123,8 @@ def callback( unix_time=unix_time, ground_truth_now_frame=ground_truth_now_frame, estimated_objects=estimated_objects, - critical_ground_truth_objects=critical_ground_truth_objects, frame_config=frame_config, + critical_ground_truth_objects=critical_ground_truth_objects, ) self.visualize(frame_result) diff --git a/perception_eval/test/perception_lsim2d.py b/perception_eval/test/perception_lsim2d.py index ddeb5c2f..9398eb2c 100644 --- a/perception_eval/test/perception_lsim2d.py +++ b/perception_eval/test/perception_lsim2d.py @@ -129,8 +129,8 @@ def callback( unix_time=unix_time, ground_truth_now_frame=ground_truth_now_frame, estimated_objects=estimated_objects, - critical_ground_truth_objects=critical_ground_truth_objects, frame_config=frame_config, + critical_ground_truth_objects=critical_ground_truth_objects, ) self.visualize(frame_result) From c5803bee0958ce0011046c935ebc9c5ba995ea58 Mon Sep 17 00:00:00 2001 From: ktro2828 Date: Fri, 29 Dec 2023 13:10:35 +0900 Subject: [PATCH 9/9] feat: rename `thresholds` into `success_thresholds` Signed-off-by: ktro2828 --- .../perception/perception_frame_config.py | 20 ++++++++++++++----- .../perception/perception_pass_fail_result.py | 4 ++-- .../test/perception_fp_validation_lsim.py | 2 +- perception_eval/test/perception_lsim.py | 2 +- perception_eval/test/perception_lsim2d.py | 4 ++-- 5 files changed, 21 insertions(+), 11 deletions(-) diff --git a/perception_eval/perception_eval/result/perception/perception_frame_config.py b/perception_eval/perception_eval/result/perception/perception_frame_config.py index ad229281..2e310d80 100644 --- a/perception_eval/perception_eval/result/perception/perception_frame_config.py +++ b/perception_eval/perception_eval/result/perception/perception_frame_config.py @@ -19,6 +19,7 @@ from typing import TYPE_CHECKING from typing import Union +from perception_eval.common.evaluation_task import 
EvaluationTask from perception_eval.common.label import set_target_lists from perception_eval.common.threshold import check_thresholds from perception_eval.config.params import PerceptionFilterParam @@ -62,7 +63,7 @@ def __init__( confidence_threshold_list: Optional[List[float]] = None, target_uuids: Optional[List[str]] = None, ignore_attributes: Optional[List[str]] = None, - thresholds: Optional[List[float]] = None, + success_thresholds: Optional[List[float]] = None, ) -> None: """ Args: @@ -102,13 +103,22 @@ def __init__( ) num_elements: int = len(self.target_labels) - if thresholds is None: - self.thresholds = None + if success_thresholds is None: + self.success_thresholds = None else: - self.thresholds = check_thresholds(thresholds, num_elements) + self.success_thresholds = check_thresholds(success_thresholds, num_elements) @classmethod def from_eval_cfg(cls, eval_cfg: PerceptionEvaluationConfig) -> PerceptionFrameConfig: + if eval_cfg.evaluation_task.is_3d(): + success_thresholds = eval_cfg.metrics_param.plane_distance_thresholds + else: + success_thresholds = ( + None + if eval_cfg.evaluation_task == EvaluationTask.CLASSIFICATION2D + else eval_cfg.metrics_param.iou_2d_thresholds + ) + return cls( evaluator_config=eval_cfg, target_labels=eval_cfg.target_labels, @@ -120,5 +130,5 @@ def from_eval_cfg(cls, eval_cfg: PerceptionEvaluationConfig) -> PerceptionFrameC confidence_threshold_list=eval_cfg.filter_param.confidence_threshold_list, target_uuids=eval_cfg.filter_param.target_uuids, ignore_attributes=eval_cfg.filter_param.ignore_attributes, - thresholds=eval_cfg.metrics_param.plane_distance_thresholds, + success_thresholds=success_thresholds, ) diff --git a/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py b/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py index e05bd828..4a0201fd 100644 --- a/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py +++ b/perception_eval/perception_eval/result/perception/perception_pass_fail_result.py @@ -96,7 +96,7 @@ def evaluate( object_results, self.frame_config.target_labels, MatchingMode.IOU2D if self.frame_config.evaluation_task.is_2d() else MatchingMode.PLANEDISTANCE, - self.frame_config.thresholds, + self.frame_config.success_thresholds, ) self.critical_ground_truth_objects = critical_ground_truth_objects @@ -151,7 +151,7 @@ def __get_positive_object_results( matching_mode=MatchingMode.IOU2D if self.frame_config.evaluation_task.is_2d() else MatchingMode.PLANEDISTANCE, - matching_threshold_list=self.frame_config.thresholds, + matching_threshold_list=self.frame_config.success_thresholds, ) # filter by critical_ground_truth_objects diff --git a/perception_eval/test/perception_fp_validation_lsim.py b/perception_eval/test/perception_fp_validation_lsim.py index 6bddbd46..1e0a7bd8 100644 --- a/perception_eval/test/perception_fp_validation_lsim.py +++ b/perception_eval/test/perception_fp_validation_lsim.py @@ -75,7 +75,7 @@ def callback(self, unix_time: int, estimated_objects: List[ObjectType]) -> None: target_labels=["car", "bicycle", "pedestrian", "motorbike"], max_x_position_list=[100.0, 100.0, 100.0, 100.0], max_y_position_list=[100.0, 100.0, 100.0, 100.0], - thresholds=[2.0, 2.0, 2.0, 2.0], + success_thresholds=[2.0, 2.0, 2.0, 2.0], ) frame_result = self.evaluator.add_frame_result( diff --git a/perception_eval/test/perception_lsim.py b/perception_eval/test/perception_lsim.py index 63dcd874..2a987567 100644 --- a/perception_eval/test/perception_lsim.py +++ 
b/perception_eval/test/perception_lsim.py @@ -116,7 +116,7 @@ def callback( ignore_attributes=["cycle_state.without_rider"], max_x_position_list=[30.0, 30.0, 30.0, 30.0], max_y_position_list=[30.0, 30.0, 30.0, 30.0], - thresholds=[2.0, 2.0, 2.0, 2.0], + success_thresholds=[2.0, 2.0, 2.0, 2.0], ) frame_result = self.evaluator.add_frame_result( diff --git a/perception_eval/test/perception_lsim2d.py b/perception_eval/test/perception_lsim2d.py index 9398eb2c..48b4b5a8 100644 --- a/perception_eval/test/perception_lsim2d.py +++ b/perception_eval/test/perception_lsim2d.py @@ -115,14 +115,14 @@ def callback( else ["car", "bicycle", "pedestrian", "motorbike"] ) ignore_attributes = ["cycle_state.without_rider"] if self.label_prefix == "autoware" else None - thresholds = None if self.evaluation_task == "classification2d" else [0.5, 0.5, 0.5, 0.5] + success_thresholds = None if self.evaluation_task == "classification2d" else [0.5, 0.5, 0.5, 0.5] # 距離などでUC評価objectを選別するためのインターフェイス(PerceptionEvaluationManager初期化時にConfigを設定せず、関数受け渡しにすることで動的に変更可能なInterface) # どれを注目物体とするかのparam frame_config = PerceptionFrameConfig( evaluator_config=self.evaluator.config, target_labels=target_labels, ignore_attributes=ignore_attributes, - thresholds=thresholds, + success_thresholds=success_thresholds, ) frame_result = self.evaluator.add_frame_result(