diff --git a/.driving_log_replayer.cspell.json b/.driving_log_replayer.cspell.json
index 43ce9a545..b47bfda09 100644
--- a/.driving_log_replayer.cspell.json
+++ b/.driving_log_replayer.cspell.json
@@ -22,6 +22,7 @@
     "sideshift",
     "Koji",
     "Minoda",
-    "pyproject"
+    "pyproject",
+    "CENTERDISTANCE"
   ]
 }
diff --git a/driving_log_replayer/driving_log_replayer/criteria/__init__.py b/driving_log_replayer/driving_log_replayer/criteria/__init__.py
new file mode 100644
index 000000000..b5ff191fd
--- /dev/null
+++ b/driving_log_replayer/driving_log_replayer/criteria/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2023 TIER IV.inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .perception import CriteriaLevel
+from .perception import CriteriaMethod
+from .perception import PerceptionCriteria
+
+__all__ = ("CriteriaLevel", "CriteriaMethod", "PerceptionCriteria")
diff --git a/driving_log_replayer/driving_log_replayer/criteria/perception.py b/driving_log_replayer/driving_log_replayer/criteria/perception.py
new file mode 100644
index 000000000..05fb88f20
--- /dev/null
+++ b/driving_log_replayer/driving_log_replayer/criteria/perception.py
@@ -0,0 +1,325 @@
+# Copyright (c) 2023 TIER IV.inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from abc import ABC
+from abc import abstractmethod
+from enum import Enum
+from numbers import Number
+
+from perception_eval.common.evaluation_task import EvaluationTask
+from perception_eval.evaluation import PerceptionFrameResult
+from perception_eval.evaluation.matching import MatchingMode
+from typing_extensions import Self
+
+
+class SuccessFail(Enum):
+    """Enum object representing whether the evaluated result is success or fail."""
+
+    SUCCESS = "Success"
+    FAIL = "Fail"
+
+    def __str__(self) -> str:
+        return self.value
+
+    def is_success(self) -> bool:
+        """
+        Return whether the result is success or fail.
+
+        Returns
+        -------
+        bool: Success or fail.
+        """
+        return self == SuccessFail.SUCCESS
+
+
+class CriteriaLevel(Enum):
+    """
+    Enum object representing the criteria level.
+
+    PERFECT == 100.0
+    HARD >= 75.0
+    NORMAL >= 50.0
+    EASY >= 25.0
+
+    CUSTOM >= SCORE YOU SPECIFIED [0.0, 100.0]
+    """
+
+    PERFECT = 100.0
+    HARD = 75.0
+    NORMAL = 50.0
+    EASY = 25.0
+
+    CUSTOM = None
+
+    def is_valid(self, score: Number) -> bool:
+        """
+        Return whether the score satisfies the level.
+
+        Args:
+        ----
+            score (Number): Calculated score.
+
+        Returns:
+        -------
+            bool: Whether the score satisfies the level.
+        """
+        return score >= self.value
+
+    @classmethod
+    def from_str(cls, value: str) -> Self:
+        """
+        Construct instance from name in string.
+
+        Args:
+        ----
+            value (str): Name of the level.
+
+        Returns:
+        -------
+            CriteriaLevel: `CriteriaLevel` instance.
+        """
+        name: str = value.upper()
+        assert name != "CUSTOM", "If you want to use custom level, use from_number."
+        assert name in cls.__members__, "value must be PERFECT, HARD, NORMAL, or EASY"
+        return cls.__members__[name]
+
+    @classmethod
+    def from_number(cls, value: Number) -> Self:
+        """
+        Construct `CriteriaLevel.CUSTOM` with custom value.
+
+        Args:
+        ----
+            value (Number): Level value, which must be within [0.0, 100.0].
+
+        Returns:
+        -------
+            CriteriaLevel: `CriteriaLevel.CUSTOM` with custom value.
+        """
+        min_range = 0.0
+        max_range = 100.0
+        assert (
+            min_range <= value <= max_range
+        ), f"Custom level must be [0.0, 100.0], but got {value}."
+        cls.CUSTOM._value_ = float(value)
+        return cls.CUSTOM
+
+
+class CriteriaMethod(Enum):
+    """
+    Enum object representing criteria methods.
+
+    - NUM_TP: Number of TP (or TN).
+    - METRICS_SCORE: Accuracy score for classification, otherwise mAP score is used.
+    """
+
+    NUM_TP = "num_tp"
+    METRICS_SCORE = "metrics_score"
+
+    @classmethod
+    def from_str(cls, value: str) -> Self:
+        """
+        Construct instance from name in string.
+
+        Args:
+        ----
+            value (str): Name of enum.
+
+        Returns:
+        -------
+            CriteriaMethod: `CriteriaMethod` instance.
+        """
+        name: str = value.upper()
+        assert name in cls.__members__, "value must be NUM_TP or METRICS_SCORE"
+        return cls.__members__[name]
+
+
+class CriteriaMethodImpl(ABC):
+    """
+    Base class to define the implementation for each criteria method.
+
+    Args:
+    ----
+        level (CriteriaLevel): Level of criteria.
+    """
+
+    def __init__(self, level: CriteriaLevel) -> None:
+        super().__init__()
+        self.level: CriteriaLevel = level
+
+    def get_result(self, frame: PerceptionFrameResult) -> SuccessFail:
+        """
+        Return `SuccessFail` instance from the frame result.
+
+        Args:
+        ----
+            frame (PerceptionFrameResult): Frame result.
+
+        Returns:
+        -------
+            SuccessFail: Success or fail.
+        """
+        if self.has_objects(frame) is False:
+            return SuccessFail.FAIL
+        score: float = self.calculate_score(frame)
+        return SuccessFail.SUCCESS if self.level.is_valid(score) else SuccessFail.FAIL
+
+    @staticmethod
+    def has_objects(frame: PerceptionFrameResult) -> bool:
+        """
+        Return whether the frame result contains at least one object.
+
+        Args:
+        ----
+            frame (PerceptionFrameResult): Frame result.
+
+        Returns:
+        -------
+            bool: Whether the frame result has objects.
+        """
+        num_success: int = frame.pass_fail_result.get_num_success()
+        num_fail: int = frame.pass_fail_result.get_num_fail()
+        return num_success + num_fail > 0
+
+    @staticmethod
+    @abstractmethod
+    def calculate_score(frame: PerceptionFrameResult) -> float:
+        """
+        Calculate score depending on the method.
+
+        Args:
+        ----
+            frame (PerceptionFrameResult): Frame result.
+
+        Returns:
+        -------
+            float: Calculated score.
+        """
+
+
+class NumTP(CriteriaMethodImpl):
+    name = CriteriaMethod.NUM_TP
+
+    def __init__(self, level: CriteriaLevel) -> None:
+        super().__init__(level)
+
+    @staticmethod
+    def calculate_score(frame: PerceptionFrameResult) -> float:
+        num_success: int = frame.pass_fail_result.get_num_success()
+        num_objects: int = num_success + frame.pass_fail_result.get_num_fail()
+        return 100.0 * num_success / num_objects if num_objects != 0 else 0.0
+
+
+class MetricsScore(CriteriaMethodImpl):
+    name = CriteriaMethod.METRICS_SCORE
+
+    def __init__(self, level: CriteriaLevel) -> None:
+        super().__init__(level)
+
+    @staticmethod
+    def calculate_score(frame: PerceptionFrameResult) -> float:
+        if frame.metrics_score.evaluation_task == EvaluationTask.CLASSIFICATION2D:
+            scores = [
+                acc.accuracy
+                for score in frame.metrics_score.classification_scores
+                for acc in score.accuracies
+                if acc.accuracy != float("inf")
+            ]
+        else:
+            scores = [
+                map_.map
+                for map_ in frame.metrics_score.maps
+                if map_.map != float("inf") and map_.matching_mode == MatchingMode.CENTERDISTANCE
+            ]
+
+        return 100.0 * sum(scores) / len(scores) if len(scores) != 0 else 0.0
+
+
+class PerceptionCriteria:
+    """
+    Criteria interface for perception evaluation.
+
+    Args:
+    ----
+        method (str | CriteriaMethod | None): Criteria method instance or name.
+            If None, `CriteriaMethod.NUM_TP` is used. Defaults to None.
+        level (str | Number | CriteriaLevel | None): Criteria level instance, name, or value.
+            If None, `CriteriaLevel.EASY` is used. Defaults to None.
+    """
+
+    def __init__(
+        self,
+        method: str | CriteriaMethod | None = None,
+        level: str | Number | CriteriaLevel | None = None,
+    ) -> None:
+        method = CriteriaMethod.NUM_TP if method is None else self.load_method(method)
+        level = CriteriaLevel.EASY if level is None else self.load_level(level)
+
+        if method == CriteriaMethod.NUM_TP:
+            self.method = NumTP(level)
+        elif method == CriteriaMethod.METRICS_SCORE:
+            self.method = MetricsScore(level)
+
+    @staticmethod
+    def load_method(method: str | CriteriaMethod) -> CriteriaMethod:
+        """
+        Load `CriteriaMethod` enum.
+
+        Args:
+        ----
+            method (str | CriteriaMethod): Criteria method instance or name.
+
+        Returns:
+        -------
+            CriteriaMethod: Instance.
+        """
+        if isinstance(method, str):
+            method: CriteriaMethod = CriteriaMethod.from_str(method)
+        assert isinstance(method, CriteriaMethod), f"Invalid type of method: {type(method)}"
+        return method
+
+    @staticmethod
+    def load_level(level: str | Number | CriteriaLevel) -> CriteriaLevel:
+        """
+        Load `CriteriaLevel`.
+
+        Args:
+        ----
+            level (str | Number | CriteriaLevel): Criteria level instance, name or value.
+
+        Returns:
+        -------
+            CriteriaLevel: Instance.
+        """
+        if isinstance(level, str):
+            level = CriteriaLevel.from_str(level)
+        elif isinstance(level, Number):
+            level = CriteriaLevel.from_number(level)
+        assert isinstance(level, CriteriaLevel), f"Invalid type of level: {type(level)}"
+        return level
+
+    def get_result(self, frame: PerceptionFrameResult) -> SuccessFail:
+        """
+        Return Success/Fail result from `PerceptionFrameResult`.
+
+        Args:
+        ----
+            frame (PerceptionFrameResult): Frame result of perception evaluation.
+
+        Returns:
+        -------
+            SuccessFail: Success/Fail result.
+        """
+        return self.method.get_result(frame)
diff --git a/driving_log_replayer/scripts/perception_2d_evaluator_node.py b/driving_log_replayer/scripts/perception_2d_evaluator_node.py
index c17669727..cb55d4b55 100755
--- a/driving_log_replayer/scripts/perception_2d_evaluator_node.py
+++ b/driving_log_replayer/scripts/perception_2d_evaluator_node.py
@@ -31,6 +31,7 @@
 from tier4_perception_msgs.msg import DetectedObjectsWithFeature
 from tier4_perception_msgs.msg import DetectedObjectWithFeature
 
+from driving_log_replayer.criteria import PerceptionCriteria
 from driving_log_replayer.evaluator import DLREvaluator
 from driving_log_replayer.evaluator import evaluator_main
 import driving_log_replayer.perception_eval_conversions as eval_conversions
@@ -52,6 +53,11 @@ def __init__(self, condition: dict) -> None:
             self.__result[camera_type] = True
             self.__msg[camera_type] = "NotTested"
 
+        self.__criteria = PerceptionCriteria(
+            method=condition.get("CriteriaMethod"),
+            level=condition.get("CriteriaLevel"),
+        )
+
     def update(self) -> None:
         summary_str = ""
         for camera_type, eval_msg in self.__msg.items():
@@ -72,21 +78,11 @@ def set_frame(
         camera_type: str,
     ) -> None:
         self.__total[camera_type] += 1
-        has_objects = True
-        if (
-            frame.pass_fail_result.tp_object_results == []
-            and frame.pass_fail_result.fp_object_results == []
-            and frame.pass_fail_result.fn_objects == []
-        ):
-            has_objects = False
-
-        success = (
-            "Success"
-            if frame.pass_fail_result.get_fail_object_num() == 0 and has_objects
-            else "Fail"
-        )
-        if success == "Success":
+        result = self.__criteria.get_result(frame)
+
+        if result.is_success():
             self.__success[camera_type] += 1
+
         test_rate = self.__success[camera_type] / self.__total[camera_type] * 100.0
         self.__result[camera_type] = test_rate >= self.__pass_rate
         self.__msg[
@@ -100,7 +96,7 @@ def set_frame(
             "FrameSkip": skip,
         }
         out_frame["PassFail"] = {
-            "Result": success,
+            "Result": str(result),
             "Info": [
                 {
                     "TP": len(frame.pass_fail_result.tp_object_results),
diff --git a/driving_log_replayer/scripts/perception_evaluator_node.py b/driving_log_replayer/scripts/perception_evaluator_node.py
index ed300f6f6..799c86903 100755
--- a/driving_log_replayer/scripts/perception_evaluator_node.py
+++ b/driving_log_replayer/scripts/perception_evaluator_node.py
@@ -41,6 +41,7 @@
 from std_msgs.msg import Header
 from visualization_msgs.msg import MarkerArray
 
+from driving_log_replayer.criteria import PerceptionCriteria
 from driving_log_replayer.evaluator import DLREvaluator
 from driving_log_replayer.evaluator import evaluator_main
 import driving_log_replayer.perception_eval_conversions as eval_conversions
@@ -54,6 +55,11 @@ def __init__(self, condition: dict) -> None:
         self.__success = 0
         self.__total = 0
 
+        self.__criteria = PerceptionCriteria(
+            method=condition.get("CriteriaMethod"),
+            level=condition.get("CriteriaLevel"),
+        )
+
     def update(self) -> None:
         test_rate = 0.0 if self.__total == 0 else self.__success / self.__total * 100.0
         success = test_rate >= self.__pass_rate
@@ -74,28 +80,18 @@ def set_frame(
         map_to_baselink: dict,
     ) -> tuple[MarkerArray, MarkerArray]:
         self.__total += 1
-        has_objects = True
-        if (
-            frame.pass_fail_result.tp_object_results == []
-            and frame.pass_fail_result.fp_object_results == []
-            and frame.pass_fail_result.fn_objects == []
-        ):
-            has_objects = False
-
-        success = (
-            "Success"
-            if frame.pass_fail_result.get_fail_object_num() == 0 and has_objects
-            else "Fail"
-        )
-        if success == "Success":
+        result = self.__criteria.get_result(frame)
+
+        if result.is_success():
             self.__success += 1
+
         out_frame = {
             "Ego": {"TransformStamped": map_to_baselink},
             "FrameName": frame.frame_name,
             "FrameSkip": skip,
         }
         out_frame["PassFail"] = {
-            "Result": success,
+            "Result": str(result),
             "Info": [
                 {
                     "TP": len(frame.pass_fail_result.tp_object_results),
diff --git a/driving_log_replayer/scripts/traffic_light_evaluator_node.py b/driving_log_replayer/scripts/traffic_light_evaluator_node.py
index a2182bee6..a422c568f 100755
--- a/driving_log_replayer/scripts/traffic_light_evaluator_node.py
+++ b/driving_log_replayer/scripts/traffic_light_evaluator_node.py
@@ -31,6 +31,7 @@
 import rclpy
 from std_msgs.msg import Header
 
+from driving_log_replayer.criteria import PerceptionCriteria
 from driving_log_replayer.evaluator import DLREvaluator
 from driving_log_replayer.evaluator import evaluator_main
 import driving_log_replayer.perception_eval_conversions as eval_conversions
@@ -44,6 +45,11 @@ def __init__(self, condition: dict) -> None:
         self.__success = 0
         self.__total = 0
 
+        self.__criteria = PerceptionCriteria(
+            method=condition.get("CriteriaMethod"),
+            level=condition.get("CriteriaLevel"),
+        )
+
     def update(self) -> None:
         test_rate = 0.0 if self.__total == 0 else self.__success / self.__total * 100.0
         success = test_rate >= self.__pass_rate
@@ -64,28 +70,18 @@ def set_frame(
         map_to_baselink: dict,
     ) -> None:
         self.__total += 1
-        has_objects = True
-        if (
-            frame.pass_fail_result.tp_object_results == []
-            and frame.pass_fail_result.fp_object_results == []
-            and frame.pass_fail_result.fn_objects == []
-        ):
-            has_objects = False
-
-        success = (
-            "Success"
-            if frame.pass_fail_result.get_fail_object_num() == 0 and has_objects
-            else "Fail"
-        )
-        if success == "Success":
+        result = self.__criteria.get_result(frame)
+
+        if result.is_success():
             self.__success += 1
+
         out_frame = {
             "Ego": {"TransformStamped": map_to_baselink},
             "FrameName": frame.frame_name,
             "FrameSkip": skip,
         }
         out_frame["PassFail"] = {
-            "Result": success,
+            "Result": str(result),
             "Info": [
                 {
                     "TP": len(frame.pass_fail_result.tp_object_results),
diff --git a/driving_log_replayer/test/unittest/criteria/test_perception.py b/driving_log_replayer/test/unittest/criteria/test_perception.py
new file mode 100644
index 000000000..a37ff1d71
--- /dev/null
+++ b/driving_log_replayer/test/unittest/criteria/test_perception.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2023 TIER IV.inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from driving_log_replayer.criteria.perception import CriteriaLevel
+
+
+def test_criteria_level_from_str_ok() -> None:
+    criteria_level = CriteriaLevel.from_str("perfect")
+    assert criteria_level == CriteriaLevel.PERFECT
+
+
+def test_criteria_level_from_str_ng() -> None:
+    with pytest.raises(AssertionError):
+        CriteriaLevel.from_str("not_criteria_level_type")
+
+
+def test_criteria_level_from_number_ok() -> None:
+    allowed_value = 50.0
+    criteria_level = CriteriaLevel.from_number(allowed_value)
+    assert criteria_level == CriteriaLevel.CUSTOM
+    assert criteria_level.value == allowed_value
+
+
+def test_criteria_level_from_number_ng() -> None:
+    with pytest.raises(AssertionError):
+        CriteriaLevel.from_number(110.0)
diff --git a/sample/perception/scenario.fp.ja.yaml b/sample/perception/scenario.fp.ja.yaml
index c699f8631..4baea2a8b 100644
--- a/sample/perception/scenario.fp.ja.yaml
+++ b/sample/perception/scenario.fp.ja.yaml
@@ -5,7 +5,7 @@ SensorModel: sample_sensor_kit
 VehicleModel: sample_vehicle
 Evaluation:
   UseCaseName: perception
-  UseCaseFormatVersion: 0.5.0
+  UseCaseFormatVersion: 0.6.0
   Datasets:
     - sample_dataset:
         VehicleId: default # データセット毎にVehicleIdを指定する
@@ -13,6 +13,8 @@ Evaluation:
         LocalMapPath: $HOME/autoware_map/sample-map-planning # データセット毎にLocalMapPathを指定する
   Conditions:
     PassRate: 99.0 # 評価試行回数の内、どの程度(%)評価成功だったら成功とするか
+    CriteriaMethod: num_tp # クライテリアメソッド名(num_tp/metrics_score)
+    CriteriaLevel: easy # クライテリアレベル(perfect/hard/normal/easy、もしくはカスタム値[0.0, 100.0])
   PerceptionEvaluationConfig:
     evaluation_config_dict:
       evaluation_task: fp_validation # detection/tracking/fp_validation ここで指定したobjectsを評価する
diff --git a/sample/perception/scenario.fp.yaml b/sample/perception/scenario.fp.yaml
index bdae63b49..07e82d6c1 100644
--- a/sample/perception/scenario.fp.yaml
+++ b/sample/perception/scenario.fp.yaml
@@ -5,7 +5,7 @@ SensorModel: sample_sensor_kit
 VehicleModel: sample_vehicle
 Evaluation:
   UseCaseName: perception
-  UseCaseFormatVersion: 0.5.0
+  UseCaseFormatVersion: 0.6.0
   Datasets:
     - sample_dataset:
         VehicleId: default # Specify VehicleId for each data set.
@@ -13,6 +13,8 @@ Evaluation:
         LocalMapPath: $HOME/autoware_map/sample-map-planning # Specify LocalMapPath for each data set.
   Conditions:
     PassRate: 99.0 # How much (%) of the evaluation attempts are considered successful.
+    CriteriaMethod: num_tp # Method name of criteria (num_tp/metrics_score)
+    CriteriaLevel: easy # Level of criteria (perfect/hard/normal/easy, or custom value [0.0, 100.0])
   PerceptionEvaluationConfig:
     evaluation_config_dict:
       evaluation_task: fp_validation # detection, tracking, or fp_validation. Evaluate the objects specified here
diff --git a/sample/perception/scenario.ja.yaml b/sample/perception/scenario.ja.yaml
index 4ffa409f7..2e40529f5 100644
--- a/sample/perception/scenario.ja.yaml
+++ b/sample/perception/scenario.ja.yaml
@@ -5,7 +5,7 @@ SensorModel: sample_sensor_kit
 VehicleModel: sample_vehicle
 Evaluation:
   UseCaseName: perception
-  UseCaseFormatVersion: 0.5.0
+  UseCaseFormatVersion: 0.6.0
   Datasets:
     - sample_dataset:
         VehicleId: default # データセット毎にVehicleIdを指定する
@@ -13,6 +13,8 @@ Evaluation:
         LocalMapPath: $HOME/autoware_map/sample-map-planning # データセット毎にLocalMapPathを指定する
   Conditions:
     PassRate: 99.0 # 評価試行回数の内、どの程度(%)評価成功だったら成功とするか
+    CriteriaMethod: num_tp # クライテリアメソッド名(num_tp/metrics_score)
+    CriteriaLevel: easy # クライテリアレベル(perfect/hard/normal/easy、もしくはカスタム値[0.0, 100.0])
   PerceptionEvaluationConfig:
     evaluation_config_dict:
       evaluation_task: detection # detection/tracking ここで指定したobjectsを評価する
diff --git a/sample/perception/scenario.yaml b/sample/perception/scenario.yaml
index 97b961f06..cecfd917f 100644
--- a/sample/perception/scenario.yaml
+++ b/sample/perception/scenario.yaml
@@ -5,7 +5,7 @@ SensorModel: sample_sensor_kit
 VehicleModel: sample_vehicle
 Evaluation:
   UseCaseName: perception
-  UseCaseFormatVersion: 0.5.0
+  UseCaseFormatVersion: 0.6.0
   Datasets:
     - sample_dataset:
         VehicleId: default # Specify VehicleId for each data set.
@@ -13,6 +13,8 @@ Evaluation:
         LocalMapPath: $HOME/autoware_map/sample-map-planning # Specify LocalMapPath for each data set.
   Conditions:
     PassRate: 99.0 # How much (%) of the evaluation attempts are considered successful.
+    CriteriaMethod: num_tp # Method name of criteria (num_tp/metrics_score)
+    CriteriaLevel: easy # Level of criteria (perfect/hard/normal/easy, or custom value [0.0, 100.0])
   PerceptionEvaluationConfig:
     evaluation_config_dict:
       evaluation_task: detection # detection or tracking. Evaluate the objects specified here
diff --git a/sample/perception_2d/scenario.ja.yaml b/sample/perception_2d/scenario.ja.yaml
index 2bd9dd85b..b2b912cc3 100644
--- a/sample/perception_2d/scenario.ja.yaml
+++ b/sample/perception_2d/scenario.ja.yaml
@@ -5,7 +5,7 @@ SensorModel: aip_x2
 VehicleModel: gsm8
 Evaluation:
   UseCaseName: perception_2d
-  UseCaseFormatVersion: 0.2.0
+  UseCaseFormatVersion: 0.3.0
   Datasets:
     - f72e1065-7c38-40fe-a4e2-c5bbe6ff6443:
        VehicleId: ps1/20210620/CAL_000015 # データセット毎にVehicleIdを指定する
@@ -13,6 +13,8 @@ Evaluation:
        LocalMapPath: $HOME/map/perception # データセット毎にLocalMapPathを指定する
   Conditions:
     PassRate: 99.0 # 評価試行回数の内、どの程度(%)評価成功だったら成功とするか
+    CriteriaMethod: num_tp # クライテリアメソッド名(num_tp/metrics_score)
+    CriteriaLevel: easy # クライテリアレベル(perfect/hard/normal/easy、もしくはカスタム値[0.0, 100.0])
   TargetCameras: # 評価対象のカメラの種類をキーに、カメラの番号を値として記述する
     cam_front: 0
     cam_front_right: 1
diff --git a/sample/perception_2d/scenario.yaml b/sample/perception_2d/scenario.yaml
index 9c9d5d7f8..94e65ad08 100644
--- a/sample/perception_2d/scenario.yaml
+++ b/sample/perception_2d/scenario.yaml
@@ -5,7 +5,7 @@ SensorModel: aip_x2
 VehicleModel: gsm8
 Evaluation:
   UseCaseName: perception_2d
-  UseCaseFormatVersion: 0.2.0
+  UseCaseFormatVersion: 0.3.0
   Datasets:
     - f72e1065-7c38-40fe-a4e2-c5bbe6ff6443:
        VehicleId: ps1/20210620/CAL_000015 # Specify VehicleId for each data set.
@@ -13,6 +13,8 @@ Evaluation:
        LocalMapPath: $HOME/map/perception # Specify LocalMapPath for each data set.
   Conditions:
     PassRate: 99.0 # How much (%) of the evaluation attempts are considered successful.
+    CriteriaMethod: num_tp # Method name of criteria (num_tp/metrics_score)
+    CriteriaLevel: easy # Level of criteria (perfect/hard/normal/easy, or custom value [0.0, 100.0])
   TargetCameras: # Describes the type of camera to be evaluated as a key and the camera number as a value.
     cam_front: 0
     cam_front_right: 1
diff --git a/sample/traffic_light/scenario.ja.yaml b/sample/traffic_light/scenario.ja.yaml
index cb09ebb09..d9c046c19 100644
--- a/sample/traffic_light/scenario.ja.yaml
+++ b/sample/traffic_light/scenario.ja.yaml
@@ -5,7 +5,7 @@ SensorModel: aip_xx1
 VehicleModel: jpntaxi
 Evaluation:
   UseCaseName: traffic_light
-  UseCaseFormatVersion: 0.2.0
+  UseCaseFormatVersion: 0.3.0
   Datasets:
     - sample:
         VehicleId: "7" # データセット毎にVehicleIdを指定する
@@ -13,6 +13,8 @@ Evaluation:
         LocalMapPath: $HOME/map/traffic_light_xx1 # データセット毎にLocalMapPathを指定する
   Conditions:
     PassRate: 99.0 # 評価試行回数の内、どの程度(%)評価成功だったら成功とするか
+    CriteriaMethod: num_tp # クライテリアメソッド名(num_tp/metrics_score)
+    CriteriaLevel: easy # クライテリアレベル(perfect/hard/normal/easy、もしくはカスタム値[0.0, 100.0])
   PerceptionEvaluationConfig:
     camera_type: cam_traffic_light_near # 信号認識のカメラタイプを指定する
     evaluation_config_dict:
diff --git a/sample/traffic_light/scenario.yaml b/sample/traffic_light/scenario.yaml
index 777a18bda..220086297 100644
--- a/sample/traffic_light/scenario.yaml
+++ b/sample/traffic_light/scenario.yaml
@@ -5,7 +5,7 @@ SensorModel: aip_xx1
 VehicleModel: jpntaxi
 Evaluation:
   UseCaseName: traffic_light
-  UseCaseFormatVersion: 0.2.0
+  UseCaseFormatVersion: 0.3.0
   Datasets:
     - sample:
         VehicleId: "7" # Specify VehicleId for each data set.
@@ -13,6 +13,8 @@ Evaluation:
        LocalMapPath: $HOME/map/traffic_light_xx1 # Specify LocalMapPath for each data set.
   Conditions:
     PassRate: 99.0 # How much (%) of the evaluation attempts are considered successful.
+    CriteriaMethod: num_tp # Method name of criteria (num_tp/metrics_score)
+    CriteriaLevel: easy # Level of criteria (perfect/hard/normal/easy, or custom value [0.0, 100.0])
   PerceptionEvaluationConfig:
     camera_type: cam_traffic_light_near # Specifies the type of camera for traffic_light.
     evaluation_config_dict:
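
Usage note (illustrative, not part of the patch): the sketch below shows how the criteria classes added in driving_log_replayer/driving_log_replayer/criteria/perception.py are intended to be driven by the new CriteriaMethod/CriteriaLevel scenario keys. The condition dict and threshold values are made up for the example; calling criteria.get_result() additionally requires a real PerceptionFrameResult from perception_eval, which is only hinted at in the final comments.

from driving_log_replayer.criteria import CriteriaLevel
from driving_log_replayer.criteria import CriteriaMethod
from driving_log_replayer.criteria import PerceptionCriteria

# Scenario strings map onto the enums introduced by this patch.
assert CriteriaMethod.from_str("num_tp") == CriteriaMethod.NUM_TP
assert CriteriaLevel.from_str("hard") == CriteriaLevel.HARD
assert CriteriaLevel.from_str("hard").is_valid(80.0)  # 80.0 >= 75.0
assert not CriteriaLevel.from_number(90.0).is_valid(85.0)  # custom threshold of 90.0

# The evaluator nodes build the criteria directly from the scenario's Conditions.
condition = {"CriteriaMethod": "num_tp", "CriteriaLevel": "easy"}  # hypothetical values
criteria = PerceptionCriteria(
    method=condition.get("CriteriaMethod"),  # None falls back to CriteriaMethod.NUM_TP
    level=condition.get("CriteriaLevel"),  # None falls back to CriteriaLevel.EASY
)
# result = criteria.get_result(frame)  # frame: PerceptionFrameResult -> SuccessFail
# result.is_success() / str(result) then give the per-frame "Success" / "Fail" verdict.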