From cc6449c8656dc5e9bf525c636ede4b5d61a1d3b9 Mon Sep 17 00:00:00 2001
From: Boyang
Date: Fri, 31 May 2024 17:23:47 -0400
Subject: [PATCH 1/7] Lidar_centerpoint OpenAD kit modified

---
 perception/lidar_centerpoint/README.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/perception/lidar_centerpoint/README.md b/perception/lidar_centerpoint/README.md
index 09cee5266760c..e39eda1f41506 100644
--- a/perception/lidar_centerpoint/README.md
+++ b/perception/lidar_centerpoint/README.md
@@ -27,6 +27,9 @@ We trained the models using .
 | `debug/processing_time_ms` | `tier4_debug_msgs::msg::Float64Stamped` | processing time (ms) |

 ## Parameters
+{{ json_to_markdown("perception/lidar_centerpoint/schema/centerpoint_ml_package.schema.json") }}
+{{ json_to_markdown("perception/lidar_centerpoint/schema/centerpoint_tiny_ml_package.schema.json") }}
+{{ json_to_markdown("perception/lidar_centerpoint/schema/centerpoint.schema.json") }}

 ### Core Parameters

From ae6841c7b988ae74b0a159d2f423b14b15eb398c Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Fri, 31 May 2024 21:37:10 +0000
Subject: [PATCH 2/7] style(pre-commit): autofix

---
 perception/lidar_centerpoint/README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/perception/lidar_centerpoint/README.md b/perception/lidar_centerpoint/README.md
index e39eda1f41506..9095ce55cba4f 100644
--- a/perception/lidar_centerpoint/README.md
+++ b/perception/lidar_centerpoint/README.md
@@ -27,6 +27,7 @@ We trained the models using .
 | `debug/processing_time_ms` | `tier4_debug_msgs::msg::Float64Stamped` | processing time (ms) |

 ## Parameters
+
 {{ json_to_markdown("perception/lidar_centerpoint/schema/centerpoint_ml_package.schema.json") }}
 {{ json_to_markdown("perception/lidar_centerpoint/schema/centerpoint_tiny_ml_package.schema.json") }}
 {{ json_to_markdown("perception/lidar_centerpoint/schema/centerpoint.schema.json") }}
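The `{{ json_to_markdown(...) }}` lines added by the two patches above are macro calls that the documentation build expands into a parameter table for each schema file. As a rough illustration of what such a rendering step does — a simplified sketch, not the actual macro implementation; the flattening logic and table layout are assumptions:

```python
# Sketch of a json_to_markdown-style renderer: flatten the "definitions"
# section of a parameter schema file into a markdown table. Illustrative only.
import json


def json_to_markdown(schema_path: str) -> str:
    with open(schema_path) as f:
        schema = json.load(f)
    rows = [
        "| Name | Type | Default | Description |",
        "| ---- | ---- | ------- | ----------- |",
    ]

    def walk(properties: dict, prefix: str = "") -> None:
        for name, spec in properties.items():
            if spec.get("type") == "object":
                # Nested parameter namespaces such as post_process_params.*
                walk(spec.get("properties", {}), f"{prefix}{name}.")
            else:
                rows.append(
                    f"| `{prefix}{name}` | {spec.get('type', '')} "
                    f"| `{spec.get('default', '')}` | {spec.get('description', '')} |"
                )

    for definition in schema.get("definitions", {}).values():
        walk(definition.get("properties", {}))
    return "\n".join(rows)


print(json_to_markdown("perception/lidar_centerpoint/schema/centerpoint.schema.json"))
```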
From ea2cc787a3b403f6afadc935b11e304fcf25f484 Mon Sep 17 00:00:00 2001
From: Boyang
Date: Tue, 4 Jun 2024 14:30:28 -0400
Subject: [PATCH 3/7] Updated schema and readme

---
 perception/lidar_centerpoint/README.md        |  24 +---
 ...t.schemal.json => centerpoint.schema.json} |  27 +++-
 .../schema/centerpoint_ml_package.schema.json |  10 ++
 .../schema/centerpoint_sigma_ml_package.json  |  79 ++++++++++++
 .../schema/centerpoint_tiny.schema.json       | 120 ++++++++++++++++++
 .../centerpoint_tiny_ml_package.schema.json   |  10 ++
 .../detection_class_remapper.schema.json      |  47 +++++++
 7 files changed, 292 insertions(+), 25 deletions(-)
 rename perception/lidar_centerpoint/schema/{centerpoint.schemal.json => centerpoint.schema.json} (70%)
 create mode 100644 perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.json
 create mode 100644 perception/lidar_centerpoint/schema/centerpoint_tiny.schema.json
 create mode 100644 perception/lidar_centerpoint/schema/detection_class_remapper.schema.json

diff --git a/perception/lidar_centerpoint/README.md b/perception/lidar_centerpoint/README.md
index e39eda1f41506..1744e68a4d675 100644
--- a/perception/lidar_centerpoint/README.md
+++ b/perception/lidar_centerpoint/README.md
@@ -27,28 +27,14 @@ We trained the models using .
 | `debug/processing_time_ms` | `tier4_debug_msgs::msg::Float64Stamped` | processing time (ms) |

 ## Parameters
+
+{{ json_to_markdown("perception/lidar_centerpoint/schema/centerpoint.schema.json") }}
 {{ json_to_markdown("perception/lidar_centerpoint/schema/centerpoint_ml_package.schema.json") }}
+{{ json_to_markdown("perception/lidar_centerpoint/schema/centerpoint_tiny.schema.json") }}
 {{ json_to_markdown("perception/lidar_centerpoint/schema/centerpoint_tiny_ml_package.schema.json") }}
-{{ json_to_markdown("perception/lidar_centerpoint/schema/centerpoint.schema.json") }}
+{{ json_to_markdown("perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.schema.json") }}
+{{ json_to_markdown("perception/lidar_centerpoint/schema/detection_class_remapper.schema.json") }}

-### Core Parameters
-
-| Name | Type | Default Value | Description |
-| ------------------------------------------------ | ------------ | ------------------------- | -------------------------------------------------------------- |
-| `encoder_onnx_path` | string | `""` | path to VoxelFeatureEncoder ONNX file |
-| `encoder_engine_path` | string | `""` | path to VoxelFeatureEncoder TensorRT Engine file |
-| `head_onnx_path` | string | `""` | path to DetectionHead ONNX file |
-| `head_engine_path` | string | `""` | path to DetectionHead TensorRT Engine file |
-| `build_only` | bool | `false` | shutdown the node after TensorRT engine file is built |
-| `trt_precision` | string | `fp16` | TensorRT inference precision: `fp32` or `fp16` |
-| `post_process_params.score_threshold` | double | `0.4` | detected objects with score less than threshold are ignored |
-| `post_process_params.yaw_norm_thresholds` | list[double] | [0.3, 0.3, 0.3, 0.3, 0.0] | An array of distance threshold values of norm of yaw [rad]. |
-| `post_process_params.iou_nms_target_class_names` | list[string] | - | target classes for IoU-based Non Maximum Suppression |
-| `post_process_params.iou_nms_search_distance_2d` | double | - | If two objects are farther than the value, NMS isn't applied. |
-| `post_process_params.iou_nms_threshold` | double | - | IoU threshold for the IoU-based Non Maximum Suppression |
-| `post_process_params.has_twist` | boolean | false | Indicates whether the model outputs twist value.
| -| `densification_params.world_frame_id` | string | `map` | the world frame id to fuse multi-frame pointcloud | -| `densification_params.num_past_frames` | int | `1` | the number of past frames to fuse with the current frame | ### The `build_only` option diff --git a/perception/lidar_centerpoint/schema/centerpoint.schemal.json b/perception/lidar_centerpoint/schema/centerpoint.schema.json similarity index 70% rename from perception/lidar_centerpoint/schema/centerpoint.schemal.json rename to perception/lidar_centerpoint/schema/centerpoint.schema.json index b11c115cc2466..137c810b95345 100644 --- a/perception/lidar_centerpoint/schema/centerpoint.schemal.json +++ b/perception/lidar_centerpoint/schema/centerpoint.schema.json @@ -12,6 +12,26 @@ "default": "fp16", "enum": ["fp32", "fp16"] }, + "encoder_onnx_path": { + "type": "string", + "description": "A path to the ONNX file of the encoder network.", + "default": "~/autoware_data/lidar_centerpoint/pts_voxel_encoder_centerpoint_tiny.onnx" + }, + "encoder_engine_path": { + "type": "string", + "description": "A path to the TensorRT engine file of the encoder network.", + "default": "~/autoware_data/lidar_centerpoint/pts_voxel_encoder_centerpoint_tiny.engine" + }, + "head_onnx_path": { + "type": "string", + "description":"A path to the ONNX file of the head network.", + "default": "~/autoware_data/lidar_centerpoint/pts_backbone_neck_head_centerpoint_tiny.onnx" + }, + "head_engine_path": { + "type": "string", + "description": "A path to the TensorRT engine file of the head network.", + "default": "~/autoware_data/lidar_centerpoint/pts_backbone_neck_head_centerpoint_tiny.engine" + }, "post_process_params": { "type": "object", "properties": { @@ -31,7 +51,7 @@ }, "circle_nms_dist_threshold": { "type": "number", - "description": "", + "description": "It specifies the distance threshold for performing Circle NMS. 
Detection boxes within this distance that overlap or are close to each other will be suppressed, keeping only the one with the highest score.",
         "default": 0.5,
         "minimum": 0.0,
         "maximum": 1.0
@@ -54,11 +74,6 @@
         "default": 0.1,
         "minimum": 0.0,
         "maximum": 1.0
-      },
-      "has_twist": {
-        "type": "boolean",
-        "description": "Indicates whether the model outputs twist value.",
-        "default": false
       }
     }
   },
diff --git a/perception/lidar_centerpoint/schema/centerpoint_ml_package.schema.json b/perception/lidar_centerpoint/schema/centerpoint_ml_package.schema.json
index 2c5655c6201ed..00fbf36198905 100644
--- a/perception/lidar_centerpoint/schema/centerpoint_ml_package.schema.json
+++ b/perception/lidar_centerpoint/schema/centerpoint_ml_package.schema.json
@@ -46,6 +46,16 @@
         "type": "integer",
         "description": "A size of encoder input feature channels.",
         "default": 9
+      },
+      "has_variance": {
+        "type": "boolean",
+        "description": "determines whether the covariance matrices for the object's position and velocity information are filled in.",
+        "default": false
+      },
+      "has_twist": {
+        "type": "boolean",
+        "description": "Indicates whether the model outputs twist value.",
+        "default": false
       }
     }
   }
diff --git a/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.json b/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.json
new file mode 100644
index 0000000000000..de03014d0f19c
--- /dev/null
+++ b/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.json
@@ -0,0 +1,79 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "Parameters for Centerpoint Sigma ML model",
+  "type": "object",
+  "definitions": {
+    "centerpoint_sigma_ml_package": {
+      "type": "object",
+      "properties": {
+        "model_params": {
+          "type": "object",
+          "description": "Parameters for model configuration.",
+          "properties": {
+            "class_names": {
+              "type": "array",
+              "description": "An array of class names will be predicted.",
+              "default": ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"],
+              "uniqueItems": true
+            },
+            "point_feature_size": {
+              "type": "integer",
+              "description": "A number of channels of point feature layer.",
+              "default": 4
+            },
+            "max_voxel_size": {
+              "type": "integer",
+              "description": "A maximum size of voxel grid.",
+              "default": 40000
+            },
+            "point_cloud_range": {
+              "type": "array",
+              "description": "An array of distance ranges of each class, this must have same length with `class_names`.",
+              "default": [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0]
+            },
+            "voxel_size": {
+              "type": "array",
+              "description": "An array of voxel grid sizes for PointPainting, this must have same length with `paint_class_names`.",
+              "default": [0.32, 0.32, 10.0]
+            },
+            "down_sample_factor": {
+              "type": "integer",
+              "description": "A scale factor of downsampling points",
+              "default": 1,
+              "minimum": 1
+            },
+            "encoder_in_feature_size": {
+              "type": "integer",
+              "description": "A size of encoder input feature channels.",
+              "default": 9
+            },
+            "has_variance": {
+              "type": "boolean",
+              "description": "determines whether the covariance matrices for the object's position and velocity information are filled in.",
+              "default": false
+            },
+            "has_twist": {
+              "type": "boolean",
+              "description": "Indicates whether the model outputs twist value.",
+              "default": false
+            }
+          }
+        }
+      },
+      "required": ["model_params"]
+    }
+  },
+  "properties": {
+    "/**": {
+      "type": "object",
+      "properties": {
+        "ros__parameters": {
+          "$ref": "#/definitions/centerpoint_sigma_ml_package"
+        }
+      },
+      "required": ["ros__parameters"]
+    }
+  },
+  "required": ["/**"]
+}
+
\ No newline at end of file
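Each of these schema files follows the same ROS 2 parameter-file shape: a reusable block under `definitions`, referenced from `properties./**.ros__parameters` via `$ref` — which is why the `$ref` target has to name a definition declared in the same file. A minimal validation sketch, assuming the Python `jsonschema` package; the file path and parameter values below are illustrative:

```python
# Validate a parameter dict against one of the schema files above.
# Requires the `jsonschema` package; path and values are illustrative.
import json

from jsonschema import Draft7Validator

with open("perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.json") as f:
    schema = json.load(f)

Draft7Validator.check_schema(schema)  # raises if the schema itself is malformed

params = {
    "/**": {
        "ros__parameters": {
            "model_params": {
                "class_names": ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"],
                "point_feature_size": 4,
                "max_voxel_size": 40000,
                "point_cloud_range": [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0],
                "voxel_size": [0.32, 0.32, 10.0],
                "down_sample_factor": 1,
                "encoder_in_feature_size": 9,
                "has_variance": False,
                "has_twist": False,
            }
        }
    }
}
# A $ref pointing at a definition that does not exist in this file would
# surface here as an unresolvable-reference error.
Draft7Validator(schema).validate(params)
```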
diff --git a/perception/lidar_centerpoint/schema/centerpoint_tiny.schema.json b/perception/lidar_centerpoint/schema/centerpoint_tiny.schema.json
new file mode 100644
index 0000000000000..dd2cac12f0881
--- /dev/null
+++ b/perception/lidar_centerpoint/schema/centerpoint_tiny.schema.json
@@ -0,0 +1,120 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "Parameters for CenterPoint Tiny Model",
+  "type": "object",
+  "definitions": {
+    "centerpoint_tiny": {
+      "type": "object",
+      "properties": {
+        "trt_precision": {
+          "type": "string",
+          "description": "TensorRT inference precision.",
+          "default": "fp16",
+          "enum": ["fp32", "fp16"]
+        },
+        "encoder_onnx_path": {
+          "type": "string",
+          "description": "A path to the ONNX file of the encoder network.",
+          "default": "~/autoware_data/lidar_centerpoint/pts_voxel_encoder_centerpoint_tiny.onnx"
+        },
+        "encoder_engine_path": {
+          "type": "string",
+          "description": "A path to the TensorRT engine file of the encoder network.",
+          "default": "~/autoware_data/lidar_centerpoint/pts_voxel_encoder_centerpoint_tiny.engine"
+        },
+        "head_onnx_path": {
+          "type": "string",
+          "description":"A path to the ONNX file of the head network.",
+          "default": "~/autoware_data/lidar_centerpoint/pts_backbone_neck_head_centerpoint_tiny.onnx"
+        },
+        "head_engine_path": {
+          "type": "string",
+          "description": "A path to the TensorRT engine file of the head network.",
+          "default": "~/autoware_data/lidar_centerpoint/pts_backbone_neck_head_centerpoint_tiny.engine"
+        },
+        "post_process_params": {
+          "type": "object",
+          "properties": {
+            "score_threshold": {
+              "type": "number",
+              "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored.",
+              "default": 0.35,
+              "minimum": 0.0,
+              "maximum": 1.0
+            },
+            "yaw_norm_thresholds": {
+              "type": "array",
+              "description": "An array of distance threshold values of norm of yaw [rad].",
+              "default": [0.3, 0.3, 0.3, 0.3, 0.0],
+              "items": {
+                "type": "number",
+                "minimum": 0.0,
+                "maximum": 1.0
+              }
+            },
+            "circle_nms_dist_threshold": {
+              "type": "number",
+              "description": "It specifies the distance threshold for performing Circle NMS. Detection boxes within this distance that overlap or are close to each other will be suppressed, keeping only the one with the highest score.",
+              "default": 0.5,
+              "minimum": 0.0,
+              "maximum": 1.0
+            },
+            "iou_nms_target_class_names": {
+              "type": "array",
+              "description": "An array of class names to be target in NMS.",
+              "default": ["CAR"],
+              "items": {
+                "type": "string"
+              },
+              "uniqueItems": true
+            },
+            "iou_nms_search_distance_2d": {
+              "type": "number",
+              "description": "A maximum distance value to search the nearest objects.",
+              "default": 10.0,
+              "minimum": 0.0
+            },
+            "iou_nms_threshold": {
+              "type": "number",
+              "description": "A threshold value of NMS using IoU score.",
+              "default": 0.1,
+              "minimum": 0.0,
+              "maximum": 1.0
+            }
+          }
+        },
+        "densification_params": {
+          "type": "object",
+          "description": "Parameters for pointcloud densification.",
+          "properties": {
+            "world_frame_id": {
+              "type": "string",
+              "description": "A name of frame id where world coordinates system is defined with respect to.",
+              "default": "map"
+            },
+            "num_past_frames": {
+              "type": "integer",
+              "description": "A number of past frames to be considered as same input frame.",
+              "default": 1,
+              "minimum": 0
+            }
+          }
+        }
+      },
+      "required": ["post_process_params", "densification_params"]
+    }
+  },
+  "properties": {
+    "/**": {
+      "type": "object",
+      "properties": {
+        "ros__parameters": {
+          "$ref": "#/definitions/centerpoint_tiny"
+        }
+      },
+      "required": ["ros__parameters"]
+    }
+  },
+  "required": ["/**"]
+}
+
\ No newline at end of file
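The `circle_nms_dist_threshold` entry above describes a distance-based suppression: among detections whose centers lie within the threshold radius of each other, only the highest-scoring box survives. The idea in a few lines of Python — an illustrative sketch of the algorithm, not the node's actual implementation:

```python
# Circle NMS sketch: keep the highest-scoring centers, drop any center that
# lies within dist_threshold of an already-kept one. Illustrative only.
from typing import List, Tuple


def circle_nms(
    centers: List[Tuple[float, float]], scores: List[float], dist_threshold: float
) -> List[int]:
    # Visit detections from highest to lowest score.
    order = sorted(range(len(centers)), key=lambda i: scores[i], reverse=True)
    keep: List[int] = []
    for i in order:
        xi, yi = centers[i]
        if all(
            (xi - centers[j][0]) ** 2 + (yi - centers[j][1]) ** 2 > dist_threshold**2
            for j in keep
        ):
            keep.append(i)
    return keep


# Two near-duplicate detections and one far away: the weaker duplicate is dropped.
print(circle_nms([(0.0, 0.0), (0.3, 0.0), (5.0, 5.0)], [0.9, 0.8, 0.7], 0.5))  # [0, 2]
```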
diff --git a/perception/lidar_centerpoint/schema/centerpoint_tiny_ml_package.schema.json b/perception/lidar_centerpoint/schema/centerpoint_tiny_ml_package.schema.json
index 55329e21ea8ab..e6af9afe21b18 100644
--- a/perception/lidar_centerpoint/schema/centerpoint_tiny_ml_package.schema.json
+++ b/perception/lidar_centerpoint/schema/centerpoint_tiny_ml_package.schema.json
@@ -46,6 +46,16 @@
         "type": "integer",
         "description": "A size of encoder input feature channels.",
         "default": 9
+      },
+      "has_variance": {
+        "type": "boolean",
+        "description": "determines whether the covariance matrices for the object's position and velocity information are filled in.",
+        "default": false
+      },
+      "has_twist": {
+        "type": "boolean",
+        "description": "Indicates whether the model outputs twist value.",
+        "default": false
       }
     }
   }
diff --git a/perception/lidar_centerpoint/schema/detection_class_remapper.schema.json b/perception/lidar_centerpoint/schema/detection_class_remapper.schema.json
new file mode 100644
index 0000000000000..3f50a9b8f55d1
--- /dev/null
+++ b/perception/lidar_centerpoint/schema/detection_class_remapper.schema.json
@@ -0,0 +1,47 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "Parameters for Detection Class Remapper Node",
+  "type": "object",
+  "definitions": {
+    "detection_class_remapper": {
+      "type": "object",
+      "properties": {
+        "allow_remapping_by_area_matrix": {
+          "type": "array",
+          "description": "A matrix that defines how to remap original classes to new classes based on area.",
+          "items": {
+            "type": "integer"
+          }
+        },
+        "min_area_matrix": {
+          "type": "array",
+          "description": "A matrix that defines the minimum area thresholds for each class.",
+          "items": {
+            "type": "number"
+          }
+        },
+        "max_area_matrix": {
+          "type": "array",
+          "description": "A matrix that defines the maximum area thresholds for each class.",
+          "items": {
+            "type": "number"
+          }
+        }
+      },
+      "required": ["allow_remapping_by_area_matrix", "min_area_matrix", "max_area_matrix"]
+    }
+  },
+  "properties": {
+
"/**": { + "type": "object", + "properties": { + "ros__parameters": { + "$ref": "#/definitions/detection_class_remapper" + } + }, + "required": ["ros__parameters"] + } + }, + "required": ["/**"] + } + \ No newline at end of file From 31161554e965c12a7c921870dca8084ac03f37a3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 4 Jun 2024 18:46:44 +0000 Subject: [PATCH 4/7] style(pre-commit): autofix --- perception/lidar_centerpoint/README.md | 1 - .../schema/centerpoint.schema.json | 2 +- .../schema/centerpoint_sigma_ml_package.json | 151 ++++++------ .../schema/centerpoint_tiny.schema.json | 219 +++++++++--------- .../detection_class_remapper.schema.json | 83 ++++--- 5 files changed, 226 insertions(+), 230 deletions(-) diff --git a/perception/lidar_centerpoint/README.md b/perception/lidar_centerpoint/README.md index 1744e68a4d675..6a50851f080fd 100644 --- a/perception/lidar_centerpoint/README.md +++ b/perception/lidar_centerpoint/README.md @@ -35,7 +35,6 @@ We trained the models using . {{ json_to_markdown("perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.schema.json") }} {{ json_to_markdown("perception/lidar_centerpoint/schema/detection_class_remapper.schema.json") }} - ### The `build_only` option The `lidar_centerpoint` node has `build_only` option to build the TensorRT engine file from the ONNX file. diff --git a/perception/lidar_centerpoint/schema/centerpoint.schema.json b/perception/lidar_centerpoint/schema/centerpoint.schema.json index 137c810b95345..609cb8390867d 100644 --- a/perception/lidar_centerpoint/schema/centerpoint.schema.json +++ b/perception/lidar_centerpoint/schema/centerpoint.schema.json @@ -24,7 +24,7 @@ }, "head_onnx_path": { "type": "string", - "description":"A path to the ONNX file of the head network.", + "description": "A path to the ONNX file of the head network.", "default": "~/autoware_data/lidar_centerpoint/pts_backbone_neck_head_centerpoint_tiny.onnx" }, "head_engine_path": { diff --git a/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.json b/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.json index de03014d0f19c..6f9fb1f647cce 100644 --- a/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.json +++ b/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.json @@ -1,79 +1,78 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Parameters for Centerpoint Sigma ML model", - "type": "object", - "definitions": { - "centerpoint_sigma_ml_package": { - "type": "object", - "properties": { - "model_params": { - "type": "object", - "description": "Parameters for model configuration.", - "properties": { - "class_names": { - "type": "array", - "description": "An array of class names will be predicted.", - "default": ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"], - "uniqueItems": true - }, - "point_feature_size": { - "type": "integer", - "description": "A number of channels of point feature layer.", - "default": 4 - }, - "max_voxel_size": { - "type": "integer", - "description": "A maximum size of voxel grid.", - "default": 40000 - }, - "point_cloud_range": { - "type": "array", - "description": "An array of distance ranges of each class, this must have same length with `class_names`.", - "default": [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0] - }, - "voxel_size": { - "type": "array", - "description": "An array of voxel grid sizes for PointPainting, this must have same length with `paint_class_names`.", - "default": 
[0.32, 0.32, 10.0] - }, - "down_sample_factor": { - "type": "integer", - "description": "A scale factor of downsampling points", - "default": 1, - "minimum": 1 - }, - "encoder_in_feature_size": { - "type": "integer", - "description": "A size of encoder input feature channels.", - "default": 9 - }, - "has_variance": { - "type": "boolean", - "description": "determines whether the covariance matrices for the object's position and velocity information are filled in.", - "default": false - }, - "has_twist": { - "type": "boolean", - "description": "Indicates whether the model outputs twist value.", - "default": false - } + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Parameters for Centerpoint Sigma ML model", + "type": "object", + "definitions": { + "centerpoint_sigma_ml_package": { + "type": "object", + "properties": { + "model_params": { + "type": "object", + "description": "Parameters for model configuration.", + "properties": { + "class_names": { + "type": "array", + "description": "An array of class names will be predicted.", + "default": ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"], + "uniqueItems": true + }, + "point_feature_size": { + "type": "integer", + "description": "A number of channels of point feature layer.", + "default": 4 + }, + "max_voxel_size": { + "type": "integer", + "description": "A maximum size of voxel grid.", + "default": 40000 + }, + "point_cloud_range": { + "type": "array", + "description": "An array of distance ranges of each class, this must have same length with `class_names`.", + "default": [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0] + }, + "voxel_size": { + "type": "array", + "description": "An array of voxel grid sizes for PointPainting, this must have same length with `paint_class_names`.", + "default": [0.32, 0.32, 10.0] + }, + "down_sample_factor": { + "type": "integer", + "description": "A scale factor of downsampling points", + "default": 1, + "minimum": 1 + }, + "encoder_in_feature_size": { + "type": "integer", + "description": "A size of encoder input feature channels.", + "default": 9 + }, + "has_variance": { + "type": "boolean", + "description": "determines whether the covariance matrices for the object's position and velocity information are filled in.", + "default": false + }, + "has_twist": { + "type": "boolean", + "description": "Indicates whether the model outputs twist value.", + "default": false } } - }, - "required": ["model_params"] - } - }, - "properties": { - "/**": { - "type": "object", - "properties": { - "ros__parameters": { - "$ref": "#/definitions/centerpoint_tiny_ml_package" - } - }, - "required": ["ros__parameters"] - } - }, - "required": ["/**"] - } - \ No newline at end of file + } + }, + "required": ["model_params"] + } + }, + "properties": { + "/**": { + "type": "object", + "properties": { + "ros__parameters": { + "$ref": "#/definitions/centerpoint_tiny_ml_package" + } + }, + "required": ["ros__parameters"] + } + }, + "required": ["/**"] +} diff --git a/perception/lidar_centerpoint/schema/centerpoint_tiny.schema.json b/perception/lidar_centerpoint/schema/centerpoint_tiny.schema.json index dd2cac12f0881..a3ab294c5795b 100644 --- a/perception/lidar_centerpoint/schema/centerpoint_tiny.schema.json +++ b/perception/lidar_centerpoint/schema/centerpoint_tiny.schema.json @@ -1,120 +1,119 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Parameters for CenterPoint Tiny Model", - "type": "object", - "definitions": { - "centerpoint_tiny": { - "type": "object", - "properties": { - "trt_precision": { 
- "type": "string", - "description": "TensorRT inference precision.", - "default": "fp16", - "enum": ["fp32", "fp16"] - }, - "encoder_onnx_path": { - "type": "string", - "description": "A path to the ONNX file of the encoder network.", - "default": "~/autoware_data/lidar_centerpoint/pts_voxel_encoder_centerpoint_tiny.onnx" - }, - "encoder_engine_path": { - "type": "string", - "description": "A path to the TensorRT engine file of the encoder network.", - "default": "~/autoware_data/lidar_centerpoint/pts_voxel_encoder_centerpoint_tiny.engine" - }, - "head_onnx_path": { - "type": "string", - "description":"A path to the ONNX file of the head network.", - "default": "~/autoware_data/lidar_centerpoint/pts_backbone_neck_head_centerpoint_tiny.onnx" - }, - "head_engine_path": { - "type": "string", - "description": "A path to the TensorRT engine file of the head network.", - "default": "~/autoware_data/lidar_centerpoint/pts_backbone_neck_head_centerpoint_tiny.engine" - }, - "post_process_params": { - "type": "object", - "properties": { - "score_threshold": { - "type": "number", - "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored.", - "default": 0.35, - "minimum": 0.0, - "maximum": 1.0 - }, - "yaw_norm_thresholds": { - "type": "array", - "description": "An array of distance threshold values of norm of yaw [rad].", - "default": [0.3, 0.3, 0.3, 0.3, 0.0], - "items": { - "type": "number", - "minimum": 0.0, - "maximum": 1.0 - } - }, - "circle_nms_dist_threshold": { - "type": "number", - "description": "It specifies the distance threshold for performing Circle NMS. Detection boxes within this distance that overlap or are close to each other will be suppressed, keeping only the one with the highest score.", - "default": 0.5, - "minimum": 0.0, - "maximum": 1.0 - }, - "iou_nms_target_class_names": { - "type": "array", - "description": "An array of class names to be target in NMS.", - "default": ["CAR"], - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "iou_nms_search_distance_2d": { - "type": "number", - "description": "A maximum distance value to search the nearest objects.", - "default": 10.0, - "minimum": 0.0 - }, - "iou_nms_threshold": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Parameters for CenterPoint Tiny Model", + "type": "object", + "definitions": { + "centerpoint_tiny": { + "type": "object", + "properties": { + "trt_precision": { + "type": "string", + "description": "TensorRT inference precision.", + "default": "fp16", + "enum": ["fp32", "fp16"] + }, + "encoder_onnx_path": { + "type": "string", + "description": "A path to the ONNX file of the encoder network.", + "default": "~/autoware_data/lidar_centerpoint/pts_voxel_encoder_centerpoint_tiny.onnx" + }, + "encoder_engine_path": { + "type": "string", + "description": "A path to the TensorRT engine file of the encoder network.", + "default": "~/autoware_data/lidar_centerpoint/pts_voxel_encoder_centerpoint_tiny.engine" + }, + "head_onnx_path": { + "type": "string", + "description": "A path to the ONNX file of the head network.", + "default": "~/autoware_data/lidar_centerpoint/pts_backbone_neck_head_centerpoint_tiny.onnx" + }, + "head_engine_path": { + "type": "string", + "description": "A path to the TensorRT engine file of the head network.", + "default": "~/autoware_data/lidar_centerpoint/pts_backbone_neck_head_centerpoint_tiny.engine" + }, + "post_process_params": { + "type": "object", + "properties": { + "score_threshold": { 
+ "type": "number", + "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored.", + "default": 0.35, + "minimum": 0.0, + "maximum": 1.0 + }, + "yaw_norm_thresholds": { + "type": "array", + "description": "An array of distance threshold values of norm of yaw [rad].", + "default": [0.3, 0.3, 0.3, 0.3, 0.0], + "items": { "type": "number", - "description": "A threshold value of NMS using IoU score.", - "default": 0.1, "minimum": 0.0, "maximum": 1.0 } - } - }, - "densification_params": { - "type": "object", - "description": "Parameters for pointcloud densification.", - "properties": { - "world_frame_id": { - "type": "string", - "description": "A name of frame id where world coordinates system is defined with respect to.", - "default": "map" + }, + "circle_nms_dist_threshold": { + "type": "number", + "description": "It specifies the distance threshold for performing Circle NMS. Detection boxes within this distance that overlap or are close to each other will be suppressed, keeping only the one with the highest score.", + "default": 0.5, + "minimum": 0.0, + "maximum": 1.0 + }, + "iou_nms_target_class_names": { + "type": "array", + "description": "An array of class names to be target in NMS.", + "default": ["CAR"], + "items": { + "type": "string" }, - "num_past_frames": { - "type": "integer", - "description": "A number of past frames to be considered as same input frame.", - "default": 1, - "minimum": 0 - } + "uniqueItems": true + }, + "iou_nms_search_distance_2d": { + "type": "number", + "description": "A maximum distance value to search the nearest objects.", + "default": 10.0, + "minimum": 0.0 + }, + "iou_nms_threshold": { + "type": "number", + "description": "A threshold value of NMS using IoU score.", + "default": 0.1, + "minimum": 0.0, + "maximum": 1.0 } } }, - "required": ["post_process_params", "densification_params"] - } - }, - "properties": { - "/**": { - "type": "object", - "properties": { - "ros__parameters": { - "$ref": "#/definitions/centerpoint_tiny" + "densification_params": { + "type": "object", + "description": "Parameters for pointcloud densification.", + "properties": { + "world_frame_id": { + "type": "string", + "description": "A name of frame id where world coordinates system is defined with respect to.", + "default": "map" + }, + "num_past_frames": { + "type": "integer", + "description": "A number of past frames to be considered as same input frame.", + "default": 1, + "minimum": 0 + } } - }, - "required": ["ros__parameters"] - } - }, - "required": ["/**"] - } - \ No newline at end of file + } + }, + "required": ["post_process_params", "densification_params"] + } + }, + "properties": { + "/**": { + "type": "object", + "properties": { + "ros__parameters": { + "$ref": "#/definitions/centerpoint_tiny" + } + }, + "required": ["ros__parameters"] + } + }, + "required": ["/**"] +} diff --git a/perception/lidar_centerpoint/schema/detection_class_remapper.schema.json b/perception/lidar_centerpoint/schema/detection_class_remapper.schema.json index 3f50a9b8f55d1..9d9f9dc81a768 100644 --- a/perception/lidar_centerpoint/schema/detection_class_remapper.schema.json +++ b/perception/lidar_centerpoint/schema/detection_class_remapper.schema.json @@ -1,47 +1,46 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Parameters for Detection Class Remapper Node", - "type": "object", - "definitions": { - "detection_class_remapper": { - "type": "object", - "properties": { - "allow_remapping_by_area_matrix": { 
- "type": "array", - "description": "A matrix that defines how to remap original classes to new classes based on area.", - "items": { - "type": "integer" - } - }, - "min_area_matrix": { - "type": "array", - "description": "A matrix that defines the minimum area thresholds for each class.", - "items": { - "type": "number" - } - }, - "max_area_matrix": { - "type": "array", - "description": "A matrix that defines the maximum area thresholds for each class.", - "items": { - "type": "number" - } + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Parameters for Detection Class Remapper Node", + "type": "object", + "definitions": { + "detection_class_remapper": { + "type": "object", + "properties": { + "allow_remapping_by_area_matrix": { + "type": "array", + "description": "A matrix that defines how to remap original classes to new classes based on area.", + "items": { + "type": "integer" } }, - "required": ["allow_remapping_by_area_matrix", "min_area_matrix", "max_area_matrix"] - } - }, - "properties": { - "/**": { - "type": "object", - "properties": { - "ros__parameters": { - "$ref": "#/definitions/detection_class_remapper" + "min_area_matrix": { + "type": "array", + "description": "A matrix that defines the minimum area thresholds for each class.", + "items": { + "type": "number" } }, - "required": ["ros__parameters"] - } - }, - "required": ["/**"] - } - \ No newline at end of file + "max_area_matrix": { + "type": "array", + "description": "A matrix that defines the maximum area thresholds for each class.", + "items": { + "type": "number" + } + } + }, + "required": ["allow_remapping_by_area_matrix", "min_area_matrix", "max_area_matrix"] + } + }, + "properties": { + "/**": { + "type": "object", + "properties": { + "ros__parameters": { + "$ref": "#/definitions/detection_class_remapper" + } + }, + "required": ["ros__parameters"] + } + }, + "required": ["/**"] +} From a06ad1a7314e810c62b8625c682e6dd92e431b1d Mon Sep 17 00:00:00 2001 From: Boyang Date: Wed, 5 Jun 2024 10:07:51 -0400 Subject: [PATCH 5/7] renamed one schema.json file --- .../centerpoint_sigma_ml_package.schema.json | 79 +++++++++++++++++++ perception/multi_object_tracker/README.md | 20 ++--- .../data_association/data_association.hpp | 4 +- .../debugger/debug_object.hpp | 6 +- .../debugger/debugger.hpp | 10 +-- .../multi_object_tracker_core.hpp | 10 +-- .../processor/input_manager.hpp | 6 +- .../processor/processor.hpp | 14 ++-- .../tracker/model/bicycle_tracker.hpp | 16 ++-- .../tracker/model/big_vehicle_tracker.hpp | 16 ++-- .../model/multiple_vehicle_tracker.hpp | 6 +- .../tracker/model/normal_vehicle_tracker.hpp | 16 ++-- .../tracker/model/pass_through_tracker.hpp | 10 +-- .../model/pedestrian_and_bicycle_tracker.hpp | 6 +- .../tracker/model/pedestrian_tracker.hpp | 16 ++-- .../tracker/model/tracker_base.hpp | 21 +++-- .../tracker/model/unknown_tracker.hpp | 16 ++-- .../multi_object_tracker/utils/utils.hpp | 29 ++++--- perception/multi_object_tracker/package.xml | 2 +- .../src/data_association/data_association.cpp | 6 +- .../src/debugger/debug_object.cpp | 6 +- .../src/debugger/debugger.cpp | 6 +- .../src/multi_object_tracker_core.cpp | 2 +- .../src/processor/input_manager.cpp | 2 +- .../src/processor/processor.cpp | 23 +++--- .../src/tracker/model/bicycle_tracker.cpp | 36 ++++----- .../src/tracker/model/big_vehicle_tracker.cpp | 43 +++++----- .../model/multiple_vehicle_tracker.cpp | 10 +-- .../tracker/model/normal_vehicle_tracker.cpp | 43 +++++----- .../tracker/model/pass_through_tracker.cpp | 6 +- 
 .../model/pedestrian_and_bicycle_tracker.cpp  | 10 +--
 .../src/tracker/model/pedestrian_tracker.cpp  | 42 +++++-----
 .../src/tracker/model/tracker_base.cpp        | 10 +--
 .../src/tracker/model/unknown_tracker.cpp     | 17 ++--
 34 files changed, 318 insertions(+), 247 deletions(-)
 create mode 100644 perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.schema.json

diff --git a/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.schema.json b/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.schema.json
new file mode 100644
index 0000000000000..de03014d0f19c
--- /dev/null
+++ b/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.schema.json
@@ -0,0 +1,79 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "Parameters for Centerpoint Sigma ML model",
+  "type": "object",
+  "definitions": {
+    "centerpoint_sigma_ml_package": {
+      "type": "object",
+      "properties": {
+        "model_params": {
+          "type": "object",
+          "description": "Parameters for model configuration.",
+          "properties": {
+            "class_names": {
+              "type": "array",
+              "description": "An array of class names will be predicted.",
+              "default": ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"],
+              "uniqueItems": true
+            },
+            "point_feature_size": {
+              "type": "integer",
+              "description": "A number of channels of point feature layer.",
+              "default": 4
+            },
+            "max_voxel_size": {
+              "type": "integer",
+              "description": "A maximum size of voxel grid.",
+              "default": 40000
+            },
+            "point_cloud_range": {
+              "type": "array",
+              "description": "An array of distance ranges of each class, this must have same length with `class_names`.",
+              "default": [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0]
+            },
+            "voxel_size": {
+              "type": "array",
+              "description": "An array of voxel grid sizes for PointPainting, this must have same length with `paint_class_names`.",
+              "default": [0.32, 0.32, 10.0]
+            },
+            "down_sample_factor": {
+              "type": "integer",
+              "description": "A scale factor of downsampling points",
+              "default": 1,
+              "minimum": 1
+            },
+            "encoder_in_feature_size": {
+              "type": "integer",
+              "description": "A size of encoder input feature channels.",
+              "default": 9
+            },
+            "has_variance": {
+              "type": "boolean",
+              "description": "determines whether the covariance matrices for the object's position and velocity information are filled in.",
+              "default": false
+            },
+            "has_twist": {
+              "type": "boolean",
+              "description": "Indicates whether the model outputs twist value.",
+              "default": false
+            }
+          }
+        }
+      },
+      "required": ["model_params"]
+    }
+  },
+  "properties": {
+    "/**": {
+      "type": "object",
+      "properties": {
+        "ros__parameters": {
+          "$ref": "#/definitions/centerpoint_sigma_ml_package"
+        }
+      },
+      "required": ["ros__parameters"]
+    }
+  },
+  "required": ["/**"]
+}
+
\ No newline at end of file
diff --git a/perception/multi_object_tracker/README.md b/perception/multi_object_tracker/README.md
index ad9e43355aebf..b5d965e1232ed 100644
--- a/perception/multi_object_tracker/README.md
+++ b/perception/multi_object_tracker/README.md
@@ -57,9 +57,9 @@ Multiple inputs are pre-defined in the input channel parameters (described below

 ### Output

-| Name       | Type                                                  | Description     |
-| ---------- | ----------------------------------------------------- | --------------- |
-| `~/output` | `autoware_auto_perception_msgs::msg::TrackedObjects`  | tracked objects |
+| Name       | Type                                            | Description     |
+| ---------- | ----------------------------------------------- | --------------- |
+| `~/output` | `autoware_perception_msgs::msg::TrackedObjects` | tracked objects |

 ## Parameters

@@ -67,13 +67,13 @@ Multiple inputs are pre-defined in the input channel parameters (described below
 ### Input Channel parameters

 Available input channels are defined in [input_channels.param.yaml](config/input_channels.param.yaml).

-| Name                              | Type                                                   | Description                           |
-| --------------------------------- | ------------------------------------------------------ | ------------------------------------- |
-| `<channel>`                       |                                                        | the name of channel                   |
-| `<channel>.topic`                 | `autoware_auto_perception_msgs::msg::DetectedObjects`  | detected objects                      |
-| `<channel>.can_spawn_new_tracker` | `bool`                                                 | a switch allow to spawn a new tracker |
-| `<channel>.optional.name`         | `std::string`                                          | channel name for analysis             |
-| `<channel>.optional.short_name`   | `std::string`                                          | short name for visualization          |
+| Name                              | Type                                             | Description                               |
+| --------------------------------- | ------------------------------------------------ | ----------------------------------------- |
+| `<channel>`                       |                                                  | the name of the channel                   |
+| `<channel>.topic`                 | `autoware_perception_msgs::msg::DetectedObjects` | detected objects                          |
+| `<channel>.can_spawn_new_tracker` | `bool`                                           | a switch to allow spawning a new tracker  |
+| `<channel>.optional.name`         | `std::string`                                    | channel name for analysis                 |
+| `<channel>.optional.short_name`   | `std::string`                                    | short name for visualization              |

 ### Core Parameters
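The type changes in these tables are the visible part of the package-wide migration from `autoware_auto_perception_msgs` to `autoware_perception_msgs`; downstream code only needs its imports and type names switched. A minimal subscriber sketch against the renamed package — the topic name is illustrative, not prescribed by this patch:

```python
# Minimal rclpy subscriber using the renamed message package. Only the
# import line changes during the migration; the topic name is illustrative.
import rclpy
from rclpy.node import Node

from autoware_perception_msgs.msg import TrackedObjects


class TrackedObjectsListener(Node):
    def __init__(self) -> None:
        super().__init__("tracked_objects_listener")
        self.create_subscription(
            TrackedObjects,
            "/perception/object_recognition/tracking/objects",
            self.callback,
            1,
        )

    def callback(self, msg: TrackedObjects) -> None:
        # Each tracked object carries a UUID, classification, kinematics and shape.
        self.get_logger().info(f"tracking {len(msg.objects)} objects")


def main() -> None:
    rclpy.init()
    rclpy.spin(TrackedObjectsListener())


if __name__ == "__main__":
    main()
```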
diff --git a/perception/multi_object_tracker/include/multi_object_tracker/data_association/data_association.hpp b/perception/multi_object_tracker/include/multi_object_tracker/data_association/data_association.hpp
index a29fcdaa4e1e5..56f4c3ac45e52 100644
--- a/perception/multi_object_tracker/include/multi_object_tracker/data_association/data_association.hpp
+++ b/perception/multi_object_tracker/include/multi_object_tracker/data_association/data_association.hpp
@@ -31,7 +31,7 @@
 #include
 #include

-#include <autoware_auto_perception_msgs/msg/detected_objects.hpp>
+#include <autoware_perception_msgs/msg/detected_objects.hpp>

 class DataAssociation
{
@@ -55,7 +55,7 @@ class DataAssociation
     const Eigen::MatrixXd & src, std::unordered_map<int, int> & direct_assignment,
     std::unordered_map<int, int> & reverse_assignment);
   Eigen::MatrixXd calcScoreMatrix(
-    const autoware_auto_perception_msgs::msg::DetectedObjects & measurements,
+    const autoware_perception_msgs::msg::DetectedObjects & measurements,
     const std::list<std::shared_ptr<Tracker>> & trackers);
   virtual ~DataAssociation() {}
 };
diff --git a/perception/multi_object_tracker/include/multi_object_tracker/debugger/debug_object.hpp b/perception/multi_object_tracker/include/multi_object_tracker/debugger/debug_object.hpp
index 6caa69181dd9d..c685024d6ef8b 100644
--- a/perception/multi_object_tracker/include/multi_object_tracker/debugger/debug_object.hpp
+++ b/perception/multi_object_tracker/include/multi_object_tracker/debugger/debug_object.hpp
@@ -21,8 +21,8 @@
 #include
 #include

 #include "unique_identifier_msgs/msg/uuid.hpp"
-#include <autoware_auto_perception_msgs/msg/detected_objects.hpp>
-#include <autoware_auto_perception_msgs/msg/tracked_objects.hpp>
+#include <autoware_perception_msgs/msg/detected_objects.hpp>
+#include <autoware_perception_msgs/msg/tracked_objects.hpp>

 #include
 #include
@@ -87,7 +87,7 @@ class TrackerObjectDebugger
   void collect(
     const rclcpp::Time & message_time, const std::list<std::shared_ptr<Tracker>> & list_tracker,
     const uint & channel_index,
-    const autoware_auto_perception_msgs::msg::DetectedObjects & detected_objects,
+    const autoware_perception_msgs::msg::DetectedObjects & detected_objects,
     const std::unordered_map<int, int> & direct_assignment,
     const std::unordered_map<int, int> & reverse_assignment);

diff --git a/perception/multi_object_tracker/include/multi_object_tracker/debugger/debugger.hpp b/perception/multi_object_tracker/include/multi_object_tracker/debugger/debugger.hpp
index 1c632f5bebba0..9ab5576faccd8 100644
--- a/perception/multi_object_tracker/include/multi_object_tracker/debugger/debugger.hpp
+++ b/perception/multi_object_tracker/include/multi_object_tracker/debugger/debugger.hpp
@@ -23,8 +23,8 @@
#include #include -#include -#include +#include +#include #include #include @@ -54,7 +54,7 @@ class TrackerDebugger // ROS node, publishers rclcpp::Node & node_; - rclcpp::Publisher::SharedPtr + rclcpp::Publisher::SharedPtr debug_tentative_objects_pub_; std::unique_ptr processing_time_publisher_; rclcpp::Publisher::SharedPtr debug_objects_markers_pub_; @@ -80,7 +80,7 @@ class TrackerDebugger // Object publishing bool shouldPublishTentativeObjects() const { return debug_settings_.publish_tentative_objects; } void publishTentativeObjects( - const autoware_auto_perception_msgs::msg::TrackedObjects & tentative_objects) const; + const autoware_perception_msgs::msg::TrackedObjects & tentative_objects) const; // Time measurement void startMeasurementTime( @@ -98,7 +98,7 @@ class TrackerDebugger void collectObjectInfo( const rclcpp::Time & message_time, const std::list> & list_tracker, const uint & channel_index, - const autoware_auto_perception_msgs::msg::DetectedObjects & detected_objects, + const autoware_perception_msgs::msg::DetectedObjects & detected_objects, const std::unordered_map & direct_assignment, const std::unordered_map & reverse_assignment); void publishObjectsMarkers(); diff --git a/perception/multi_object_tracker/include/multi_object_tracker/multi_object_tracker_core.hpp b/perception/multi_object_tracker/include/multi_object_tracker/multi_object_tracker_core.hpp index 3daeaa2a46322..569bc4f43a3d4 100644 --- a/perception/multi_object_tracker/include/multi_object_tracker/multi_object_tracker_core.hpp +++ b/perception/multi_object_tracker/include/multi_object_tracker/multi_object_tracker_core.hpp @@ -27,8 +27,8 @@ #include -#include -#include +#include +#include #include #include @@ -55,9 +55,9 @@ namespace multi_object_tracker { -using DetectedObject = autoware_auto_perception_msgs::msg::DetectedObject; -using DetectedObjects = autoware_auto_perception_msgs::msg::DetectedObjects; -using TrackedObjects = autoware_auto_perception_msgs::msg::TrackedObjects; +using DetectedObject = autoware_perception_msgs::msg::DetectedObject; +using DetectedObjects = autoware_perception_msgs::msg::DetectedObjects; +using TrackedObjects = autoware_perception_msgs::msg::TrackedObjects; class MultiObjectTracker : public rclcpp::Node { diff --git a/perception/multi_object_tracker/include/multi_object_tracker/processor/input_manager.hpp b/perception/multi_object_tracker/include/multi_object_tracker/processor/input_manager.hpp index f46c088c6d508..29b0c18ae766f 100644 --- a/perception/multi_object_tracker/include/multi_object_tracker/processor/input_manager.hpp +++ b/perception/multi_object_tracker/include/multi_object_tracker/processor/input_manager.hpp @@ -17,7 +17,7 @@ #include "rclcpp/rclcpp.hpp" -#include "autoware_auto_perception_msgs/msg/detected_objects.hpp" +#include "autoware_perception_msgs/msg/detected_objects.hpp" #include #include @@ -28,7 +28,7 @@ namespace multi_object_tracker { -using DetectedObjects = autoware_auto_perception_msgs::msg::DetectedObjects; +using DetectedObjects = autoware_perception_msgs::msg::DetectedObjects; using ObjectsList = std::vector>; struct InputChannel @@ -52,7 +52,7 @@ class InputStream func_trigger_ = func_trigger; } - void onMessage(const autoware_auto_perception_msgs::msg::DetectedObjects::ConstSharedPtr msg); + void onMessage(const autoware_perception_msgs::msg::DetectedObjects::ConstSharedPtr msg); void updateTimingStatus(const rclcpp::Time & now, const rclcpp::Time & objects_time); bool isTimeInitialized() const { return initial_count_ > 0; } diff --git 
a/perception/multi_object_tracker/include/multi_object_tracker/processor/processor.hpp b/perception/multi_object_tracker/include/multi_object_tracker/processor/processor.hpp index 310871dfcb07a..b2f5f41c81f0a 100644 --- a/perception/multi_object_tracker/include/multi_object_tracker/processor/processor.hpp +++ b/perception/multi_object_tracker/include/multi_object_tracker/processor/processor.hpp @@ -19,8 +19,8 @@ #include -#include -#include +#include +#include #include #include @@ -39,11 +39,11 @@ class TrackerProcessor // tracker processes void predict(const rclcpp::Time & time); void update( - const autoware_auto_perception_msgs::msg::DetectedObjects & transformed_objects, + const autoware_perception_msgs::msg::DetectedObjects & transformed_objects, const geometry_msgs::msg::Transform & self_transform, const std::unordered_map & direct_assignment, const uint & channel_index); void spawn( - const autoware_auto_perception_msgs::msg::DetectedObjects & detected_objects, + const autoware_perception_msgs::msg::DetectedObjects & detected_objects, const geometry_msgs::msg::Transform & self_transform, const std::unordered_map & reverse_assignment, const uint & channel_index); void prune(const rclcpp::Time & time); @@ -52,10 +52,10 @@ class TrackerProcessor bool isConfidentTracker(const std::shared_ptr & tracker) const; void getTrackedObjects( const rclcpp::Time & time, - autoware_auto_perception_msgs::msg::TrackedObjects & tracked_objects) const; + autoware_perception_msgs::msg::TrackedObjects & tracked_objects) const; void getTentativeObjects( const rclcpp::Time & time, - autoware_auto_perception_msgs::msg::TrackedObjects & tentative_objects) const; + autoware_perception_msgs::msg::TrackedObjects & tentative_objects) const; void getExistenceProbabilities(std::vector> & existence_vectors) const; @@ -74,7 +74,7 @@ class TrackerProcessor void removeOldTracker(const rclcpp::Time & time); void removeOverlappedTracker(const rclcpp::Time & time); std::shared_ptr createNewTracker( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform, const uint & channel_index) const; }; diff --git a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/bicycle_tracker.hpp b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/bicycle_tracker.hpp index ead0d8dbf0e5a..36176b8202e72 100644 --- a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/bicycle_tracker.hpp +++ b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/bicycle_tracker.hpp @@ -27,7 +27,7 @@ class BicycleTracker : public Tracker { private: - autoware_auto_perception_msgs::msg::DetectedObject object_; + autoware_perception_msgs::msg::DetectedObject object_; rclcpp::Logger logger_; private: @@ -55,22 +55,22 @@ class BicycleTracker : public Tracker public: BicycleTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform, const size_t channel_size, const uint & channel_index); bool predict(const rclcpp::Time & time) override; bool measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, 
const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) override; - autoware_auto_perception_msgs::msg::DetectedObject getUpdatingObject( - const autoware_auto_perception_msgs::msg::DetectedObject & object, + autoware_perception_msgs::msg::DetectedObject getUpdatingObject( + const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform); - bool measureWithPose(const autoware_auto_perception_msgs::msg::DetectedObject & object); - bool measureWithShape(const autoware_auto_perception_msgs::msg::DetectedObject & object); + bool measureWithPose(const autoware_perception_msgs::msg::DetectedObject & object); + bool measureWithShape(const autoware_perception_msgs::msg::DetectedObject & object); bool getTrackedObject( const rclcpp::Time & time, - autoware_auto_perception_msgs::msg::TrackedObject & object) const override; + autoware_perception_msgs::msg::TrackedObject & object) const override; virtual ~BicycleTracker() {} }; diff --git a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/big_vehicle_tracker.hpp b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/big_vehicle_tracker.hpp index 3b509a3cb47d8..f284196e1c816 100644 --- a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/big_vehicle_tracker.hpp +++ b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/big_vehicle_tracker.hpp @@ -27,7 +27,7 @@ class BigVehicleTracker : public Tracker { private: - autoware_auto_perception_msgs::msg::DetectedObject object_; + autoware_perception_msgs::msg::DetectedObject object_; rclcpp::Logger logger_; private: @@ -60,22 +60,22 @@ class BigVehicleTracker : public Tracker public: BigVehicleTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform, const size_t channel_size, const uint & channel_index); bool predict(const rclcpp::Time & time) override; bool measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) override; - autoware_auto_perception_msgs::msg::DetectedObject getUpdatingObject( - const autoware_auto_perception_msgs::msg::DetectedObject & object, + autoware_perception_msgs::msg::DetectedObject getUpdatingObject( + const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform); - bool measureWithPose(const autoware_auto_perception_msgs::msg::DetectedObject & object); - bool measureWithShape(const autoware_auto_perception_msgs::msg::DetectedObject & object); + bool measureWithPose(const autoware_perception_msgs::msg::DetectedObject & object); + bool measureWithShape(const autoware_perception_msgs::msg::DetectedObject & object); bool getTrackedObject( const rclcpp::Time & time, - autoware_auto_perception_msgs::msg::TrackedObject & object) const override; + autoware_perception_msgs::msg::TrackedObject & object) const override; virtual ~BigVehicleTracker() {} private: diff --git a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/multiple_vehicle_tracker.hpp b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/multiple_vehicle_tracker.hpp index 
fef8caaf20d8a..b3d088da54721 100644 --- a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/multiple_vehicle_tracker.hpp +++ b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/multiple_vehicle_tracker.hpp @@ -34,17 +34,17 @@ class MultipleVehicleTracker : public Tracker public: MultipleVehicleTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform, const size_t channel_size, const uint & channel_index); bool predict(const rclcpp::Time & time) override; bool measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) override; bool getTrackedObject( const rclcpp::Time & time, - autoware_auto_perception_msgs::msg::TrackedObject & object) const override; + autoware_perception_msgs::msg::TrackedObject & object) const override; virtual ~MultipleVehicleTracker() {} }; diff --git a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/normal_vehicle_tracker.hpp b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/normal_vehicle_tracker.hpp index b7810d2471416..4c9a5a113a340 100644 --- a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/normal_vehicle_tracker.hpp +++ b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/normal_vehicle_tracker.hpp @@ -27,7 +27,7 @@ class NormalVehicleTracker : public Tracker { private: - autoware_auto_perception_msgs::msg::DetectedObject object_; + autoware_perception_msgs::msg::DetectedObject object_; rclcpp::Logger logger_; private: @@ -60,22 +60,22 @@ class NormalVehicleTracker : public Tracker public: NormalVehicleTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform, const size_t channel_size, const uint & channel_index); bool predict(const rclcpp::Time & time) override; bool measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) override; - autoware_auto_perception_msgs::msg::DetectedObject getUpdatingObject( - const autoware_auto_perception_msgs::msg::DetectedObject & object, + autoware_perception_msgs::msg::DetectedObject getUpdatingObject( + const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform); - bool measureWithPose(const autoware_auto_perception_msgs::msg::DetectedObject & object); - bool measureWithShape(const autoware_auto_perception_msgs::msg::DetectedObject & object); + bool measureWithPose(const autoware_perception_msgs::msg::DetectedObject & object); + bool measureWithShape(const autoware_perception_msgs::msg::DetectedObject & object); bool getTrackedObject( const rclcpp::Time & time, - autoware_auto_perception_msgs::msg::TrackedObject & object) const override; + autoware_perception_msgs::msg::TrackedObject & object) const override; virtual ~NormalVehicleTracker() {} private: diff --git 
a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/pass_through_tracker.hpp b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/pass_through_tracker.hpp index 0f892098a373a..605b3ae462cec 100644 --- a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/pass_through_tracker.hpp +++ b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/pass_through_tracker.hpp @@ -26,23 +26,23 @@ class PassThroughTracker : public Tracker { private: - autoware_auto_perception_msgs::msg::DetectedObject object_; - autoware_auto_perception_msgs::msg::DetectedObject prev_observed_object_; + autoware_perception_msgs::msg::DetectedObject object_; + autoware_perception_msgs::msg::DetectedObject prev_observed_object_; rclcpp::Logger logger_; rclcpp::Time last_update_time_; public: PassThroughTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform, const size_t channel_size, const uint & channel_index); bool predict(const rclcpp::Time & time) override; bool measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) override; bool getTrackedObject( const rclcpp::Time & time, - autoware_auto_perception_msgs::msg::TrackedObject & object) const override; + autoware_perception_msgs::msg::TrackedObject & object) const override; virtual ~PassThroughTracker() {} }; diff --git a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/pedestrian_and_bicycle_tracker.hpp b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/pedestrian_and_bicycle_tracker.hpp index 490f6d93c6f6a..ad81c504d3719 100644 --- a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/pedestrian_and_bicycle_tracker.hpp +++ b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/pedestrian_and_bicycle_tracker.hpp @@ -33,17 +33,17 @@ class PedestrianAndBicycleTracker : public Tracker public: PedestrianAndBicycleTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform, const size_t channel_size, const uint & channel_index); bool predict(const rclcpp::Time & time) override; bool measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) override; bool getTrackedObject( const rclcpp::Time & time, - autoware_auto_perception_msgs::msg::TrackedObject & object) const override; + autoware_perception_msgs::msg::TrackedObject & object) const override; virtual ~PedestrianAndBicycleTracker() {} }; diff --git a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/pedestrian_tracker.hpp b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/pedestrian_tracker.hpp index 72f5a5a002f0f..e5d718c4b15ee 100644 --- 
a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/pedestrian_tracker.hpp +++ b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/pedestrian_tracker.hpp @@ -29,7 +29,7 @@ class PedestrianTracker : public Tracker { private: - autoware_auto_perception_msgs::msg::DetectedObject object_; + autoware_perception_msgs::msg::DetectedObject object_; rclcpp::Logger logger_; private: @@ -63,22 +63,22 @@ class PedestrianTracker : public Tracker public: PedestrianTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform, const size_t channel_size, const uint & channel_index); bool predict(const rclcpp::Time & time) override; bool measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) override; - autoware_auto_perception_msgs::msg::DetectedObject getUpdatingObject( - const autoware_auto_perception_msgs::msg::DetectedObject & object, + autoware_perception_msgs::msg::DetectedObject getUpdatingObject( + const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform); - bool measureWithPose(const autoware_auto_perception_msgs::msg::DetectedObject & object); - bool measureWithShape(const autoware_auto_perception_msgs::msg::DetectedObject & object); + bool measureWithPose(const autoware_perception_msgs::msg::DetectedObject & object); + bool measureWithShape(const autoware_perception_msgs::msg::DetectedObject & object); bool getTrackedObject( const rclcpp::Time & time, - autoware_auto_perception_msgs::msg::TrackedObject & object) const override; + autoware_perception_msgs::msg::TrackedObject & object) const override; virtual ~PedestrianTracker() {} }; diff --git a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/tracker_base.hpp b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/tracker_base.hpp index 348ba1f5d7383..56670e9b54b7e 100644 --- a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/tracker_base.hpp +++ b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/tracker_base.hpp @@ -26,8 +26,8 @@ #include #include -#include "autoware_auto_perception_msgs/msg/detected_object.hpp" -#include "autoware_auto_perception_msgs/msg/tracked_object.hpp" +#include "autoware_perception_msgs/msg/detected_object.hpp" +#include "autoware_perception_msgs/msg/tracked_object.hpp" #include "geometry_msgs/msg/point.hpp" #include "unique_identifier_msgs/msg/uuid.hpp" @@ -39,7 +39,7 @@ class Tracker unique_identifier_msgs::msg::UUID uuid_; // classification - std::vector classification_; + std::vector classification_; // existence states int no_measurement_count_; @@ -52,7 +52,7 @@ class Tracker public: Tracker( const rclcpp::Time & time, - const std::vector & classification, + const std::vector & classification, const size_t & channel_size); virtual ~Tracker() {} @@ -64,13 +64,13 @@ class Tracker return existence_vector.size() > 0; } bool updateWithMeasurement( - const autoware_auto_perception_msgs::msg::DetectedObject & object, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & measurement_time, const 
geometry_msgs::msg::Transform & self_transform, const uint & channel_index); bool updateWithoutMeasurement(const rclcpp::Time & now); // classification - std::vector getClassification() const + std::vector getClassification() const { return classification_; } @@ -91,12 +91,12 @@ class Tracker protected: unique_identifier_msgs::msg::UUID getUUID() const { return uuid_; } void setClassification( - const std::vector & classification) + const std::vector & classification) { classification_ = classification; } void updateClassification( - const std::vector & classification); + const std::vector & classification); // virtual functions public: @@ -105,13 +105,12 @@ class Tracker protected: virtual bool measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) = 0; public: virtual bool getTrackedObject( - const rclcpp::Time & time, - autoware_auto_perception_msgs::msg::TrackedObject & object) const = 0; + const rclcpp::Time & time, autoware_perception_msgs::msg::TrackedObject & object) const = 0; virtual bool predict(const rclcpp::Time & time) = 0; }; diff --git a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/unknown_tracker.hpp b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/unknown_tracker.hpp index 3ef58773b408f..43a1f2fd14842 100644 --- a/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/unknown_tracker.hpp +++ b/perception/multi_object_tracker/include/multi_object_tracker/tracker/model/unknown_tracker.hpp @@ -27,7 +27,7 @@ class UnknownTracker : public Tracker { private: - autoware_auto_perception_msgs::msg::DetectedObject object_; + autoware_perception_msgs::msg::DetectedObject object_; rclcpp::Logger logger_; private: @@ -48,22 +48,22 @@ class UnknownTracker : public Tracker public: UnknownTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform, const size_t channel_size, const uint & channel_index); bool predict(const rclcpp::Time & time) override; bool measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) override; - autoware_auto_perception_msgs::msg::DetectedObject getUpdatingObject( - const autoware_auto_perception_msgs::msg::DetectedObject & object, + autoware_perception_msgs::msg::DetectedObject getUpdatingObject( + const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform); - bool measureWithPose(const autoware_auto_perception_msgs::msg::DetectedObject & object); - bool measureWithShape(const autoware_auto_perception_msgs::msg::DetectedObject & object); + bool measureWithPose(const autoware_perception_msgs::msg::DetectedObject & object); + bool measureWithShape(const autoware_perception_msgs::msg::DetectedObject & object); bool getTrackedObject( const rclcpp::Time & time, - autoware_auto_perception_msgs::msg::TrackedObject & object) const override; + autoware_perception_msgs::msg::TrackedObject & object) const override; virtual ~UnknownTracker() {} }; diff --git 
a/perception/multi_object_tracker/include/multi_object_tracker/utils/utils.hpp b/perception/multi_object_tracker/include/multi_object_tracker/utils/utils.hpp index 5842246bc1313..2294462e05fea 100644 --- a/perception/multi_object_tracker/include/multi_object_tracker/utils/utils.hpp +++ b/perception/multi_object_tracker/include/multi_object_tracker/utils/utils.hpp @@ -22,9 +22,9 @@ #include #include -#include -#include -#include +#include +#include +#include #include #include #include @@ -97,7 +97,7 @@ enum BBOX_IDX { */ inline bool isLargeVehicleLabel(const uint8_t label) { - using Label = autoware_auto_perception_msgs::msg::ObjectClassification; + using Label = autoware_perception_msgs::msg::ObjectClassification; return label == Label::BUS || label == Label::TRUCK || label == Label::TRAILER; } @@ -163,11 +163,11 @@ inline int getNearestCornerOrSurface( * @return nearest corner or surface index */ inline int getNearestCornerOrSurfaceFromObject( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const double & yaw, + const autoware_perception_msgs::msg::DetectedObject & object, const double & yaw, const geometry_msgs::msg::Transform & self_transform) { // only work for BBOX shape - if (object.shape.type != autoware_auto_perception_msgs::msg::Shape::BOUNDING_BOX) { + if (object.shape.type != autoware_perception_msgs::msg::Shape::BOUNDING_BOX) { return BBOX_IDX::INVALID; } @@ -263,9 +263,8 @@ inline Eigen::Vector2d recoverFromTrackingPoint( */ inline void calcAnchorPointOffset( const double w, const double l, const int indx, - const autoware_auto_perception_msgs::msg::DetectedObject & input_object, const double & yaw, - autoware_auto_perception_msgs::msg::DetectedObject & offset_object, - Eigen::Vector2d & tracking_offset) + const autoware_perception_msgs::msg::DetectedObject & input_object, const double & yaw, + autoware_perception_msgs::msg::DetectedObject & offset_object, Eigen::Vector2d & tracking_offset) { // copy value offset_object = input_object; @@ -296,8 +295,8 @@ inline void calcAnchorPointOffset( * @param output_object: output bounding box objects */ inline bool convertConvexHullToBoundingBox( - const autoware_auto_perception_msgs::msg::DetectedObject & input_object, - autoware_auto_perception_msgs::msg::DetectedObject & output_object) + const autoware_perception_msgs::msg::DetectedObject & input_object, + autoware_perception_msgs::msg::DetectedObject & output_object) { // check footprint size if (input_object.shape.footprint.points.size() < 3) { @@ -340,7 +339,7 @@ inline bool convertConvexHullToBoundingBox( output_object.kinematics.pose_with_covariance.pose.position.x = new_center.x(); output_object.kinematics.pose_with_covariance.pose.position.y = new_center.y(); - output_object.shape.type = autoware_auto_perception_msgs::msg::Shape::BOUNDING_BOX; + output_object.shape.type = autoware_perception_msgs::msg::Shape::BOUNDING_BOX; output_object.shape.dimensions.x = length; output_object.shape.dimensions.y = width; output_object.shape.dimensions.z = height; @@ -349,7 +348,7 @@ inline bool convertConvexHullToBoundingBox( } inline bool getMeasurementYaw( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const double & predicted_yaw, + const autoware_perception_msgs::msg::DetectedObject & object, const double & predicted_yaw, double & measurement_yaw) { measurement_yaw = tf2::getYaw(object.kinematics.pose_with_covariance.pose.orientation); @@ -358,7 +357,7 @@ inline bool getMeasurementYaw( double limiting_delta_yaw = M_PI_2; if ( 
object.kinematics.orientation_availability == - autoware_auto_perception_msgs::msg::DetectedObjectKinematics::AVAILABLE) { + autoware_perception_msgs::msg::DetectedObjectKinematics::AVAILABLE) { limiting_delta_yaw = M_PI; } // limiting delta yaw, even the availability is unknown @@ -371,7 +370,7 @@ inline bool getMeasurementYaw( } // return false if the orientation is unknown return object.kinematics.orientation_availability != - autoware_auto_perception_msgs::msg::DetectedObjectKinematics::UNAVAILABLE; + autoware_perception_msgs::msg::DetectedObjectKinematics::UNAVAILABLE; } } // namespace utils diff --git a/perception/multi_object_tracker/package.xml b/perception/multi_object_tracker/package.xml index 1e4a90d8cc08c..4e2696700bfdc 100644 --- a/perception/multi_object_tracker/package.xml +++ b/perception/multi_object_tracker/package.xml @@ -13,7 +13,7 @@ autoware_cmake eigen3_cmake_module - autoware_auto_perception_msgs + autoware_perception_msgs diagnostic_updater eigen glog diff --git a/perception/multi_object_tracker/src/data_association/data_association.cpp b/perception/multi_object_tracker/src/data_association/data_association.cpp index f367e19c11f2a..0ff3188e069f2 100644 --- a/perception/multi_object_tracker/src/data_association/data_association.cpp +++ b/perception/multi_object_tracker/src/data_association/data_association.cpp @@ -147,7 +147,7 @@ void DataAssociation::assign( } Eigen::MatrixXd DataAssociation::calcScoreMatrix( - const autoware_auto_perception_msgs::msg::DetectedObjects & measurements, + const autoware_perception_msgs::msg::DetectedObjects & measurements, const std::list> & trackers) { Eigen::MatrixXd score_matrix = @@ -159,14 +159,14 @@ Eigen::MatrixXd DataAssociation::calcScoreMatrix( for (size_t measurement_idx = 0; measurement_idx < measurements.objects.size(); ++measurement_idx) { - const autoware_auto_perception_msgs::msg::DetectedObject & measurement_object = + const autoware_perception_msgs::msg::DetectedObject & measurement_object = measurements.objects.at(measurement_idx); const std::uint8_t measurement_label = object_recognition_utils::getHighestProbLabel(measurement_object.classification); double score = 0.0; if (can_assign_matrix_(tracker_label, measurement_label)) { - autoware_auto_perception_msgs::msg::TrackedObject tracked_object; + autoware_perception_msgs::msg::TrackedObject tracked_object; (*tracker_itr)->getTrackedObject(measurements.header.stamp, tracked_object); const double max_dist = max_dist_matrix_(tracker_label, measurement_label); diff --git a/perception/multi_object_tracker/src/debugger/debug_object.cpp b/perception/multi_object_tracker/src/debugger/debug_object.cpp index ebbeab43a155b..f166e83e136af 100644 --- a/perception/multi_object_tracker/src/debugger/debug_object.cpp +++ b/perception/multi_object_tracker/src/debugger/debug_object.cpp @@ -14,7 +14,7 @@ #include "multi_object_tracker/debugger/debug_object.hpp" -#include "autoware_auto_perception_msgs/msg/tracked_object.hpp" +#include "autoware_perception_msgs/msg/tracked_object.hpp" #include @@ -75,7 +75,7 @@ void TrackerObjectDebugger::reset() void TrackerObjectDebugger::collect( const rclcpp::Time & message_time, const std::list> & list_tracker, const uint & channel_index, - const autoware_auto_perception_msgs::msg::DetectedObjects & detected_objects, + const autoware_perception_msgs::msg::DetectedObjects & detected_objects, const std::unordered_map & direct_assignment, const std::unordered_map & /*reverse_assignment*/) { @@ -90,7 +90,7 @@ void TrackerObjectDebugger::collect( 
object_data.time = message_time; object_data.channel_id = channel_index; - autoware_auto_perception_msgs::msg::TrackedObject tracked_object; + autoware_perception_msgs::msg::TrackedObject tracked_object; (*(tracker_itr))->getTrackedObject(message_time, tracked_object); object_data.uuid = uuidToBoostUuid(tracked_object.object_id); object_data.uuid_int = uuidToInt(object_data.uuid); diff --git a/perception/multi_object_tracker/src/debugger/debugger.cpp b/perception/multi_object_tracker/src/debugger/debugger.cpp index afb0ecdc22996..7b4f9d717a927 100644 --- a/perception/multi_object_tracker/src/debugger/debugger.cpp +++ b/perception/multi_object_tracker/src/debugger/debugger.cpp @@ -29,7 +29,7 @@ TrackerDebugger::TrackerDebugger(rclcpp::Node & node, const std::string & frame_ if (debug_settings_.publish_tentative_objects) { debug_tentative_objects_pub_ = - node_.create_publisher( + node_.create_publisher( "~/debug/tentative_objects", rclcpp::QoS{1}); } @@ -83,7 +83,7 @@ void TrackerDebugger::setupDiagnostics() // Object publishing functions void TrackerDebugger::publishTentativeObjects( - const autoware_auto_perception_msgs::msg::TrackedObjects & tentative_objects) const + const autoware_perception_msgs::msg::TrackedObjects & tentative_objects) const { if (debug_settings_.publish_tentative_objects) { debug_tentative_objects_pub_->publish(tentative_objects); @@ -181,7 +181,7 @@ void TrackerDebugger::endPublishTime(const rclcpp::Time & now, const rclcpp::Tim void TrackerDebugger::collectObjectInfo( const rclcpp::Time & message_time, const std::list> & list_tracker, const uint & channel_index, - const autoware_auto_perception_msgs::msg::DetectedObjects & detected_objects, + const autoware_perception_msgs::msg::DetectedObjects & detected_objects, const std::unordered_map & direct_assignment, const std::unordered_map & reverse_assignment) { diff --git a/perception/multi_object_tracker/src/multi_object_tracker_core.cpp b/perception/multi_object_tracker/src/multi_object_tracker_core.cpp index 54c23d39f5357..70433f16125c9 100644 --- a/perception/multi_object_tracker/src/multi_object_tracker_core.cpp +++ b/perception/multi_object_tracker/src/multi_object_tracker_core.cpp @@ -67,7 +67,7 @@ boost::optional getTransformAnonymous( namespace multi_object_tracker { -using Label = autoware_auto_perception_msgs::msg::ObjectClassification; +using Label = autoware_perception_msgs::msg::ObjectClassification; MultiObjectTracker::MultiObjectTracker(const rclcpp::NodeOptions & node_options) : rclcpp::Node("multi_object_tracker", node_options), diff --git a/perception/multi_object_tracker/src/processor/input_manager.cpp b/perception/multi_object_tracker/src/processor/input_manager.cpp index 44bdf09046ccd..bee76e6c05940 100644 --- a/perception/multi_object_tracker/src/processor/input_manager.cpp +++ b/perception/multi_object_tracker/src/processor/input_manager.cpp @@ -58,7 +58,7 @@ bool InputStream::getTimestamps( } void InputStream::onMessage( - const autoware_auto_perception_msgs::msg::DetectedObjects::ConstSharedPtr msg) + const autoware_perception_msgs::msg::DetectedObjects::ConstSharedPtr msg) { const DetectedObjects objects = *msg; objects_que_.push_back(objects); diff --git a/perception/multi_object_tracker/src/processor/processor.cpp b/perception/multi_object_tracker/src/processor/processor.cpp index 08df50fa66993..ff14f7849b161 100644 --- a/perception/multi_object_tracker/src/processor/processor.cpp +++ b/perception/multi_object_tracker/src/processor/processor.cpp @@ -17,11 +17,11 @@ #include 
"multi_object_tracker/tracker/tracker.hpp" #include "object_recognition_utils/object_recognition_utils.hpp" -#include +#include #include -using Label = autoware_auto_perception_msgs::msg::ObjectClassification; +using Label = autoware_perception_msgs::msg::ObjectClassification; TrackerProcessor::TrackerProcessor( const std::map & tracker_map, const size_t & channel_size) @@ -47,7 +47,7 @@ void TrackerProcessor::predict(const rclcpp::Time & time) } void TrackerProcessor::update( - const autoware_auto_perception_msgs::msg::DetectedObjects & detected_objects, + const autoware_perception_msgs::msg::DetectedObjects & detected_objects, const geometry_msgs::msg::Transform & self_transform, const std::unordered_map & direct_assignment, const uint & channel_index) { @@ -67,7 +67,7 @@ void TrackerProcessor::update( } void TrackerProcessor::spawn( - const autoware_auto_perception_msgs::msg::DetectedObjects & detected_objects, + const autoware_perception_msgs::msg::DetectedObjects & detected_objects, const geometry_msgs::msg::Transform & self_transform, const std::unordered_map & reverse_assignment, const uint & channel_index) { @@ -84,7 +84,7 @@ void TrackerProcessor::spawn( } std::shared_ptr TrackerProcessor::createNewTracker( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform, const uint & channel_index) const { const std::uint8_t label = object_recognition_utils::getHighestProbLabel(object.classification); @@ -143,12 +143,12 @@ void TrackerProcessor::removeOverlappedTracker(const rclcpp::Time & time) { // Iterate through the list of trackers for (auto itr1 = list_tracker_.begin(); itr1 != list_tracker_.end(); ++itr1) { - autoware_auto_perception_msgs::msg::TrackedObject object1; + autoware_perception_msgs::msg::TrackedObject object1; if (!(*itr1)->getTrackedObject(time, object1)) continue; // Compare the current tracker with the remaining trackers for (auto itr2 = std::next(itr1); itr2 != list_tracker_.end(); ++itr2) { - autoware_auto_perception_msgs::msg::TrackedObject object2; + autoware_perception_msgs::msg::TrackedObject object2; if (!(*itr2)->getTrackedObject(time, object2)) continue; // Calculate the distance between the two objects @@ -220,15 +220,14 @@ bool TrackerProcessor::isConfidentTracker(const std::shared_ptr & track } void TrackerProcessor::getTrackedObjects( - const rclcpp::Time & time, - autoware_auto_perception_msgs::msg::TrackedObjects & tracked_objects) const + const rclcpp::Time & time, autoware_perception_msgs::msg::TrackedObjects & tracked_objects) const { tracked_objects.header.stamp = time; for (const auto & tracker : list_tracker_) { // Skip if the tracker is not confident if (!isConfidentTracker(tracker)) continue; // Get the tracked object, extrapolated to the given time - autoware_auto_perception_msgs::msg::TrackedObject tracked_object; + autoware_perception_msgs::msg::TrackedObject tracked_object; if (tracker->getTrackedObject(time, tracked_object)) { tracked_objects.objects.push_back(tracked_object); } @@ -237,12 +236,12 @@ void TrackerProcessor::getTrackedObjects( void TrackerProcessor::getTentativeObjects( const rclcpp::Time & time, - autoware_auto_perception_msgs::msg::TrackedObjects & tentative_objects) const + autoware_perception_msgs::msg::TrackedObjects & tentative_objects) const { tentative_objects.header.stamp = time; for (const auto & tracker : list_tracker_) { if 
(!isConfidentTracker(tracker)) { - autoware_auto_perception_msgs::msg::TrackedObject tracked_object; + autoware_perception_msgs::msg::TrackedObject tracked_object; if (tracker->getTrackedObject(time, tracked_object)) { tentative_objects.objects.push_back(tracked_object); } diff --git a/perception/multi_object_tracker/src/tracker/model/bicycle_tracker.cpp b/perception/multi_object_tracker/src/tracker/model/bicycle_tracker.cpp index 4e5f14fde6834..b54c9b832b0cf 100644 --- a/perception/multi_object_tracker/src/tracker/model/bicycle_tracker.cpp +++ b/perception/multi_object_tracker/src/tracker/model/bicycle_tracker.cpp @@ -40,10 +40,10 @@ #include #include -using Label = autoware_auto_perception_msgs::msg::ObjectClassification; +using Label = autoware_perception_msgs::msg::ObjectClassification; BicycleTracker::BicycleTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & /*self_transform*/, const size_t channel_size, const uint & channel_index) : Tracker(time, object.classification, channel_size), @@ -65,7 +65,7 @@ BicycleTracker::BicycleTracker( ekf_params_.r_cov_yaw = std::pow(r_stddev_yaw, 2.0); // OBJECT SHAPE MODEL - if (object.shape.type == autoware_auto_perception_msgs::msg::Shape::BOUNDING_BOX) { + if (object.shape.type == autoware_perception_msgs::msg::Shape::BOUNDING_BOX) { bounding_box_ = { object.shape.dimensions.x, object.shape.dimensions.y, object.shape.dimensions.z}; } else { @@ -127,9 +127,9 @@ BicycleTracker::BicycleTracker( constexpr double p0_stddev_y = 0.5; // in object coordinate [m] constexpr double p0_stddev_yaw = tier4_autoware_utils::deg2rad(25); // in map coordinate [rad] - constexpr double p0_cov_x = std::pow(p0_stddev_x, 2.0); - constexpr double p0_cov_y = std::pow(p0_stddev_y, 2.0); - constexpr double p0_cov_yaw = std::pow(p0_stddev_yaw, 2.0); + constexpr double p0_cov_x = p0_stddev_x * p0_stddev_x; + constexpr double p0_cov_y = p0_stddev_y * p0_stddev_y; + constexpr double p0_cov_yaw = p0_stddev_yaw * p0_stddev_yaw; const double cos_yaw = std::cos(yaw); const double sin_yaw = std::sin(yaw); @@ -165,15 +165,15 @@ bool BicycleTracker::predict(const rclcpp::Time & time) return motion_model_.predictState(time); } -autoware_auto_perception_msgs::msg::DetectedObject BicycleTracker::getUpdatingObject( - const autoware_auto_perception_msgs::msg::DetectedObject & object, +autoware_perception_msgs::msg::DetectedObject BicycleTracker::getUpdatingObject( + const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & /*self_transform*/) { - autoware_auto_perception_msgs::msg::DetectedObject updating_object; + autoware_perception_msgs::msg::DetectedObject updating_object; // OBJECT SHAPE MODEL // convert to bounding box if input is convex shape - if (object.shape.type != autoware_auto_perception_msgs::msg::Shape::BOUNDING_BOX) { + if (object.shape.type != autoware_perception_msgs::msg::Shape::BOUNDING_BOX) { if (!utils::convertConvexHullToBoundingBox(object, updating_object)) { updating_object = object; } @@ -195,8 +195,7 @@ autoware_auto_perception_msgs::msg::DetectedObject BicycleTracker::getUpdatingOb return updating_object; } -bool BicycleTracker::measureWithPose( - const autoware_auto_perception_msgs::msg::DetectedObject & object) +bool BicycleTracker::measureWithPose(const autoware_perception_msgs::msg::DetectedObject & object) { // get measurement yaw angle 
to update const double tracked_yaw = motion_model_.getStateElement(IDX::YAW); @@ -227,11 +226,10 @@ bool BicycleTracker::measureWithPose( return is_updated; } -bool BicycleTracker::measureWithShape( - const autoware_auto_perception_msgs::msg::DetectedObject & object) +bool BicycleTracker::measureWithShape(const autoware_perception_msgs::msg::DetectedObject & object) { - autoware_auto_perception_msgs::msg::DetectedObject bbox_object; - if (!object.shape.type == autoware_auto_perception_msgs::msg::Shape::BOUNDING_BOX) { + autoware_perception_msgs::msg::DetectedObject bbox_object; + if (!object.shape.type == autoware_perception_msgs::msg::Shape::BOUNDING_BOX) { // do not update shape if the input is not a bounding box return false; } @@ -270,7 +268,7 @@ bool BicycleTracker::measureWithShape( } bool BicycleTracker::measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) { // keep the latest input object @@ -293,7 +291,7 @@ bool BicycleTracker::measure( } // update object - const autoware_auto_perception_msgs::msg::DetectedObject updating_object = + const autoware_perception_msgs::msg::DetectedObject updating_object = getUpdatingObject(object, self_transform); measureWithPose(updating_object); measureWithShape(updating_object); @@ -302,7 +300,7 @@ bool BicycleTracker::measure( } bool BicycleTracker::getTrackedObject( - const rclcpp::Time & time, autoware_auto_perception_msgs::msg::TrackedObject & object) const + const rclcpp::Time & time, autoware_perception_msgs::msg::TrackedObject & object) const { object = object_recognition_utils::toTrackedObject(object_); object.object_id = getUUID(); diff --git a/perception/multi_object_tracker/src/tracker/model/big_vehicle_tracker.cpp b/perception/multi_object_tracker/src/tracker/model/big_vehicle_tracker.cpp index 15c6b2e722ed5..5b7a52016967d 100644 --- a/perception/multi_object_tracker/src/tracker/model/big_vehicle_tracker.cpp +++ b/perception/multi_object_tracker/src/tracker/model/big_vehicle_tracker.cpp @@ -40,10 +40,10 @@ #include #include -using Label = autoware_auto_perception_msgs::msg::ObjectClassification; +using Label = autoware_perception_msgs::msg::ObjectClassification; BigVehicleTracker::BigVehicleTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform, const size_t channel_size, const uint & channel_index) : Tracker(time, object.classification, channel_size), @@ -73,12 +73,12 @@ BigVehicleTracker::BigVehicleTracker( velocity_deviation_threshold_ = tier4_autoware_utils::kmph2mps(10); // [m/s] // OBJECT SHAPE MODEL - if (object.shape.type == autoware_auto_perception_msgs::msg::Shape::BOUNDING_BOX) { + if (object.shape.type == autoware_perception_msgs::msg::Shape::BOUNDING_BOX) { bounding_box_ = { object.shape.dimensions.x, object.shape.dimensions.y, object.shape.dimensions.z}; last_input_bounding_box_ = bounding_box_; } else { - autoware_auto_perception_msgs::msg::DetectedObject bbox_object; + autoware_perception_msgs::msg::DetectedObject bbox_object; if (!utils::convertConvexHullToBoundingBox(object, bbox_object)) { RCLCPP_WARN( logger_, @@ -147,9 +147,9 @@ BigVehicleTracker::BigVehicleTracker( constexpr double p0_stddev_y = 0.5; // in object coordinate [m] 
constexpr double p0_stddev_yaw = tier4_autoware_utils::deg2rad(25); // in map coordinate [rad] - constexpr double p0_cov_x = std::pow(p0_stddev_x, 2.0); - constexpr double p0_cov_y = std::pow(p0_stddev_y, 2.0); - constexpr double p0_cov_yaw = std::pow(p0_stddev_yaw, 2.0); + constexpr double p0_cov_x = p0_stddev_x * p0_stddev_x; + constexpr double p0_cov_y = p0_stddev_y * p0_stddev_y; + constexpr double p0_cov_yaw = p0_stddev_yaw * p0_stddev_yaw; const double cos_yaw = std::cos(yaw); const double sin_yaw = std::sin(yaw); @@ -188,11 +188,11 @@ bool BigVehicleTracker::predict(const rclcpp::Time & time) return motion_model_.predictState(time); } -autoware_auto_perception_msgs::msg::DetectedObject BigVehicleTracker::getUpdatingObject( - const autoware_auto_perception_msgs::msg::DetectedObject & object, +autoware_perception_msgs::msg::DetectedObject BigVehicleTracker::getUpdatingObject( + const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform) { - autoware_auto_perception_msgs::msg::DetectedObject updating_object = object; + autoware_perception_msgs::msg::DetectedObject updating_object = object; // current (predicted) state const double tracked_x = motion_model_.getStateElement(IDX::X); @@ -201,8 +201,8 @@ autoware_auto_perception_msgs::msg::DetectedObject BigVehicleTracker::getUpdatin // OBJECT SHAPE MODEL // convert to bounding box if input is convex shape - autoware_auto_perception_msgs::msg::DetectedObject bbox_object; - if (object.shape.type != autoware_auto_perception_msgs::msg::Shape::BOUNDING_BOX) { + autoware_perception_msgs::msg::DetectedObject bbox_object; + if (object.shape.type != autoware_perception_msgs::msg::Shape::BOUNDING_BOX) { if (!utils::convertConvexHullToBoundingBox(object, bbox_object)) { RCLCPP_WARN( logger_, @@ -225,7 +225,7 @@ autoware_auto_perception_msgs::msg::DetectedObject BigVehicleTracker::getUpdatin // measurement noise covariance float r_cov_x; float r_cov_y; - using Label = autoware_auto_perception_msgs::msg::ObjectClassification; + using Label = autoware_perception_msgs::msg::ObjectClassification; const uint8_t label = object_recognition_utils::getHighestProbLabel(object.classification); if (utils::isLargeVehicleLabel(label)) { r_cov_x = ekf_params_.r_cov_x; @@ -243,9 +243,8 @@ autoware_auto_perception_msgs::msg::DetectedObject BigVehicleTracker::getUpdatin // yaw angle fix double pose_yaw = tf2::getYaw(object.kinematics.pose_with_covariance.pose.orientation); - bool is_yaw_available = - object.kinematics.orientation_availability != - autoware_auto_perception_msgs::msg::DetectedObjectKinematics::UNAVAILABLE; + bool is_yaw_available = object.kinematics.orientation_availability != + autoware_perception_msgs::msg::DetectedObjectKinematics::UNAVAILABLE; // fill covariance matrix auto & pose_cov = updating_object.kinematics.pose_with_covariance.covariance; @@ -274,7 +273,7 @@ autoware_auto_perception_msgs::msg::DetectedObject BigVehicleTracker::getUpdatin } bool BigVehicleTracker::measureWithPose( - const autoware_auto_perception_msgs::msg::DetectedObject & object) + const autoware_perception_msgs::msg::DetectedObject & object) { // current (predicted) state const double tracked_vel = motion_model_.getStateElement(IDX::VEL); @@ -317,9 +316,9 @@ bool BigVehicleTracker::measureWithPose( } bool BigVehicleTracker::measureWithShape( - const autoware_auto_perception_msgs::msg::DetectedObject & object) + const autoware_perception_msgs::msg::DetectedObject & object) { - if (!object.shape.type == 
autoware_auto_perception_msgs::msg::Shape::BOUNDING_BOX) { + if (!object.shape.type == autoware_perception_msgs::msg::Shape::BOUNDING_BOX) { // do not update shape if the input is not a bounding box return false; } @@ -363,7 +362,7 @@ bool BigVehicleTracker::measureWithShape( } bool BigVehicleTracker::measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) { // keep the latest input object @@ -386,7 +385,7 @@ bool BigVehicleTracker::measure( } // update object - const autoware_auto_perception_msgs::msg::DetectedObject updating_object = + const autoware_perception_msgs::msg::DetectedObject updating_object = getUpdatingObject(object, self_transform); measureWithPose(updating_object); measureWithShape(updating_object); @@ -398,7 +397,7 @@ bool BigVehicleTracker::measure( } bool BigVehicleTracker::getTrackedObject( - const rclcpp::Time & time, autoware_auto_perception_msgs::msg::TrackedObject & object) const + const rclcpp::Time & time, autoware_perception_msgs::msg::TrackedObject & object) const { object = object_recognition_utils::toTrackedObject(object_); object.object_id = getUUID(); diff --git a/perception/multi_object_tracker/src/tracker/model/multiple_vehicle_tracker.cpp b/perception/multi_object_tracker/src/tracker/model/multiple_vehicle_tracker.cpp index c925976f65e7e..67445e5b0daa2 100644 --- a/perception/multi_object_tracker/src/tracker/model/multiple_vehicle_tracker.cpp +++ b/perception/multi_object_tracker/src/tracker/model/multiple_vehicle_tracker.cpp @@ -18,10 +18,10 @@ #include "multi_object_tracker/tracker/model/multiple_vehicle_tracker.hpp" -using Label = autoware_auto_perception_msgs::msg::ObjectClassification; +using Label = autoware_perception_msgs::msg::ObjectClassification; MultipleVehicleTracker::MultipleVehicleTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform, const size_t channel_size, const uint & channel_index) : Tracker(time, object.classification, channel_size), @@ -40,7 +40,7 @@ bool MultipleVehicleTracker::predict(const rclcpp::Time & time) } bool MultipleVehicleTracker::measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) { big_vehicle_tracker_.measure(object, time, self_transform); @@ -51,9 +51,9 @@ bool MultipleVehicleTracker::measure( } bool MultipleVehicleTracker::getTrackedObject( - const rclcpp::Time & time, autoware_auto_perception_msgs::msg::TrackedObject & object) const + const rclcpp::Time & time, autoware_perception_msgs::msg::TrackedObject & object) const { - using Label = autoware_auto_perception_msgs::msg::ObjectClassification; + using Label = autoware_perception_msgs::msg::ObjectClassification; const uint8_t label = getHighestProbLabel(); if (label == Label::CAR) { diff --git a/perception/multi_object_tracker/src/tracker/model/normal_vehicle_tracker.cpp b/perception/multi_object_tracker/src/tracker/model/normal_vehicle_tracker.cpp index bd04cc613ef12..a4c080cb40eea 100644 --- a/perception/multi_object_tracker/src/tracker/model/normal_vehicle_tracker.cpp +++ 
b/perception/multi_object_tracker/src/tracker/model/normal_vehicle_tracker.cpp @@ -40,10 +40,10 @@ #include #include -using Label = autoware_auto_perception_msgs::msg::ObjectClassification; +using Label = autoware_perception_msgs::msg::ObjectClassification; NormalVehicleTracker::NormalVehicleTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform, const size_t channel_size, const uint & channel_index) : Tracker(time, object.classification, channel_size), @@ -73,12 +73,12 @@ NormalVehicleTracker::NormalVehicleTracker( velocity_deviation_threshold_ = tier4_autoware_utils::kmph2mps(10); // [m/s] // OBJECT SHAPE MODEL - if (object.shape.type == autoware_auto_perception_msgs::msg::Shape::BOUNDING_BOX) { + if (object.shape.type == autoware_perception_msgs::msg::Shape::BOUNDING_BOX) { bounding_box_ = { object.shape.dimensions.x, object.shape.dimensions.y, object.shape.dimensions.z}; last_input_bounding_box_ = bounding_box_; } else { - autoware_auto_perception_msgs::msg::DetectedObject bbox_object; + autoware_perception_msgs::msg::DetectedObject bbox_object; if (!utils::convertConvexHullToBoundingBox(object, bbox_object)) { RCLCPP_WARN( logger_, @@ -148,9 +148,9 @@ NormalVehicleTracker::NormalVehicleTracker( constexpr double p0_stddev_y = 0.3; // in object coordinate [m] constexpr double p0_stddev_yaw = tier4_autoware_utils::deg2rad(25); // in map coordinate [rad] - constexpr double p0_cov_x = std::pow(p0_stddev_x, 2.0); - constexpr double p0_cov_y = std::pow(p0_stddev_y, 2.0); - constexpr double p0_cov_yaw = std::pow(p0_stddev_yaw, 2.0); + constexpr double p0_cov_x = p0_stddev_x * p0_stddev_x; + constexpr double p0_cov_y = p0_stddev_y * p0_stddev_y; + constexpr double p0_cov_yaw = p0_stddev_yaw * p0_stddev_yaw; const double cos_yaw = std::cos(yaw); const double sin_yaw = std::sin(yaw); @@ -189,11 +189,11 @@ bool NormalVehicleTracker::predict(const rclcpp::Time & time) return motion_model_.predictState(time); } -autoware_auto_perception_msgs::msg::DetectedObject NormalVehicleTracker::getUpdatingObject( - const autoware_auto_perception_msgs::msg::DetectedObject & object, +autoware_perception_msgs::msg::DetectedObject NormalVehicleTracker::getUpdatingObject( + const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform) { - autoware_auto_perception_msgs::msg::DetectedObject updating_object = object; + autoware_perception_msgs::msg::DetectedObject updating_object = object; // current (predicted) state const double tracked_x = motion_model_.getStateElement(IDX::X); @@ -202,8 +202,8 @@ autoware_auto_perception_msgs::msg::DetectedObject NormalVehicleTracker::getUpda // OBJECT SHAPE MODEL // convert to bounding box if input is convex shape - autoware_auto_perception_msgs::msg::DetectedObject bbox_object; - if (object.shape.type != autoware_auto_perception_msgs::msg::Shape::BOUNDING_BOX) { + autoware_perception_msgs::msg::DetectedObject bbox_object; + if (object.shape.type != autoware_perception_msgs::msg::Shape::BOUNDING_BOX) { if (!utils::convertConvexHullToBoundingBox(object, bbox_object)) { RCLCPP_WARN( logger_, @@ -227,7 +227,7 @@ autoware_auto_perception_msgs::msg::DetectedObject NormalVehicleTracker::getUpda // measurement noise covariance float r_cov_x; float r_cov_y; - using Label = autoware_auto_perception_msgs::msg::ObjectClassification; + using Label 
= autoware_perception_msgs::msg::ObjectClassification; const uint8_t label = object_recognition_utils::getHighestProbLabel(object.classification); if (label == Label::CAR) { r_cov_x = ekf_params_.r_cov_x; @@ -245,9 +245,8 @@ autoware_auto_perception_msgs::msg::DetectedObject NormalVehicleTracker::getUpda // yaw angle fix double pose_yaw = tf2::getYaw(object.kinematics.pose_with_covariance.pose.orientation); - bool is_yaw_available = - object.kinematics.orientation_availability != - autoware_auto_perception_msgs::msg::DetectedObjectKinematics::UNAVAILABLE; + bool is_yaw_available = object.kinematics.orientation_availability != + autoware_perception_msgs::msg::DetectedObjectKinematics::UNAVAILABLE; // fill covariance matrix auto & pose_cov = updating_object.kinematics.pose_with_covariance.covariance; @@ -276,7 +275,7 @@ autoware_auto_perception_msgs::msg::DetectedObject NormalVehicleTracker::getUpda } bool NormalVehicleTracker::measureWithPose( - const autoware_auto_perception_msgs::msg::DetectedObject & object) + const autoware_perception_msgs::msg::DetectedObject & object) { // current (predicted) state const double tracked_vel = motion_model_.getStateElement(IDX::VEL); @@ -319,9 +318,9 @@ bool NormalVehicleTracker::measureWithPose( } bool NormalVehicleTracker::measureWithShape( - const autoware_auto_perception_msgs::msg::DetectedObject & object) + const autoware_perception_msgs::msg::DetectedObject & object) { - if (!object.shape.type == autoware_auto_perception_msgs::msg::Shape::BOUNDING_BOX) { + if (!object.shape.type == autoware_perception_msgs::msg::Shape::BOUNDING_BOX) { // do not update shape if the input is not a bounding box return false; } @@ -365,7 +364,7 @@ bool NormalVehicleTracker::measureWithShape( } bool NormalVehicleTracker::measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) { // keep the latest input object @@ -388,7 +387,7 @@ bool NormalVehicleTracker::measure( } // update object - const autoware_auto_perception_msgs::msg::DetectedObject updating_object = + const autoware_perception_msgs::msg::DetectedObject updating_object = getUpdatingObject(object, self_transform); measureWithPose(updating_object); measureWithShape(updating_object); @@ -400,7 +399,7 @@ bool NormalVehicleTracker::measure( } bool NormalVehicleTracker::getTrackedObject( - const rclcpp::Time & time, autoware_auto_perception_msgs::msg::TrackedObject & object) const + const rclcpp::Time & time, autoware_perception_msgs::msg::TrackedObject & object) const { object = object_recognition_utils::toTrackedObject(object_); object.object_id = getUUID(); diff --git a/perception/multi_object_tracker/src/tracker/model/pass_through_tracker.cpp b/perception/multi_object_tracker/src/tracker/model/pass_through_tracker.cpp index e1f4383cf11a3..3e4f23cd440bc 100644 --- a/perception/multi_object_tracker/src/tracker/model/pass_through_tracker.cpp +++ b/perception/multi_object_tracker/src/tracker/model/pass_through_tracker.cpp @@ -36,7 +36,7 @@ #include PassThroughTracker::PassThroughTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & /*self_transform*/, const size_t channel_size, const uint & channel_index) : Tracker(time, object.classification, 
channel_size), @@ -62,7 +62,7 @@ bool PassThroughTracker::predict(const rclcpp::Time & time) } bool PassThroughTracker::measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) { prev_observed_object_ = object_; @@ -84,7 +84,7 @@ bool PassThroughTracker::measure( } bool PassThroughTracker::getTrackedObject( - const rclcpp::Time & time, autoware_auto_perception_msgs::msg::TrackedObject & object) const + const rclcpp::Time & time, autoware_perception_msgs::msg::TrackedObject & object) const { object = object_recognition_utils::toTrackedObject(object_); object.object_id = getUUID(); diff --git a/perception/multi_object_tracker/src/tracker/model/pedestrian_and_bicycle_tracker.cpp b/perception/multi_object_tracker/src/tracker/model/pedestrian_and_bicycle_tracker.cpp index e399dade880fa..00609934990a3 100644 --- a/perception/multi_object_tracker/src/tracker/model/pedestrian_and_bicycle_tracker.cpp +++ b/perception/multi_object_tracker/src/tracker/model/pedestrian_and_bicycle_tracker.cpp @@ -18,10 +18,10 @@ #include "multi_object_tracker/tracker/model/pedestrian_and_bicycle_tracker.hpp" -using Label = autoware_auto_perception_msgs::msg::ObjectClassification; +using Label = autoware_perception_msgs::msg::ObjectClassification; PedestrianAndBicycleTracker::PedestrianAndBicycleTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & self_transform, const size_t channel_size, const uint & channel_index) : Tracker(time, object.classification, channel_size), @@ -40,7 +40,7 @@ bool PedestrianAndBicycleTracker::predict(const rclcpp::Time & time) } bool PedestrianAndBicycleTracker::measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) { pedestrian_tracker_.measure(object, time, self_transform); @@ -51,9 +51,9 @@ bool PedestrianAndBicycleTracker::measure( } bool PedestrianAndBicycleTracker::getTrackedObject( - const rclcpp::Time & time, autoware_auto_perception_msgs::msg::TrackedObject & object) const + const rclcpp::Time & time, autoware_perception_msgs::msg::TrackedObject & object) const { - using Label = autoware_auto_perception_msgs::msg::ObjectClassification; + using Label = autoware_perception_msgs::msg::ObjectClassification; const uint8_t label = getHighestProbLabel(); if (label == Label::PEDESTRIAN) { diff --git a/perception/multi_object_tracker/src/tracker/model/pedestrian_tracker.cpp b/perception/multi_object_tracker/src/tracker/model/pedestrian_tracker.cpp index fd9ef82dfdca3..6c8198b51ecd0 100644 --- a/perception/multi_object_tracker/src/tracker/model/pedestrian_tracker.cpp +++ b/perception/multi_object_tracker/src/tracker/model/pedestrian_tracker.cpp @@ -40,10 +40,10 @@ #include #include -using Label = autoware_auto_perception_msgs::msg::ObjectClassification; +using Label = autoware_perception_msgs::msg::ObjectClassification; PedestrianTracker::PedestrianTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & 
object, const geometry_msgs::msg::Transform & /*self_transform*/, const size_t channel_size, const uint & channel_index) : Tracker(time, object.classification, channel_size), @@ -67,12 +67,12 @@ PedestrianTracker::PedestrianTracker( // OBJECT SHAPE MODEL bounding_box_ = {0.5, 0.5, 1.7}; cylinder_ = {0.5, 1.7}; - if (object.shape.type == autoware_auto_perception_msgs::msg::Shape::BOUNDING_BOX) { + if (object.shape.type == autoware_perception_msgs::msg::Shape::BOUNDING_BOX) { bounding_box_ = { object.shape.dimensions.x, object.shape.dimensions.y, object.shape.dimensions.z}; - } else if (object.shape.type == autoware_auto_perception_msgs::msg::Shape::CYLINDER) { + } else if (object.shape.type == autoware_perception_msgs::msg::Shape::CYLINDER) { cylinder_ = {object.shape.dimensions.x, object.shape.dimensions.z}; - } else if (object.shape.type == autoware_auto_perception_msgs::msg::Shape::POLYGON) { + } else if (object.shape.type == autoware_perception_msgs::msg::Shape::POLYGON) { // do not update polygon shape } // set maximum and minimum size @@ -122,9 +122,9 @@ PedestrianTracker::PedestrianTracker( constexpr double p0_stddev_y = 2.0; // in object coordinate [m] constexpr double p0_stddev_yaw = tier4_autoware_utils::deg2rad(1000); // in map coordinate [rad] - constexpr double p0_cov_x = std::pow(p0_stddev_x, 2.0); - constexpr double p0_cov_y = std::pow(p0_stddev_y, 2.0); - constexpr double p0_cov_yaw = std::pow(p0_stddev_yaw, 2.0); + constexpr double p0_cov_x = p0_stddev_x * p0_stddev_x; + constexpr double p0_cov_y = p0_stddev_y * p0_stddev_y; + constexpr double p0_cov_yaw = p0_stddev_yaw * p0_stddev_yaw; const double cos_yaw = std::cos(yaw); const double sin_yaw = std::sin(yaw); @@ -160,11 +160,11 @@ bool PedestrianTracker::predict(const rclcpp::Time & time) return motion_model_.predictState(time); } -autoware_auto_perception_msgs::msg::DetectedObject PedestrianTracker::getUpdatingObject( - const autoware_auto_perception_msgs::msg::DetectedObject & object, +autoware_perception_msgs::msg::DetectedObject PedestrianTracker::getUpdatingObject( + const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & /*self_transform*/) { - autoware_auto_perception_msgs::msg::DetectedObject updating_object = object; + autoware_perception_msgs::msg::DetectedObject updating_object = object; // UNCERTAINTY MODEL if (!object.kinematics.has_position_covariance) { @@ -186,7 +186,7 @@ autoware_auto_perception_msgs::msg::DetectedObject PedestrianTracker::getUpdatin } bool PedestrianTracker::measureWithPose( - const autoware_auto_perception_msgs::msg::DetectedObject & object) + const autoware_perception_msgs::msg::DetectedObject & object) { // update motion model bool is_updated = false; @@ -207,12 +207,12 @@ bool PedestrianTracker::measureWithPose( } bool PedestrianTracker::measureWithShape( - const autoware_auto_perception_msgs::msg::DetectedObject & object) + const autoware_perception_msgs::msg::DetectedObject & object) { constexpr double gain = 0.1; constexpr double gain_inv = 1.0 - gain; - if (object.shape.type == autoware_auto_perception_msgs::msg::Shape::BOUNDING_BOX) { + if (object.shape.type == autoware_perception_msgs::msg::Shape::BOUNDING_BOX) { // check bound box size abnormality constexpr double size_max = 30.0; // [m] constexpr double size_min = 0.1; // [m] @@ -229,7 +229,7 @@ bool PedestrianTracker::measureWithShape( bounding_box_.length = gain_inv * bounding_box_.length + gain * object.shape.dimensions.x; bounding_box_.width = gain_inv * bounding_box_.width + 
gain * object.shape.dimensions.y; bounding_box_.height = gain_inv * bounding_box_.height + gain * object.shape.dimensions.z; - } else if (object.shape.type == autoware_auto_perception_msgs::msg::Shape::CYLINDER) { + } else if (object.shape.type == autoware_perception_msgs::msg::Shape::CYLINDER) { // check cylinder size abnormality constexpr double size_max = 30.0; // [m] constexpr double size_min = 0.1; // [m] @@ -258,7 +258,7 @@ bool PedestrianTracker::measureWithShape( } bool PedestrianTracker::measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) { // keep the latest input object @@ -280,7 +280,7 @@ bool PedestrianTracker::measure( } // update object - const autoware_auto_perception_msgs::msg::DetectedObject updating_object = + const autoware_perception_msgs::msg::DetectedObject updating_object = getUpdatingObject(object, self_transform); measureWithPose(updating_object); measureWithShape(updating_object); @@ -290,7 +290,7 @@ bool PedestrianTracker::measure( } bool PedestrianTracker::getTrackedObject( - const rclcpp::Time & time, autoware_auto_perception_msgs::msg::TrackedObject & object) const + const rclcpp::Time & time, autoware_perception_msgs::msg::TrackedObject & object) const { object = object_recognition_utils::toTrackedObject(object_); object.object_id = getUUID(); @@ -311,15 +311,15 @@ bool PedestrianTracker::getTrackedObject( pose_with_cov.pose.position.z = z_; // set shape - if (object.shape.type == autoware_auto_perception_msgs::msg::Shape::BOUNDING_BOX) { + if (object.shape.type == autoware_perception_msgs::msg::Shape::BOUNDING_BOX) { object.shape.dimensions.x = bounding_box_.length; object.shape.dimensions.y = bounding_box_.width; object.shape.dimensions.z = bounding_box_.height; - } else if (object.shape.type == autoware_auto_perception_msgs::msg::Shape::CYLINDER) { + } else if (object.shape.type == autoware_perception_msgs::msg::Shape::CYLINDER) { object.shape.dimensions.x = cylinder_.width; object.shape.dimensions.y = cylinder_.width; object.shape.dimensions.z = cylinder_.height; - } else if (object.shape.type == autoware_auto_perception_msgs::msg::Shape::POLYGON) { + } else if (object.shape.type == autoware_perception_msgs::msg::Shape::POLYGON) { const auto origin_yaw = tf2::getYaw(object_.kinematics.pose_with_covariance.pose.orientation); const auto ekf_pose_yaw = tf2::getYaw(pose_with_cov.pose.orientation); object.shape.footprint = diff --git a/perception/multi_object_tracker/src/tracker/model/tracker_base.cpp b/perception/multi_object_tracker/src/tracker/model/tracker_base.cpp index 4318da0569721..6d2787ac1d550 100644 --- a/perception/multi_object_tracker/src/tracker/model/tracker_base.cpp +++ b/perception/multi_object_tracker/src/tracker/model/tracker_base.cpp @@ -31,7 +31,7 @@ float updateProbability(float prior, float true_positive, float false_positive) Tracker::Tracker( const rclcpp::Time & time, - const std::vector & classification, + const std::vector & classification, const size_t & channel_size) : classification_(classification), no_measurement_count_(0), @@ -62,7 +62,7 @@ void Tracker::initializeExistenceProbabilities( } bool Tracker::updateWithMeasurement( - const autoware_auto_perception_msgs::msg::DetectedObject & object, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & measurement_time, const geometry_msgs::msg::Transform & 
self_transform, const uint & channel_index) { @@ -122,7 +122,7 @@ bool Tracker::updateWithoutMeasurement(const rclcpp::Time & now) } void Tracker::updateClassification( - const std::vector & classification) + const std::vector & classification) { // classification algorithm: // 0. Normalize the input classification @@ -139,7 +139,7 @@ void Tracker::updateClassification( // Normalization function auto normalizeProbabilities = - [](std::vector & classification) { + [](std::vector & classification) { double sum = 0.0; for (const auto & class_ : classification) { sum += class_.probability; @@ -185,7 +185,7 @@ void Tracker::updateClassification( geometry_msgs::msg::PoseWithCovariance Tracker::getPoseWithCovariance( const rclcpp::Time & time) const { - autoware_auto_perception_msgs::msg::TrackedObject object; + autoware_perception_msgs::msg::TrackedObject object; getTrackedObject(time, object); return object.kinematics.pose_with_covariance; } diff --git a/perception/multi_object_tracker/src/tracker/model/unknown_tracker.cpp b/perception/multi_object_tracker/src/tracker/model/unknown_tracker.cpp index 04638267f7ad8..f15d5042a2316 100644 --- a/perception/multi_object_tracker/src/tracker/model/unknown_tracker.cpp +++ b/perception/multi_object_tracker/src/tracker/model/unknown_tracker.cpp @@ -37,7 +37,7 @@ #include UnknownTracker::UnknownTracker( - const rclcpp::Time & time, const autoware_auto_perception_msgs::msg::DetectedObject & object, + const rclcpp::Time & time, const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & /*self_transform*/, const size_t channel_size, const uint & channel_index) : Tracker(time, object.classification, channel_size), @@ -140,11 +140,11 @@ bool UnknownTracker::predict(const rclcpp::Time & time) return motion_model_.predictState(time); } -autoware_auto_perception_msgs::msg::DetectedObject UnknownTracker::getUpdatingObject( - const autoware_auto_perception_msgs::msg::DetectedObject & object, +autoware_perception_msgs::msg::DetectedObject UnknownTracker::getUpdatingObject( + const autoware_perception_msgs::msg::DetectedObject & object, const geometry_msgs::msg::Transform & /*self_transform*/) { - autoware_auto_perception_msgs::msg::DetectedObject updating_object = object; + autoware_perception_msgs::msg::DetectedObject updating_object = object; // UNCERTAINTY MODEL if (!object.kinematics.has_position_covariance) { @@ -165,8 +165,7 @@ autoware_auto_perception_msgs::msg::DetectedObject UnknownTracker::getUpdatingOb return updating_object; } -bool UnknownTracker::measureWithPose( - const autoware_auto_perception_msgs::msg::DetectedObject & object) +bool UnknownTracker::measureWithPose(const autoware_perception_msgs::msg::DetectedObject & object) { // update motion model bool is_updated = false; @@ -187,7 +186,7 @@ bool UnknownTracker::measureWithPose( } bool UnknownTracker::measure( - const autoware_auto_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, + const autoware_perception_msgs::msg::DetectedObject & object, const rclcpp::Time & time, const geometry_msgs::msg::Transform & self_transform) { // keep the latest input object @@ -204,7 +203,7 @@ bool UnknownTracker::measure( } // update object - const autoware_auto_perception_msgs::msg::DetectedObject updating_object = + const autoware_perception_msgs::msg::DetectedObject updating_object = getUpdatingObject(object, self_transform); measureWithPose(updating_object); @@ -212,7 +211,7 @@ bool UnknownTracker::measure( } bool 
UnknownTracker::getTrackedObject( - const rclcpp::Time & time, autoware_auto_perception_msgs::msg::TrackedObject & object) const + const rclcpp::Time & time, autoware_perception_msgs::msg::TrackedObject & object) const { object = object_recognition_utils::toTrackedObject(object_); object.object_id = getUUID(); From 8d816d980e3bd225a5251cefa44c7d33a0593231 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 5 Jun 2024 14:12:22 +0000 Subject: [PATCH 6/7] style(pre-commit): autofix --- .../centerpoint_sigma_ml_package.schema.json | 151 +++++++++--------- 1 file changed, 75 insertions(+), 76 deletions(-) diff --git a/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.schema.json b/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.schema.json index de03014d0f19c..6f9fb1f647cce 100644 --- a/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.schema.json +++ b/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.schema.json @@ -1,79 +1,78 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Parameters for Centerpoint Sigma ML model", - "type": "object", - "definitions": { - "centerpoint_sigma_ml_package": { - "type": "object", - "properties": { - "model_params": { - "type": "object", - "description": "Parameters for model configuration.", - "properties": { - "class_names": { - "type": "array", - "description": "An array of class names will be predicted.", - "default": ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"], - "uniqueItems": true - }, - "point_feature_size": { - "type": "integer", - "description": "A number of channels of point feature layer.", - "default": 4 - }, - "max_voxel_size": { - "type": "integer", - "description": "A maximum size of voxel grid.", - "default": 40000 - }, - "point_cloud_range": { - "type": "array", - "description": "An array of distance ranges of each class, this must have same length with `class_names`.", - "default": [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0] - }, - "voxel_size": { - "type": "array", - "description": "An array of voxel grid sizes for PointPainting, this must have same length with `paint_class_names`.", - "default": [0.32, 0.32, 10.0] - }, - "down_sample_factor": { - "type": "integer", - "description": "A scale factor of downsampling points", - "default": 1, - "minimum": 1 - }, - "encoder_in_feature_size": { - "type": "integer", - "description": "A size of encoder input feature channels.", - "default": 9 - }, - "has_variance": { - "type": "boolean", - "description": "determines whether the covariance matrices for the object's position and velocity information are filled in.", - "default": false - }, - "has_twist": { - "type": "boolean", - "description": "Indicates whether the model outputs twist value.", - "default": false - } + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Parameters for Centerpoint Sigma ML model", + "type": "object", + "definitions": { + "centerpoint_sigma_ml_package": { + "type": "object", + "properties": { + "model_params": { + "type": "object", + "description": "Parameters for model configuration.", + "properties": { + "class_names": { + "type": "array", + "description": "An array of class names will be predicted.", + "default": ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"], + "uniqueItems": true + }, + "point_feature_size": { + "type": "integer", + "description": "A number of channels of point feature layer.", + "default": 4 + }, + "max_voxel_size": { + "type": 
"integer", + "description": "A maximum size of voxel grid.", + "default": 40000 + }, + "point_cloud_range": { + "type": "array", + "description": "An array of distance ranges of each class, this must have same length with `class_names`.", + "default": [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0] + }, + "voxel_size": { + "type": "array", + "description": "An array of voxel grid sizes for PointPainting, this must have same length with `paint_class_names`.", + "default": [0.32, 0.32, 10.0] + }, + "down_sample_factor": { + "type": "integer", + "description": "A scale factor of downsampling points", + "default": 1, + "minimum": 1 + }, + "encoder_in_feature_size": { + "type": "integer", + "description": "A size of encoder input feature channels.", + "default": 9 + }, + "has_variance": { + "type": "boolean", + "description": "determines whether the covariance matrices for the object's position and velocity information are filled in.", + "default": false + }, + "has_twist": { + "type": "boolean", + "description": "Indicates whether the model outputs twist value.", + "default": false } } - }, - "required": ["model_params"] - } - }, - "properties": { - "/**": { - "type": "object", - "properties": { - "ros__parameters": { - "$ref": "#/definitions/centerpoint_tiny_ml_package" - } - }, - "required": ["ros__parameters"] - } - }, - "required": ["/**"] - } - \ No newline at end of file + } + }, + "required": ["model_params"] + } + }, + "properties": { + "/**": { + "type": "object", + "properties": { + "ros__parameters": { + "$ref": "#/definitions/centerpoint_tiny_ml_package" + } + }, + "required": ["ros__parameters"] + } + }, + "required": ["/**"] +} From 66fd8c6c795553830bdf267b4cebf665e1c3f30e Mon Sep 17 00:00:00 2001 From: Boyang <144167190+tby-udel@users.noreply.github.com> Date: Wed, 5 Jun 2024 10:13:45 -0400 Subject: [PATCH 7/7] Delete perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.json --- .../schema/centerpoint_sigma_ml_package.json | 78 ------------------- 1 file changed, 78 deletions(-) delete mode 100644 perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.json diff --git a/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.json b/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.json deleted file mode 100644 index 6f9fb1f647cce..0000000000000 --- a/perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Parameters for Centerpoint Sigma ML model", - "type": "object", - "definitions": { - "centerpoint_sigma_ml_package": { - "type": "object", - "properties": { - "model_params": { - "type": "object", - "description": "Parameters for model configuration.", - "properties": { - "class_names": { - "type": "array", - "description": "An array of class names will be predicted.", - "default": ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"], - "uniqueItems": true - }, - "point_feature_size": { - "type": "integer", - "description": "A number of channels of point feature layer.", - "default": 4 - }, - "max_voxel_size": { - "type": "integer", - "description": "A maximum size of voxel grid.", - "default": 40000 - }, - "point_cloud_range": { - "type": "array", - "description": "An array of distance ranges of each class, this must have same length with `class_names`.", - "default": [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0] - }, - "voxel_size": { - "type": "array", - "description": "An array of voxel grid sizes for PointPainting, this must have same 
length with `paint_class_names`.", - "default": [0.32, 0.32, 10.0] - }, - "down_sample_factor": { - "type": "integer", - "description": "A scale factor of downsampling points", - "default": 1, - "minimum": 1 - }, - "encoder_in_feature_size": { - "type": "integer", - "description": "A size of encoder input feature channels.", - "default": 9 - }, - "has_variance": { - "type": "boolean", - "description": "determines whether the covariance matrices for the object's position and velocity information are filled in.", - "default": false - }, - "has_twist": { - "type": "boolean", - "description": "Indicates whether the model outputs twist value.", - "default": false - } - } - } - }, - "required": ["model_params"] - } - }, - "properties": { - "/**": { - "type": "object", - "properties": { - "ros__parameters": { - "$ref": "#/definitions/centerpoint_tiny_ml_package" - } - }, - "required": ["ros__parameters"] - } - }, - "required": ["/**"] -}
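
For readers wiring the new schema files into their own tooling, the following is a minimal sketch, not part of the patch series, of how a parameter set could be checked against the `centerpoint_sigma_ml_package` definition with Python's `jsonschema` package. The file path, the parameter values (taken from the defaults declared in the schema above), and the choice to validate against the definition directly rather than the top-level schema are all assumptions made for illustration.

# Minimal sketch: validate a hypothetical parameter set against the
# `centerpoint_sigma_ml_package` definition from the schema added above.
# Assumes the `jsonschema` package is installed and the script is run
# from the repository root; the parameter values below are the schema
# defaults, used here purely as an example.
import json

import jsonschema

with open("perception/lidar_centerpoint/schema/centerpoint_sigma_ml_package.schema.json") as f:
    schema = json.load(f)

# Hypothetical contents of the `ros__parameters` block, mirroring the
# schema's `/** -> ros__parameters -> model_params` layout.
ros_parameters = {
    "model_params": {
        "class_names": ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"],
        "point_feature_size": 4,
        "max_voxel_size": 40000,
        "point_cloud_range": [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0],
        "voxel_size": [0.32, 0.32, 10.0],
        "down_sample_factor": 1,
        "encoder_in_feature_size": 9,
        "has_variance": False,
        "has_twist": False,
    }
}

# Validate against the definition itself so no `$ref` resolution is needed.
jsonschema.validate(
    instance=ros_parameters,
    schema=schema["definitions"]["centerpoint_sigma_ml_package"],
)
print("ros__parameters conform to centerpoint_sigma_ml_package")

Validating against the definition directly sidesteps the top-level `$ref`, which as of this series still points at `#/definitions/centerpoint_tiny_ml_package`.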