From 3b019b1fcc104dd7a345a50061c3802eef4d8abf Mon Sep 17 00:00:00 2001
From: Masaki Baba
Date: Tue, 24 Dec 2024 09:03:41 +0900
Subject: [PATCH] feat(image_projection_based_fusion): add cache for camera projection (#9635)

* add camera_projection class and projection cache

Signed-off-by: a-maumau

* style(pre-commit): autofix

* fix FOV filtering

Signed-off-by: a-maumau

* style(pre-commit): autofix

* remove unused includes

Signed-off-by: a-maumau

* update schema

Signed-off-by: a-maumau

* fix cpplint error

Signed-off-by: a-maumau

* apply suggestion: simpler and more efficient computation

Signed-off-by: a-maumau

* correct terminology

Signed-off-by: a-maumau

* style(pre-commit): autofix

* fix comments

Signed-off-by: a-maumau

* fix var name

Signed-off-by: a-maumau
Co-authored-by: Taekjin LEE

* fix variable names

Signed-off-by: a-maumau
Co-authored-by: Taekjin LEE

* fix variable names

Signed-off-by: a-maumau
Co-authored-by: Taekjin LEE

* fix variable names

Signed-off-by: a-maumau
Co-authored-by: Taekjin LEE

* fix variable names

Signed-off-by: a-maumau
Co-authored-by: Taekjin LEE

* fix variable names

Signed-off-by: a-maumau

* fix initialization

Co-authored-by: badai nguyen <94814556+badai-nguyen@users.noreply.github.com>
Signed-off-by: a-maumau

* add verification to point_project_to_unrectified_image when loading

Co-authored-by: badai nguyen <94814556+badai-nguyen@users.noreply.github.com>
Signed-off-by: a-maumau

* chore: add option description to project points to unrectified image

Signed-off-by: Taekjin LEE

---------

Signed-off-by: a-maumau
Signed-off-by: Taekjin LEE
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Taekjin LEE
Co-authored-by: badai nguyen <94814556+badai-nguyen@users.noreply.github.com>
Co-authored-by: Taekjin LEE
---
 .../CMakeLists.txt                            |   1 +
 .../README.md                                 |   6 +
 .../config/fusion_common.param.yaml           |   9 +-
 .../camera_projection.hpp                     |  81 ++++++
 .../fusion_node.hpp                           |  11 +-
 .../pointpainting_fusion/node.hpp             |   3 -
 .../roi_cluster_fusion/node.hpp               |   1 -
 .../roi_detected_object_fusion/node.hpp       |   8 +-
 .../roi_pointcloud_fusion/node.hpp            |   3 +-
 .../segmentation_pointcloud_fusion/node.hpp   |   2 +-
 .../utils/utils.hpp                           |   4 -
 .../schema/fusion_common.schema.json          |  20 ++
 .../src/camera_projection.cpp                 | 252 ++++++++++++++++++
 .../src/fusion_node.cpp                       |  52 +++-
 .../src/pointpainting_fusion/node.cpp         |  57 ++--
 .../src/roi_cluster_fusion/node.cpp           |  31 +--
 .../src/roi_detected_object_fusion/node.cpp   |  39 ++-
 .../src/roi_pointcloud_fusion/node.cpp        |  75 +++---
 .../segmentation_pointcloud_fusion/node.cpp   |  20 +-
 .../src/utils/utils.cpp                       |  14 -
 20 files changed, 522 insertions(+), 167 deletions(-)
 create mode 100644 perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp
 create mode 100644 perception/autoware_image_projection_based_fusion/src/camera_projection.cpp

diff --git a/perception/autoware_image_projection_based_fusion/CMakeLists.txt b/perception/autoware_image_projection_based_fusion/CMakeLists.txt
index 73d305d2ab2a8..7c8160d6fe1b7 100644
--- a/perception/autoware_image_projection_based_fusion/CMakeLists.txt
+++ b/perception/autoware_image_projection_based_fusion/CMakeLists.txt
@@ -16,6 +16,7 @@ endif()
 
 # Build non-CUDA dependent nodes
 ament_auto_add_library(${PROJECT_NAME} SHARED
+  src/camera_projection.cpp
   src/fusion_node.cpp
   src/debugger.cpp
   src/utils/geometry.cpp
diff --git a/perception/autoware_image_projection_based_fusion/README.md b/perception/autoware_image_projection_based_fusion/README.md
index c976697b0396d..dcf35e45bbd9d 100644
--- a/perception/autoware_image_projection_based_fusion/README.md
+++ b/perception/autoware_image_projection_based_fusion/README.md
@@ -66,6 +66,12 @@ ros2 launch autoware_image_projection_based_fusion pointpainting_fusion.launch.x
 The rclcpp::TimerBase timer cannot break out of a for loop, so even if the timeout is reached while a ROI message is being fused, the program keeps running until all messages are fused.
 
+### Approximate camera projection
+
+For algorithms such as `pointpainting_fusion`, projecting points onto an unrectified (raw) image can be computationally expensive. To reduce this load, set the [`approximate_camera_projection`](config/fusion_common.param.yaml) parameter to `true` for each camera (ROI). If the corresponding `point_project_to_unrectified_image` parameter is also set to `true`, the projections are precomputed and cached.
+
+The cached projections are computed on a grid whose cell size is given by the `approximation_grid_cell_width` and `approximation_grid_cell_height` parameters in the [configuration file](config/fusion_common.param.yaml). The centers of the grid cells are used as the projected points.
+
 ### Detailed description of each fusion's algorithm is in the following links
 
 | Fusion Name | Description | Detail |
diff --git a/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml b/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml
index 347cb57b484e9..86db3bad4f8f8 100644
--- a/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml
+++ b/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml
@@ -4,7 +4,8 @@
       timeout_ms: 70.0
       match_threshold_ms: 50.0
       image_buffer_size: 15
-      point_project_to_unrectified_image: false
+      # whether to project points onto the unrectified (raw) image, set per ROI
+      point_project_to_unrectified_image: [false, false, false, false, false, false]
       debug_mode: false
       filter_scope_min_x: -100.0
       filter_scope_min_y: -100.0
@@ -13,5 +14,11 @@
       filter_scope_max_y: 100.0
       filter_scope_max_z: 100.0
 
+      # projection cache setting, set per ROI
+      approximate_camera_projection: [true, true, true, true, true, true]
+      # approximation grid cell size in pixels
+      approximation_grid_cell_width: 1.0
+      approximation_grid_cell_height: 1.0
+
       # debug parameters
       publish_processing_time_detail: false
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp
new file mode 100644
index 0000000000000..f9b3158b1672a
--- /dev/null
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp
@@ -0,0 +1,81 @@
+// Copyright 2024 TIER IV, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__CAMERA_PROJECTION_HPP_
+#define AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__CAMERA_PROJECTION_HPP_
+
+#define EIGEN_MPL2_ONLY
+
+#include <Eigen/Core>
+#include <Eigen/Geometry>
+
+#include <sensor_msgs/msg/camera_info.hpp>
+
+#include <image_geometry/pinhole_camera_model.h>
+
+#include <memory>
+
+namespace autoware::image_projection_based_fusion
+{
+struct PixelPos
+{
+  float x;
+  float y;
+};
+
+class CameraProjection
+{
+public:
+  explicit CameraProjection(
+    const sensor_msgs::msg::CameraInfo & camera_info, const float grid_cell_width,
+    const float grid_cell_height, const bool unrectify, const bool use_approximation);
+  CameraProjection() : cell_width_(1.0), cell_height_(1.0), unrectify_(false) {}
+  void initialize();
+  std::function<bool(const cv::Point3d &, Eigen::Vector2d &)> calcImageProjectedPoint;
+  sensor_msgs::msg::CameraInfo getCameraInfo();
+  bool isOutsideHorizontalView(const float px, const float pz);
+  bool isOutsideVerticalView(const float py, const float pz);
+  bool isOutsideFOV(const float px, const float py, const float pz);
+
+protected:
+  bool calcRectifiedImageProjectedPoint(
+    const cv::Point3d & point3d, Eigen::Vector2d & projected_point);
+  bool calcRawImageProjectedPoint(const cv::Point3d & point3d, Eigen::Vector2d & projected_point);
+  bool calcRawImageProjectedPointWithApproximation(
+    const cv::Point3d & point3d, Eigen::Vector2d & projected_point);
+  void initializeCache();
+
+  sensor_msgs::msg::CameraInfo camera_info_;
+  uint32_t image_height_, image_width_;
+  double tan_h_x_, tan_h_y_;
+  double fov_left_, fov_right_, fov_top_, fov_bottom_;
+
+  uint32_t cache_size_;
+  float cell_width_;
+  float cell_height_;
+  float inv_cell_width_;
+  float inv_cell_height_;
+  int grid_width_;
+  int grid_height_;
+
+  bool unrectify_;
+  bool use_approximation_;
+
+  std::unique_ptr<PixelPos[]> projection_cache_;
+  image_geometry::PinholeCameraModel camera_model_;
+};
+
+}  // namespace autoware::image_projection_based_fusion
+
+#endif  // AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__CAMERA_PROJECTION_HPP_
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp
index 04ecf89faa0d4..239d406650ce8 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp
@@ -15,6 +15,7 @@
 #ifndef AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__FUSION_NODE_HPP_
 #define AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__FUSION_NODE_HPP_
 
+#include <autoware/image_projection_based_fusion/camera_projection.hpp>
 #include
 #include
 #include
@@ -55,6 +56,7 @@ using sensor_msgs::msg::PointCloud2;
 using tier4_perception_msgs::msg::DetectedObjectsWithFeature;
 using tier4_perception_msgs::msg::DetectedObjectWithFeature;
 using PointCloud = pcl::PointCloud<pcl::PointXYZ>;
+using autoware::image_projection_based_fusion::CameraProjection;
 using autoware_perception_msgs::msg::ObjectClassification;
 
 template <class TargetMsg3D, class ObjType, class Msg2D>
 class FusionNode : public rclcpp::Node
@@ -86,7 +88,7 @@ class FusionNode : public rclcpp::Node
 
   virtual void fuseOnSingleImage(
     const TargetMsg3D & input_msg, const std::size_t image_id, const Msg2D & input_roi_msg,
-    const sensor_msgs::msg::CameraInfo & camera_info, TargetMsg3D & output_msg) = 0;
+    TargetMsg3D & output_msg) = 0;
 
   // set args if you need
   virtual void postprocess(TargetMsg3D & output_msg);
@@ -100,11 +102,16 @@ class FusionNode : public rclcpp::Node
   tf2_ros::Buffer tf_buffer_;
   tf2_ros::TransformListener tf_listener_;
 
-  bool point_project_to_unrectified_image_{false};
+  std::vector<bool> point_project_to_unrectified_image_;
   // camera_info
   std::map<std::size_t, sensor_msgs::msg::CameraInfo> camera_info_map_;
   std::vector<rclcpp::Subscription<sensor_msgs::msg::CameraInfo>::SharedPtr> camera_info_subs_;
+  // camera projection
+  std::vector<CameraProjection> camera_projectors_;
+  std::vector<bool> approx_camera_projection_;
+  float approx_grid_cell_w_size_;
+  float approx_grid_cell_h_size_;
 
   rclcpp::TimerBase::SharedPtr timer_;
   double timeout_ms_{};
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/pointpainting_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/pointpainting_fusion/node.hpp
index 8c0e2ed78fc6c..cd6cd87976522 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/pointpainting_fusion/node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/pointpainting_fusion/node.hpp
@@ -53,15 +53,12 @@ class PointPaintingFusionNode
   void fuseOnSingleImage(
     const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const std::size_t image_id,
     const DetectedObjectsWithFeature & input_roi_msg,
-    const sensor_msgs::msg::CameraInfo & camera_info,
     sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg) override;
 
   void postprocess(sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg) override;
 
   rclcpp::Publisher<DetectedObjectsWithFeature>::SharedPtr obj_pub_ptr_;
 
-  std::vector<double> tan_h_;  // horizontal field of view
-
   int omp_num_threads_{1};
   float score_threshold_{0.0};
   std::vector<std::string> class_names_;
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_cluster_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_cluster_fusion/node.hpp
index be05c0a1c4bc6..903b153af0681 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_cluster_fusion/node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_cluster_fusion/node.hpp
@@ -38,7 +38,6 @@ class RoiClusterFusionNode
   void fuseOnSingleImage(
     const DetectedObjectsWithFeature & input_cluster_msg, const std::size_t image_id,
     const DetectedObjectsWithFeature & input_roi_msg,
-    const sensor_msgs::msg::CameraInfo & camera_info,
     DetectedObjectsWithFeature & output_cluster_msg) override;
 
   std::string trust_object_iou_mode_{"iou"};
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
index 7103537ec852d..43ec134168ef9 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
@@ -42,13 +42,11 @@ class RoiDetectedObjectFusionNode
   void fuseOnSingleImage(
     const DetectedObjects & input_object_msg, const std::size_t image_id,
-    const DetectedObjectsWithFeature & input_roi_msg,
-    const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjects & output_object_msg) override;
+    const DetectedObjectsWithFeature & input_roi_msg, DetectedObjects & output_object_msg) override;
 
   std::map<std::size_t, DetectedObjectWithFeature> generateDetectedObjectRoIs(
-    const DetectedObjects & input_object_msg, const double image_width, const double image_height,
-    const Eigen::Affine3d & object2camera_affine,
-    const image_geometry::PinholeCameraModel & pinhole_camera_model);
+    const DetectedObjects & input_object_msg, const std::size_t & image_id,
+    const Eigen::Affine3d & object2camera_affine);
 
   void fuseObjectsOnImage(
     const DetectedObjects & input_object_msg,
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
index 9baf754e224a7..2f2c8222e196f 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
@@ -49,8 +49,7 @@ class RoiPointCloudFusionNode
   void fuseOnSingleImage(
     const PointCloud2 & input_pointcloud_msg, const std::size_t image_id,
-    const DetectedObjectsWithFeature & input_roi_msg,
-    const sensor_msgs::msg::CameraInfo & camera_info, PointCloud2 & output_pointcloud_msg) override;
+    const DetectedObjectsWithFeature & input_roi_msg, PointCloud2 & output_pointcloud_msg) override;
 
   bool out_of_scope(const DetectedObjectWithFeature & obj) override;
 };
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp
index 0bec3195bb402..7454cb7bcd173 100644
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp
@@ -61,7 +61,7 @@ class SegmentPointCloudFusionNode : public FusionNode<PointCloud2, PointCloud2, Image>
 
   void fuseOnSingleImage(
     const PointCloud2 & input_pointcloud_msg, const std::size_t image_id, const Image & input_mask,
-    const CameraInfo & camera_info, PointCloud2 & output_cloud) override;
+    PointCloud2 & output_cloud) override;
diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/utils/utils.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/utils/utils.hpp
--- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/utils/utils.hpp
+++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/utils/utils.hpp
@@ -52,10 +52,6 @@
-Eigen::Vector2d calcRawImageProjectedPoint(
-  const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d,
-  const bool & unrectify);
-
 std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
   const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,
   const std::string & source_frame_id, const rclcpp::Time & time);
diff --git a/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json b/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json
index 73ee1661adaea..8077f3e2f3e30 100644
--- a/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json
+++ b/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json
@@ -31,6 +31,11 @@
         "default": 15,
         "minimum": 1
       },
+      "point_project_to_unrectified_image": {
+        "type": "array",
+        "description": "An array of options indicating whether to project points onto the unrectified image or not.",
+        "default": [false, false, false, false, false, false]
+      },
       "debug_mode": {
         "type": "boolean",
         "description": "Whether to debug or not.",
@@ -65,6 +70,21 @@
         "type": "number",
         "description": "Maximum z position [m].",
         "default": 100.0
+      },
+      "approximate_camera_projection": {
+        "type": "array",
+        "description": "An array of options indicating whether to use the approximated projection for each camera.",
+        "default": [true, true, true, true, true, true]
+      },
+      "approximation_grid_cell_width": {
+        "type": "number",
+        "description": "The width of the grid cell used in the approximated projection [pixel].",
+        "default": 1.0
+      },
+      "approximation_grid_cell_height": {
+        "type": "number",
+        "description": "The height of the grid cell used in the approximated projection [pixel].",
+        "default": 1.0
       }
     }
   }
diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp
new file mode 100644
index 0000000000000..c0a820609a0a7
--- /dev/null
+++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp
@@ -0,0 +1,252 @@
+// Copyright 2024 TIER IV, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "autoware/image_projection_based_fusion/camera_projection.hpp"
+
+#include <memory>
+
+namespace autoware::image_projection_based_fusion
+{
+CameraProjection::CameraProjection(
+  const sensor_msgs::msg::CameraInfo & camera_info, const float grid_cell_width,
+  const float grid_cell_height, const bool unrectify, const bool use_approximation = false)
+: camera_info_(camera_info),
+  cell_width_(grid_cell_width),
+  cell_height_(grid_cell_height),
+  unrectify_(unrectify),
+  use_approximation_(use_approximation)
+{
+  if (grid_cell_width <= 0.0 || grid_cell_height <= 0.0) {
+    throw std::runtime_error("Both grid_cell_width and grid_cell_height must be > 0.0");
+  }
+
+  image_width_ = camera_info.width;
+  image_height_ = camera_info.height;
+
+  // prepare camera model
+  camera_model_.fromCameraInfo(camera_info);
+
+  // cache settings
+  inv_cell_width_ = 1 / cell_width_;
+  inv_cell_height_ = 1 / cell_height_;
+  grid_width_ = static_cast<int>(std::ceil(image_width_ / cell_width_));
+  grid_height_ = static_cast<int>(std::ceil(image_height_ / cell_height_));
+  cache_size_ = grid_width_ * grid_height_;
+
+  // compute 3D rays for the image corners and the pixels related to the optical center
+  // to find the actual FOV size
+  // optical center
+  const double ocx = static_cast<double>(camera_info.k.at(2));
+  const double ocy = static_cast<double>(camera_info.k.at(5));
+  // for checking the pincushion shape case
+  const cv::Point3d ray_top_left = camera_model_.projectPixelTo3dRay(cv::Point(0, 0));
+  const cv::Point3d ray_top_right =
+    camera_model_.projectPixelTo3dRay(cv::Point(image_width_ - 1, 0));
+  const cv::Point3d ray_bottom_left =
+    camera_model_.projectPixelTo3dRay(cv::Point(0, image_height_ - 1));
+  const cv::Point3d ray_bottom_right =
+    camera_model_.projectPixelTo3dRay(cv::Point(image_width_ - 1, image_height_ - 1));
+  // for checking the barrel shape case
+  const cv::Point3d ray_mid_top = camera_model_.projectPixelTo3dRay(cv::Point(ocx, 0));
+  const cv::Point3d ray_mid_left = camera_model_.projectPixelTo3dRay(cv::Point(0, ocy));
+  const cv::Point3d ray_mid_right =
+    camera_model_.projectPixelTo3dRay(cv::Point(image_width_ - 1, ocy));
+  const cv::Point3d ray_mid_bottom =
+    camera_model_.projectPixelTo3dRay(cv::Point(ocx, image_height_ - 1));
+
+  cv::Point3d x_left = ray_top_left;
+  cv::Point3d x_right = ray_top_right;
+  cv::Point3d y_top = ray_top_left;
+  cv::Point3d y_bottom = ray_bottom_left;
+
+  // check each side of the view
+  if (ray_bottom_left.x < x_left.x) x_left = ray_bottom_left;
+  if (ray_mid_left.x < x_left.x) x_left = ray_mid_left;
+
+  if (x_right.x < ray_bottom_right.x) x_right = ray_bottom_right;
+  if (x_right.x < ray_mid_right.x) x_right = ray_mid_right;
+
+  if (y_top.y < ray_top_right.y) y_top = ray_top_right;
+  if (y_top.y < ray_mid_top.y) y_top = ray_mid_top;
+
+  if (ray_bottom_left.y < y_bottom.y) y_bottom = ray_bottom_left;
+  if (ray_mid_bottom.y < y_bottom.y) y_bottom = ray_mid_bottom;
+
+  // set FOV at z = 1.0
+  fov_left_ = x_left.x / x_left.z;
+  fov_right_ = x_right.x / x_right.z;
+  fov_top_ = y_top.y / y_top.z;
+  fov_bottom_ = y_bottom.y / y_bottom.z;
+}
+
+void CameraProjection::initialize()
+{
+  if (unrectify_) {
+    if (use_approximation_) {
+      // create the cache with one entry per grid cell center
+      // store only the xy position in float to reduce memory consumption
+      projection_cache_ = std::make_unique<PixelPos[]>(cache_size_);
+      initializeCache();
+
+      calcImageProjectedPoint = [this](
+                                  const cv::Point3d & point3d, Eigen::Vector2d & projected_point) {
+        return this->calcRawImageProjectedPointWithApproximation(point3d, projected_point);
+      };
+    } else {
+      calcImageProjectedPoint = [this](
+                                  const cv::Point3d & point3d, Eigen::Vector2d & projected_point) {
+        return this->calcRawImageProjectedPoint(point3d, projected_point);
+      };
+    }
+  } else {
+    calcImageProjectedPoint = [this](
+                                const cv::Point3d & point3d, Eigen::Vector2d & projected_point) {
+      return this->calcRectifiedImageProjectedPoint(point3d, projected_point);
+    };
+  }
+}
+
+void CameraProjection::initializeCache()
+{
+  // sample the grid cell centers across the image height and width to precompute the projections
+  //
+  //  grid cell size
+  //  /
+  // v
+  // |---| w
+  // 0----------------->
+  // 0 | . | . | . |
+  //   |___|___|___|
+  //   | . | . | . |
+  //   |           ^
+  // h |           |
+  //   v           grid cell center
+  //
+  // each pixel is rounded to one of these grid cell centers;
+  // edge pixels on the right and bottom sides of the image are assigned to centers
+  // that lie outside of the image
+
+  for (int idx_y = 0; idx_y < grid_height_; idx_y++) {
+    for (int idx_x = 0; idx_x < grid_width_; idx_x++) {
+      const uint32_t grid_index = idx_y * grid_width_ + idx_x;
+      const float px = (idx_x + 0.5f) * cell_width_;
+      const float py = (idx_y + 0.5f) * cell_height_;
+
+      // precompute the projected point
+      cv::Point2d raw_image_point = camera_model_.unrectifyPoint(cv::Point2d(px, py));
+      projection_cache_[grid_index] =
+        PixelPos{static_cast<float>(raw_image_point.x), static_cast<float>(raw_image_point.y)};
+    }
+  }
+}
+
+/**
+ * @brief Calculate the projection of a 3D point onto the rectified image plane.
+ * @return Return a boolean indicating whether the projected point is on the image plane.
+ */
+bool CameraProjection::calcRectifiedImageProjectedPoint(
+  const cv::Point3d & point3d, Eigen::Vector2d & projected_point)
+{
+  const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d);
+
+  if (
+    rectified_image_point.x < 0.0 || rectified_image_point.x >= image_width_ ||
+    rectified_image_point.y < 0.0 || rectified_image_point.y >= image_height_) {
+    return false;
+  } else {
+    projected_point << rectified_image_point.x, rectified_image_point.y;
+    return true;
+  }
+}
+
+/**
+ * @brief Calculate the projection of a 3D point onto the raw image plane.
+ * @return Return a boolean indicating whether the projected point is on the image plane.
+ */
+bool CameraProjection::calcRawImageProjectedPoint(
+  const cv::Point3d & point3d, Eigen::Vector2d & projected_point)
+{
+  const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d);
+  const cv::Point2d raw_image_point = camera_model_.unrectifyPoint(rectified_image_point);
+
+  if (
+    rectified_image_point.x < 0.0 || rectified_image_point.x >= image_width_ ||
+    rectified_image_point.y < 0.0 || rectified_image_point.y >= image_height_) {
+    return false;
+  } else {
+    projected_point << raw_image_point.x, raw_image_point.y;
+    return true;
+  }
+}
+
+/**
+ * @brief Calculate the projection of a 3D point onto the raw image plane with approximation.
+ * @return Return a boolean indicating whether the projected point is on the image plane.
+ */
+bool CameraProjection::calcRawImageProjectedPointWithApproximation(
+  const cv::Point3d & point3d, Eigen::Vector2d & projected_point)
+{
+  const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d);
+
+  // round down to the containing grid cell
+  const int grid_idx_x = static_cast<int>(std::floor(rectified_image_point.x * inv_cell_width_));
+  const int grid_idx_y = static_cast<int>(std::floor(rectified_image_point.y * inv_cell_height_));
+
+  if (grid_idx_x < 0 || grid_idx_x >= grid_width_) return false;
+  if (grid_idx_y < 0 || grid_idx_y >= grid_height_) return false;
+
+  const uint32_t grid_index = grid_idx_y * grid_width_ + grid_idx_x;
+  projected_point << projection_cache_[grid_index].x, projection_cache_[grid_index].y;
+
+  return true;
+}
+
+sensor_msgs::msg::CameraInfo CameraProjection::getCameraInfo()
+{
+  return camera_info_;
+}
+
+bool CameraProjection::isOutsideHorizontalView(const float px, const float pz)
+{
+  // assuming the points' origin is centered on the camera
+  if (pz <= 0.0) return true;
+  if (px < fov_left_ * pz) return true;
+  if (px > fov_right_ * pz) return true;
+
+  // inside of the horizontal view
+  return false;
+}
+
+bool CameraProjection::isOutsideVerticalView(const float py, const float pz)
+{
+  // assuming the points' origin is centered on the camera
+  if (pz <= 0.0) return true;
+  // up in the camera coordinate is negative
+  if (py < fov_top_ * pz) return true;
+  if (py > fov_bottom_ * pz) return true;
+
+  // inside of the vertical view
+  return false;
+}
+
+bool CameraProjection::isOutsideFOV(const float px, const float py, const float pz)
+{
+  if (isOutsideHorizontalView(px, pz)) return true;
+  if (isOutsideVerticalView(py, pz)) return true;
+
+  // inside of the FOV
+  return false;
+}
+
+}  // namespace autoware::image_projection_based_fusion
diff --git a/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp b/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp
index bd4d57e45c582..ea0904215cb86 100644
--- a/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp
@@ -18,6 +18,7 @@
 #include
 #include
+#include
 
 #include
@@ -124,6 +125,34 @@ FusionNode<TargetMsg3D, ObjType, Msg2D>::FusionNode(
   timer_ = rclcpp::create_timer(
     this, get_clock(), period_ns, std::bind(&FusionNode::timer_callback, this));
 
+  // camera projection settings
+  camera_projectors_.resize(rois_number_);
+  point_project_to_unrectified_image_ =
+    declare_parameter<std::vector<bool>>("point_project_to_unrectified_image");
+
+  if (rois_number_ > point_project_to_unrectified_image_.size()) {
+    throw std::runtime_error(
+      "The number of point_project_to_unrectified_image does not match the number of rois topics.");
+  }
+  approx_camera_projection_ =
+    declare_parameter<std::vector<bool>>("approximate_camera_projection");
+  if (rois_number_ != approx_camera_projection_.size()) {
+    const std::size_t current_size = approx_camera_projection_.size();
+    RCLCPP_WARN(
+      this->get_logger(),
+      "The number of elements in approximate_camera_projection should be the same as in "
+      "rois_number. "
+      "It has %zu elements.",
+      current_size);
+    if (current_size < rois_number_) {
+      approx_camera_projection_.resize(rois_number_);
+      for (std::size_t i = current_size; i < rois_number_; i++) {
+        approx_camera_projection_.at(i) = true;
+      }
+    }
+  }
+  approx_grid_cell_w_size_ = declare_parameter<float>("approximation_grid_cell_width");
+  approx_grid_cell_h_size_ = declare_parameter<float>("approximation_grid_cell_height");
+
   // debugger
   if (declare_parameter("debug_mode", false)) {
     std::size_t image_buffer_size =
@@ -142,9 +171,6 @@ FusionNode<TargetMsg3D, ObjType, Msg2D>::FusionNode(
     time_keeper_ = std::make_shared<autoware::universe_utils::TimeKeeper>(time_keeper);
   }
 
-  point_project_to_unrectified_image_ =
-    declare_parameter<bool>("point_project_to_unrectified_image");
-
   // initialize debug tool
   {
     using autoware::universe_utils::DebugPublisher;
@@ -168,7 +194,18 @@ void FusionNode<TargetMsg3D, ObjType, Msg2D>::cameraInfoCallback(
   const sensor_msgs::msg::CameraInfo::ConstSharedPtr input_camera_info_msg,
   const std::size_t camera_id)
 {
-  camera_info_map_[camera_id] = *input_camera_info_msg;
+  // create the CameraProjection when the camera info arrives for the first time,
+  // assuming the camera info does not change while the node is running
+  if (
+    camera_info_map_.find(camera_id) == camera_info_map_.end() &&
+    checkCameraInfo(*input_camera_info_msg)) {
+    camera_projectors_.at(camera_id) = CameraProjection(
+      *input_camera_info_msg, approx_grid_cell_w_size_, approx_grid_cell_h_size_,
+      point_project_to_unrectified_image_.at(camera_id), approx_camera_projection_.at(camera_id));
+    camera_projectors_.at(camera_id).initialize();
+
+    camera_info_map_[camera_id] = *input_camera_info_msg;
+  }
 }
 
@@ -273,8 +310,7 @@ void FusionNode<TargetMsg3D, ObjType, Msg2D>::subCallback(
       }
 
       fuseOnSingleImage(
-        *input_msg, roi_i, *((cached_roi_msgs_.at(roi_i))[matched_stamp]),
-        camera_info_map_.at(roi_i), *output_msg);
+        *input_msg, roi_i, *((cached_roi_msgs_.at(roi_i))[matched_stamp]), *output_msg);
       (cached_roi_msgs_.at(roi_i)).erase(matched_stamp);
       is_fused_.at(roi_i) = true;
 
@@ -346,9 +382,7 @@ void FusionNode<TargetMsg3D, ObjType, Msg2D>::roiCallback(
       debugger_->clear();
     }
 
-    fuseOnSingleImage(
-      *(cached_msg_.second), roi_i, *input_roi_msg, camera_info_map_.at(roi_i),
-      *(cached_msg_.second));
+    fuseOnSingleImage(*(cached_msg_.second), roi_i, *input_roi_msg, *(cached_msg_.second));
     is_fused_.at(roi_i) = true;
 
     if (debug_publisher_) {
diff --git a/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp
index 4d12de2685c95..2837bac458541 100644
--- a/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp
@@ -161,13 +161,6 @@ PointPaintingFusionNode::PointPaintingFusionNode(const rclcpp::NodeOptions & opt
   sub_ = this->create_subscription<sensor_msgs::msg::PointCloud2>(
     "~/input/pointcloud", rclcpp::SensorDataQoS().keep_last(3), sub_callback);
 
-  tan_h_.resize(rois_number_);
-  for (std::size_t roi_i = 0; roi_i < rois_number_; ++roi_i) {
-    auto fx = camera_info_map_[roi_i].k.at(0);
-    auto x0 = camera_info_map_[roi_i].k.at(2);
-    tan_h_[roi_i] = x0 / fx;
-  }
-
   detection_class_remapper_.setParameters(
     allow_remapping_by_area_matrix, min_area_matrix, max_area_matrix);
 
@@ -260,9 +253,7 @@ void PointPaintingFusionNode::preprocess(sensor_msgs::msg::PointCloud2 & painted
 
 void PointPaintingFusionNode::fuseOnSingleImage(
   __attribute__((unused)) const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg,
-  __attribute__((unused)) const std::size_t image_id,
-  const DetectedObjectsWithFeature & input_roi_msg,
-  const sensor_msgs::msg::CameraInfo & camera_info,
+  const std::size_t image_id, const DetectedObjectsWithFeature & input_roi_msg,
   sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg)
 {
   if (painted_pointcloud_msg.data.empty() || painted_pointcloud_msg.fields.empty()) {
@@ -276,8 +267,6 @@ void PointPaintingFusionNode::fuseOnSingleImage(
     return;
   }
 
-  if (!checkCameraInfo(camera_info)) return;
-
   std::unique_ptr<ScopedTimeTrack> st_ptr;
   if (time_keeper_) st_ptr = std::make_unique<ScopedTimeTrack>(__func__, *time_keeper_);
 
@@ -312,9 +301,6 @@ void PointPaintingFusionNode::fuseOnSingleImage(
       .offset;
   const auto class_offset = painted_pointcloud_msg.fields.at(4).offset;
   const auto p_step = painted_pointcloud_msg.point_step;
-  // projection matrix
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
 
   Eigen::Vector3f point_lidar, point_camera;
   /** dc : don't care
@@ -346,24 +332,27 @@ dc | dc dc dc dc ||zc|
     p_y = point_camera.y();
     p_z = point_camera.z();
 
-    if (p_z <= 0.0 || p_x > (tan_h_.at(image_id) * p_z) || p_x < (-tan_h_.at(image_id) * p_z)) {
+    if (camera_projectors_[image_id].isOutsideHorizontalView(p_x, p_z)) {
       continue;
     }
+
     // project
-    Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
-      pinhole_camera_model, cv::Point3d(p_x, p_y, p_z), point_project_to_unrectified_image_);
-
-    // iterate 2d bbox
-    for (const auto & feature_object : objects) {
-      sensor_msgs::msg::RegionOfInterest roi = feature_object.feature.roi;
-      // paint current point if it is inside bbox
-      int label2d = feature_object.object.classification.front().label;
-      if (!isUnknown(label2d) && isInsideBbox(projected_point.x(), projected_point.y(), roi, 1.0)) {
-        // cppcheck-suppress invalidPointerCast
-        auto p_class = reinterpret_cast<float *>(&output[stride + class_offset]);
-        for (const auto & cls : isClassTable_) {
-          // add up the class values if the point belongs to multiple classes
-          *p_class = cls.second(label2d) ? (class_index_[cls.first] + *p_class) : *p_class;
+    Eigen::Vector2d projected_point;
+    if (camera_projectors_[image_id].calcImageProjectedPoint(
+          cv::Point3d(p_x, p_y, p_z), projected_point)) {
+      // iterate 2d bbox
+      for (const auto & feature_object : objects) {
+        sensor_msgs::msg::RegionOfInterest roi = feature_object.feature.roi;
+        // paint current point if it is inside bbox
+        int label2d = feature_object.object.classification.front().label;
+        if (
+          !isUnknown(label2d) && isInsideBbox(projected_point.x(), projected_point.y(), roi, 1.0)) {
+          // cppcheck-suppress invalidPointerCast
+          auto p_class = reinterpret_cast<float *>(&output[stride + class_offset]);
+          for (const auto & cls : isClassTable_) {
+            // add up the class values if the point belongs to multiple classes
+            *p_class = cls.second(label2d) ? (class_index_[cls.first] + *p_class) : *p_class;
+          }
+        }
+      }
 #if 0
@@ -375,15 +364,15 @@ dc | dc dc dc dc ||zc|
     }
   }
 
-  for (const auto & feature_object : input_roi_msg.feature_objects) {
-    debug_image_rois.push_back(feature_object.feature.roi);
-  }
-
   if (debugger_) {
     std::unique_ptr<ScopedTimeTrack> inner_st_ptr;
     if (time_keeper_)
       inner_st_ptr = std::make_unique<ScopedTimeTrack>("publish debug message", *time_keeper_);
 
+    for (const auto & feature_object : input_roi_msg.feature_objects) {
+      debug_image_rois.push_back(feature_object.feature.roi);
+    }
+
     debugger_->image_rois_ = debug_image_rois;
     debugger_->obstacle_points_ = debug_image_points;
     debugger_->publishImage(image_id, input_roi_msg.header.stamp);
diff --git a/perception/autoware_image_projection_based_fusion/src/roi_cluster_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/roi_cluster_fusion/node.cpp
index 7520647544684..32db5319bb487 100644
--- a/perception/autoware_image_projection_based_fusion/src/roi_cluster_fusion/node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/roi_cluster_fusion/node.cpp
@@ -95,16 +95,12 @@ void RoiClusterFusionNode::postprocess(DetectedObjectsWithFeature & output_clust
 
 void RoiClusterFusionNode::fuseOnSingleImage(
   const DetectedObjectsWithFeature & input_cluster_msg, const std::size_t image_id,
-  const DetectedObjectsWithFeature & input_roi_msg,
-  const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjectsWithFeature & output_cluster_msg)
+  const DetectedObjectsWithFeature & input_roi_msg, DetectedObjectsWithFeature & output_cluster_msg)
 {
-  if (!checkCameraInfo(camera_info)) return;
-
   std::unique_ptr<ScopedTimeTrack> st_ptr;
   if (time_keeper_) st_ptr = std::make_unique<ScopedTimeTrack>(__func__, *time_keeper_);
 
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
+  const sensor_msgs::msg::CameraInfo & camera_info = camera_projectors_[image_id].getCameraInfo();
 
   // get transform from cluster frame id to camera optical frame id
   geometry_msgs::msg::TransformStamped transform_stamped;
@@ -152,18 +148,17 @@ void RoiClusterFusionNode::fuseOnSingleImage(
       continue;
     }
 
-    Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
-      pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z),
-      point_project_to_unrectified_image_);
-    if (
-      0 <= static_cast<int>(projected_point.x()) &&
-      static_cast<int>(projected_point.x()) <= static_cast<int>(camera_info.width) - 1 &&
-      0 <= static_cast<int>(projected_point.y()) &&
-      static_cast<int>(projected_point.y()) <= static_cast<int>(camera_info.height) - 1) {
-      min_x = std::min(static_cast<int>(projected_point.x()), min_x);
-      min_y = std::min(static_cast<int>(projected_point.y()), min_y);
-      max_x = std::max(static_cast<int>(projected_point.x()), max_x);
-      max_y = std::max(static_cast<int>(projected_point.y()), max_y);
+    Eigen::Vector2d projected_point;
+    if (camera_projectors_[image_id].calcImageProjectedPoint(
+          cv::Point3d(*iter_x, *iter_y, *iter_z), projected_point)) {
+      const int px = static_cast<int>(projected_point.x());
+      const int py = static_cast<int>(projected_point.y());
+
+      min_x = std::min(px, min_x);
+      min_y = std::min(py, min_y);
+      max_x = std::max(px, max_x);
+      max_y = std::max(py, max_y);
+
       projected_points.push_back(projected_point);
       if (debugger_) debugger_->obstacle_points_.push_back(projected_point);
     }
diff --git a/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
index 035bc259ab73c..7be18d59fdbf1 100644
--- a/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
@@ -84,14 +84,11 @@ void RoiDetectedObjectFusionNode::preprocess(DetectedObjects & output_msg)
 
 void RoiDetectedObjectFusionNode::fuseOnSingleImage(
   const DetectedObjects & input_object_msg, const std::size_t image_id,
   const DetectedObjectsWithFeature & input_roi_msg,
-  const sensor_msgs::msg::CameraInfo & camera_info,
   DetectedObjects & output_object_msg __attribute__((unused)))
 {
   std::unique_ptr<ScopedTimeTrack> st_ptr;
   if (time_keeper_) st_ptr = std::make_unique<ScopedTimeTrack>(__func__, *time_keeper_);
 
-  if (!checkCameraInfo(camera_info)) return;
-
   Eigen::Affine3d object2camera_affine;
   {
     const auto transform_stamped_optional = getTransformStamped(
@@ -103,12 +100,8 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
     object2camera_affine = transformToEigen(transform_stamped_optional.value().transform);
   }
 
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
-
-  const auto object_roi_map = generateDetectedObjectRoIs(
-    input_object_msg, static_cast<double>(camera_info.width),
-    static_cast<double>(camera_info.height), object2camera_affine, pinhole_camera_model);
+  const auto object_roi_map =
+    generateDetectedObjectRoIs(input_object_msg, image_id, object2camera_affine);
   fuseObjectsOnImage(input_object_msg, input_roi_msg.feature_objects, object_roi_map);
 
   if (debugger_) {
@@ -122,9 +115,8 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
 
 std::map<std::size_t, DetectedObjectWithFeature>
 RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
-  const DetectedObjects & input_object_msg, const double image_width, const double image_height,
-  const Eigen::Affine3d & object2camera_affine,
-  const image_geometry::PinholeCameraModel & pinhole_camera_model)
+  const DetectedObjects & input_object_msg, const std::size_t & image_id,
+  const Eigen::Affine3d & object2camera_affine)
 {
   std::unique_ptr<ScopedTimeTrack> st_ptr;
   if (time_keeper_) st_ptr = std::make_unique<ScopedTimeTrack>(__func__, *time_keeper_);
 
@@ -139,6 +131,10 @@ RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
     return object_roi_map;
   }
   const auto & passthrough_object_flags = passthrough_object_flags_map_.at(timestamp_nsec);
+  const sensor_msgs::msg::CameraInfo & camera_info = camera_projectors_[image_id].getCameraInfo();
+  const double image_width = static_cast<double>(camera_info.width);
+  const double image_height = static_cast<double>(camera_info.height);
+
   for (std::size_t obj_i = 0; obj_i < input_object_msg.objects.size(); ++obj_i) {
     std::vector<Eigen::Vector3d> vertices_camera_coord;
     const auto & object = input_object_msg.objects.at(obj_i);
@@ -165,18 +161,17 @@ RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
       continue;
     }
 
-    Eigen::Vector2d proj_point = calcRawImageProjectedPoint(
-      pinhole_camera_model, cv::Point3d(point.x(), point.y(), point.z()),
-      point_project_to_unrectified_image_);
+    Eigen::Vector2d proj_point;
+    if (camera_projectors_[image_id].calcImageProjectedPoint(
+          cv::Point3d(point.x(), point.y(), point.z()), proj_point)) {
+      const double px = proj_point.x();
+      const double py = proj_point.y();
 
-    min_x = std::min(proj_point.x(), min_x);
-    min_y = std::min(proj_point.y(), min_y);
-    max_x = std::max(proj_point.x(), max_x);
-    max_y = std::max(proj_point.y(), max_y);
+      min_x = std::min(px, min_x);
+      min_y = std::min(py, min_y);
+      max_x = std::max(px, max_x);
+      max_y = std::max(py, max_y);
 
-    if (
-      proj_point.x() >= 0 && proj_point.x() < image_width && proj_point.y() >= 0 &&
-      proj_point.y() < image_height) {
       point_on_image_cnt++;
 
       if (debugger_) {
diff --git a/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
index 206a5f77a0235..998072d87774e 100644
--- a/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
@@ -86,10 +86,8 @@ void RoiPointCloudFusionNode::postprocess(
   }
 }
 void RoiPointCloudFusionNode::fuseOnSingleImage(
-  const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg,
-  __attribute__((unused)) const std::size_t image_id,
+  const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const std::size_t image_id,
   const DetectedObjectsWithFeature & input_roi_msg,
-  const sensor_msgs::msg::CameraInfo & camera_info,
   __attribute__((unused)) sensor_msgs::msg::PointCloud2 & output_pointcloud_msg)
 {
   std::unique_ptr<ScopedTimeTrack> st_ptr;
@@ -98,7 +96,6 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
   if (input_pointcloud_msg.data.empty()) {
     return;
   }
-  if (!checkCameraInfo(camera_info)) return;
 
   std::vector<DetectedObjectWithFeature> output_objs;
   std::vector<sensor_msgs::msg::RegionOfInterest> debug_image_rois;
@@ -122,10 +119,6 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
     return;
   }
 
-  // transform pointcloud to camera optical frame id
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
-
   geometry_msgs::msg::TransformStamped transform_stamped;
   {
     const auto transform_stamped_optional = getTransformStamped(
@@ -136,10 +129,13 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
     }
     transform_stamped = transform_stamped_optional.value();
   }
-  int point_step = input_pointcloud_msg.point_step;
-  int x_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "x")].offset;
-  int y_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "y")].offset;
-  int z_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "z")].offset;
+  const int point_step = input_pointcloud_msg.point_step;
+  const int x_offset =
+    input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "x")].offset;
+  const int y_offset =
+    input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "y")].offset;
+  const int z_offset =
+    input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "z")].offset;
 
   sensor_msgs::msg::PointCloud2 transformed_cloud;
   tf2::doTransform(input_pointcloud_msg, transformed_cloud, transform_stamped);
@@ -164,33 +160,36 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
     if (transformed_z <= 0.0) {
       continue;
     }
-    Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
-      pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z),
-      point_project_to_unrectified_image_);
-    for (std::size_t i = 0; i < output_objs.size(); ++i) {
-      auto & feature_obj = output_objs.at(i);
-      const auto & check_roi = feature_obj.feature.roi;
-      auto & cluster = clusters.at(i);
-
-      if (
-        clusters_data_size.at(i) >=
-        static_cast<size_t>(max_cluster_size_) * static_cast<size_t>(point_step)) {
-        continue;
-      }
-      if (
-        check_roi.x_offset <= projected_point.x() && check_roi.y_offset <= projected_point.y() &&
-        check_roi.x_offset + check_roi.width >= projected_point.x() &&
-        check_roi.y_offset + check_roi.height >= projected_point.y()) {
-        std::memcpy(
-          &cluster.data[clusters_data_size.at(i)], &input_pointcloud_msg.data[offset], point_step);
-        clusters_data_size.at(i) += point_step;
+
+    Eigen::Vector2d projected_point;
+    if (camera_projectors_[image_id].calcImageProjectedPoint(
+          cv::Point3d(transformed_x, transformed_y, transformed_z), projected_point)) {
+      for (std::size_t i = 0; i < output_objs.size(); ++i) {
+        auto & feature_obj = output_objs.at(i);
+        const auto & check_roi = feature_obj.feature.roi;
+        auto & cluster = clusters.at(i);
+
+        const double px = projected_point.x();
+        const double py = projected_point.y();
+
+        if (
+          clusters_data_size.at(i) >=
+          static_cast<size_t>(max_cluster_size_) * static_cast<size_t>(point_step)) {
+          continue;
+        }
+        if (
+          check_roi.x_offset <= px && check_roi.y_offset <= py &&
+          check_roi.x_offset + check_roi.width >= px &&
+          check_roi.y_offset + check_roi.height >= py) {
+          std::memcpy(
+            &cluster.data[clusters_data_size.at(i)], &input_pointcloud_msg.data[offset],
+            point_step);
+          clusters_data_size.at(i) += point_step;
+        }
       }
-    }
-    if (debugger_) {
-      // add all points inside image to debug
-      if (
-        projected_point.x() > 0 && projected_point.x() < camera_info.width &&
-        projected_point.y() > 0 && projected_point.y() < camera_info.height) {
+
+      if (debugger_) {
+        // add all points inside image to debug
         debug_image_points.push_back(projected_point);
       }
     }
diff --git a/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
index afeff213c0afe..486ae291abc6a 100644
--- a/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
@@ -87,9 +87,8 @@ void SegmentPointCloudFusionNode::postprocess(PointCloud2 & pointcloud_msg)
 }
 
 void SegmentPointCloudFusionNode::fuseOnSingleImage(
-  const PointCloud2 & input_pointcloud_msg, __attribute__((unused)) const std::size_t image_id,
-  [[maybe_unused]] const Image & input_mask, __attribute__((unused)) const CameraInfo & camera_info,
-  __attribute__((unused)) PointCloud2 & output_cloud)
+  const PointCloud2 & input_pointcloud_msg, const std::size_t image_id,
+  [[maybe_unused]] const Image & input_mask, __attribute__((unused)) PointCloud2 & output_cloud)
 {
   std::unique_ptr<ScopedTimeTrack> st_ptr;
   if (time_keeper_) st_ptr = std::make_unique<ScopedTimeTrack>(__func__, *time_keeper_);
 
@@ -97,10 +96,11 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
   if (input_pointcloud_msg.data.empty()) {
     return;
   }
-  if (!checkCameraInfo(camera_info)) return;
   if (input_mask.height == 0 || input_mask.width == 0) {
     return;
   }
+
+  const sensor_msgs::msg::CameraInfo & camera_info = camera_projectors_[image_id].getCameraInfo();
 
   std::vector<uint8_t> mask_data(input_mask.data.begin(), input_mask.data.end());
   cv::Mat mask = perception_utils::runLengthDecoder(mask_data, input_mask.height, input_mask.width);
@@ -115,8 +115,6 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
   const int orig_height = camera_info.height;
   // resize mask to the same size as the camera image
   cv::resize(mask, mask, cv::Size(orig_width, orig_height), 0, 0, cv::INTER_NEAREST);
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
 
   geometry_msgs::msg::TransformStamped transform_stamped;
   // transform pointcloud from frame id to camera optical frame id
@@ -151,13 +149,9 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
       continue;
     }
 
-    Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
-      pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z),
-      point_project_to_unrectified_image_);
-
-    bool is_inside_image = projected_point.x() > 0 && projected_point.x() < camera_info.width &&
-                           projected_point.y() > 0 && projected_point.y() < camera_info.height;
-    if (!is_inside_image) {
+    Eigen::Vector2d projected_point;
+    if (!camera_projectors_[image_id].calcImageProjectedPoint(
+          cv::Point3d(transformed_x, transformed_y, transformed_z), projected_point)) {
       continue;
     }
 
diff --git a/perception/autoware_image_projection_based_fusion/src/utils/utils.cpp b/perception/autoware_image_projection_based_fusion/src/utils/utils.cpp
index 338a5605e5a79..4456d25b18167 100644
--- a/perception/autoware_image_projection_based_fusion/src/utils/utils.cpp
+++ b/perception/autoware_image_projection_based_fusion/src/utils/utils.cpp
@@ -44,20 +44,6 @@ bool checkCameraInfo(const sensor_msgs::msg::CameraInfo & camera_info)
   return true;
 }
 
-Eigen::Vector2d calcRawImageProjectedPoint(
-  const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d,
-  const bool & unrectify)
-{
-  const cv::Point2d rectified_image_point = pinhole_camera_model.project3dToPixel(point3d);
-
-  if (!unrectify) {
-    return Eigen::Vector2d(rectified_image_point.x, rectified_image_point.y);
-  }
-  const cv::Point2d raw_image_point = pinhole_camera_model.unrectifyPoint(rectified_image_point);
-
-  return Eigen::Vector2d(raw_image_point.x, raw_image_point.y);
-}
-
 std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
   const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,
   const std::string & source_frame_id, const rclcpp::Time & time)
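
---

Reviewer note (not part of the patch): the grid cache above is easiest to sanity-check outside of ROS. The sketch below mirrors the indexing used by `initializeCache()` and `calcRawImageProjectedPointWithApproximation()`. The image size, cell size, and `fake_unrectify` mapping are made-up stand-ins for `image_geometry::PinholeCameraModel::unrectifyPoint`, so treat it as a minimal illustration of the technique rather than the node's actual code.

```cpp
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Two floats per cached entry, matching the PixelPos struct in the patch.
struct Pixel
{
  float x;
  float y;
};

// Hypothetical stand-in for PinholeCameraModel::unrectifyPoint(): any smooth
// per-pixel mapping is enough to exercise the cache logic.
Pixel fake_unrectify(float x, float y)
{
  return {x + 0.001f * x * y, y + 0.0005f * x};
}

int main()
{
  const int image_width = 1280, image_height = 960;     // assumed camera resolution
  const float cell_w = 1.0f, cell_h = 1.0f;              // approximation_grid_cell_width/height
  const float inv_cell_w = 1.0f / cell_w, inv_cell_h = 1.0f / cell_h;
  const int grid_w = static_cast<int>(std::ceil(image_width / cell_w));
  const int grid_h = static_cast<int>(std::ceil(image_height / cell_h));

  // Precompute the projection of every grid cell center, as initializeCache() does.
  std::vector<Pixel> cache(static_cast<std::size_t>(grid_w) * grid_h);
  for (int iy = 0; iy < grid_h; ++iy) {
    for (int ix = 0; ix < grid_w; ++ix) {
      const float cx = (ix + 0.5f) * cell_w;
      const float cy = (iy + 0.5f) * cell_h;
      cache[static_cast<std::size_t>(iy) * grid_w + ix] = fake_unrectify(cx, cy);
    }
  }

  // Lookup: floor a rectified pixel into its containing cell and reuse the cached
  // center, mirroring calcRawImageProjectedPointWithApproximation().
  const double rect_x = 640.3, rect_y = 480.7;
  const int gx = static_cast<int>(std::floor(rect_x * inv_cell_w));
  const int gy = static_cast<int>(std::floor(rect_y * inv_cell_h));
  if (gx >= 0 && gx < grid_w && gy >= 0 && gy < grid_h) {
    const Pixel & p = cache[static_cast<std::size_t>(gy) * grid_w + gx];
    std::cout << "approx raw pixel: " << p.x << ", " << p.y << "\n";
  }
  return 0;
}
```

The trade-off is the usual one: each entry costs two floats, so a 1.0-pixel grid on a 1280x960 image caches about 1.2 million pixel pairs per camera, while larger cells shrink the cache and the initialization time at the cost of projection accuracy.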