Commit 5a44be7

Merge remote-tracking branch 'origin/awf-latest' into tmp/shallow_nest

mitukou1109 committed Jun 27, 2024
2 parents 61ee56a + 0cc45ae
Showing 46 changed files with 248 additions and 195 deletions.
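Across the perception files below, the change is uniform: projection matrices hand-filled element-by-element from `camera_info.p`, followed by a divide-by-depth, give way to `image_geometry::PinholeCameraModel` and a new shared helper, `calcRawImageProjectedPoint` (added in `utils.cpp` further down). A condensed sketch of the two patterns; the wrapper functions and variable names are illustrative, not taken from the diff:

#include <Eigen/Dense>
#include <image_geometry/pinhole_camera_model.h>
#include <opencv2/core/types.hpp>
#include <sensor_msgs/msg/camera_info.hpp>

// Old pattern: multiply by the projection matrix P and normalize by depth.
// This lands in *rectified* image coordinates and ignores lens distortion.
Eigen::Vector2d projectOld(const Eigen::Matrix4d & projection, double x, double y, double z)
{
  const Eigen::Vector4d p = projection * Eigen::Vector4d(x, y, z, 1.0);
  return Eigen::Vector2d(p.x() / p.z(), p.y() / p.z());
}

// New pattern: project through the camera model, then unrectify, so the
// point lands in *raw* image coordinates.
Eigen::Vector2d projectNew(const sensor_msgs::msg::CameraInfo & camera_info, double x, double y, double z)
{
  image_geometry::PinholeCameraModel model;
  model.fromCameraInfo(camera_info);
  const cv::Point2d rectified = model.project3dToPixel(cv::Point3d(x, y, z));
  const cv::Point2d raw = model.unrectifyPoint(rectified);
  return Eigen::Vector2d(raw.x, raw.y);
}

The practical difference is that `project3dToPixel` alone lands in the rectified image, while the extra `unrectifyPoint` step re-applies the distortion model so projected points line up with ROIs produced on the raw camera image.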
@@ -18,6 +18,8 @@
 #include "autoware/universe_utils/ros/debug_publisher.hpp"
 #include "image_projection_based_fusion/fusion_node.hpp"
 
+#include <image_projection_based_fusion/utils/utils.hpp>
+
 #include "autoware_perception_msgs/msg/object_classification.hpp"
 
 #include <map>
@@ -45,7 +47,8 @@ class RoiDetectedObjectFusionNode
 
   std::map<std::size_t, DetectedObjectWithFeature> generateDetectedObjectRoIs(
     const DetectedObjects & input_object_msg, const double image_width, const double image_height,
-    const Eigen::Affine3d & object2camera_affine, const Eigen::Matrix4d & camera_projection);
+    const Eigen::Affine3d & object2camera_affine,
+    const image_geometry::PinholeCameraModel & pinhole_camera_model);
 
   void fuseObjectsOnImage(
     const DetectedObjects & input_object_msg,
@@ -17,6 +17,8 @@
 
 #include "image_projection_based_fusion/fusion_node.hpp"
 
+#include <image_projection_based_fusion/utils/utils.hpp>
+
 #include <string>
 #include <vector>
 namespace image_projection_based_fusion
@@ -17,6 +17,8 @@
 
 #include "image_projection_based_fusion/fusion_node.hpp"
 
+#include <image_projection_based_fusion/utils/utils.hpp>
+
 #include <string>
 #include <vector>
 
@@ -42,6 +42,7 @@
 #include "autoware_perception_msgs/msg/shape.hpp"
 #include "tier4_perception_msgs/msg/detected_object_with_feature.hpp"
 
+#include <image_geometry/pinhole_camera_model.h>
 #include <pcl/point_cloud.h>
 #include <pcl/point_types.h>
 #include <pcl_conversions/pcl_conversions.h>

@@ -60,6 +61,10 @@ struct PointData
   float distance;
   size_t orig_index;
 };
+
+Eigen::Vector2d calcRawImageProjectedPoint(
+  const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d);
+
 std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
   const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,
   const std::string & source_frame_id, const rclcpp::Time & time);
1 change: 1 addition & 0 deletions perception/image_projection_based_fusion/package.xml
@@ -21,6 +21,7 @@
   <depend>autoware_universe_utils</depend>
   <depend>cv_bridge</depend>
   <depend>euclidean_cluster</depend>
+  <depend>image_geometry</depend>
   <depend>image_transport</depend>
   <depend>lidar_centerpoint</depend>
   <depend>message_filters</depend>
@@ -307,9 +307,9 @@ void PointPaintingFusionNode::fuseOnSingleImage(
   const auto class_offset = painted_pointcloud_msg.fields.at(4).offset;
   const auto p_step = painted_pointcloud_msg.point_step;
   // projection matrix
-  Eigen::Matrix3f camera_projection;  // use only x,y,z
-  camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
-    camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6);
+  image_geometry::PinholeCameraModel pinhole_camera_model;
+  pinhole_camera_model.fromCameraInfo(camera_info);
 
   Eigen::Vector3f point_lidar, point_camera;
   /** dc : don't care

@@ -342,15 +342,15 @@ dc | dc dc dc dc ||zc|
         continue;
       }
       // project
-      Eigen::Vector3f normalized_projected_point = camera_projection * Eigen::Vector3f(p_x, p_y, p_z);
+      Eigen::Vector2d projected_point =
+        calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(p_x, p_y, p_z));
 
       // iterate 2d bbox
       for (const auto & feature_object : objects) {
         sensor_msgs::msg::RegionOfInterest roi = feature_object.feature.roi;
         // paint current point if it is inside bbox
         int label2d = feature_object.object.classification.front().label;
-        if (
-          !isUnknown(label2d) &&
-          isInsideBbox(normalized_projected_point.x(), normalized_projected_point.y(), roi, p_z)) {
+        if (!isUnknown(label2d) && isInsideBbox(projected_point.x(), projected_point.y(), roi, p_z)) {
           data = &painted_pointcloud_msg.data[0];
           auto p_class = reinterpret_cast<float *>(&output[stride + class_offset]);
           for (const auto & cls : isClassTable_) {

@@ -361,7 +361,7 @@ dc | dc dc dc dc ||zc|
 #if 0
         // Parallelizing loop don't support push_back
         if (debugger_) {
-          debug_image_points.push_back(normalized_projected_point);
+          debug_image_points.push_back(projected_point);
         }
 #endif
       }
@@ -83,10 +83,8 @@ void RoiClusterFusionNode::fuseOnSingleImage(
   const DetectedObjectsWithFeature & input_roi_msg,
   const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjectsWithFeature & output_cluster_msg)
 {
-  Eigen::Matrix4d projection;
-  projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
-    camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
-    camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11);
+  image_geometry::PinholeCameraModel pinhole_camera_model;
+  pinhole_camera_model.fromCameraInfo(camera_info);
 
   // get transform from cluster frame id to camera optical frame id
   geometry_msgs::msg::TransformStamped transform_stamped;

@@ -133,23 +131,19 @@
       continue;
     }
 
-    Eigen::Vector4d projected_point =
-      projection * Eigen::Vector4d(*iter_x, *iter_y, *iter_z, 1.0);
-    Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
-      projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
+    Eigen::Vector2d projected_point =
+      calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z));
     if (
-      0 <= static_cast<int>(normalized_projected_point.x()) &&
-      static_cast<int>(normalized_projected_point.x()) <=
-        static_cast<int>(camera_info.width) - 1 &&
-      0 <= static_cast<int>(normalized_projected_point.y()) &&
-      static_cast<int>(normalized_projected_point.y()) <=
-        static_cast<int>(camera_info.height) - 1) {
-      min_x = std::min(static_cast<int>(normalized_projected_point.x()), min_x);
-      min_y = std::min(static_cast<int>(normalized_projected_point.y()), min_y);
-      max_x = std::max(static_cast<int>(normalized_projected_point.x()), max_x);
-      max_y = std::max(static_cast<int>(normalized_projected_point.y()), max_y);
-      projected_points.push_back(normalized_projected_point);
-      if (debugger_) debugger_->obstacle_points_.push_back(normalized_projected_point);
+      0 <= static_cast<int>(projected_point.x()) &&
+      static_cast<int>(projected_point.x()) <= static_cast<int>(camera_info.width) - 1 &&
+      0 <= static_cast<int>(projected_point.y()) &&
+      static_cast<int>(projected_point.y()) <= static_cast<int>(camera_info.height) - 1) {
+      min_x = std::min(static_cast<int>(projected_point.x()), min_x);
+      min_y = std::min(static_cast<int>(projected_point.y()), min_y);
+      max_x = std::max(static_cast<int>(projected_point.x()), max_x);
+      max_y = std::max(static_cast<int>(projected_point.y()), max_y);
+      projected_points.push_back(projected_point);
+      if (debugger_) debugger_->obstacle_points_.push_back(projected_point);
     }
   }
   if (projected_points.empty()) {
@@ -86,15 +86,12 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
     object2camera_affine = transformToEigen(transform_stamped_optional.value().transform);
   }
 
-  Eigen::Matrix4d camera_projection;
-  camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
-    camera_info.p.at(3), camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6),
-    camera_info.p.at(7), camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10),
-    camera_info.p.at(11);
+  image_geometry::PinholeCameraModel pinhole_camera_model;
+  pinhole_camera_model.fromCameraInfo(camera_info);
 
   const auto object_roi_map = generateDetectedObjectRoIs(
     input_object_msg, static_cast<double>(camera_info.width),
-    static_cast<double>(camera_info.height), object2camera_affine, camera_projection);
+    static_cast<double>(camera_info.height), object2camera_affine, pinhole_camera_model);
   fuseObjectsOnImage(input_object_msg, input_roi_msg.feature_objects, object_roi_map);
 
   if (debugger_) {

@@ -109,7 +106,8 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
 std::map<std::size_t, DetectedObjectWithFeature>
 RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
   const DetectedObjects & input_object_msg, const double image_width, const double image_height,
-  const Eigen::Affine3d & object2camera_affine, const Eigen::Matrix4d & camera_projection)
+  const Eigen::Affine3d & object2camera_affine,
+  const image_geometry::PinholeCameraModel & pinhole_camera_model)
 {
   std::map<std::size_t, DetectedObjectWithFeature> object_roi_map;
   int64_t timestamp_nsec =

@@ -148,13 +146,8 @@
       continue;
     }
 
-    Eigen::Vector2d proj_point;
-    {
-      Eigen::Vector4d proj_point_hom =
-        camera_projection * Eigen::Vector4d(point.x(), point.y(), point.z(), 1.0);
-      proj_point = Eigen::Vector2d(
-        proj_point_hom.x() / (proj_point_hom.z()), proj_point_hom.y() / (proj_point_hom.z()));
-    }
+    Eigen::Vector2d proj_point = calcRawImageProjectedPoint(
+      pinhole_camera_model, cv::Point3d(point.x(), point.y(), point.z()));
 
     min_x = std::min(proj_point.x(), min_x);
     min_y = std::min(proj_point.y(), min_y);
@@ -101,10 +101,9 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
   }
 
   // transform pointcloud to camera optical frame id
-  Eigen::Matrix4d projection;
-  projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
-    camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
-    camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11);
+  image_geometry::PinholeCameraModel pinhole_camera_model;
+  pinhole_camera_model.fromCameraInfo(camera_info);
+
   geometry_msgs::msg::TransformStamped transform_stamped;
   {
     const auto transform_stamped_optional = getTransformStamped(

@@ -143,10 +142,8 @@
       if (transformed_z <= 0.0) {
         continue;
       }
-      Eigen::Vector4d projected_point =
-        projection * Eigen::Vector4d(transformed_x, transformed_y, transformed_z, 1.0);
-      Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
-        projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
+      Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
+        pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z));
       for (std::size_t i = 0; i < output_objs.size(); ++i) {
         auto & feature_obj = output_objs.at(i);
         const auto & check_roi = feature_obj.feature.roi;

@@ -156,10 +153,9 @@
           continue;
         }
         if (
-          check_roi.x_offset <= normalized_projected_point.x() &&
-          check_roi.y_offset <= normalized_projected_point.y() &&
-          check_roi.x_offset + check_roi.width >= normalized_projected_point.x() &&
-          check_roi.y_offset + check_roi.height >= normalized_projected_point.y()) {
+          check_roi.x_offset <= projected_point.x() && check_roi.y_offset <= projected_point.y() &&
+          check_roi.x_offset + check_roi.width >= projected_point.x() &&
+          check_roi.y_offset + check_roi.height >= projected_point.y()) {
           std::memcpy(
             &cluster.data[clusters_data_size.at(i)], &input_pointcloud_msg.data[offset], point_step);
           clusters_data_size.at(i) += point_step;
@@ -65,11 +65,9 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
   if (mask.cols == 0 || mask.rows == 0) {
     return;
   }
-  Eigen::Matrix4d projection;
-  projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
-    camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
-    camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11), 0.0, 0.0,
-    0.0, 1.0;
+  image_geometry::PinholeCameraModel pinhole_camera_model;
+  pinhole_camera_model.fromCameraInfo(camera_info);
+
   geometry_msgs::msg::TransformStamped transform_stamped;
   // transform pointcloud from frame id to camera optical frame id
   {

@@ -113,14 +111,11 @@
     continue;
     }
 
-    Eigen::Vector4d projected_point =
-      projection * Eigen::Vector4d(transformed_x, transformed_y, transformed_z, 1.0);
-    Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
-      projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
+    Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
+      pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z));
 
-    bool is_inside_image =
-      normalized_projected_point.x() > 0 && normalized_projected_point.x() < camera_info.width &&
-      normalized_projected_point.y() > 0 && normalized_projected_point.y() < camera_info.height;
+    bool is_inside_image = projected_point.x() > 0 && projected_point.x() < camera_info.width &&
+                           projected_point.y() > 0 && projected_point.y() < camera_info.height;
     if (!is_inside_image) {
       copyPointCloud(
         input_pointcloud_msg, point_step, global_offset, output_cloud, output_pointcloud_size);

@@ -129,8 +124,7 @@
 
     // skip filtering pointcloud where semantic id out of the defined list
     uint8_t semantic_id = mask.at<uint8_t>(
-      static_cast<uint16_t>(normalized_projected_point.y()),
-      static_cast<uint16_t>(normalized_projected_point.x()));
+      static_cast<uint16_t>(projected_point.y()), static_cast<uint16_t>(projected_point.x()));
     if (static_cast<size_t>(semantic_id) >= filter_semantic_label_target_.size()) {
       copyPointCloud(
         input_pointcloud_msg, point_step, global_offset, output_cloud, output_pointcloud_size);
10 changes: 10 additions & 0 deletions perception/image_projection_based_fusion/src/utils/utils.cpp
@@ -13,8 +13,18 @@
 // limitations under the License.
 
 #include "image_projection_based_fusion/utils/utils.hpp"
 
 namespace image_projection_based_fusion
 {
+Eigen::Vector2d calcRawImageProjectedPoint(
+  const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d)
+{
+  const cv::Point2d rectified_image_point = pinhole_camera_model.project3dToPixel(point3d);
+
+  const cv::Point2d raw_image_point = pinhole_camera_model.unrectifyPoint(rectified_image_point);
+
+  return Eigen::Vector2d(raw_image_point.x, raw_image_point.y);
+}
+
 std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
   const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,
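For reference, a self-contained usage sketch of the two-step projection the helper performs (project into the rectified image via `P`, then map back through the distortion model). All camera parameters below are synthetic, chosen only to make the example compile and run:

#include <cstdio>
#include <image_geometry/pinhole_camera_model.h>
#include <opencv2/core/types.hpp>
#include <sensor_msgs/msg/camera_info.hpp>

int main()
{
  // Synthetic 640x480 pinhole camera with mild radial distortion
  // (illustrative values only, not taken from this commit).
  sensor_msgs::msg::CameraInfo info;
  info.width = 640;
  info.height = 480;
  info.distortion_model = "plumb_bob";
  info.d = {0.1, -0.05, 0.0, 0.0, 0.0};
  info.k = {500.0, 0.0, 320.0, 0.0, 500.0, 240.0, 0.0, 0.0, 1.0};
  info.r = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
  info.p = {500.0, 0.0, 320.0, 0.0, 0.0, 500.0, 240.0, 0.0, 0.0, 0.0, 1.0, 0.0};

  image_geometry::PinholeCameraModel model;
  model.fromCameraInfo(info);

  // A point 1 m right, 0.5 m down, 5 m ahead in the camera optical frame.
  const cv::Point3d p3d(1.0, 0.5, 5.0);
  const cv::Point2d rectified = model.project3dToPixel(p3d);  // uses P
  const cv::Point2d raw = model.unrectifyPoint(rectified);    // applies D

  // With non-zero distortion the two differ; with d all zeros they coincide.
  std::printf(
    "rectified: (%.2f, %.2f)  raw: (%.2f, %.2f)\n", rectified.x, rectified.y, raw.x, raw.y);
  return 0;
}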
1 change: 0 additions & 1 deletion planning/.pages
@@ -79,7 +79,6 @@ nav:
       - 'RTC Interface': planning/autoware_rtc_interface
   - 'Additional Tools':
       - 'Remaining Distance Time Calculator': planning/autoware_remaining_distance_time_calculator
-      - 'RTC Replayer': planning/rtc_replayer
   - 'Planning Debug Tools':
       - 'About Planning Debug Tools': https://github.com/autowarefoundation/autoware_tools/tree/main/planning/planning_debug_tools
      - 'Stop Reason Visualizer': https://github.com/autowarefoundation/autoware_tools/blob/main/planning/planning_debug_tools/doc-stop-reason-visualizer.md
4 changes: 4 additions & 0 deletions planning/autoware_rtc_interface/README.md
@@ -185,6 +185,10 @@ Return `true` if `uuid` is registered.
 If `uuid` is registered, return `true`.
 If not, return `false`.
 
+## Debugging Tools
+
+The [RTC replayer](https://github.com/autowarefoundation/autoware_tools/tree/main/planning/autoware_rtc_replayer) is a debugging tool for the RTC interface.
+
 ## Assumptions / Known limits
 
 ## Future extensions / Unimplemented parts
@@ -382,13 +382,14 @@ In addition, the safety check has a time hysteresis, and if the path is judged "
 
 ### Parameters for safety check
 
-| Name                     | Unit | Type   | Description                                                                                               | Default value                |
-| :----------------------- | :--- | :----- | :-------------------------------------------------------------------------------------------------------- | :--------------------------- |
-| enable_safety_check      | [-]  | bool   | flag whether to use safety check                                                                          | true                         |
-| method                   | [-]  | string | method for safety check. `RSS` or `integral_predicted_polygon`                                            | `integral_predicted_polygon` |
-| keep_unsafe_time         | [s]  | double | safety check Hysteresis time. if the path is judged "safe" for the time it is finally treated as "safe". | 3.0                          |
-| check_all_predicted_path | -    | bool   | Flag to check all predicted paths                                                                         | true                         |
-| publish_debug_marker     | -    | bool   | Flag to publish debug markers                                                                             | false                        |
+| Name                               | Unit  | Type   | Description                                                                                                                     | Default value                |
+| :--------------------------------- | :---- | :----- | :------------------------------------------------------------------------------------------------------------------------------ | :--------------------------- |
+| enable_safety_check                | [-]   | bool   | Flag whether to use the safety check                                                                                            | true                         |
+| method                             | [-]   | string | Method for the safety check: `RSS` or `integral_predicted_polygon`                                                              | `integral_predicted_polygon` |
+| keep_unsafe_time                   | [s]   | double | Hysteresis time for the safety check; a path must be judged "safe" continuously for this time before it is finally treated as "safe" | 3.0                          |
+| check_all_predicted_path           | [-]   | bool   | Flag to check all predicted paths                                                                                               | true                         |
+| publish_debug_marker               | [-]   | bool   | Flag to publish debug markers                                                                                                   | false                        |
+| collision_check_yaw_diff_threshold | [rad] | double | Maximum yaw difference between ego and object when executing RSS-based collision checking                                      | 3.1416                       |
 
 #### Parameters for RSS safety check
 
@@ -180,6 +180,7 @@
         time_horizon: 10.0
         # hysteresis factor to expand/shrink polygon with the value
         hysteresis_factor_expand_rate: 1.0
+        collision_check_yaw_diff_threshold: 3.1416
         # temporary
         backward_path_length: 100.0
         forward_path_length: 100.0
@@ -2319,8 +2319,10 @@ std::pair<bool, bool> GoalPlannerModule::isSafePath(
     return autoware::behavior_path_planner::utils::path_safety_checker::checkSafetyWithRSS(
       pull_over_path, ego_predicted_path, filtered_objects, collision_check,
       planner_data->parameters, safety_check_params->rss_params,
-      objects_filtering_params->use_all_predicted_path, hysteresis_factor);
-  } else if (parameters.safety_check_params.method == "integral_predicted_polygon") {
+      objects_filtering_params->use_all_predicted_path, hysteresis_factor,
+      safety_check_params->collision_check_yaw_diff_threshold);
+  }
+  if (parameters.safety_check_params.method == "integral_predicted_polygon") {
     return utils::path_safety_checker::checkSafetyWithIntegralPredictedPolygon(
       ego_predicted_path, vehicle_info_, filtered_objects,
       objects_filtering_params->check_all_predicted_path,
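The new `collision_check_yaw_diff_threshold` parameter defaults to 3.1416 (about pi); since a normalized yaw difference can never exceed pi, the default accepts every object and preserves the old behavior until the value is lowered. A hypothetical sketch of such a gate follows; the real check lives inside `checkSafetyWithRSS` and is not shown in this diff:

#include <cmath>

// Hypothetical yaw-difference gate (names and placement are assumptions):
// consider an object for RSS collision checking only if its heading differs
// from ego's by no more than the threshold.
bool withinYawThreshold(double ego_yaw, double object_yaw, double threshold)
{
  constexpr double kPi = 3.14159265358979323846;
  // Wrap the difference into [-pi, pi) before comparing.
  double diff = std::fmod(ego_yaw - object_yaw + kPi, 2.0 * kPi);
  if (diff < 0.0) diff += 2.0 * kPi;
  diff -= kPi;
  // With the default threshold of ~pi every object passes, so behavior is
  // unchanged until the parameter is tightened.
  return std::abs(diff) <= threshold;
}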