diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index f9983f0fccc3f..9de27f82ffcef 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -56,7 +56,7 @@ common/traffic_light_recognition_marker_publisher/** shumpei.wakabayashi@tier4.j common/traffic_light_utils/** mingyu.li@tier4.jp shunsuke.miura@tier4.jp common/trtexec_vendor/** daisuke.nishimatsu@tier4.jp yusuke.muramatsu@tier4.jp common/tvm_utility/** ambroise.vincent@arm.com xinyu.wang@tier4.jp -control/autonomous_emergency_braking/** takamasa.horibe@tier4.jp yutaka.shimizu@tier4.jp +control/autonomous_emergency_braking/** takamasa.horibe@tier4.jp tomoya.kimura@tier4.jp yutaka.shimizu@tier4.jp control/control_performance_analysis/** berkay@leodrive.ai fumiya.watanabe@tier4.jp satoshi.ota@tier4.jp shumpei.wakabayashi@tier4.jp taiki.tanaka@tier4.jp takamasa.horibe@tier4.jp takayuki.murooka@tier4.jp tomoya.kimura@tier4.jp control/control_validator/** kyoichi.sugahara@tier4.jp makoto.kurihara@tier4.jp takamasa.horibe@tier4.jp control/external_cmd_selector/** fumiya.watanabe@tier4.jp satoshi.ota@tier4.jp shumpei.wakabayashi@tier4.jp taiki.tanaka@tier4.jp takamasa.horibe@tier4.jp takayuki.murooka@tier4.jp tomoya.kimura@tier4.jp @@ -66,6 +66,7 @@ control/mpc_lateral_controller/** takamasa.horibe@tier4.jp takayuki.murooka@tier control/obstacle_collision_checker/** fumiya.watanabe@tier4.jp satoshi.ota@tier4.jp shumpei.wakabayashi@tier4.jp taiki.tanaka@tier4.jp takamasa.horibe@tier4.jp takayuki.murooka@tier4.jp tomoya.kimura@tier4.jp control/operation_mode_transition_manager/** takamasa.horibe@tier4.jp tomoya.kimura@tier4.jp control/pid_longitudinal_controller/** takamasa.horibe@tier4.jp takayuki.murooka@tier4.jp +control/predicted_path_checker/** berkay@leodrive.ai control/pure_pursuit/** takamasa.horibe@tier4.jp control/shift_decider/** takamasa.horibe@tier4.jp control/trajectory_follower_base/** takamasa.horibe@tier4.jp takayuki.murooka@tier4.jp @@ -140,6 +141,7 @@ perception/simple_object_merger/** satoshi.tanaka@tier4.jp shunsuke.miura@tier4. 
perception/tensorrt_classifier/** mingyu.li@tier4.jp perception/tensorrt_yolo/** daisuke.nishimatsu@tier4.jp perception/tensorrt_yolox/** daisuke.nishimatsu@tier4.jp dan.umeda@tier4.jp manato.hirabayashi@tier4.jp +perception/tracking_object_merger/** yoshi.ri@tier4.jp yukihiro.saito@tier4.jp perception/traffic_light_arbiter/** kenzo.lobos@tier4.jp shunsuke.miura@tier4.jp perception/traffic_light_classifier/** shunsuke.miura@tier4.jp yukihiro.saito@tier4.jp perception/traffic_light_fine_detector/** shunsuke.miura@tier4.jp tao.zhong@tier4.jp @@ -181,7 +183,6 @@ planning/planning_debug_tools/** taiki.tanaka@tier4.jp takamasa.horibe@tier4.jp planning/planning_test_utils/** kyoichi.sugahara@tier4.jp takamasa.horibe@tier4.jp planning/planning_validator/** kosuke.takeuchi@tier4.jp takamasa.horibe@tier4.jp planning/route_handler/** fumiya.watanabe@tier4.jp kosuke.takeuchi@tier4.jp mamoru.sobue@tier4.jp takayuki.murooka@tier4.jp yutaka.shimizu@tier4.jp zulfaqar.azmi@tier4.jp -planning/rtc_auto_mode_manager/** fumiya.watanabe@tier4.jp taiki.tanaka@tier4.jp planning/rtc_interface/** fumiya.watanabe@tier4.jp taiki.tanaka@tier4.jp planning/rtc_replayer/** fumiya.watanabe@tier4.jp taiki.tanaka@tier4.jp planning/sampling_based_planner/bezier_sampler/** maxime.clement@tier4.jp @@ -215,6 +216,7 @@ system/default_ad_api_helpers/ad_api_visualizers/** isamu.takagi@tier4.jp kahhoo system/default_ad_api_helpers/automatic_pose_initializer/** isamu.takagi@tier4.jp kahhooi.tan@tier4.jp makoto.yabuta@tier4.jp ryohsuke.mitsudome@tier4.jp yukihiro.saito@tier4.jp system/dummy_diag_publisher/** akihiro.sakurai@tier4.jp fumihito.ito@tier4.jp system/dummy_infrastructure/** ryohsuke.mitsudome@tier4.jp +system/duplicated_node_checker/** shumpei.wakabayashi@tier4.jp uken.ryu@tier4.jp system/emergency_handler/** makoto.kurihara@tier4.jp system/mrm_comfortable_stop_operator/** makoto.kurihara@tier4.jp system/mrm_emergency_stop_operator/** makoto.kurihara@tier4.jp diff --git a/common/kalman_filter/CMakeLists.txt b/common/kalman_filter/CMakeLists.txt index dd59da727605d..6cbdf67a0affa 100644 --- a/common/kalman_filter/CMakeLists.txt +++ b/common/kalman_filter/CMakeLists.txt @@ -19,4 +19,11 @@ ament_auto_add_library(kalman_filter SHARED include/kalman_filter/time_delay_kalman_filter.hpp ) +if(BUILD_TESTING) + file(GLOB_RECURSE test_files test/*.cpp) + ament_add_ros_isolated_gtest(test_kalman_filter ${test_files}) + + target_link_libraries(test_kalman_filter kalman_filter) +endif() + ament_auto_package() diff --git a/common/kalman_filter/package.xml b/common/kalman_filter/package.xml index ea061f3f23bb8..200440b5774c7 100644 --- a/common/kalman_filter/package.xml +++ b/common/kalman_filter/package.xml @@ -18,7 +18,9 @@ eigen3_cmake_module ament_cmake_cppcheck + ament_cmake_ros ament_lint_auto + autoware_lint_common ament_cmake diff --git a/common/kalman_filter/test/test_kalman_filter.cpp b/common/kalman_filter/test/test_kalman_filter.cpp new file mode 100644 index 0000000000000..4f4e9ce44669b --- /dev/null +++ b/common/kalman_filter/test/test_kalman_filter.cpp @@ -0,0 +1,94 @@ +// Copyright 2023 The Autoware Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "kalman_filter/kalman_filter.hpp" + +#include + +TEST(kalman_filter, kf) +{ + KalmanFilter kf_; + + Eigen::MatrixXd x_t(2, 1); + x_t << 1, 2; + + Eigen::MatrixXd P_t(2, 2); + P_t << 1, 0, 0, 1; + + Eigen::MatrixXd Q_t(2, 2); + Q_t << 0.01, 0, 0, 0.01; + + Eigen::MatrixXd R_t(2, 2); + R_t << 0.09, 0, 0, 0.09; + + Eigen::MatrixXd C_t(2, 2); + C_t << 1, 0, 0, 1; + + Eigen::MatrixXd A_t(2, 2); + A_t << 1, 0, 0, 1; + + Eigen::MatrixXd B_t(2, 2); + B_t << 1, 0, 0, 1; + + // Initialize the filter and check if initialization was successful + EXPECT_TRUE(kf_.init(x_t, A_t, B_t, C_t, Q_t, R_t, P_t)); + + // Perform prediction + Eigen::MatrixXd u_t(2, 1); + u_t << 0.1, 0.1; + EXPECT_TRUE(kf_.predict(u_t)); + + // Check the updated state and covariance matrix + Eigen::MatrixXd x_predict_expected = A_t * x_t + B_t * u_t; + Eigen::MatrixXd P_predict_expected = A_t * P_t * A_t.transpose() + Q_t; + + Eigen::MatrixXd x_predict; + kf_.getX(x_predict); + Eigen::MatrixXd P_predict; + kf_.getP(P_predict); + + EXPECT_NEAR(x_predict(0, 0), x_predict_expected(0, 0), 1e-5); + EXPECT_NEAR(x_predict(1, 0), x_predict_expected(1, 0), 1e-5); + EXPECT_NEAR(P_predict(0, 0), P_predict_expected(0, 0), 1e-5); + EXPECT_NEAR(P_predict(1, 1), P_predict_expected(1, 1), 1e-5); + + // Perform update + Eigen::MatrixXd y_t(2, 1); + y_t << 1.05, 2.05; + EXPECT_TRUE(kf_.update(y_t)); + + // Check the updated state and covariance matrix + const Eigen::MatrixXd PCT_t = P_predict_expected * C_t.transpose(); + const Eigen::MatrixXd K_t = PCT_t * ((R_t + C_t * PCT_t).inverse()); + const Eigen::MatrixXd y_pred = C_t * x_predict_expected; + Eigen::MatrixXd x_update_expected = x_predict_expected + K_t * (y_t - y_pred); + Eigen::MatrixXd P_update_expected = P_predict_expected - K_t * (C_t * P_predict_expected); + + Eigen::MatrixXd x_update; + kf_.getX(x_update); + Eigen::MatrixXd P_update; + kf_.getP(P_update); + + EXPECT_NEAR(x_update(0, 0), x_update_expected(0, 0), 1e-5); + EXPECT_NEAR(x_update(1, 0), x_update_expected(1, 0), 1e-5); + EXPECT_NEAR(P_update(0, 0), P_update_expected(0, 0), 1e-5); + EXPECT_NEAR(P_update(1, 1), P_update_expected(1, 1), 1e-5); +} + +int main(int argc, char * argv[]) +{ + testing::InitGoogleTest(&argc, argv); + bool result = RUN_ALL_TESTS(); + return result; +} diff --git a/common/kalman_filter/test/test_time_delay_kalman_filter.cpp b/common/kalman_filter/test/test_time_delay_kalman_filter.cpp new file mode 100644 index 0000000000000..32fefd8ceff70 --- /dev/null +++ b/common/kalman_filter/test/test_time_delay_kalman_filter.cpp @@ -0,0 +1,121 @@ +// Copyright 2023 The Autoware Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "kalman_filter/time_delay_kalman_filter.hpp" + +#include + +TEST(time_delay_kalman_filter, td_kf) +{ + TimeDelayKalmanFilter td_kf_; + + Eigen::MatrixXd x_t(3, 1); + x_t << 1.0, 2.0, 3.0; + Eigen::MatrixXd P_t(3, 3); + P_t << 0.1, 0.0, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0, 0.3; + const int max_delay_step = 5; + const int dim_x = x_t.rows(); + const int dim_x_ex = dim_x * max_delay_step; + // Initialize the filter + td_kf_.init(x_t, P_t, max_delay_step); + + // Check if initialization was successful + Eigen::MatrixXd x_init = td_kf_.getLatestX(); + Eigen::MatrixXd P_init = td_kf_.getLatestP(); + Eigen::MatrixXd x_ex_t = Eigen::MatrixXd::Zero(dim_x_ex, 1); + Eigen::MatrixXd P_ex_t = Eigen::MatrixXd::Zero(dim_x_ex, dim_x_ex); + for (int i = 0; i < max_delay_step; ++i) { + x_ex_t.block(i * dim_x, 0, dim_x, 1) = x_t; + P_ex_t.block(i * dim_x, i * dim_x, dim_x, dim_x) = P_t; + } + + EXPECT_EQ(x_init.rows(), 3); + EXPECT_EQ(x_init.cols(), 1); + EXPECT_EQ(P_init.rows(), 3); + EXPECT_EQ(P_init.cols(), 3); + EXPECT_NEAR(x_init(0, 0), x_t(0, 0), 1e-5); + EXPECT_NEAR(x_init(1, 0), x_t(1, 0), 1e-5); + EXPECT_NEAR(x_init(2, 0), x_t(2, 0), 1e-5); + EXPECT_NEAR(P_init(0, 0), P_t(0, 0), 1e-5); + EXPECT_NEAR(P_init(1, 1), P_t(1, 1), 1e-5); + EXPECT_NEAR(P_init(2, 2), P_t(2, 2), 1e-5); + + // Define prediction parameters + Eigen::MatrixXd A_t(3, 3); + A_t << 2.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 2.0; + Eigen::MatrixXd Q_t(3, 3); + Q_t << 0.01, 0.0, 0.0, 0.0, 0.02, 0.0, 0.0, 0.0, 0.03; + Eigen::MatrixXd x_next(3, 1); + x_next << 2.0, 4.0, 6.0; + + // Perform prediction + EXPECT_TRUE(td_kf_.predictWithDelay(x_next, A_t, Q_t)); + + // Check the prediction state and covariance matrix + Eigen::MatrixXd x_predict = td_kf_.getLatestX(); + Eigen::MatrixXd P_predict = td_kf_.getLatestP(); + Eigen::MatrixXd x_tmp = Eigen::MatrixXd::Zero(dim_x_ex, 1); + x_tmp.block(0, 0, dim_x, 1) = A_t * x_t; + x_tmp.block(dim_x, 0, dim_x_ex - dim_x, 1) = x_ex_t.block(0, 0, dim_x_ex - dim_x, 1); + x_ex_t = x_tmp; + Eigen::MatrixXd x_predict_expected = x_ex_t.block(0, 0, dim_x, 1); + Eigen::MatrixXd P_tmp = Eigen::MatrixXd::Zero(dim_x_ex, dim_x_ex); + P_tmp.block(0, 0, dim_x, dim_x) = A_t * P_ex_t.block(0, 0, dim_x, dim_x) * A_t.transpose() + Q_t; + P_tmp.block(0, dim_x, dim_x, dim_x_ex - dim_x) = + A_t * P_ex_t.block(0, 0, dim_x, dim_x_ex - dim_x); + P_tmp.block(dim_x, 0, dim_x_ex - dim_x, dim_x) = + P_ex_t.block(0, 0, dim_x_ex - dim_x, dim_x) * A_t.transpose(); + P_tmp.block(dim_x, dim_x, dim_x_ex - dim_x, dim_x_ex - dim_x) = + P_ex_t.block(0, 0, dim_x_ex - dim_x, dim_x_ex - dim_x); + P_ex_t = P_tmp; + Eigen::MatrixXd P_predict_expected = P_ex_t.block(0, 0, dim_x, dim_x); + EXPECT_NEAR(x_predict(0, 0), x_predict_expected(0, 0), 1e-5); + EXPECT_NEAR(x_predict(1, 0), x_predict_expected(1, 0), 1e-5); + EXPECT_NEAR(x_predict(2, 0), x_predict_expected(2, 0), 1e-5); + EXPECT_NEAR(P_predict(0, 0), P_predict_expected(0, 0), 1e-5); + EXPECT_NEAR(P_predict(1, 1), P_predict_expected(1, 1), 1e-5); + EXPECT_NEAR(P_predict(2, 2), P_predict_expected(2, 2), 1e-5); + + // Define update parameters + Eigen::MatrixXd C_t(3, 3); + C_t << 0.5, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.5; + Eigen::MatrixXd R_t(3, 3); + R_t << 0.001, 0.0, 0.0, 0.0, 0.002, 0.0, 0.0, 0.0, 0.003; + Eigen::MatrixXd y_t(3, 1); + y_t << 1.05, 2.05, 3.05; + const int delay_step = 2; // Choose an appropriate delay step + const int dim_y = y_t.rows(); + + // Perform update + 
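+  // updateWithDelay() applies y_t to the state block stored delay_step steps in the past.
+  // Since only one prediction has been performed after init() (whose extended covariance is
+  // block-diagonal), the block at delay_step = 2 is still uncorrelated with the latest block,
+  // so getLatestX()/getLatestP() are expected to stay at their predicted values. The all-zero
+  // C_ex_t used for the expected values below encodes exactly this zero-gain outcome.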
EXPECT_TRUE(td_kf_.updateWithDelay(y_t, C_t, R_t, delay_step)); + + // Check the updated state and covariance matrix + Eigen::MatrixXd x_update = td_kf_.getLatestX(); + Eigen::MatrixXd P_update = td_kf_.getLatestP(); + + Eigen::MatrixXd C_ex_t = Eigen::MatrixXd::Zero(dim_y, dim_x_ex); + const Eigen::MatrixXd PCT_t = P_ex_t * C_ex_t.transpose(); + const Eigen::MatrixXd K_t = PCT_t * ((R_t + C_ex_t * PCT_t).inverse()); + const Eigen::MatrixXd y_pred = C_ex_t * x_ex_t; + x_ex_t = x_ex_t + K_t * (y_t - y_pred); + P_ex_t = P_ex_t - K_t * (C_ex_t * P_ex_t); + Eigen::MatrixXd x_update_expected = x_ex_t.block(0, 0, dim_x, 1); + Eigen::MatrixXd P_update_expected = P_ex_t.block(0, 0, dim_x, dim_x); + EXPECT_NEAR(x_update(0, 0), x_update_expected(0, 0), 1e-5); + EXPECT_NEAR(x_update(1, 0), x_update_expected(1, 0), 1e-5); + EXPECT_NEAR(x_update(2, 0), x_update_expected(2, 0), 1e-5); + EXPECT_NEAR(P_update(0, 0), P_update_expected(0, 0), 1e-5); + EXPECT_NEAR(P_update(1, 1), P_update_expected(1, 1), 1e-5); + EXPECT_NEAR(P_update(2, 2), P_update_expected(2, 2), 1e-5); +} diff --git a/common/motion_utils/include/motion_utils/trajectory/trajectory.hpp b/common/motion_utils/include/motion_utils/trajectory/trajectory.hpp index dcdefe61e4000..5d773c5be32d9 100644 --- a/common/motion_utils/include/motion_utils/trajectory/trajectory.hpp +++ b/common/motion_utils/include/motion_utils/trajectory/trajectory.hpp @@ -177,8 +177,7 @@ T removeOverlapPoints(const T & points, const size_t start_idx = 0) for (size_t i = start_idx + 1; i < points.size(); ++i) { const auto prev_p = tier4_autoware_utils::getPoint(dst.back()); const auto curr_p = tier4_autoware_utils::getPoint(points.at(i)); - const double dist = tier4_autoware_utils::calcDistance2d(prev_p, curr_p); - if (dist < eps) { + if (std::abs(prev_p.x - curr_p.x) < eps && std::abs(prev_p.y - curr_p.y) < eps) { continue; } dst.push_back(points.at(i)); diff --git a/common/tier4_autoware_utils/include/tier4_autoware_utils/geometry/geometry.hpp b/common/tier4_autoware_utils/include/tier4_autoware_utils/geometry/geometry.hpp index 17ce7e1f4de5a..6106e3a15f4c7 100644 --- a/common/tier4_autoware_utils/include/tier4_autoware_utils/geometry/geometry.hpp +++ b/common/tier4_autoware_utils/include/tier4_autoware_utils/geometry/geometry.hpp @@ -261,7 +261,7 @@ inline void setOrientation(const geometry_msgs::msg::Quaternion & orientation, T } template -void setLongitudinalVelocity([[maybe_unused]] const double velocity, [[maybe_unused]] T & p) +void setLongitudinalVelocity([[maybe_unused]] const float velocity, [[maybe_unused]] T & p) { static_assert(sizeof(T) == 0, "Only specializations of getLongitudinalVelocity can be used."); throw std::logic_error("Only specializations of getLongitudinalVelocity can be used."); @@ -269,21 +269,21 @@ void setLongitudinalVelocity([[maybe_unused]] const double velocity, [[maybe_unu template <> inline void setLongitudinalVelocity( - const double velocity, autoware_auto_planning_msgs::msg::TrajectoryPoint & p) + const float velocity, autoware_auto_planning_msgs::msg::TrajectoryPoint & p) { p.longitudinal_velocity_mps = velocity; } template <> inline void setLongitudinalVelocity( - const double velocity, autoware_auto_planning_msgs::msg::PathPoint & p) + const float velocity, autoware_auto_planning_msgs::msg::PathPoint & p) { p.longitudinal_velocity_mps = velocity; } template <> inline void setLongitudinalVelocity( - const double velocity, autoware_auto_planning_msgs::msg::PathPointWithLaneId & p) + const float velocity, 
autoware_auto_planning_msgs::msg::PathPointWithLaneId & p) { p.point.longitudinal_velocity_mps = velocity; } diff --git a/common/tier4_logging_level_configure_rviz_plugin/config/logger_config.yaml b/common/tier4_logging_level_configure_rviz_plugin/config/logger_config.yaml index da5cc757682c5..97c3104242026 100644 --- a/common/tier4_logging_level_configure_rviz_plugin/config/logger_config.yaml +++ b/common/tier4_logging_level_configure_rviz_plugin/config/logger_config.yaml @@ -14,6 +14,10 @@ behavior_path_planner_avoidance: - node_name: /planning/scenario_planning/lane_driving/behavior_planning/behavior_path_planner logger_name: planning.scenario_planning.lane_driving.behavior_planning.behavior_path_planner.avoidance +behavior_path_planner_lane_change: + - node_name: /planning/scenario_planning/lane_driving/behavior_planning/behavior_path_planner + logger_name: lane_change + behavior_velocity_planner: - node_name: /planning/scenario_planning/lane_driving/behavior_planning/behavior_velocity_planner logger_name: planning.scenario_planning.lane_driving.behavior_planning.behavior_velocity_planner diff --git a/common/tier4_system_rviz_plugin/README.md b/common/tier4_system_rviz_plugin/README.md index 098844c8b4091..61260ecfda723 100644 --- a/common/tier4_system_rviz_plugin/README.md +++ b/common/tier4_system_rviz_plugin/README.md @@ -1 +1,11 @@ # tier4_system_rviz_plugin + +## Purpose + +This plugin display the Hazard information from Autoware; and output notices when emergencies are from initial localization and route setting. + +## Input + +| Name | Type | Description | +| --------------------------------- | ----------------------------------------------------- | ------------------------------------------------------------ | +| `/system/emergency/hazard_status` | `autoware_auto_system_msgs::msg::HazardStatusStamped` | The topic represents the emergency information from Autoware | diff --git a/common/tier4_system_rviz_plugin/src/mrm_summary_overlay_display.cpp b/common/tier4_system_rviz_plugin/src/mrm_summary_overlay_display.cpp index b80f06f645632..61c2bd378dab1 100644 --- a/common/tier4_system_rviz_plugin/src/mrm_summary_overlay_display.cpp +++ b/common/tier4_system_rviz_plugin/src/mrm_summary_overlay_display.cpp @@ -50,6 +50,7 @@ #include #include +#include #include #include @@ -126,6 +127,7 @@ MrmSummaryOverlayDisplay::~MrmSummaryOverlayDisplay() void MrmSummaryOverlayDisplay::onInitialize() { RTDClass::onInitialize(); + static int count = 0; rviz_common::UniformStringStream ss; ss << "MrmSummaryOverlayDisplayObject" << count++; @@ -160,9 +162,11 @@ void MrmSummaryOverlayDisplay::update(float wall_dt, float ros_dt) // MRM summary std::vector mrm_comfortable_stop_summary_list = {}; std::vector mrm_emergency_stop_summary_list = {}; + int hazard_level = autoware_auto_system_msgs::msg::HazardStatus::NO_FAULT; { std::lock_guard message_lock(mutex_); if (last_msg_ptr_) { + hazard_level = last_msg_ptr_->status.level; for (const auto & diag_status : last_msg_ptr_->status.diag_latent_fault) { const std::optional msg = generateMrmMessage(diag_status); if (msg != std::nullopt) { @@ -201,16 +205,32 @@ void MrmSummaryOverlayDisplay::update(float wall_dt, float ros_dt) painter.setFont(font); std::ostringstream output_text; - output_text << std::fixed - << "Comfortable Stop MRM Summary: " << int(mrm_comfortable_stop_summary_list.size()) - << std::endl; - for (const auto & mrm_element : mrm_comfortable_stop_summary_list) { - output_text << mrm_element << std::endl; - } - output_text << "Emergency Stop MRM 
Summary: " << int(mrm_emergency_stop_summary_list.size()) - << std::endl; - for (const auto & mrm_element : mrm_emergency_stop_summary_list) { - output_text << mrm_element << std::endl; + + // Display the MRM Summary only when there is a fault + if (hazard_level != autoware_auto_system_msgs::msg::HazardStatus::NO_FAULT) { + // Broadcasting the Basic Error Infos + int number_of_comfortable_stop_messages = + static_cast(mrm_comfortable_stop_summary_list.size()); + if (number_of_comfortable_stop_messages > 0) // Only Display when there are errors + { + output_text << std::fixed + << "Comfortable Stop MRM Summary: " << number_of_comfortable_stop_messages + << std::endl; + for (const auto & mrm_element : mrm_comfortable_stop_summary_list) { + output_text << mrm_element << std::endl; + } + } + + int number_of_emergency_stop_messages = + static_cast(mrm_emergency_stop_summary_list.size()); + if (number_of_emergency_stop_messages > 0) // Only Display when there are some errors + { + output_text << "Emergency Stop MRM Summary: " << int(mrm_emergency_stop_summary_list.size()) + << std::endl; + for (const auto & mrm_element : mrm_emergency_stop_summary_list) { + output_text << mrm_element << std::endl; + } + } } // same as above, but align on right side diff --git a/control/autonomous_emergency_braking/package.xml b/control/autonomous_emergency_braking/package.xml index 4f2ad6c50a9b3..5dc65cb243bfc 100644 --- a/control/autonomous_emergency_braking/package.xml +++ b/control/autonomous_emergency_braking/package.xml @@ -6,6 +6,8 @@ Autonomous Emergency Braking package as a ROS 2 node Yutaka Shimizu Takamasa Horibe + Tomoya Kimura + Apache License 2.0 ament_cmake diff --git a/control/autonomous_emergency_braking/src/node.cpp b/control/autonomous_emergency_braking/src/node.cpp index e68dff6b0ec1d..0ccc9bc4a3dae 100644 --- a/control/autonomous_emergency_braking/src/node.cpp +++ b/control/autonomous_emergency_braking/src/node.cpp @@ -432,6 +432,10 @@ void AEB::generateEgoPath( const Trajectory & predicted_traj, Path & path, std::vector & polygons) { + if (predicted_traj.points.empty()) { + return; + } + geometry_msgs::msg::TransformStamped transform_stamped{}; try { transform_stamped = tf_buffer_.lookupTransform( diff --git a/control/mpc_lateral_controller/README.md b/control/mpc_lateral_controller/README.md index 7579e3da265c4..50e9d44a0a7a4 100644 --- a/control/mpc_lateral_controller/README.md +++ b/control/mpc_lateral_controller/README.md @@ -67,7 +67,7 @@ Set the following from the [controller_node](../trajectory_follower_node/README. - `autoware_auto_planning_msgs/Trajectory` : reference trajectory to follow. - `nav_msgs/Odometry`: current odometry -- `autoware_auto_vehicle_msgs/SteeringReport` current steering +- `autoware_auto_vehicle_msgs/SteeringReport`: current steering ### Outputs @@ -89,83 +89,108 @@ can be calculated by providing the current steer, velocity, and pose to function The default parameters defined in `param/lateral_controller_defaults.param.yaml` are adjusted to the AutonomouStuff Lexus RX 450h for under 40 km/h driving. -| Name | Type | Description | Default value | -| :------------------------------------------- | :----- | :--------------------------------------------------------------------------------------------------------------------------------------------------- | :------------ | -| show_debug_info | bool | display debug info | false | -| traj_resample_dist | double | distance of waypoints in resampling [m] | 0.1 | -| enable_path_smoothing | bool | path smoothing flag. 
This should be true when uses path resampling to reduce resampling noise. | true | -| path_filter_moving_ave_num | int | number of data points moving average filter for path smoothing | 35 | -| path_smoothing_times | int | number of times of applying path smoothing filter | 1 | -| curvature_smoothing_num_ref_steer | double | index distance of points used in curvature calculation for reference steering command: p(i-num), p(i), p(i+num). larger num makes less noisy values. | 35 | -| curvature_smoothing_num_traj | double | index distance of points used in curvature calculation for trajectory: p(i-num), p(i), p(i+num). larger num makes less noisy values. | 1 | -| extend_trajectory_for_end_yaw_control | bool | trajectory extending flag for end yaw control. | true | -| steering_lpf_cutoff_hz | double | cutoff frequency of lowpass filter for steering output command [hz] | 3.0 | -| admissible_position_error | double | stop vehicle when following position error is larger than this value [m]. | 5.0 | -| admissible_yaw_error_rad | double | stop vehicle when following yaw angle error is larger than this value [rad]. | 1.57 | -| stop_state_entry_ego_speed \*1 | double | threshold value of the ego vehicle speed used to the stop state entry condition | 0.0 | -| stop_state_entry_target_speed \*1 | double | threshold value of the target speed used to the stop state entry condition | 0.0 | -| converged_steer_rad | double | threshold value of the steer convergence | 0.1 | -| keep_steer_control_until_converged | bool | keep steer control until steer is converged | true | -| new_traj_duration_time | double | threshold value of the time to be considered as new trajectory | 1.0 | -| new_traj_end_dist | double | threshold value of the distance between trajectory ends to be considered as new trajectory | 0.3 | -| mpc_converged_threshold_rps | double | threshold value to be sure output of the optimization is converged, it is used in stopped state | 0.3 | - -(\*1) To prevent unnecessary steering movement, the steering command is fixed to the previous value in the stop state. - -#### MPC algorithm - -| Name | Type | Description | Default value | -| :-------------------------------------------------- | :----- | :------------------------------------------------------------------------------------------------------------------------------------------------- | :---------------- | -| qp_solver_type | string | QP solver option. described below in detail. | unconstraint_fast | -| mpc_vehicle_model_type | string | vehicle model option. described below in detail. 
| kinematics | -| mpc_prediction_horizon | int | total prediction step for MPC | 70 | -| mpc_prediction_sampling_time | double | prediction period for one step [s] | 0.1 | -| mpc_weight_lat_error | double | weight for lateral error | 0.1 | -| mpc_weight_heading_error | double | weight for heading error | 0.0 | -| mpc_weight_heading_error_squared_vel_coeff | double | weight for heading error \* velocity | 5.0 | -| mpc_weight_steering_input | double | weight for steering error (steer command - reference steer) | 1.0 | -| mpc_weight_steering_input_squared_vel_coeff | double | weight for steering error (steer command - reference steer) \* velocity | 0.25 | -| mpc_weight_lat_jerk | double | weight for lateral jerk (steer(i) - steer(i-1)) \* velocity | 0.0 | -| mpc_weight_terminal_lat_error | double | terminal cost weight for lateral error | 1.0 | -| mpc_weight_steer_rate | double | weight for steering rate [rad/s] | 0.0 | -| mpc_weight_steer_acc | double | weight for derivatives of the steering rate [rad/ss] | 0.0 | -| mpc_weight_terminal_heading_error | double | terminal cost weight for heading error | 0.1 | -| mpc_low_curvature_thresh_curvature | double | trajectory curvature threshold to change the weight. If the curvature is lower than this value, the `low_curvature_weight_**` values will be used. | 0.0 | -| mpc_low_curvature_weight_lat_error | double | [used in a low curvature trajectory] weight for lateral error | 0.0 | -| mpc_low_curvature_weight_heading_error | double | [used in a low curvature trajectory] weight for heading error | 0.0 | -| mpc_low_curvature_weight_heading_error_squared_vel | double | [used in a low curvature trajectory] weight for heading error \* velocity | 0.0 | -| mpc_low_curvature_weight_steering_input | double | [used in a low curvature trajectory] weight for steering error (steer command - reference steer) | 0.0 | -| mpc_low_curvature_weight_steering_input_squared_vel | double | [used in a low curvature trajectory] weight for steering error (steer command - reference steer) \* velocity | 0.0 | -| mpc_low_curvature_weight_lat_jerk | double | [used in a low curvature trajectory] weight for lateral jerk (steer(i) - steer(i-1)) \* velocity | 0.0 | -| mpc_low_curvature_weight_steer_rate | double | [used in a low curvature trajectory] weight for steering rate [rad/s] | 0.0 | -| mpc_low_curvature_weight_steer_acc | double | [used in a low curvature trajectory] weight for derivatives of the steering rate [rad/ss] | 0.0 | -| mpc_zero_ff_steer_deg | double | threshold of feedforward angle [deg]. feedforward angle smaller than this value is set to zero. | 2.0 | - -#### Steering offset remover - -Defined in the `steering_offset` namespace. This logic is designed as simple as possible, with minimum design parameters. - -| Name | Type | Description | Default value | -| :---------------------------------- | :----- | :----------------------------------------------------------------------------------------------- | :------------ | -| enable_auto_steering_offset_removal | bool | Estimate the steering offset and apply compensation | true | -| update_vel_threshold | double | If the velocity is smaller than this value, the data is not used for the offset estimation. | 5.56 | -| update_steer_threshold | double | If the steering angle is larger than this value, the data is not used for the offset estimation. | 0.035 | -| average_num | double | The average of this number of data is used as a steering offset. 
| 1000 | -| steering_offset_limit | double | The angle limit to be applied to the offset compensation. | 0.02 | - -#### Vehicle model +#### System + +| Name | Type | Description | Default value | +| :------------------------ | :------ | :-------------------------------------------------------------------------- | :------------ | +| traj_resample_dist | double | distance of waypoints in resampling [m] | 0.1 | +| use_steer_prediction | boolean | flag for using steer prediction (do not use steer measurement) | false | +| admissible_position_error | double | stop vehicle when following position error is larger than this value [m] | 5.0 | +| admissible_yaw_error_rad | double | stop vehicle when following yaw angle error is larger than this value [rad] | 1.57 | + +#### Path Smoothing + +| Name | Type | Description | Default value | +| :-------------------------------- | :------ | :--------------------------------------------------------------------------------------------------------------------------------------------------- | :------------ | +| enable_path_smoothing | boolean | path smoothing flag. This should be true when uses path resampling to reduce resampling noise. | false | +| path_filter_moving_ave_num | int | number of data points moving average filter for path smoothing | 25 | +| curvature_smoothing_num_traj | int | index distance of points used in curvature calculation for trajectory: p(i-num), p(i), p(i+num). larger num makes less noisy values. | 15 | +| curvature_smoothing_num_ref_steer | int | index distance of points used in curvature calculation for reference steering command: p(i-num), p(i), p(i+num). larger num makes less noisy values. | 15 | + +#### Trajectory Extending + +| Name | Type | Description | Default value | +| :------------------------------------ | :------ | :-------------------------------------------- | :------------ | +| extend_trajectory_for_end_yaw_control | boolean | trajectory extending flag for end yaw control | true | + +#### MPC Optimization + +| Name | Type | Description | Default value | +| :-------------------------------------------------- | :----- | :----------------------------------------------------------------------------------------------------------- | :------------ | +| qp_solver_type | string | QP solver option. described below in detail. 
| "osqp" | +| mpc_prediction_horizon | int | total prediction step for MPC | 50 | +| mpc_prediction_dt | double | prediction period for one step [s] | 0.1 | +| mpc_weight_lat_error | double | weight for lateral error | 1.0 | +| mpc_weight_heading_error | double | weight for heading error | 0.0 | +| mpc_weight_heading_error_squared_vel | double | weight for heading error \* velocity | 0.3 | +| mpc_weight_steering_input | double | weight for steering error (steer command - reference steer) | 1.0 | +| mpc_weight_steering_input_squared_vel | double | weight for steering error (steer command - reference steer) \* velocity | 0.25 | +| mpc_weight_lat_jerk | double | weight for lateral jerk (steer(i) - steer(i-1)) \* velocity | 0.1 | +| mpc_weight_steer_rate | double | weight for steering rate [rad/s] | 0.0 | +| mpc_weight_steer_acc | double | weight for derivatives of the steering rate [rad/ss] | 0.000001 | +| mpc_low_curvature_weight_lat_error | double | [used in a low curvature trajectory] weight for lateral error | 0.1 | +| mpc_low_curvature_weight_heading_error | double | [used in a low curvature trajectory] weight for heading error | 0.0 | +| mpc_low_curvature_weight_heading_error_squared_vel | double | [used in a low curvature trajectory] weight for heading error \* velocity | 0.3 | +| mpc_low_curvature_weight_steering_input | double | [used in a low curvature trajectory] weight for steering error (steer command - reference steer) | 1.0 | +| mpc_low_curvature_weight_steering_input_squared_vel | double | [used in a low curvature trajectory] weight for steering error (steer command - reference steer) \* velocity | 0.25 | +| mpc_low_curvature_weight_lat_jerk | double | [used in a low curvature trajectory] weight for lateral jerk (steer(i) - steer(i-1)) \* velocity | 0.0 | +| mpc_low_curvature_weight_steer_rate | double | [used in a low curvature trajectory] weight for steering rate [rad/s] | 0.0 | +| mpc_low_curvature_weight_steer_acc | double | [used in a low curvature trajectory] weight for derivatives of the steering rate [rad/ss] | 0.000001 | +| mpc_low_curvature_thresh_curvature | double | threshold of curvature to use "low_curvature" parameter | 0.0 | +| mpc_weight_terminal_lat_error | double | terminal lateral error weight in matrix Q to improve mpc stability | 1.0 | +| mpc_weight_terminal_heading_error | double | terminal heading error weight in matrix Q to improve mpc stability | 0.1 | +| mpc_zero_ff_steer_deg | double | threshold that feed-forward angle becomes zero | 0.5 | +| mpc_acceleration_limit | double | limit on the vehicle's acceleration | 2.0 | +| mpc_velocity_time_constant | double | time constant used for velocity smoothing | 0.3 | +| mpc_min_prediction_length | double | minimum prediction length | 5.0 | + +#### Vehicle Model | Name | Type | Description | Default value | | :----------------------------------- | :------- | :--------------------------------------------------------------------------------- | :------------------- | +| vehicle_model_type | string | vehicle model type for mpc prediction | "kinematics" | | input_delay | double | steering input delay time for delay compensation | 0.24 | -| vehicle_model_steer_tau | double | steering dynamics time constant | 0.3 | -| steer_rate_lim_dps_list_by_curvature | [double] | steering angle rate limit list depending on curvature [deg/s] | [10.0, 20.0, 30.0] | +| vehicle_model_steer_tau | double | steering dynamics time constant (1d approximation) [s] | 0.3 | +| steer_rate_lim_dps_list_by_curvature | [double] | steering angle 
rate limit list depending on curvature [deg/s] | [40.0, 50.0, 60.0] | | curvature_list_for_steer_rate_lim | [double] | curvature list for steering angle rate limit interpolation in ascending order [/m] | [0.001, 0.002, 0.01] | -| steer_rate_lim_dps_list_by_velocity | [double] | steering angle rate limit list depending on velocity [deg/s] | [40.0, 30.0, 20.0] | +| steer_rate_lim_dps_list_by_velocity | [double] | steering angle rate limit list depending on velocity [deg/s] | [60.0, 50.0, 40.0] | | velocity_list_for_steer_rate_lim | [double] | velocity list for steering angle rate limit interpolation in ascending order [m/s] | [10.0, 15.0, 20.0] | | acceleration_limit | double | acceleration limit for trajectory velocity modification [m/ss] | 2.0 | | velocity_time_constant | double | velocity dynamics time constant for trajectory velocity modification [s] | 0.3 | +#### Lowpass Filter for Noise Reduction + +| Name | Type | Description | Default value | +| :------------------------ | :----- | :------------------------------------------------------------------ | :------------ | +| steering_lpf_cutoff_hz | double | cutoff frequency of lowpass filter for steering output command [hz] | 3.0 | +| error_deriv_lpf_cutoff_hz | double | cutoff frequency of lowpass filter for error derivative [Hz] | 5.0 | + +#### Stop State + +| Name | Type | Description | Default value | +| :------------------------------------------- | :------ | :---------------------------------------------------------------------------------------------- | :------------ | +| stop_state_entry_ego_speed \*1 | double | threshold value of the ego vehicle speed used to the stop state entry condition | 0.001 | +| stop_state_entry_target_speed \*1 | double | threshold value of the target speed used to the stop state entry condition | 0.001 | +| converged_steer_rad | double | threshold value of the steer convergence | 0.1 | +| keep_steer_control_until_converged | boolean | keep steer control until steer is converged | true | +| new_traj_duration_time | double | threshold value of the time to be considered as new trajectory | 1.0 | +| new_traj_end_dist | double | threshold value of the distance between trajectory ends to be considered as new trajectory | 0.3 | +| mpc_converged_threshold_rps | double | threshold value to be sure output of the optimization is converged, it is used in stopped state | 0.01 | + +(\*1) To prevent unnecessary steering movement, the steering command is fixed to the previous value in the stop state. + +#### Steer Offset + +Defined in the `steering_offset` namespace. This logic is designed as simple as possible, with minimum design parameters. + +| Name | Type | Description | Default value | +| :---------------------------------- | :------ | :----------------------------------------------------------------------------------------------- | :------------ | +| enable_auto_steering_offset_removal | boolean | Estimate the steering offset and apply compensation | true | +| update_vel_threshold | double | If the velocity is smaller than this value, the data is not used for the offset estimation | 5.56 | +| update_steer_threshold | double | If the steering angle is larger than this value, the data is not used for the offset estimation. | 0.035 | +| average_num | int | The average of this number of data is used as a steering offset. | 1000 | +| steering_offset_limit | double | The angle limit to be applied to the offset compensation. 
| 0.02 | + ##### For dynamics model (WIP) | Name | Type | Description | Default value | @@ -215,10 +240,8 @@ If you want to adjust the effect only in the high-speed range, you can use `weig - `weight_steering_input`: Reduce oscillation of tracking. - `weight_steering_input_squared_vel_coeff`: Reduce oscillation of tracking in high speed range. - `weight_lat_jerk`: Reduce lateral jerk. -- `weight_terminal_lat_error`: Preferable to set a higher value than normal lateral weight `weight_lat_error` for - stability. -- `weight_terminal_heading_error`: Preferable to set a higher value than normal heading weight `weight_heading_error` - for stability. +- `weight_terminal_lat_error`: Preferable to set a higher value than normal lateral weight `weight_lat_error` for stability. +- `weight_terminal_heading_error`: Preferable to set a higher value than normal heading weight `weight_heading_error` for stability. #### Other tips for tuning diff --git a/control/pid_longitudinal_controller/src/pid_longitudinal_controller.cpp b/control/pid_longitudinal_controller/src/pid_longitudinal_controller.cpp index b80874a8a2e57..a134fd775155b 100644 --- a/control/pid_longitudinal_controller/src/pid_longitudinal_controller.cpp +++ b/control/pid_longitudinal_controller/src/pid_longitudinal_controller.cpp @@ -501,9 +501,26 @@ void PidLongitudinalController::updateControlState(const ControlData & control_d m_enable_keep_stopped_until_steer_convergence && !lateral_sync_data_.is_steer_converged; const bool stopping_condition = stop_dist < p.stopping_state_stop_dist; - if ( - std::fabs(current_vel) > p.stopped_state_entry_vel || - std::fabs(current_acc) > p.stopped_state_entry_acc) { + + const bool is_stopped = std::abs(current_vel) < p.stopped_state_entry_vel && + std::abs(current_acc) < p.stopped_state_entry_acc; + // Case where the ego slips in the opposite direction of the gear due to e.g. 
a slope is also + // considered as a stop + const bool is_not_running = [&]() { + if (control_data.shift == Shift::Forward) { + if (is_stopped || current_vel < 0.0) { + // NOTE: Stopped or moving backward + return true; + } + } else { + if (is_stopped || 0.0 < current_vel) { + // NOTE: Stopped or moving forward + return true; + } + } + return false; + }(); + if (!is_not_running) { m_last_running_time = std::make_shared(clock_->now()); } const bool stopped_condition = diff --git a/launch/tier4_perception_launch/launch/object_recognition/detection/camera_lidar_fusion_based_detection.launch.xml b/launch/tier4_perception_launch/launch/object_recognition/detection/camera_lidar_fusion_based_detection.launch.xml index 67e123ea5b018..59fe3f13f1231 100644 --- a/launch/tier4_perception_launch/launch/object_recognition/detection/camera_lidar_fusion_based_detection.launch.xml +++ b/launch/tier4_perception_launch/launch/object_recognition/detection/camera_lidar_fusion_based_detection.launch.xml @@ -192,7 +192,7 @@ - + @@ -329,19 +329,24 @@ + + + + - + - + + @@ -356,16 +361,20 @@ + + - + + + - + diff --git a/launch/tier4_perception_launch/launch/object_recognition/detection/camera_lidar_radar_fusion_based_detection.launch.xml b/launch/tier4_perception_launch/launch/object_recognition/detection/camera_lidar_radar_fusion_based_detection.launch.xml index fbe5f21c041b6..11deaad5d06cc 100644 --- a/launch/tier4_perception_launch/launch/object_recognition/detection/camera_lidar_radar_fusion_based_detection.launch.xml +++ b/launch/tier4_perception_launch/launch/object_recognition/detection/camera_lidar_radar_fusion_based_detection.launch.xml @@ -51,6 +51,10 @@ + + + + - + @@ -363,21 +367,26 @@ + + + + - + - + + - + @@ -390,29 +399,36 @@ + + - - + + + + - - + + - - - - - - - - - - + + + + + + + + + + + + diff --git a/launch/tier4_perception_launch/launch/object_recognition/detection/lidar_based_detection.launch.xml b/launch/tier4_perception_launch/launch/object_recognition/detection/lidar_based_detection.launch.xml index c844db39f4e9d..822c251ddad33 100644 --- a/launch/tier4_perception_launch/launch/object_recognition/detection/lidar_based_detection.launch.xml +++ b/launch/tier4_perception_launch/launch/object_recognition/detection/lidar_based_detection.launch.xml @@ -58,7 +58,7 @@ - + @@ -128,18 +128,23 @@ + + + + - + - + + @@ -154,16 +159,20 @@ + + - + + + - + diff --git a/launch/tier4_perception_launch/launch/object_recognition/prediction/prediction.launch.xml b/launch/tier4_perception_launch/launch/object_recognition/prediction/prediction.launch.xml index 711e6374f5786..fa343f49840ad 100644 --- a/launch/tier4_perception_launch/launch/object_recognition/prediction/prediction.launch.xml +++ b/launch/tier4_perception_launch/launch/object_recognition/prediction/prediction.launch.xml @@ -1,12 +1,13 @@ - + + diff --git a/launch/tier4_perception_launch/launch/object_recognition/tracking/tracking.launch.xml b/launch/tier4_perception_launch/launch/object_recognition/tracking/tracking.launch.xml index 77de1e5995ee0..07b53fb671732 100644 --- a/launch/tier4_perception_launch/launch/object_recognition/tracking/tracking.launch.xml +++ b/launch/tier4_perception_launch/launch/object_recognition/tracking/tracking.launch.xml @@ -3,29 +3,54 @@ + + + + + + + + - + + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/launch/tier4_perception_launch/launch/perception.launch.xml b/launch/tier4_perception_launch/launch/perception.launch.xml index bbb7ebb2d8d25..347606d91759f 100644 --- 
a/launch/tier4_perception_launch/launch/perception.launch.xml +++ b/launch/tier4_perception_launch/launch/perception.launch.xml @@ -96,6 +96,16 @@ + + + + + + @@ -181,6 +191,14 @@ + + + + + diff --git a/launch/tier4_simulator_launch/launch/simulator.launch.xml b/launch/tier4_simulator_launch/launch/simulator.launch.xml index aef1e45f517b1..4da6720b0af47 100644 --- a/launch/tier4_simulator_launch/launch/simulator.launch.xml +++ b/launch/tier4_simulator_launch/launch/simulator.launch.xml @@ -65,7 +65,16 @@ - + + + + + + + diff --git a/launch/tier4_system_launch/launch/system.launch.xml b/launch/tier4_system_launch/launch/system.launch.xml index 016d613cfa72d..2de6a61547498 100644 --- a/launch/tier4_system_launch/launch/system.launch.xml +++ b/launch/tier4_system_launch/launch/system.launch.xml @@ -3,6 +3,7 @@ + @@ -84,6 +85,12 @@ + + + + + + diff --git a/localization/localization_util/CMakeLists.txt b/localization/localization_util/CMakeLists.txt new file mode 100644 index 0000000000000..9490ffb67723b --- /dev/null +++ b/localization/localization_util/CMakeLists.txt @@ -0,0 +1,15 @@ +cmake_minimum_required(VERSION 3.14) +project(localization_util) + +find_package(autoware_cmake REQUIRED) +autoware_package() + +ament_auto_add_library(localization_util SHARED + src/util_func.cpp + src/pose_array_interpolator.cpp + src/tf2_listener_module.cpp +) + +ament_auto_package( + INSTALL_TO_SHARE +) diff --git a/localization/localization_util/README.md b/localization/localization_util/README.md new file mode 100644 index 0000000000000..a3e980464d0c6 --- /dev/null +++ b/localization/localization_util/README.md @@ -0,0 +1,5 @@ +# localization_util + +`localization_util`` is a localization utility package. + +This package does not have a node, it is just a library. diff --git a/localization/ndt_scan_matcher/include/ndt_scan_matcher/matrix_type.hpp b/localization/localization_util/include/localization_util/matrix_type.hpp similarity index 84% rename from localization/ndt_scan_matcher/include/ndt_scan_matcher/matrix_type.hpp rename to localization/localization_util/include/localization_util/matrix_type.hpp index 038ed4549eb2f..d9ec9b369127a 100644 --- a/localization/ndt_scan_matcher/include/ndt_scan_matcher/matrix_type.hpp +++ b/localization/localization_util/include/localization_util/matrix_type.hpp @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef NDT_SCAN_MATCHER__MATRIX_TYPE_HPP_ -#define NDT_SCAN_MATCHER__MATRIX_TYPE_HPP_ +#ifndef LOCALIZATION_UTIL__MATRIX_TYPE_HPP_ +#define LOCALIZATION_UTIL__MATRIX_TYPE_HPP_ #include using Matrix6d = Eigen::Matrix; using RowMatrixXd = Eigen::Matrix; -#endif // NDT_SCAN_MATCHER__MATRIX_TYPE_HPP_ +#endif // LOCALIZATION_UTIL__MATRIX_TYPE_HPP_ diff --git a/localization/ndt_scan_matcher/include/ndt_scan_matcher/pose_array_interpolator.hpp b/localization/localization_util/include/localization_util/pose_array_interpolator.hpp similarity index 90% rename from localization/ndt_scan_matcher/include/ndt_scan_matcher/pose_array_interpolator.hpp rename to localization/localization_util/include/localization_util/pose_array_interpolator.hpp index f1e7dfb3f544b..998d8465733f5 100644 --- a/localization/ndt_scan_matcher/include/ndt_scan_matcher/pose_array_interpolator.hpp +++ b/localization/localization_util/include/localization_util/pose_array_interpolator.hpp @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#ifndef NDT_SCAN_MATCHER__POSE_ARRAY_INTERPOLATOR_HPP_ -#define NDT_SCAN_MATCHER__POSE_ARRAY_INTERPOLATOR_HPP_ +#ifndef LOCALIZATION_UTIL__POSE_ARRAY_INTERPOLATOR_HPP_ +#define LOCALIZATION_UTIL__POSE_ARRAY_INTERPOLATOR_HPP_ -#include "ndt_scan_matcher/util_func.hpp" +#include "localization_util/util_func.hpp" #include @@ -59,4 +59,4 @@ class PoseArrayInterpolator const geometry_msgs::msg::Point & reference_point, const double distance_tolerance_m_) const; }; -#endif // NDT_SCAN_MATCHER__POSE_ARRAY_INTERPOLATOR_HPP_ +#endif // LOCALIZATION_UTIL__POSE_ARRAY_INTERPOLATOR_HPP_ diff --git a/localization/ndt_scan_matcher/include/ndt_scan_matcher/tf2_listener_module.hpp b/localization/localization_util/include/localization_util/tf2_listener_module.hpp similarity index 88% rename from localization/ndt_scan_matcher/include/ndt_scan_matcher/tf2_listener_module.hpp rename to localization/localization_util/include/localization_util/tf2_listener_module.hpp index 159acbd75ce3d..b332f9effe0e0 100644 --- a/localization/ndt_scan_matcher/include/ndt_scan_matcher/tf2_listener_module.hpp +++ b/localization/localization_util/include/localization_util/tf2_listener_module.hpp @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef NDT_SCAN_MATCHER__TF2_LISTENER_MODULE_HPP_ -#define NDT_SCAN_MATCHER__TF2_LISTENER_MODULE_HPP_ +#ifndef LOCALIZATION_UTIL__TF2_LISTENER_MODULE_HPP_ +#define LOCALIZATION_UTIL__TF2_LISTENER_MODULE_HPP_ #include @@ -40,4 +40,4 @@ class Tf2ListenerModule tf2_ros::TransformListener tf2_listener_; }; -#endif // NDT_SCAN_MATCHER__TF2_LISTENER_MODULE_HPP_ +#endif // LOCALIZATION_UTIL__TF2_LISTENER_MODULE_HPP_ diff --git a/localization/ndt_scan_matcher/include/ndt_scan_matcher/util_func.hpp b/localization/localization_util/include/localization_util/util_func.hpp similarity index 96% rename from localization/ndt_scan_matcher/include/ndt_scan_matcher/util_func.hpp rename to localization/localization_util/include/localization_util/util_func.hpp index 5c37cef36c422..bd9a2b9305e25 100644 --- a/localization/ndt_scan_matcher/include/ndt_scan_matcher/util_func.hpp +++ b/localization/localization_util/include/localization_util/util_func.hpp @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#ifndef NDT_SCAN_MATCHER__UTIL_FUNC_HPP_ -#define NDT_SCAN_MATCHER__UTIL_FUNC_HPP_ +#ifndef LOCALIZATION_UTIL__UTIL_FUNC_HPP_ +#define LOCALIZATION_UTIL__UTIL_FUNC_HPP_ #include #include @@ -91,4 +91,4 @@ void output_pose_with_cov_to_log( const rclcpp::Logger logger, const std::string & prefix, const geometry_msgs::msg::PoseWithCovarianceStamped & pose_with_cov); -#endif // NDT_SCAN_MATCHER__UTIL_FUNC_HPP_ +#endif // LOCALIZATION_UTIL__UTIL_FUNC_HPP_ diff --git a/localization/localization_util/package.xml b/localization/localization_util/package.xml new file mode 100644 index 0000000000000..20f9b160212b5 --- /dev/null +++ b/localization/localization_util/package.xml @@ -0,0 +1,40 @@ + + + + localization_util + 0.1.0 + The localization_util package + Yamato Ando + Masahiro Sakamoto + Shintaro Sakoda + Kento Yabuuchi + Apache License 2.0 + Yamato Ando + + ament_cmake_auto + autoware_cmake + + fmt + geometry_msgs + nav_msgs + rclcpp + sensor_msgs + std_msgs + std_srvs + tf2 + tf2_eigen + tf2_geometry_msgs + tf2_ros + tf2_sensor_msgs + tier4_autoware_utils + tier4_debug_msgs + tier4_localization_msgs + visualization_msgs + + ament_cmake_cppcheck + ament_lint_auto + + + ament_cmake + + diff --git a/localization/ndt_scan_matcher/src/pose_array_interpolator.cpp b/localization/localization_util/src/pose_array_interpolator.cpp similarity index 98% rename from localization/ndt_scan_matcher/src/pose_array_interpolator.cpp rename to localization/localization_util/src/pose_array_interpolator.cpp index f09b71523e804..ebc904fcf8d38 100644 --- a/localization/ndt_scan_matcher/src/pose_array_interpolator.cpp +++ b/localization/localization_util/src/pose_array_interpolator.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "ndt_scan_matcher/pose_array_interpolator.hpp" +#include "localization_util/pose_array_interpolator.hpp" PoseArrayInterpolator::PoseArrayInterpolator( rclcpp::Node * node, const rclcpp::Time & target_ros_time, diff --git a/localization/ndt_scan_matcher/src/tf2_listener_module.cpp b/localization/localization_util/src/tf2_listener_module.cpp similarity index 97% rename from localization/ndt_scan_matcher/src/tf2_listener_module.cpp rename to localization/localization_util/src/tf2_listener_module.cpp index 364952a631f06..8a19ceec9f30d 100644 --- a/localization/ndt_scan_matcher/src/tf2_listener_module.cpp +++ b/localization/localization_util/src/tf2_listener_module.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "ndt_scan_matcher/tf2_listener_module.hpp" +#include "localization_util/tf2_listener_module.hpp" #include diff --git a/localization/ndt_scan_matcher/src/util_func.cpp b/localization/localization_util/src/util_func.cpp similarity index 99% rename from localization/ndt_scan_matcher/src/util_func.cpp rename to localization/localization_util/src/util_func.cpp index 52cb7844ab241..865cc02cff293 100644 --- a/localization/ndt_scan_matcher/src/util_func.cpp +++ b/localization/localization_util/src/util_func.cpp @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "ndt_scan_matcher/util_func.hpp" +#include "localization_util/util_func.hpp" -#include "ndt_scan_matcher/matrix_type.hpp" +#include "localization_util/matrix_type.hpp" static std::random_device seed_gen; diff --git a/localization/ndt_scan_matcher/CMakeLists.txt b/localization/ndt_scan_matcher/CMakeLists.txt index 1d5a9d5ac5320..79491a85a0a66 100644 --- a/localization/ndt_scan_matcher/CMakeLists.txt +++ b/localization/ndt_scan_matcher/CMakeLists.txt @@ -29,9 +29,6 @@ ament_auto_add_executable(ndt_scan_matcher src/debug.cpp src/ndt_scan_matcher_node.cpp src/ndt_scan_matcher_core.cpp - src/util_func.cpp - src/pose_array_interpolator.cpp - src/tf2_listener_module.cpp src/map_module.cpp src/map_update_module.cpp ) @@ -39,6 +36,13 @@ ament_auto_add_executable(ndt_scan_matcher link_directories(${PCL_LIBRARY_DIRS}) target_link_libraries(ndt_scan_matcher ${PCL_LIBRARIES}) +if(BUILD_TESTING) + add_ros_test( + test/test_ndt_scan_matcher_launch.py + TIMEOUT "30" + ) +endif() + ament_auto_package( INSTALL_TO_SHARE launch diff --git a/localization/ndt_scan_matcher/README.md b/localization/ndt_scan_matcher/README.md index a1b9339b9780a..e210d3a28796f 100644 --- a/localization/ndt_scan_matcher/README.md +++ b/localization/ndt_scan_matcher/README.md @@ -38,6 +38,7 @@ One optional function is regularization. Please see the regularization chapter i | `transform_probability` | `tier4_debug_msgs::msg::Float32Stamped` | [debug topic] score of scan matching | | `no_ground_transform_probability` | `tier4_debug_msgs::msg::Float32Stamped` | [debug topic] score of scan matching based on de-grounded LiDAR scan | | `iteration_num` | `tier4_debug_msgs::msg::Int32Stamped` | [debug topic] number of scan matching iterations | +| `initial_to_result_relative_pose` | `geometry_msgs::msg::PoseStamped` | [debug topic] relative pose between the initial point and the convergence point | | `initial_to_result_distance` | `tier4_debug_msgs::msg::Float32Stamped` | [debug topic] distance difference between the initial point and the convergence point [m] | | `initial_to_result_distance_old` | `tier4_debug_msgs::msg::Float32Stamped` | [debug topic] distance difference between the older of the two initial points used in linear interpolation and the convergence point [m] | | `initial_to_result_distance_new` | `tier4_debug_msgs::msg::Float32Stamped` | [debug topic] distance difference between the newer of the two initial points used in linear interpolation and the convergence point [m] | @@ -68,6 +69,7 @@ One optional function is regularization. Please see the regularization chapter i | `converged_param_transform_probability` | double | TP threshold for deciding whether to trust the estimation result (when converged_param_type = 0) | | `converged_param_nearest_voxel_transformation_likelihood` | double | NVTL threshold for deciding whether to trust the estimation result (when converged_param_type = 1) | | `initial_estimate_particles_num` | int | The number of particles to estimate initial pose | +| `n_startup_trials` | int | The number of initial random trials in the TPE (Tree-Structured Parzen Estimator). | | `lidar_topic_timeout_sec` | double | Tolerance of timestamp difference between current time and sensor pointcloud | | `initial_pose_timeout_sec` | int | Tolerance of timestamp difference between initial_pose and sensor pointcloud. [sec] | | `initial_pose_distance_tolerance_m` | double | Tolerance of distance difference between two initial poses used for linear interpolation. 
[m] | diff --git a/localization/ndt_scan_matcher/config/ndt_scan_matcher.param.yaml b/localization/ndt_scan_matcher/config/ndt_scan_matcher.param.yaml index cf41a4cf55d0b..0ba1b1a2e15f4 100644 --- a/localization/ndt_scan_matcher/config/ndt_scan_matcher.param.yaml +++ b/localization/ndt_scan_matcher/config/ndt_scan_matcher.param.yaml @@ -43,6 +43,11 @@ # The number of particles to estimate initial pose initial_estimate_particles_num: 200 + # The number of initial random trials in the TPE (Tree-Structured Parzen Estimator). + # This value should be equal to or less than 'initial_estimate_particles_num' and more than 0. + # If it is equal to 'initial_estimate_particles_num', the search will be the same as a full random search. + n_startup_trials: 20 + # Tolerance of timestamp difference between current time and sensor pointcloud. [sec] lidar_topic_timeout_sec: 1.0 diff --git a/localization/ndt_scan_matcher/include/ndt_scan_matcher/map_update_module.hpp b/localization/ndt_scan_matcher/include/ndt_scan_matcher/map_update_module.hpp index 4f630bb8c5898..13721c03ca22e 100644 --- a/localization/ndt_scan_matcher/include/ndt_scan_matcher/map_update_module.hpp +++ b/localization/ndt_scan_matcher/include/ndt_scan_matcher/map_update_module.hpp @@ -15,10 +15,10 @@ #ifndef NDT_SCAN_MATCHER__MAP_UPDATE_MODULE_HPP_ #define NDT_SCAN_MATCHER__MAP_UPDATE_MODULE_HPP_ +#include "localization_util/tf2_listener_module.hpp" +#include "localization_util/util_func.hpp" #include "ndt_scan_matcher/debug.hpp" #include "ndt_scan_matcher/particle.hpp" -#include "ndt_scan_matcher/tf2_listener_module.hpp" -#include "ndt_scan_matcher/util_func.hpp" #include #include diff --git a/localization/ndt_scan_matcher/include/ndt_scan_matcher/ndt_scan_matcher_core.hpp b/localization/ndt_scan_matcher/include/ndt_scan_matcher/ndt_scan_matcher_core.hpp index 011cda6ba3356..e5f46b5022b51 100644 --- a/localization/ndt_scan_matcher/include/ndt_scan_matcher/ndt_scan_matcher_core.hpp +++ b/localization/ndt_scan_matcher/include/ndt_scan_matcher/ndt_scan_matcher_core.hpp @@ -17,9 +17,9 @@ #define FMT_HEADER_ONLY +#include "localization_util/tf2_listener_module.hpp" #include "ndt_scan_matcher/map_module.hpp" #include "ndt_scan_matcher/map_update_module.hpp" -#include "ndt_scan_matcher/tf2_listener_module.hpp" #include @@ -92,8 +92,7 @@ class NDTScanMatcher : public rclcpp::Node void callback_regularization_pose( geometry_msgs::msg::PoseWithCovarianceStamped::ConstSharedPtr pose_conv_msg_ptr); - geometry_msgs::msg::PoseWithCovarianceStamped align_using_monte_carlo( - const std::shared_ptr & ndt_ptr, + geometry_msgs::msg::PoseWithCovarianceStamped align_pose( const geometry_msgs::msg::PoseWithCovarianceStamped & initial_pose_with_cov); void transform_sensor_measurement( @@ -111,7 +110,7 @@ class NDTScanMatcher : public rclcpp::Node const pcl::shared_ptr> & sensor_points_in_map_ptr); void publish_marker( const rclcpp::Time & sensor_ros_time, const std::vector & pose_array); - void publish_initial_to_result_distances( + void publish_initial_to_result( const rclcpp::Time & sensor_ros_time, const geometry_msgs::msg::Pose & result_pose_msg, const geometry_msgs::msg::PoseWithCovarianceStamped & initial_pose_cov_msg, const geometry_msgs::msg::PoseWithCovarianceStamped & initial_pose_old_msg, @@ -150,6 +149,8 @@ class NDTScanMatcher : public rclcpp::Node rclcpp::Publisher::SharedPtr no_ground_nearest_voxel_transformation_likelihood_pub_; rclcpp::Publisher::SharedPtr iteration_num_pub_; + rclcpp::Publisher::SharedPtr + 
initial_to_result_relative_pose_pub_; rclcpp::Publisher::SharedPtr initial_to_result_distance_pub_; rclcpp::Publisher::SharedPtr @@ -179,6 +180,7 @@ class NDTScanMatcher : public rclcpp::Node double converged_param_nearest_voxel_transformation_likelihood_; int initial_estimate_particles_num_; + int n_startup_trials_; double lidar_topic_timeout_sec_; double initial_pose_timeout_sec_; double initial_pose_distance_tolerance_m_; diff --git a/localization/ndt_scan_matcher/package.xml b/localization/ndt_scan_matcher/package.xml index 92c690a708492..a9083c4ae0ae4 100644 --- a/localization/ndt_scan_matcher/package.xml +++ b/localization/ndt_scan_matcher/package.xml @@ -19,6 +19,7 @@ fmt geometry_msgs libpcl-all-dev + localization_util nav_msgs ndt_omp pcl_conversions @@ -34,10 +35,12 @@ tier4_autoware_utils tier4_debug_msgs tier4_localization_msgs + tree_structured_parzen_estimator visualization_msgs ament_cmake_cppcheck ament_lint_auto + ros_testing ament_cmake diff --git a/localization/ndt_scan_matcher/src/debug.cpp b/localization/ndt_scan_matcher/src/debug.cpp index 9c82e06d89b80..941f682de5803 100644 --- a/localization/ndt_scan_matcher/src/debug.cpp +++ b/localization/ndt_scan_matcher/src/debug.cpp @@ -14,7 +14,7 @@ #include "ndt_scan_matcher/debug.hpp" -#include "ndt_scan_matcher/util_func.hpp" +#include "localization_util/util_func.hpp" visualization_msgs::msg::MarkerArray make_debug_markers( const builtin_interfaces::msg::Time & stamp, const std::string & map_frame_, diff --git a/localization/ndt_scan_matcher/src/map_update_module.cpp b/localization/ndt_scan_matcher/src/map_update_module.cpp index 9c94467dc94c0..7512ae2e77a21 100644 --- a/localization/ndt_scan_matcher/src/map_update_module.cpp +++ b/localization/ndt_scan_matcher/src/map_update_module.cpp @@ -55,12 +55,6 @@ MapUpdateModule::MapUpdateModule( pcd_loader_client_ = node->create_client("pcd_loader_service"); - while (!pcd_loader_client_->wait_for_service(std::chrono::seconds(1)) && rclcpp::ok()) { - RCLCPP_INFO( - logger_, - "Waiting for pcd loader service. Check if the enable_differential_load in " - "pointcloud_map_loader is set `true`."); - } double map_update_dt = 1.0; auto period_ns = std::chrono::duration_cast( @@ -117,7 +111,14 @@ void MapUpdateModule::update_map(const geometry_msgs::msg::Point & position) request->area.radius = static_cast(dynamic_map_loading_map_radius_); request->cached_ids = ndt_ptr_->getCurrentMapIDs(); - // // send a request to map_loader + while (!pcd_loader_client_->wait_for_service(std::chrono::seconds(1)) && rclcpp::ok()) { + RCLCPP_INFO( + logger_, + "Waiting for pcd loader service. 
Check if the enable_differential_load in " + "pointcloud_map_loader is set `true`."); + } + + // send a request to map_loader auto result{pcd_loader_client_->async_send_request( request, [](rclcpp::Client::SharedFuture) {})}; diff --git a/localization/ndt_scan_matcher/src/ndt_scan_matcher_core.cpp b/localization/ndt_scan_matcher/src/ndt_scan_matcher_core.cpp index d9054d77d2256..b38e3f0826184 100644 --- a/localization/ndt_scan_matcher/src/ndt_scan_matcher_core.cpp +++ b/localization/ndt_scan_matcher/src/ndt_scan_matcher_core.cpp @@ -14,14 +14,17 @@ #include "ndt_scan_matcher/ndt_scan_matcher_core.hpp" -#include "ndt_scan_matcher/matrix_type.hpp" +#include "localization_util/matrix_type.hpp" +#include "localization_util/pose_array_interpolator.hpp" +#include "localization_util/util_func.hpp" #include "ndt_scan_matcher/particle.hpp" -#include "ndt_scan_matcher/pose_array_interpolator.hpp" -#include "ndt_scan_matcher/util_func.hpp" +#include "tree_structured_parzen_estimator/tree_structured_parzen_estimator.hpp" #include #include +#include + #include #ifdef ROS_DISTRO_GALACTIC @@ -145,6 +148,7 @@ NDTScanMatcher::NDTScanMatcher() } initial_estimate_particles_num_ = this->declare_parameter("initial_estimate_particles_num"); + n_startup_trials_ = this->declare_parameter("n_startup_trials"); estimate_scores_for_degrounded_scan_ = this->declare_parameter("estimate_scores_for_degrounded_scan"); @@ -201,6 +205,8 @@ NDTScanMatcher::NDTScanMatcher() "no_ground_nearest_voxel_transformation_likelihood", 10); iteration_num_pub_ = this->create_publisher("iteration_num", 10); + initial_to_result_relative_pose_pub_ = + this->create_publisher("initial_to_result_relative_pose", 10); initial_to_result_distance_pub_ = this->create_publisher("initial_to_result_distance", 10); initial_to_result_distance_old_pub_ = @@ -442,20 +448,8 @@ void NDTScanMatcher::callback_sensor_points( } // perform several validations - /***************************************************************************** - The reason the add 2 to the ndt_ptr_->getMaximumIterations() is that there are bugs in - implementation of ndt. - 1. gradient descent method ends when the iteration is greater than max_iteration if it does not - converge (be careful it's 'greater than' instead of 'greater equal than'.) - https://github.com/tier4/autoware.iv/blob/2323e5baa0b680d43a9219f5fb3b7a11dd9edc82/localization/pose_estimator/ndt_scan_matcher/ndt_omp/include/ndt_omp/ndt_omp_impl.hpp#L212 - 2. iterate iteration count when end of gradient descent function. - https://github.com/tier4/autoware.iv/blob/2323e5baa0b680d43a9219f5fb3b7a11dd9edc82/localization/pose_estimator/ndt_scan_matcher/ndt_omp/include/ndt_omp/ndt_omp_impl.hpp#L217 - - These bugs are now resolved in original pcl implementation. 
- https://github.com/PointCloudLibrary/pcl/blob/424c1c6a0ca97d94ca63e5daff4b183a4db8aae4/registration/include/pcl/registration/impl/ndt.hpp#L73-L180 - *****************************************************************************/ bool is_ok_iteration_num = - validate_num_iteration(ndt_result.iteration_num, ndt_ptr_->getMaximumIterations() + 2); + validate_num_iteration(ndt_result.iteration_num, ndt_ptr_->getMaximumIterations()); bool is_local_optimal_solution_oscillation = false; if (!is_ok_iteration_num) { is_local_optimal_solution_oscillation = validate_local_optimal_solution_oscillation( @@ -483,7 +477,7 @@ void NDTScanMatcher::callback_sensor_points( publish_tf(sensor_ros_time, result_pose_msg); publish_pose(sensor_ros_time, result_pose_msg, is_converged); publish_marker(sensor_ros_time, transformation_msg_array); - publish_initial_to_result_distances( + publish_initial_to_result( sensor_ros_time, result_pose_msg, interpolator.get_current_pose(), interpolator.get_old_pose(), interpolator.get_new_pose()); @@ -624,12 +618,19 @@ void NDTScanMatcher::publish_marker( ndt_marker_pub_->publish(marker_array); } -void NDTScanMatcher::publish_initial_to_result_distances( +void NDTScanMatcher::publish_initial_to_result( const rclcpp::Time & sensor_ros_time, const geometry_msgs::msg::Pose & result_pose_msg, const geometry_msgs::msg::PoseWithCovarianceStamped & initial_pose_cov_msg, const geometry_msgs::msg::PoseWithCovarianceStamped & initial_pose_old_msg, const geometry_msgs::msg::PoseWithCovarianceStamped & initial_pose_new_msg) { + geometry_msgs::msg::PoseStamped initial_to_result_relative_pose_stamped; + initial_to_result_relative_pose_stamped.pose = + tier4_autoware_utils::inverseTransformPose(result_pose_msg, initial_pose_cov_msg.pose.pose); + initial_to_result_relative_pose_stamped.header.stamp = sensor_ros_time; + initial_to_result_relative_pose_stamped.header.frame_id = map_frame_; + initial_to_result_relative_pose_pub_->publish(initial_to_result_relative_pose_stamped); + const auto initial_to_result_distance = static_cast(norm(initial_pose_cov_msg.pose.pose.position, result_pose_msg.position)); initial_to_result_distance_pub_->publish( @@ -751,51 +752,95 @@ void NDTScanMatcher::service_ndt_align( map_update_module_->update_map(initial_pose_msg_in_map_frame.pose.pose.position); } + // mutex Map + std::lock_guard lock(ndt_ptr_mtx_); + if (ndt_ptr_->getInputTarget() == nullptr) { res->success = false; - RCLCPP_WARN(get_logger(), "No InputTarget"); + RCLCPP_WARN( + get_logger(), "No InputTarget. Please check the map file and the map_loader service"); return; } if (ndt_ptr_->getInputSource() == nullptr) { res->success = false; - RCLCPP_WARN(get_logger(), "No InputSource"); + RCLCPP_WARN(get_logger(), "No InputSource. 
Please check the input lidar topic"); return; } - // mutex Map - std::lock_guard lock(ndt_ptr_mtx_); - (*state_ptr_)["state"] = "Aligning"; - res->pose_with_covariance = align_using_monte_carlo(ndt_ptr_, initial_pose_msg_in_map_frame); + res->pose_with_covariance = align_pose(initial_pose_msg_in_map_frame); (*state_ptr_)["state"] = "Sleeping"; res->success = true; res->pose_with_covariance.pose.covariance = req->pose_with_covariance.pose.covariance; } -geometry_msgs::msg::PoseWithCovarianceStamped NDTScanMatcher::align_using_monte_carlo( - const std::shared_ptr & ndt_ptr, +geometry_msgs::msg::PoseWithCovarianceStamped NDTScanMatcher::align_pose( const geometry_msgs::msg::PoseWithCovarianceStamped & initial_pose_with_cov) { - if (ndt_ptr->getInputTarget() == nullptr || ndt_ptr->getInputSource() == nullptr) { - RCLCPP_WARN(get_logger(), "No Map or Sensor PointCloud"); - return geometry_msgs::msg::PoseWithCovarianceStamped(); - } - - output_pose_with_cov_to_log(get_logger(), "align_using_monte_carlo_input", initial_pose_with_cov); - - // generateParticle - const auto initial_poses = - create_random_pose_array(initial_pose_with_cov, initial_estimate_particles_num_); + output_pose_with_cov_to_log(get_logger(), "align_pose_input", initial_pose_with_cov); + + const auto base_rpy = get_rpy(initial_pose_with_cov); + const Eigen::Map covariance = { + initial_pose_with_cov.pose.covariance.data(), 6, 6}; + const double stddev_x = std::sqrt(covariance(0, 0)); + const double stddev_y = std::sqrt(covariance(1, 1)); + const double stddev_z = std::sqrt(covariance(2, 2)); + const double stddev_roll = std::sqrt(covariance(3, 3)); + const double stddev_pitch = std::sqrt(covariance(4, 4)); + + // Let phi be the cumulative distribution function of the standard normal distribution. + // It has the following relationship with the error function (erf). + // phi(x) = 1/2 (1 + erf(x / sqrt(2))) + // so, 2 * phi(x) - 1 = erf(x / sqrt(2)). + // The range taken by 2 * phi(x) - 1 is [-1, 1], so it can be used as a uniform distribution in + // TPE. Let u = 2 * phi(x) - 1, then x = sqrt(2) * erf_inv(u). Computationally, it is not a good + // to give erf_inv -1 and 1, so it is rounded off at (-1 + eps, 1 - eps). + const double SQRT2 = std::sqrt(2); + auto uniform_to_normal = [&SQRT2](const double uniform) { + assert(-1.0 <= uniform && uniform <= 1.0); + constexpr double epsilon = 1.0e-6; + const double clamped = std::clamp(uniform, -1.0 + epsilon, 1.0 - epsilon); + return boost::math::erf_inv(clamped) * SQRT2; + }; + + auto normal_to_uniform = [&SQRT2](const double normal) { + return boost::math::erf(normal / SQRT2); + }; + + // Optimizing (x, y, z, roll, pitch, yaw) 6 dimensions. + // The last dimension (yaw) is a loop variable. + // Although roll and pitch are also angles, they are considered non-looping variables that follow + // a normal distribution with a small standard deviation. This assumes that the initial pose of + // the ego vehicle is aligned with the ground to some extent about roll and pitch. 
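A minimal, standalone round-trip check of the uniform/normal conversion described in the comments above, using the same Boost special functions; this is only an illustrative sketch, not part of the node code in this diff.

```cpp
// Round trip between u in [-1, 1] (the TPE's uniform domain) and x ~ N(0, 1):
//   u = 2 * phi(x) - 1 = erf(x / sqrt(2))   (normal -> uniform)
//   x = sqrt(2) * erf_inv(u)                (uniform -> normal)
#include <boost/math/special_functions/erf.hpp>

#include <cassert>
#include <cmath>

int main()
{
  const double sqrt2 = std::sqrt(2.0);
  for (double u = -0.9; u <= 0.9; u += 0.1) {
    const double x = boost::math::erf_inv(u) * sqrt2;   // uniform -> normal
    const double u_back = boost::math::erf(x / sqrt2);  // normal -> uniform
    assert(std::fabs(u - u_back) < 1e-9);                // the mapping is invertible
  }
  return 0;
}
```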
+ const std::vector is_loop_variable = {false, false, false, false, false, true}; + TreeStructuredParzenEstimator tpe( + TreeStructuredParzenEstimator::Direction::MAXIMIZE, n_startup_trials_, is_loop_variable); std::vector particle_array; auto output_cloud = std::make_shared>(); - for (unsigned int i = 0; i < initial_poses.size(); i++) { - const auto & initial_pose = initial_poses[i]; + for (int i = 0; i < initial_estimate_particles_num_; i++) { + const TreeStructuredParzenEstimator::Input input = tpe.get_next_input(); + + geometry_msgs::msg::Pose initial_pose; + initial_pose.position.x = + initial_pose_with_cov.pose.pose.position.x + uniform_to_normal(input[0]) * stddev_x; + initial_pose.position.y = + initial_pose_with_cov.pose.pose.position.y + uniform_to_normal(input[1]) * stddev_y; + initial_pose.position.z = + initial_pose_with_cov.pose.pose.position.z + uniform_to_normal(input[2]) * stddev_z; + geometry_msgs::msg::Vector3 init_rpy; + init_rpy.x = base_rpy.x + uniform_to_normal(input[3]) * stddev_roll; + init_rpy.y = base_rpy.y + uniform_to_normal(input[4]) * stddev_pitch; + init_rpy.z = base_rpy.z + input[5] * M_PI; + tf2::Quaternion tf_quaternion; + tf_quaternion.setRPY(init_rpy.x, init_rpy.y, init_rpy.z); + initial_pose.orientation = tf2::toMsg(tf_quaternion); + const Eigen::Matrix4f initial_pose_matrix = pose_to_matrix4f(initial_pose); - ndt_ptr->align(*output_cloud, initial_pose_matrix); - const pclomp::NdtResult ndt_result = ndt_ptr->getResult(); + ndt_ptr_->align(*output_cloud, initial_pose_matrix); + const pclomp::NdtResult ndt_result = ndt_ptr_->getResult(); Particle particle( initial_pose, matrix4f_to_pose(ndt_result.pose), ndt_result.transform_probability, @@ -806,9 +851,30 @@ geometry_msgs::msg::PoseWithCovarianceStamped NDTScanMatcher::align_using_monte_ particle, i); ndt_monte_carlo_initial_pose_marker_pub_->publish(marker_array); + const geometry_msgs::msg::Pose pose = matrix4f_to_pose(ndt_result.pose); + const geometry_msgs::msg::Vector3 rpy = get_rpy(pose); + + const double diff_x = pose.position.x - initial_pose_with_cov.pose.pose.position.x; + const double diff_y = pose.position.y - initial_pose_with_cov.pose.pose.position.y; + const double diff_z = pose.position.z - initial_pose_with_cov.pose.pose.position.z; + const double diff_roll = rpy.x - base_rpy.x; + const double diff_pitch = rpy.y - base_rpy.y; + const double diff_yaw = rpy.z - base_rpy.z; + + // Only yaw is a loop_variable, so only simple normalization is performed. + // All other variables are converted from normal distribution to uniform distribution. 
+ TreeStructuredParzenEstimator::Input result(is_loop_variable.size()); + result[0] = normal_to_uniform(diff_x / stddev_x); + result[1] = normal_to_uniform(diff_y / stddev_y); + result[2] = normal_to_uniform(diff_z / stddev_z); + result[3] = normal_to_uniform(diff_roll / stddev_roll); + result[4] = normal_to_uniform(diff_pitch / stddev_pitch); + result[5] = diff_yaw / M_PI; + tpe.add_trial(TreeStructuredParzenEstimator::Trial{result, ndt_result.transform_probability}); + auto sensor_points_in_map_ptr = std::make_shared>(); tier4_autoware_utils::transformPointCloud( - *ndt_ptr->getInputSource(), *sensor_points_in_map_ptr, ndt_result.pose); + *ndt_ptr_->getInputSource(), *sensor_points_in_map_ptr, ndt_result.pose); publish_point_cloud(initial_pose_with_cov.header.stamp, map_frame_, sensor_points_in_map_ptr); } @@ -821,8 +887,7 @@ geometry_msgs::msg::PoseWithCovarianceStamped NDTScanMatcher::align_using_monte_ result_pose_with_cov_msg.header.frame_id = map_frame_; result_pose_with_cov_msg.pose.pose = best_particle_ptr->result_pose; - output_pose_with_cov_to_log( - get_logger(), "align_using_monte_carlo_output", result_pose_with_cov_msg); + output_pose_with_cov_to_log(get_logger(), "align_pose_output", result_pose_with_cov_msg); RCLCPP_INFO_STREAM(get_logger(), "best_score," << best_particle_ptr->score); return result_pose_with_cov_msg; diff --git a/localization/ndt_scan_matcher/test/test_ndt_scan_matcher_launch.py b/localization/ndt_scan_matcher/test/test_ndt_scan_matcher_launch.py new file mode 100644 index 0000000000000..2dc4958c4704f --- /dev/null +++ b/localization/ndt_scan_matcher/test/test_ndt_scan_matcher_launch.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 + +# Copyright 2023 TIER IV, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import unittest + +from ament_index_python import get_package_share_directory +import launch +from launch.actions import IncludeLaunchDescription +from launch.launch_description_sources import AnyLaunchDescriptionSource +from launch.logging import get_logger +import launch_testing +import pytest +import rclpy +from std_srvs.srv import SetBool + +logger = get_logger(__name__) + + +@pytest.mark.launch_test +def generate_test_description(): + test_ndt_scan_matcher_launch_file = os.path.join( + get_package_share_directory("ndt_scan_matcher"), + "launch", + "ndt_scan_matcher.launch.xml", + ) + ndt_scan_matcher = IncludeLaunchDescription( + AnyLaunchDescriptionSource(test_ndt_scan_matcher_launch_file), + ) + + return launch.LaunchDescription( + [ + ndt_scan_matcher, + # Start tests right away - no need to wait for anything + launch_testing.actions.ReadyToTest(), + ] + ) + + +class TestNDTScanMatcher(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Initialize the ROS context for the test node + rclpy.init() + + @classmethod + def tearDownClass(cls): + # Shutdown the ROS context + rclpy.shutdown() + + def setUp(self): + # Create a ROS node for tests + self.test_node = rclpy.create_node("test_node") + + def tearDown(self): + self.test_node.destroy_node() + + @staticmethod + def print_message(stat): + logger.debug("===========================") + logger.debug(stat) + + def test_launch(self): + # Trigger ndt_scan_matcher to activate the node + cli_trigger = self.test_node.create_client(SetBool, "/trigger_node") + while not cli_trigger.wait_for_service(timeout_sec=1.0): + continue + + request = SetBool.Request() + request.data = True + future = cli_trigger.call_async(request) + rclpy.spin_until_future_complete(self.test_node, future) + + if future.result() is not None: + self.test_node.get_logger().info("Result of bool service: %s" % future.result().message) + self.assertTrue(future.result().success, "ndt_scan_matcher is not activated") + else: + self.test_node.get_logger().error( + "Exception while calling service: %r" % future.exception() + ) + raise self.failureException("service trigger failed") + + +@launch_testing.post_shutdown_test() +class TestProcessOutput(unittest.TestCase): + def test_exit_code(self, proc_info): + # Check that process exits with code 0: no error + launch_testing.asserts.assertExitCodes(proc_info) diff --git a/localization/tree_structured_parzen_estimator/CMakeLists.txt b/localization/tree_structured_parzen_estimator/CMakeLists.txt new file mode 100644 index 0000000000000..7b687a12a003c --- /dev/null +++ b/localization/tree_structured_parzen_estimator/CMakeLists.txt @@ -0,0 +1,23 @@ +cmake_minimum_required(VERSION 3.14) +project(tree_structured_parzen_estimator) + +find_package(autoware_cmake REQUIRED) +autoware_package() + +ament_auto_add_library(tree_structured_parzen_estimator SHARED + src/tree_structured_parzen_estimator.cpp +) + +if(BUILD_TESTING) + find_package(ament_cmake_gtest REQUIRED) + ament_add_gtest(test_tpe + test/test_tpe.cpp + src/tree_structured_parzen_estimator.cpp + ) + target_include_directories(test_tpe PRIVATE include) + target_link_libraries(test_tpe) +endif() + +ament_auto_package( + INSTALL_TO_SHARE +) diff --git a/localization/tree_structured_parzen_estimator/README.md b/localization/tree_structured_parzen_estimator/README.md new file mode 100644 index 0000000000000..2b21e65929485 --- /dev/null +++ b/localization/tree_structured_parzen_estimator/README.md @@ -0,0 +1,5 @@ +# tree_structured_parzen_estimator + 
+`tree_structured_parzen_estimator`` is a package for black-box optimization. + +This package does not have a node, it is just a library. diff --git a/localization/tree_structured_parzen_estimator/include/tree_structured_parzen_estimator/tree_structured_parzen_estimator.hpp b/localization/tree_structured_parzen_estimator/include/tree_structured_parzen_estimator/tree_structured_parzen_estimator.hpp new file mode 100644 index 0000000000000..b7b522b4e6b76 --- /dev/null +++ b/localization/tree_structured_parzen_estimator/include/tree_structured_parzen_estimator/tree_structured_parzen_estimator.hpp @@ -0,0 +1,80 @@ +// Copyright 2023 Autoware Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef TREE_STRUCTURED_PARZEN_ESTIMATOR__TREE_STRUCTURED_PARZEN_ESTIMATOR_HPP_ +#define TREE_STRUCTURED_PARZEN_ESTIMATOR__TREE_STRUCTURED_PARZEN_ESTIMATOR_HPP_ + +/* +A implementation of tree-structured parzen estimator (TPE) +See below pdf for the TPE algorithm detail. +https://papers.nips.cc/paper_files/paper/2011/file/86e8f7ab32cfd12577bc2619bc635690-Paper.pdf + +Optuna is also used as a reference for implementation. +https://github.com/optuna/optuna +*/ + +#include +#include +#include + +class TreeStructuredParzenEstimator +{ +public: + using Input = std::vector; + using Score = double; + struct Trial + { + Input input; + Score score; + }; + + enum Direction { + MINIMIZE = 0, + MAXIMIZE = 1, + }; + + TreeStructuredParzenEstimator() = delete; + TreeStructuredParzenEstimator( + const Direction direction, const int64_t n_startup_trials, std::vector is_loop_variable); + void add_trial(const Trial & trial); + Input get_next_input() const; + +private: + static constexpr double BASE_STDDEV_COEFF = 0.2; + static constexpr double MAX_GOOD_RATE = 0.10; + static constexpr double MAX_VALUE = 1.0; + static constexpr double MIN_VALUE = -1.0; + static constexpr double VALUE_WIDTH = MAX_VALUE - MIN_VALUE; + static constexpr int64_t N_EI_CANDIDATES = 100; + static constexpr double PRIOR_WEIGHT = 0.0; + + static std::mt19937_64 engine; + static std::uniform_real_distribution dist_uniform; + static std::normal_distribution dist_normal; + + double compute_log_likelihood_ratio(const Input & input) const; + double log_gaussian_pdf(const Input & input, const Input & mu, const Input & sigma) const; + static std::vector get_weights(const int64_t n); + static double normalize_loop_variable(const double value); + + std::vector trials_; + int64_t above_num_; + const Direction direction_; + const int64_t n_startup_trials_; + const int64_t input_dimension_; + const std::vector is_loop_variable_; + const Input base_stddev_; +}; + +#endif // TREE_STRUCTURED_PARZEN_ESTIMATOR__TREE_STRUCTURED_PARZEN_ESTIMATOR_HPP_ diff --git a/localization/tree_structured_parzen_estimator/package.xml b/localization/tree_structured_parzen_estimator/package.xml new file mode 100644 index 0000000000000..b699d5c69e610 --- /dev/null +++ b/localization/tree_structured_parzen_estimator/package.xml @@ -0,0 +1,23 @@ + + + + 
tree_structured_parzen_estimator + 0.1.0 + The tree_structured_parzen_estimator package + Yamato Ando + Masahiro Sakamoto + Shintaro Sakoda + Kento Yabuuchi + Apache License 2.0 + Shintaro Sakoda + + ament_cmake_auto + autoware_cmake + + ament_cmake_cppcheck + ament_lint_auto + + + ament_cmake + + diff --git a/localization/tree_structured_parzen_estimator/src/tree_structured_parzen_estimator.cpp b/localization/tree_structured_parzen_estimator/src/tree_structured_parzen_estimator.cpp new file mode 100644 index 0000000000000..99c70a844f331 --- /dev/null +++ b/localization/tree_structured_parzen_estimator/src/tree_structured_parzen_estimator.cpp @@ -0,0 +1,217 @@ +// Copyright 2023 Autoware Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "tree_structured_parzen_estimator/tree_structured_parzen_estimator.hpp" + +#include +#include +#include +#include + +// random number generator +std::mt19937_64 TreeStructuredParzenEstimator::engine(std::random_device{}()); +std::uniform_real_distribution TreeStructuredParzenEstimator::dist_uniform( + TreeStructuredParzenEstimator::MIN_VALUE, TreeStructuredParzenEstimator::MAX_VALUE); +std::normal_distribution TreeStructuredParzenEstimator::dist_normal(0.0, 1.0); + +TreeStructuredParzenEstimator::TreeStructuredParzenEstimator( + const Direction direction, const int64_t n_startup_trials, std::vector is_loop_variable) +: above_num_(0), + direction_(direction), + n_startup_trials_(n_startup_trials), + input_dimension_(is_loop_variable.size()), + is_loop_variable_(is_loop_variable), + base_stddev_(input_dimension_, VALUE_WIDTH) +{ +} + +void TreeStructuredParzenEstimator::add_trial(const Trial & trial) +{ + trials_.push_back(trial); + std::sort(trials_.begin(), trials_.end(), [this](const Trial & lhs, const Trial & rhs) { + return (direction_ == Direction::MAXIMIZE ? lhs.score > rhs.score : lhs.score < rhs.score); + }); + above_num_ = + std::min(static_cast(25), static_cast(trials_.size() * MAX_GOOD_RATE)); +} + +TreeStructuredParzenEstimator::Input TreeStructuredParzenEstimator::get_next_input() const +{ + if (static_cast(trials_.size()) < n_startup_trials_ || above_num_ == 0) { + // Random sampling based on prior until the number of trials reaches `n_startup_trials_`. 
+ Input input(input_dimension_); + for (int64_t j = 0; j < input_dimension_; j++) { + input[j] = dist_uniform(engine); + } + return input; + } + + Input best_input; + double best_log_likelihood_ratio = std::numeric_limits::lowest(); + const double coeff = BASE_STDDEV_COEFF * std::pow(above_num_, -1.0 / (4 + input_dimension_)); + std::vector weights = get_weights(above_num_); + weights.push_back(PRIOR_WEIGHT); + std::discrete_distribution dist(weights.begin(), weights.end()); + for (int64_t i = 0; i < N_EI_CANDIDATES; i++) { + Input mu, sigma; + const int64_t index = dist(engine); + if (index == above_num_) { + mu = Input(input_dimension_, 0.0); + sigma = base_stddev_; + } else { + mu = trials_[index].input; + sigma = base_stddev_; + for (int64_t j = 0; j < input_dimension_; j++) { + sigma[j] *= coeff; + } + } + // sample from the normal distribution + Input input(input_dimension_); + for (int64_t j = 0; j < input_dimension_; j++) { + input[j] = mu[j] + dist_normal(engine) * sigma[j]; + input[j] = + (is_loop_variable_[j] ? normalize_loop_variable(input[j]) + : std::clamp(input[j], MIN_VALUE, MAX_VALUE)); + } + const double log_likelihood_ratio = compute_log_likelihood_ratio(input); + if (log_likelihood_ratio > best_log_likelihood_ratio) { + best_log_likelihood_ratio = log_likelihood_ratio; + best_input = input; + } + } + return best_input; +} + +double TreeStructuredParzenEstimator::compute_log_likelihood_ratio(const Input & input) const +{ + const int64_t n = trials_.size(); + + // The above KDE and the below KDE are calculated respectively, and the ratio is the criteria to + // select best sample. + std::vector above_logs; + std::vector below_logs; + + // Scott's rule + const double coeff_above = + BASE_STDDEV_COEFF * std::pow(above_num_, -1.0 / (4 + input_dimension_)); + const double coeff_below = + BASE_STDDEV_COEFF * std::pow(n - above_num_, -1.0 / (4 + input_dimension_)); + Input sigma_above = base_stddev_; + Input sigma_below = base_stddev_; + for (int64_t j = 0; j < input_dimension_; j++) { + sigma_above[j] *= coeff_above; + sigma_below[j] *= coeff_below; + } + + std::vector above_weights = get_weights(above_num_); + std::vector below_weights = get_weights(n - above_num_); + std::reverse(below_weights.begin(), below_weights.end()); // below_weights is ascending order + + // calculate the sum of weights to normalize + double above_sum = std::accumulate(above_weights.begin(), above_weights.end(), 0.0); + double below_sum = std::accumulate(below_weights.begin(), below_weights.end(), 0.0); + + // above includes prior + above_sum += PRIOR_WEIGHT; + + for (int64_t i = 0; i < n; i++) { + if (i < above_num_) { + const double log_p = log_gaussian_pdf(input, trials_[i].input, sigma_above); + const double w = above_weights[i] / above_sum; + const double log_w = std::log(w); + above_logs.push_back(log_p + log_w); + } else { + const double log_p = log_gaussian_pdf(input, trials_[i].input, sigma_below); + const double w = below_weights[i - above_num_] / below_sum; + const double log_w = std::log(w); + below_logs.push_back(log_p + log_w); + } + } + + // prior + if (PRIOR_WEIGHT > 0.0) { + const double log_p = log_gaussian_pdf(input, Input(input_dimension_, 0.0), base_stddev_); + const double log_w = std::log(PRIOR_WEIGHT / above_sum); + above_logs.push_back(log_p + log_w); + } + + auto log_sum_exp = [](const std::vector & log_vec) { + const double max = *std::max_element(log_vec.begin(), log_vec.end()); + double sum = 0.0; + for (const double log_v : log_vec) { + sum += std::exp(log_v - max); + } 
+ return max + std::log(sum); + }; + + const double above = log_sum_exp(above_logs); + const double below = log_sum_exp(below_logs); + const double r = above - below; + return r; +} + +double TreeStructuredParzenEstimator::log_gaussian_pdf( + const Input & input, const Input & mu, const Input & sigma) const +{ + const double log_2pi = std::log(2.0 * M_PI); + auto log_gaussian_pdf_1d = [&](const double diff, const double sigma) { + return -0.5 * log_2pi - std::log(sigma) - (diff * diff) / (2.0 * sigma * sigma); + }; + + const int64_t n = input.size(); + + double result = 0.0; + for (int64_t i = 0; i < n; i++) { + double diff = input[i] - mu[i]; + if (is_loop_variable_[i]) { + diff = normalize_loop_variable(diff); + } + result += log_gaussian_pdf_1d(diff, sigma[i]); + } + return result; +} + +std::vector TreeStructuredParzenEstimator::get_weights(const int64_t n) +{ + // See optuna + // https://github.com/optuna/optuna/blob/4bfab78e98bf786f6a2ce6e593a26e3f8403e08d/optuna/samplers/_tpe/sampler.py#L50-L58 + std::vector weights; + constexpr int64_t WEIGHT_ALPHA = 25; + if (n == 0) { + return weights; + } else if (n < WEIGHT_ALPHA) { + weights.resize(n, 1.0); + } else { + weights.resize(n); + const double unit = (1.0 - 1.0 / n) / (n - WEIGHT_ALPHA); + for (int64_t i = 0; i < n; i++) { + weights[i] = (i < WEIGHT_ALPHA ? 1.0 : 1.0 - unit * (i - WEIGHT_ALPHA)); + } + } + + return weights; +} + +double TreeStructuredParzenEstimator::normalize_loop_variable(const double value) +{ + // Normalize the loop variable to [-1, 1) + double result = value; + while (result >= MAX_VALUE) { + result -= VALUE_WIDTH; + } + while (result < MIN_VALUE) { + result += VALUE_WIDTH; + } + return result; +} diff --git a/localization/tree_structured_parzen_estimator/test/test_tpe.cpp b/localization/tree_structured_parzen_estimator/test/test_tpe.cpp new file mode 100644 index 0000000000000..32eb66e70fb16 --- /dev/null +++ b/localization/tree_structured_parzen_estimator/test/test_tpe.cpp @@ -0,0 +1,65 @@ +// Copyright 2023 The Autoware Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "tree_structured_parzen_estimator/tree_structured_parzen_estimator.hpp" + +#include + +TEST(TreeStructuredParzenEstimatorTest, TPE_is_better_than_random_search_on_sphere_function) +{ + auto sphere_function = [](const TreeStructuredParzenEstimator::Input & input) { + double value = 0.0; + const int64_t n = input.size(); + for (int64_t i = 0; i < n; i++) { + const double v = input[i] * 10; + value += v * v; + } + return value; + }; + + constexpr int64_t kOuterTrialsNum = 10; + constexpr int64_t kInnerTrialsNum = 100; + std::cout << std::fixed; + std::vector mean_scores; + for (const int64_t n_startup_trials : {kInnerTrialsNum, kInnerTrialsNum / 10}) { + const std::string method = ((n_startup_trials == kInnerTrialsNum) ? 
"Random" : "TPE"); + + std::vector scores; + for (int64_t i = 0; i < kOuterTrialsNum; i++) { + double best_score = std::numeric_limits::lowest(); + const std::vector is_loop_variable(6, false); + TreeStructuredParzenEstimator estimator( + TreeStructuredParzenEstimator::Direction::MAXIMIZE, n_startup_trials, is_loop_variable); + for (int64_t trial = 0; trial < kInnerTrialsNum; trial++) { + const TreeStructuredParzenEstimator::Input input = estimator.get_next_input(); + const double score = -sphere_function(input); + estimator.add_trial({input, score}); + best_score = std::max(best_score, score); + } + scores.push_back(best_score); + } + + const double sum = std::accumulate(scores.begin(), scores.end(), 0.0); + const double mean = sum / scores.size(); + mean_scores.push_back(mean); + double sq_sum = 0.0; + for (const double score : scores) { + sq_sum += (score - mean) * (score - mean); + } + const double stddev = std::sqrt(sq_sum / scores.size()); + + std::cout << method << ", mean = " << mean << ", stddev = " << stddev << std::endl; + } + ASSERT_LT(mean_scores[0], mean_scores[1]); +} diff --git a/map/map_height_fitter/package.xml b/map/map_height_fitter/package.xml index 8f6ccf969c9a4..f50eba9218d67 100644 --- a/map/map_height_fitter/package.xml +++ b/map/map_height_fitter/package.xml @@ -19,7 +19,6 @@ pcl_conversions rclcpp sensor_msgs - tf2 tf2_geometry_msgs tf2_ros tier4_localization_msgs diff --git a/map/map_loader/CMakeLists.txt b/map/map_loader/CMakeLists.txt index 745edd0daca99..b94eaac7ee34d 100644 --- a/map/map_loader/CMakeLists.txt +++ b/map/map_loader/CMakeLists.txt @@ -4,7 +4,7 @@ project(map_loader) find_package(autoware_cmake REQUIRED) autoware_package() -find_package(PCL REQUIRED COMPONENTS io) +find_package(PCL REQUIRED COMPONENTS common io filters) ament_auto_add_library(pointcloud_map_loader_node SHARED src/pointcloud_map_loader/pointcloud_map_loader_node.cpp diff --git a/map/map_loader/package.xml b/map/map_loader/package.xml index e663d19d516ac..5230d4bd03214 100644 --- a/map/map_loader/package.xml +++ b/map/map_loader/package.xml @@ -24,13 +24,8 @@ lanelet2_extension libpcl-all-dev pcl_conversions - pcl_ros rclcpp rclcpp_components - std_msgs - tf2_geometry_msgs - tf2_ros - tier4_autoware_utils tier4_map_msgs visualization_msgs yaml-cpp diff --git a/map/map_projection_loader/package.xml b/map/map_projection_loader/package.xml index 03053533d3ead..b128c2cf15e15 100644 --- a/map/map_projection_loader/package.xml +++ b/map/map_projection_loader/package.xml @@ -16,7 +16,6 @@ component_interface_utils lanelet2_extension rclcpp - rclcpp_components tier4_map_msgs yaml-cpp diff --git a/map/util/lanelet2_map_preprocessor/CMakeLists.txt b/map/util/lanelet2_map_preprocessor/CMakeLists.txt index caa1d27b3e292..8a6b16c05011b 100644 --- a/map/util/lanelet2_map_preprocessor/CMakeLists.txt +++ b/map/util/lanelet2_map_preprocessor/CMakeLists.txt @@ -4,12 +4,18 @@ project(lanelet2_map_preprocessor) find_package(autoware_cmake REQUIRED) autoware_package() +find_package(PCL REQUIRED COMPONENTS common io kdtree) + include_directories( include SYSTEM ${PCL_INCLUDE_DIRS} ) +link_libraries( + ${PCL_LIBRARIES} +) + ament_auto_add_executable(fix_z_value_by_pcd src/fix_z_value_by_pcd.cpp) ament_auto_add_executable(transform_maps src/transform_maps.cpp) ament_auto_add_executable(merge_close_lines src/merge_close_lines.cpp) diff --git a/map/util/lanelet2_map_preprocessor/package.xml b/map/util/lanelet2_map_preprocessor/package.xml index 15f061369e649..79fce94f87a95 100644 --- 
a/map/util/lanelet2_map_preprocessor/package.xml +++ b/map/util/lanelet2_map_preprocessor/package.xml @@ -11,7 +11,7 @@ autoware_cmake lanelet2_extension - pcl_ros + libpcl-all-dev rclcpp ament_lint_auto diff --git a/perception/detected_object_validation/include/detected_object_filter/object_lanelet_filter.hpp b/perception/detected_object_validation/include/detected_object_filter/object_lanelet_filter.hpp index 600da31df5868..ae6162542a1c3 100644 --- a/perception/detected_object_validation/include/detected_object_filter/object_lanelet_filter.hpp +++ b/perception/detected_object_validation/include/detected_object_filter/object_lanelet_filter.hpp @@ -51,6 +51,7 @@ class ObjectLaneletFilterNode : public rclcpp::Node lanelet::LaneletMapPtr lanelet_map_ptr_; lanelet::ConstLanelets road_lanelets_; + lanelet::ConstLanelets shoulder_lanelets_; std::string lanelet_frame_id_; tf2_ros::Buffer tf_buffer_; diff --git a/perception/detected_object_validation/src/object_lanelet_filter.cpp b/perception/detected_object_validation/src/object_lanelet_filter.cpp index 3b78ae449e2da..f9b208cca02bc 100644 --- a/perception/detected_object_validation/src/object_lanelet_filter.cpp +++ b/perception/detected_object_validation/src/object_lanelet_filter.cpp @@ -62,6 +62,7 @@ void ObjectLaneletFilterNode::mapCallback( lanelet::utils::conversion::fromBinMsg(*map_msg, lanelet_map_ptr_); const lanelet::ConstLanelets all_lanelets = lanelet::utils::query::laneletLayer(lanelet_map_ptr_); road_lanelets_ = lanelet::utils::query::roadLanelets(all_lanelets); + shoulder_lanelets_ = lanelet::utils::query::shoulderLanelets(all_lanelets); } void ObjectLaneletFilterNode::objectCallback( @@ -87,7 +88,10 @@ void ObjectLaneletFilterNode::objectCallback( // calculate convex hull const auto convex_hull = getConvexHull(transformed_objects); // get intersected lanelets - lanelet::ConstLanelets intersected_lanelets = getIntersectedLanelets(convex_hull, road_lanelets_); + lanelet::ConstLanelets intersected_road_lanelets = + getIntersectedLanelets(convex_hull, road_lanelets_); + lanelet::ConstLanelets intersected_shoulder_lanelets = + getIntersectedLanelets(convex_hull, shoulder_lanelets_); int index = 0; for (const auto & object : transformed_objects.objects) { @@ -101,7 +105,9 @@ void ObjectLaneletFilterNode::objectCallback( polygon.outer().emplace_back(point_transformed.x, point_transformed.y); } polygon.outer().push_back(polygon.outer().front()); - if (isPolygonOverlapLanelets(polygon, intersected_lanelets)) { + if (isPolygonOverlapLanelets(polygon, intersected_road_lanelets)) { + output_object_msg.objects.emplace_back(input_msg->objects.at(index)); + } else if (isPolygonOverlapLanelets(polygon, intersected_shoulder_lanelets)) { output_object_msg.objects.emplace_back(input_msg->objects.at(index)); } } else { diff --git a/perception/euclidean_cluster/README.md b/perception/euclidean_cluster/README.md index 6c8995a44c56c..b57204c84cc3b 100644 --- a/perception/euclidean_cluster/README.md +++ b/perception/euclidean_cluster/README.md @@ -10,7 +10,7 @@ This package has two clustering methods: `euclidean_cluster` and `voxel_grid_bas ### euclidean_cluster -`pcl::EuclideanClusterExtraction` is applied to points. See [official document](https://pcl.readthedocs.io/en/latest/cluster_extraction.html) for details. +`pcl::EuclideanClusterExtraction` is applied to points. See [official document](https://pcl.readthedocs.io/projects/tutorials/en/master/cluster_extraction.html) for details. 
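For readers unfamiliar with the PCL API, the call sequence for `pcl::EuclideanClusterExtraction` typically looks like the sketch below; the numeric values are illustrative only and are not this package's defaults.

```cpp
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/search/kdtree.h>
#include <pcl/segmentation/extract_clusters.h>

#include <vector>

void cluster_example(const pcl::PointCloud<pcl::PointXYZ>::Ptr & cloud)
{
  // kd-tree used as the neighbor search structure for the clustering
  pcl::search::KdTree<pcl::PointXYZ>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZ>);
  tree->setInputCloud(cloud);

  std::vector<pcl::PointIndices> cluster_indices;
  pcl::EuclideanClusterExtraction<pcl::PointXYZ> ec;
  ec.setClusterTolerance(0.7);  // [m] points closer than this belong to the same cluster
  ec.setMinClusterSize(10);     // reject clusters with fewer points
  ec.setMaxClusterSize(10000);  // reject clusters with more points
  ec.setSearchMethod(tree);
  ec.setInputCloud(cloud);
  ec.extract(cluster_indices);  // one pcl::PointIndices entry per detected cluster
}
```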
### voxel_grid_based_euclidean_cluster

diff --git a/perception/ground_segmentation/docs/image/scan_ground_filter_parameters.drawio.svg b/perception/ground_segmentation/docs/image/scan_ground_filter_parameters.drawio.svg new file mode 100644 index 0000000000000..985b99d82ba9b --- /dev/null +++ b/perception/ground_segmentation/docs/image/scan_ground_filter_parameters.drawio.svg @@ -0,0 +1,225 @@ [new draw.io SVG diagram illustrating the scan ground filter parameters: detection_range_z_max, global_slope, local_slope, split_height_distance, grid_size_m, z] diff --git a/perception/ground_segmentation/docs/scan-ground-filter.md b/perception/ground_segmentation/docs/scan-ground-filter.md index 0c07ce600f625..bc44544fa298c 100644 --- a/perception/ground_segmentation/docs/scan-ground-filter.md +++ b/perception/ground_segmentation/docs/scan-ground-filter.md @@ -29,25 +29,26 @@ This implementation inherits `pointcloud_preprocessor::Filter` class, please ref #### Core Parameters -| Name | Type | Default Value | Description | -| --------------------------------- | ------ | ------------- | ------------------------------------------------------------------ | -| `input_frame` | string | "base_link" | frame id of input pointcloud | -| `output_frame` | string | "base_link" | frame id of output pointcloud | -| `global_slope_max_angle_deg` | double | 8.0 | The global angle to classify as the ground or object [deg] | -| `local_slope_max_angle_deg` | double | 10.0 | The local angle to classify as the ground or object [deg] | -| `radial_divider_angle_deg` | double | 1.0 | The angle which divide the whole pointcloud to sliced group [deg] | -| `split_points_distance_tolerance` | double | 0.2 | The xy-distance threshold to to distinguishing far and near [m] | -| `split_height_distance` | double | 0.2 | The height threshold to distinguishing far and near [m] | -| `use_virtual_ground_point` | bool | true | whether to use the ground center of front wheels as the virtual ground point. 
| -| `detection_range_z_max` | float | 2.5 | Maximum height of detection range [m], applied only for elevation_grid_mode | -| `center_pcl_shift` | float | 0.0 | The x-axis offset of addition LiDARs from vehicle center of mass [m], recommended to use only for additional LiDARs in elevation_grid_mode | -| `non_ground_height_threshold` | float | 0.2 | Height threshold of non ground objects [m], applied only for elevation_grid_mode | -| `grid_mode_switch_radius` | float | 20.0 | The distance where grid division mode change from by distance to by vertical angle [m], applied only for elevation_grid_mode | -| `grid_size_m` | float | 0.5 | The first grid size [m], applied only for elevation_grid_mode | -| `gnd_grid_buffer_size` | uint16 | 4 | Number of grids using to estimate local ground slope, applied only for elevation_grid_mode | -| `low_priority_region_x` | float | -20.0 | The non-zero x threshold in back side from which small objects detection is low priority [m] | -| `elevation_grid_mode` | bool | true | Elevation grid scan mode option | -| `use_recheck_ground_cluster` | bool | true | Enable recheck ground cluster | +![scan_ground_parameter](./image/scan_ground_filter_parameters.drawio.svg) +| Name | Type | Default Value | Description | +| --------------------------------- | ------ | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input_frame` | string | "base_link" | frame id of input pointcloud | +| `output_frame` | string | "base_link" | frame id of output pointcloud | +| `global_slope_max_angle_deg` | double | 8.0 | The global angle to classify as the ground or object [deg].A large threshold may reduce false positive of high slope road classification but it may lead to increase false negative of non-ground classification, particularly for small objects. | +| `local_slope_max_angle_deg` | double | 10.0 | The local angle to classify as the ground or object [deg] when comparing with adjacent point.A small value enhance accuracy classification of object with inclined surface. This should be considered together with `split_points_distance_tolerance` value. | +| `radial_divider_angle_deg` | double | 1.0 | The angle which divide the whole pointcloud to sliced group [deg] | +| `split_points_distance_tolerance` | double | 0.2 | The xy-distance threshold to distinguish far and near [m] | +| `split_height_distance` | double | 0.2 | The height threshold to distinguish ground and non-ground pointcloud when comparing with adjacent points [m]. A small threshold improves classification of non-ground point, especially for high elevation resolution pointcloud lidar. However, it might cause false positive for small step-like road surface or misaligned multiple lidar configuration. | +| `use_virtual_ground_point` | bool | true | whether to use the ground center of front wheels as the virtual ground point. 
| +| `detection_range_z_max` | float | 2.5 | Maximum height of detection range [m], applied only for elevation_grid_mode | +| `center_pcl_shift` | float | 0.0 | The x-axis offset of addition LiDARs from vehicle center of mass [m], recommended to use only for additional LiDARs in elevation_grid_mode | +| `non_ground_height_threshold` | float | 0.2 | Height threshold of non ground objects [m] as `split_height_distance` and applied only for elevation_grid_mode | +| `grid_mode_switch_radius` | float | 20.0 | The distance where grid division mode change from by distance to by vertical angle [m], applied only for elevation_grid_mode | +| `grid_size_m` | float | 0.5 | The first grid size [m], applied only for elevation_grid_mode.A large value enhances the prediction stability for ground surface. suitable for rough surface or multiple lidar configuration. | +| `gnd_grid_buffer_size` | uint16 | 4 | Number of grids using to estimate local ground slope, applied only for elevation_grid_mode | +| `low_priority_region_x` | float | -20.0 | The non-zero x threshold in back side from which small objects detection is low priority [m] | +| `elevation_grid_mode` | bool | true | Elevation grid scan mode option | +| `use_recheck_ground_cluster` | bool | true | Enable recheck ground cluster | ## Assumptions / Known limits diff --git a/perception/map_based_prediction/README.md b/perception/map_based_prediction/README.md index 7631b47772862..6d3d7f5e035a9 100644 --- a/perception/map_based_prediction/README.md +++ b/perception/map_based_prediction/README.md @@ -109,6 +109,21 @@ For the additional information, here we show how we calculate lateral velocity. Currently, we use the upper method with a low-pass filter to calculate lateral velocity. +### Path generation + +Path generation is generated on the frenet frame. The path is generated by the following steps: + +1. Get the frenet frame of the reference path. +2. Generate the frenet frame of the current position of the object and the finite position of the object. +3. Optimize the path in each longitudinal and lateral coordinate of the frenet frame. (Use the quintic polynomial to fit start and end conditions.) +4. Convert the path to the global coordinate. + +See paper [2] for more details. + +#### Tuning lateral path shape + +`lateral_control_time_horizon` parameter supports the tuning of the lateral path shape. This parameter is used to calculate the time to reach the reference path. The smaller the value, the more the path will be generated to reach the reference path quickly. (Mostly the center of the lane.) 
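To make step 3 of the path generation procedure above concrete, the sketch below fits a quintic polynomial for the lateral offset in the Frenet frame so that the offset and its first two derivatives reach zero at time `T`, where `T` plays the role of `lateral_control_time_horizon`. This only illustrates the boundary-value fit under the assumption of zero terminal offset, velocity, and acceleration; it is not the actual `PathGenerator` implementation.

```cpp
#include <Eigen/Dense>

#include <array>

// d(t) = a0 + a1*t + a2*t^2 + a3*t^3 + a4*t^4 + a5*t^5
// Start conditions: d(0) = d0, d'(0) = dv0, d''(0) = da0
// End conditions:   d(T) = d'(T) = d''(T) = 0
std::array<double, 6> fit_lateral_quintic(
  const double d0, const double dv0, const double da0, const double T)
{
  // The first three coefficients follow directly from the start conditions.
  const double a0 = d0;
  const double a1 = dv0;
  const double a2 = 0.5 * da0;

  // Solve the remaining coefficients from the three end conditions.
  const double T2 = T * T, T3 = T2 * T, T4 = T3 * T, T5 = T4 * T;
  Eigen::Matrix3d A;
  A << T3, T4, T5,           // d(T)   = 0
    3 * T2, 4 * T3, 5 * T4,  // d'(T)  = 0
    6 * T, 12 * T2, 20 * T3; // d''(T) = 0
  Eigen::Vector3d b;
  b << -(a0 + a1 * T + a2 * T2), -(a1 + 2 * a2 * T), -(2 * a2);
  const Eigen::Vector3d x = A.colPivHouseholderQr().solve(b);
  return {a0, a1, a2, x(0), x(1), x(2)};
}
```

With this formulation, a smaller `T` (i.e. a smaller `lateral_control_time_horizon`) forces the lateral offset to decay to the reference path sooner, which is the tuning behavior described above.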
+ ### Path prediction for crosswalk users This module treats **Pedestrians** and **Bicycles** as objects using the crosswalk, and outputs prediction path based on map and estimated object's velocity, assuming the object has intention to cross the crosswalk, if the objects satisfies at least one of the following conditions: @@ -155,6 +170,7 @@ If the target object is inside the road or crosswalk, this module outputs one or | ---------------------------------------------------------------- | ----- | ------ | ------------------------------------------------------------------------------------------------------------------------------------- | | `enable_delay_compensation` | [-] | bool | flag to enable the time delay compensation for the position of the object | | `prediction_time_horizon` | [s] | double | predict time duration for predicted path | +| `lateral_control_time_horizon` | [s] | double | time duration for predicted path will reach the reference path (mostly center of the lane) | | `prediction_sampling_delta_time` | [s] | double | sampling time for points in predicted path | | `min_velocity_for_map_based_prediction` | [m/s] | double | apply map-based prediction to the objects with higher velocity than this value | | `min_crosswalk_user_velocity` | [m/s] | double | minimum velocity used when crosswalk user's velocity is calculated | diff --git a/perception/map_based_prediction/config/map_based_prediction.param.yaml b/perception/map_based_prediction/config/map_based_prediction.param.yaml index 31fae9c811092..260ae45da2e05 100644 --- a/perception/map_based_prediction/config/map_based_prediction.param.yaml +++ b/perception/map_based_prediction/config/map_based_prediction.param.yaml @@ -2,6 +2,7 @@ ros__parameters: enable_delay_compensation: true prediction_time_horizon: 10.0 #[s] + lateral_control_time_horizon: 5.0 #[s] prediction_sampling_delta_time: 0.5 #[s] min_velocity_for_map_based_prediction: 1.39 #[m/s] min_crosswalk_user_velocity: 1.39 #[m/s] diff --git a/perception/map_based_prediction/include/map_based_prediction/map_based_prediction_node.hpp b/perception/map_based_prediction/include/map_based_prediction/map_based_prediction_node.hpp index 3d01ab96e9b62..01f39057aef36 100644 --- a/perception/map_based_prediction/include/map_based_prediction/map_based_prediction_node.hpp +++ b/perception/map_based_prediction/include/map_based_prediction/map_based_prediction_node.hpp @@ -135,6 +135,7 @@ class MapBasedPredictionNode : public rclcpp::Node // Parameters bool enable_delay_compensation_; double prediction_time_horizon_; + double lateral_control_time_horizon_; double prediction_time_horizon_rate_for_validate_lane_length_; double prediction_sampling_time_interval_; double min_velocity_for_map_based_prediction_; @@ -165,6 +166,14 @@ class MapBasedPredictionNode : public rclcpp::Node void mapCallback(const HADMapBin::ConstSharedPtr msg); void objectsCallback(const TrackedObjects::ConstSharedPtr in_objects); + bool doesPathCrossAnyFence(const PredictedPath & predicted_path); + bool doesPathCrossFence( + const PredictedPath & predicted_path, const lanelet::ConstLineString3d & fence_line); + lanelet::BasicLineString2d convertToFenceLine(const lanelet::ConstLineString3d & fence); + bool isIntersecting( + const geometry_msgs::msg::Point & point1, const geometry_msgs::msg::Point & point2, + const lanelet::ConstPoint3d & point3, const lanelet::ConstPoint3d & point4); + PredictedObjectKinematics convertToPredictedKinematics( const TrackedObjectKinematics & tracked_object); diff --git 
a/perception/map_based_prediction/include/map_based_prediction/path_generator.hpp b/perception/map_based_prediction/include/map_based_prediction/path_generator.hpp index 7bb1542557d2c..4da4f62be2ede 100644 --- a/perception/map_based_prediction/include/map_based_prediction/path_generator.hpp +++ b/perception/map_based_prediction/include/map_based_prediction/path_generator.hpp @@ -81,8 +81,8 @@ class PathGenerator { public: PathGenerator( - const double time_horizon, const double sampling_time_interval, - const double min_crosswalk_user_velocity); + const double time_horizon, const double lateral_time_horizon, + const double sampling_time_interval, const double min_crosswalk_user_velocity); PredictedPath generatePathForNonVehicleObject(const TrackedObject & object); @@ -102,6 +102,7 @@ class PathGenerator private: // Parameters double time_horizon_; + double lateral_time_horizon_; double sampling_time_interval_; double min_crosswalk_user_velocity_; diff --git a/perception/map_based_prediction/src/map_based_prediction_node.cpp b/perception/map_based_prediction/src/map_based_prediction_node.cpp index 58356f4f96ed6..08fc06330b1d8 100644 --- a/perception/map_based_prediction/src/map_based_prediction_node.cpp +++ b/perception/map_based_prediction/src/map_based_prediction_node.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -369,17 +370,13 @@ bool withinLanelet( boost::geometry::correct(polygon); bool with_in_polygon = boost::geometry::within(p_object, polygon); - if (!use_yaw_information) { - return with_in_polygon; - } else { - // use yaw angle to compare - const double abs_yaw_diff = calcAbsYawDiffBetweenLaneletAndObject(object, lanelet); - if (abs_yaw_diff < yaw_threshold) { - return with_in_polygon; - } else { - return false; - } - } + if (!use_yaw_information) return with_in_polygon; + + // use yaw angle to compare + const double abs_yaw_diff = calcAbsYawDiffBetweenLaneletAndObject(object, lanelet); + if (abs_yaw_diff < yaw_threshold) return with_in_polygon; + + return false; } bool withinRoadLanelet( @@ -594,53 +591,53 @@ ObjectClassification::_label_type changeLabelForPrediction( const lanelet::LaneletMapPtr & lanelet_map_ptr_) { // for car like vehicle do not change labels - if ( - label == ObjectClassification::CAR || label == ObjectClassification::BUS || - label == ObjectClassification::TRUCK || label == ObjectClassification::TRAILER || - label == ObjectClassification::UNKNOWN) { - return label; - } else if ( // for bicycle and motorcycle - label == ObjectClassification::MOTORCYCLE || label == ObjectClassification::BICYCLE) { - // if object is within road lanelet and satisfies yaw constraints - const bool within_road_lanelet = withinRoadLanelet(object, lanelet_map_ptr_, true); - const float high_speed_threshold = 25.0 / 18.0 * 5.0; // High speed bicycle 25 km/h - // calc abs speed from x and y velocity - const double abs_speed = std::hypot( - object.kinematics.twist_with_covariance.twist.linear.x, - object.kinematics.twist_with_covariance.twist.linear.y); - const bool high_speed_object = abs_speed > high_speed_threshold; - - // if the object is within lanelet, do the same estimation with vehicle - if (within_road_lanelet) { - return ObjectClassification::MOTORCYCLE; - } else if (high_speed_object) { + switch (label) { + case ObjectClassification::CAR: + case ObjectClassification::BUS: + case ObjectClassification::TRUCK: + case ObjectClassification::TRAILER: + case ObjectClassification::UNKNOWN: + return label; + + case ObjectClassification::MOTORCYCLE: + 
case ObjectClassification::BICYCLE: { // if object is within road lanelet and satisfies yaw + // constraints + const bool within_road_lanelet = withinRoadLanelet(object, lanelet_map_ptr_, true); + const float high_speed_threshold = + tier4_autoware_utils::kmph2mps(25.0); // High speed bicycle 25 km/h + // calc abs speed from x and y velocity + const double abs_speed = std::hypot( + object.kinematics.twist_with_covariance.twist.linear.x, + object.kinematics.twist_with_covariance.twist.linear.y); + const bool high_speed_object = abs_speed > high_speed_threshold; + + // if the object is within lanelet, do the same estimation with vehicle + if (within_road_lanelet) return ObjectClassification::MOTORCYCLE; // high speed object outside road lanelet will move like unknown object // return ObjectClassification::UNKNOWN; // temporary disabled - return label; - } else { + if (high_speed_object) return label; // Do nothing for now return ObjectClassification::BICYCLE; } - } else if (label == ObjectClassification::PEDESTRIAN) { - const bool within_road_lanelet = withinRoadLanelet(object, lanelet_map_ptr_, true); - const float max_velocity_for_human_mps = - 25.0 / 18.0 * 5.0; // Max human being motion speed is 25km/h - const double abs_speed = std::hypot( - object.kinematics.twist_with_covariance.twist.linear.x, - object.kinematics.twist_with_covariance.twist.linear.y); - const bool high_speed_object = abs_speed > max_velocity_for_human_mps; - // fast, human-like object: like segway - if (within_road_lanelet && high_speed_object) { - return label; // currently do nothing + + case ObjectClassification::PEDESTRIAN: { + const bool within_road_lanelet = withinRoadLanelet(object, lanelet_map_ptr_, true); + const float max_velocity_for_human_mps = + tier4_autoware_utils::kmph2mps(25.0); // Max human being motion speed is 25km/h + const double abs_speed = std::hypot( + object.kinematics.twist_with_covariance.twist.linear.x, + object.kinematics.twist_with_covariance.twist.linear.y); + const bool high_speed_object = abs_speed > max_velocity_for_human_mps; + // fast, human-like object: like segway + if (within_road_lanelet && high_speed_object) return label; // currently do nothing // return ObjectClassification::MOTORCYCLE; - } else if (high_speed_object) { - return label; // currently do nothing + if (high_speed_object) return label; // currently do nothing // fast human outside road lanelet will move like unknown object // return ObjectClassification::UNKNOWN; - } else { return label; } - } else { - return label; + + default: + return label; } } @@ -697,6 +694,8 @@ MapBasedPredictionNode::MapBasedPredictionNode(const rclcpp::NodeOptions & node_ { enable_delay_compensation_ = declare_parameter("enable_delay_compensation"); prediction_time_horizon_ = declare_parameter("prediction_time_horizon"); + lateral_control_time_horizon_ = + declare_parameter("lateral_control_time_horizon"); // [s] for lateral control point prediction_sampling_time_interval_ = declare_parameter("prediction_sampling_delta_time"); min_velocity_for_map_based_prediction_ = declare_parameter("min_velocity_for_map_based_prediction"); @@ -743,7 +742,8 @@ MapBasedPredictionNode::MapBasedPredictionNode(const rclcpp::NodeOptions & node_ declare_parameter("prediction_time_horizon_rate_for_validate_shoulder_lane_length"); path_generator_ = std::make_shared( - prediction_time_horizon_, prediction_sampling_time_interval_, min_crosswalk_user_velocity_); + prediction_time_horizon_, lateral_control_time_horizon_, prediction_sampling_time_interval_, + 
min_crosswalk_user_velocity_); sub_objects_ = this->create_subscription( "~/input/objects", 1, @@ -850,102 +850,99 @@ void MapBasedPredictionNode::objectsCallback(const TrackedObjects::ConstSharedPt const auto & label_ = transformed_object.classification.front().label; const auto label = changeLabelForPrediction(label_, object, lanelet_map_ptr_); - // For crosswalk user - if (label == ObjectClassification::PEDESTRIAN || label == ObjectClassification::BICYCLE) { - const auto predicted_object = getPredictedObjectAsCrosswalkUser(transformed_object); - output.objects.push_back(predicted_object); - // For road user - } else if ( - label == ObjectClassification::CAR || label == ObjectClassification::BUS || - label == ObjectClassification::TRAILER || label == ObjectClassification::MOTORCYCLE || - label == ObjectClassification::TRUCK) { - // Update object yaw and velocity - updateObjectData(transformed_object); - - // Get Closest Lanelet - const auto current_lanelets = getCurrentLanelets(transformed_object); - - // Update Objects History - updateObjectsHistory(output.header, transformed_object, current_lanelets); - - // For off lane obstacles - if (current_lanelets.empty()) { - PredictedPath predicted_path = - path_generator_->generatePathForOffLaneVehicle(transformed_object); - predicted_path.confidence = 1.0; - if (predicted_path.path.empty()) { - continue; - } - - auto predicted_object = convertToPredictedObject(transformed_object); - predicted_object.kinematics.predicted_paths.push_back(predicted_path); - output.objects.push_back(predicted_object); - continue; + switch (label) { + case ObjectClassification::PEDESTRIAN: + case ObjectClassification::BICYCLE: { + const auto predicted_object_crosswalk = + getPredictedObjectAsCrosswalkUser(transformed_object); + output.objects.push_back(predicted_object_crosswalk); + break; } - - // For too-slow vehicle - const double abs_obj_speed = std::hypot( - transformed_object.kinematics.twist_with_covariance.twist.linear.x, - transformed_object.kinematics.twist_with_covariance.twist.linear.y); - if (std::fabs(abs_obj_speed) < min_velocity_for_map_based_prediction_) { - PredictedPath predicted_path = - path_generator_->generatePathForLowSpeedVehicle(transformed_object); - predicted_path.confidence = 1.0; - if (predicted_path.path.empty()) { - continue; + case ObjectClassification::CAR: + case ObjectClassification::BUS: + case ObjectClassification::TRAILER: + case ObjectClassification::MOTORCYCLE: + case ObjectClassification::TRUCK: { + // Update object yaw and velocity + updateObjectData(transformed_object); + + // Get Closest Lanelet + const auto current_lanelets = getCurrentLanelets(transformed_object); + + // Update Objects History + updateObjectsHistory(output.header, transformed_object, current_lanelets); + + // For off lane obstacles + if (current_lanelets.empty()) { + PredictedPath predicted_path = + path_generator_->generatePathForOffLaneVehicle(transformed_object); + predicted_path.confidence = 1.0; + if (predicted_path.path.empty()) break; + + auto predicted_object_vehicle = convertToPredictedObject(transformed_object); + predicted_object_vehicle.kinematics.predicted_paths.push_back(predicted_path); + output.objects.push_back(predicted_object_vehicle); + break; } - auto predicted_object = convertToPredictedObject(transformed_object); - predicted_object.kinematics.predicted_paths.push_back(predicted_path); - output.objects.push_back(predicted_object); - continue; - } - - // Get Predicted Reference Path for Each Maneuver and current lanelets - // 
return: - const auto ref_paths = - getPredictedReferencePath(transformed_object, current_lanelets, objects_detected_time); - - // If predicted reference path is empty, assume this object is out of the lane - if (ref_paths.empty()) { - PredictedPath predicted_path = - path_generator_->generatePathForLowSpeedVehicle(transformed_object); - predicted_path.confidence = 1.0; - if (predicted_path.path.empty()) { - continue; + // For too-slow vehicle + const double abs_obj_speed = std::hypot( + transformed_object.kinematics.twist_with_covariance.twist.linear.x, + transformed_object.kinematics.twist_with_covariance.twist.linear.y); + if (std::fabs(abs_obj_speed) < min_velocity_for_map_based_prediction_) { + PredictedPath predicted_path = + path_generator_->generatePathForLowSpeedVehicle(transformed_object); + predicted_path.confidence = 1.0; + if (predicted_path.path.empty()) break; + + auto predicted_slow_object = convertToPredictedObject(transformed_object); + predicted_slow_object.kinematics.predicted_paths.push_back(predicted_path); + output.objects.push_back(predicted_slow_object); + break; } - auto predicted_object = convertToPredictedObject(transformed_object); - predicted_object.kinematics.predicted_paths.push_back(predicted_path); - output.objects.push_back(predicted_object); - continue; - } + // Get Predicted Reference Path for Each Maneuver and current lanelets + // return: + const auto ref_paths = + getPredictedReferencePath(transformed_object, current_lanelets, objects_detected_time); + + // If predicted reference path is empty, assume this object is out of the lane + if (ref_paths.empty()) { + PredictedPath predicted_path = + path_generator_->generatePathForLowSpeedVehicle(transformed_object); + predicted_path.confidence = 1.0; + if (predicted_path.path.empty()) break; + + auto predicted_object_out_of_lane = convertToPredictedObject(transformed_object); + predicted_object_out_of_lane.kinematics.predicted_paths.push_back(predicted_path); + output.objects.push_back(predicted_object_out_of_lane); + break; + } - // Get Debug Marker for On Lane Vehicles - const auto max_prob_path = std::max_element( - ref_paths.begin(), ref_paths.end(), - [](const PredictedRefPath & a, const PredictedRefPath & b) { - return a.probability < b.probability; - }); - const auto debug_marker = - getDebugMarker(object, max_prob_path->maneuver, debug_markers.markers.size()); - debug_markers.markers.push_back(debug_marker); - - // Generate Predicted Path - std::vector predicted_paths; - for (const auto & ref_path : ref_paths) { - PredictedPath predicted_path = - path_generator_->generatePathForOnLaneVehicle(transformed_object, ref_path.path); - if (predicted_path.path.empty()) { - continue; + // Get Debug Marker for On Lane Vehicles + const auto max_prob_path = std::max_element( + ref_paths.begin(), ref_paths.end(), + [](const PredictedRefPath & a, const PredictedRefPath & b) { + return a.probability < b.probability; + }); + const auto debug_marker = + getDebugMarker(object, max_prob_path->maneuver, debug_markers.markers.size()); + debug_markers.markers.push_back(debug_marker); + + // Generate Predicted Path + std::vector predicted_paths; + for (const auto & ref_path : ref_paths) { + PredictedPath predicted_path = + path_generator_->generatePathForOnLaneVehicle(transformed_object, ref_path.path); + if (predicted_path.path.empty()) { + continue; + } + predicted_path.confidence = ref_path.probability; + predicted_paths.push_back(predicted_path); } - predicted_path.confidence = ref_path.probability; - 
predicted_paths.push_back(predicted_path); - } + // Normalize Path Confidence and output the predicted object - // Normalize Path Confidence and output the predicted object - { float sum_confidence = 0.0; for (const auto & predicted_path : predicted_paths) { sum_confidence += predicted_path.confidence; @@ -953,25 +950,25 @@ void MapBasedPredictionNode::objectsCallback(const TrackedObjects::ConstSharedPt const float min_sum_confidence_value = 1e-3; sum_confidence = std::max(sum_confidence, min_sum_confidence_value); + auto predicted_object = convertToPredictedObject(transformed_object); + for (auto & predicted_path : predicted_paths) { predicted_path.confidence = predicted_path.confidence / sum_confidence; - } - - auto predicted_object = convertToPredictedObject(transformed_object); - for (const auto & predicted_path : predicted_paths) { predicted_object.kinematics.predicted_paths.push_back(predicted_path); } output.objects.push_back(predicted_object); + break; } - // For unknown object - } else { - auto predicted_object = convertToPredictedObject(transformed_object); - PredictedPath predicted_path = - path_generator_->generatePathForNonVehicleObject(transformed_object); - predicted_path.confidence = 1.0; + default: { + auto predicted_unknown_object = convertToPredictedObject(transformed_object); + PredictedPath predicted_path = + path_generator_->generatePathForNonVehicleObject(transformed_object); + predicted_path.confidence = 1.0; - predicted_object.kinematics.predicted_paths.push_back(predicted_path); - output.objects.push_back(predicted_object); + predicted_unknown_object.kinematics.predicted_paths.push_back(predicted_path); + output.objects.push_back(predicted_unknown_object); + break; + } } } @@ -982,6 +979,46 @@ void MapBasedPredictionNode::objectsCallback(const TrackedObjects::ConstSharedPt pub_calculation_time_->publish(calculation_time_msg); } +bool MapBasedPredictionNode::doesPathCrossAnyFence(const PredictedPath & predicted_path) +{ + const lanelet::ConstLineStrings3d & all_fences = + lanelet::utils::query::getAllFences(lanelet_map_ptr_); + for (const auto & fence_line : all_fences) { + if (doesPathCrossFence(predicted_path, fence_line)) { + return true; + } + } + return false; +} + +bool MapBasedPredictionNode::doesPathCrossFence( + const PredictedPath & predicted_path, const lanelet::ConstLineString3d & fence_line) +{ + // check whether the predicted path cross with fence + for (size_t i = 0; i < predicted_path.path.size(); ++i) { + for (size_t j = 0; j < fence_line.size() - 1; ++j) { + if (isIntersecting( + predicted_path.path[i].position, predicted_path.path[i + 1].position, fence_line[j], + fence_line[j + 1])) { + return true; + } + } + } + return false; +} + +bool MapBasedPredictionNode::isIntersecting( + const geometry_msgs::msg::Point & point1, const geometry_msgs::msg::Point & point2, + const lanelet::ConstPoint3d & point3, const lanelet::ConstPoint3d & point4) +{ + const auto p1 = tier4_autoware_utils::createPoint(point1.x, point1.y, 0.0); + const auto p2 = tier4_autoware_utils::createPoint(point2.x, point2.y, 0.0); + const auto p3 = tier4_autoware_utils::createPoint(point3.x(), point3.y(), 0.0); + const auto p4 = tier4_autoware_utils::createPoint(point4.x(), point4.y(), 0.0); + const auto intersection = tier4_autoware_utils::intersect(p1, p2, p3, p4); + return intersection.has_value(); +} + PredictedObject MapBasedPredictionNode::getPredictedObjectAsCrosswalkUser( const TrackedObject & object) { @@ -1001,6 +1038,7 @@ PredictedObject 
MapBasedPredictionNode::getPredictedObjectAsCrosswalkUser( } } + // If the object is in the crosswalk, generate path to the crosswalk edge if (crossing_crosswalk) { const auto edge_points = getCrosswalkEdgePoints(crossing_crosswalk.get()); @@ -1024,6 +1062,8 @@ PredictedObject MapBasedPredictionNode::getPredictedObjectAsCrosswalkUser( predicted_object.kinematics.predicted_paths.push_back(predicted_path); } + // If the object is not crossing the crosswalk, in the road lanelets, try to find the closest + // crosswalk and generate path to the crosswalk edge } else if (withinRoadLanelet(object, lanelet_map_ptr_)) { lanelet::ConstLanelet closest_crosswalk{}; const auto & obj_pose = object.kinematics.pose_with_covariance.pose; @@ -1054,6 +1094,8 @@ PredictedObject MapBasedPredictionNode::getPredictedObjectAsCrosswalkUser( } } + // If the object is not crossing the crosswalk, not in the road lanelets, try to find the edge + // points for all crosswalks and generate path to the crosswalk edge } else { for (const auto & crosswalk : crosswalks_) { const auto edge_points = getCrosswalkEdgePoints(crosswalk); @@ -1086,6 +1128,10 @@ PredictedObject MapBasedPredictionNode::getPredictedObjectAsCrosswalkUser( if (predicted_path.path.empty()) { continue; } + // If the predicted path to the crosswalk is crossing the fence, don't use it + if (doesPathCrossAnyFence(predicted_path)) { + continue; + } predicted_object.kinematics.predicted_paths.push_back(predicted_path); } @@ -1114,26 +1160,28 @@ void MapBasedPredictionNode::updateObjectData(TrackedObject & object) object_pose, object_twist.linear.x * 0.1, object_twist.linear.y * 0.1, 0.0); // assumption: the object vx is much larger than vy - if (object.kinematics.twist_with_covariance.twist.linear.x < 0.0) { - if ( - object.kinematics.orientation_availability == - autoware_auto_perception_msgs::msg::DetectedObjectKinematics::SIGN_UNKNOWN) { + if (object.kinematics.twist_with_covariance.twist.linear.x >= 0.0) return; + + switch (object.kinematics.orientation_availability) { + case autoware_auto_perception_msgs::msg::DetectedObjectKinematics::SIGN_UNKNOWN: { const auto original_yaw = tf2::getYaw(object.kinematics.pose_with_covariance.pose.orientation); // flip the angle object.kinematics.pose_with_covariance.pose.orientation = tier4_autoware_utils::createQuaternionFromYaw(tier4_autoware_utils::pi + original_yaw); - } else { + break; + } + default: { const auto updated_object_yaw = tier4_autoware_utils::calcAzimuthAngle(object_pose.position, future_object_pose.position); object.kinematics.pose_with_covariance.pose.orientation = tier4_autoware_utils::createQuaternionFromYaw(updated_object_yaw); + break; } - - object.kinematics.twist_with_covariance.twist.linear.x *= -1.0; - object.kinematics.twist_with_covariance.twist.linear.y *= -1.0; } + object.kinematics.twist_with_covariance.twist.linear.x *= -1.0; + object.kinematics.twist_with_covariance.twist.linear.y *= -1.0; return; } @@ -1864,16 +1912,10 @@ ManeuverProbability MapBasedPredictionNode::calculateManeuverProbability( constexpr float LC_PROB = 1.0; // probability for left lane change constexpr float RC_PROB = 1.0; // probability for right lane change lane_follow_probability = 0.0; - if (!left_paths.empty() && right_paths.empty()) { - left_lane_change_probability = LC_PROB; - right_lane_change_probability = 0.0; - } else if (left_paths.empty() && !right_paths.empty()) { - left_lane_change_probability = 0.0; - right_lane_change_probability = RC_PROB; - } else { - left_lane_change_probability = LC_PROB; - 
right_lane_change_probability = RC_PROB; - } + + // If the given lane is empty, the probability goes to 0 + left_lane_change_probability = left_paths.empty() ? 0.0 : LC_PROB; + right_lane_change_probability = right_paths.empty() ? 0.0 : RC_PROB; } const float MIN_PROBABILITY = 1e-3; diff --git a/perception/map_based_prediction/src/path_generator.cpp b/perception/map_based_prediction/src/path_generator.cpp index 547132410badf..5cb7e186b7cc4 100644 --- a/perception/map_based_prediction/src/path_generator.cpp +++ b/perception/map_based_prediction/src/path_generator.cpp @@ -23,9 +23,10 @@ namespace map_based_prediction { PathGenerator::PathGenerator( - const double time_horizon, const double sampling_time_interval, + const double time_horizon, const double lateral_time_horizon, const double sampling_time_interval, const double min_crosswalk_user_velocity) : time_horizon_(time_horizon), + lateral_time_horizon_(lateral_time_horizon), sampling_time_interval_(sampling_time_interval), min_crosswalk_user_velocity_(min_crosswalk_user_velocity) { @@ -213,18 +214,25 @@ FrenetPath PathGenerator::generateFrenetPath( { FrenetPath path; const double duration = time_horizon_; + const double lateral_duration = lateral_time_horizon_; // Compute Lateral and Longitudinal Coefficients to generate the trajectory - const Eigen::Vector3d lat_coeff = calcLatCoefficients(current_point, target_point, duration); + const Eigen::Vector3d lat_coeff = + calcLatCoefficients(current_point, target_point, lateral_duration); const Eigen::Vector2d lon_coeff = calcLonCoefficients(current_point, target_point, duration); path.reserve(static_cast(duration / sampling_time_interval_)); for (double t = 0.0; t <= duration; t += sampling_time_interval_) { - const double d_next = current_point.d + current_point.d_vel * t + 0 * 2 * std::pow(t, 2) + - lat_coeff(0) * std::pow(t, 3) + lat_coeff(1) * std::pow(t, 4) + - lat_coeff(2) * std::pow(t, 5); - const double s_next = current_point.s + current_point.s_vel * t + 2 * 0 * std::pow(t, 2) + - lon_coeff(0) * std::pow(t, 3) + lon_coeff(1) * std::pow(t, 4); + const double current_acc = + 0.0; // Currently we assume the object is traveling at a constant speed + const double d_next_ = current_point.d + current_point.d_vel * t + + current_acc * 2.0 * std::pow(t, 2) + lat_coeff(0) * std::pow(t, 3) + + lat_coeff(1) * std::pow(t, 4) + lat_coeff(2) * std::pow(t, 5); + // t > lateral_duration: 0.0, else d_next_ + const double d_next = t > lateral_duration ? 0.0 : d_next_; + const double s_next = current_point.s + current_point.s_vel * t + + 2.0 * current_acc * std::pow(t, 2) + lon_coeff(0) * std::pow(t, 3) + + lon_coeff(1) * std::pow(t, 4); if (s_next > max_length) { break; } diff --git a/perception/tracking_object_merger/CMakeLists.txt b/perception/tracking_object_merger/CMakeLists.txt new file mode 100644 index 0000000000000..108749c5f91a7 --- /dev/null +++ b/perception/tracking_object_merger/CMakeLists.txt @@ -0,0 +1,50 @@ +cmake_minimum_required(VERSION 3.8) +project(tracking_object_merger VERSION 1.0.0) + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic -Werror -Wconversion) +endif() + +find_package(autoware_cmake REQUIRED) +autoware_package() +find_package(nlohmann_json REQUIRED) # for debug + + +# find dependencies +find_package(eigen3_cmake_module REQUIRED) +find_package(Eigen3 REQUIRED) +# uncomment the following section in order to fill in +# further dependencies manually. 
+# find_package(<dependency> REQUIRED) + +include_directories( + SYSTEM + ${EIGEN3_INCLUDE_DIR} +) + +ament_auto_add_library(mu_successive_shortest_path SHARED + src/data_association/mu_successive_shortest_path/mu_successive_shortest_path_wrapper.cpp +) + +ament_auto_add_library(decorative_tracker_merger_node SHARED + src/data_association/data_association.cpp + src/decorative_tracker_merger.cpp + src/utils/utils.cpp + src/utils/tracker_state.cpp +) + +target_link_libraries(decorative_tracker_merger_node + mu_successive_shortest_path + Eigen3::Eigen + nlohmann_json::nlohmann_json # for debug +) + +rclcpp_components_register_node(decorative_tracker_merger_node + PLUGIN "tracking_object_merger::DecorativeTrackerMergerNode" + EXECUTABLE decorative_tracker_merger +) + +ament_auto_package(INSTALL_TO_SHARE + launch + config +) diff --git a/perception/tracking_object_merger/README.md b/perception/tracking_object_merger/README.md new file mode 100644 index 0000000000000..9964534372037 --- /dev/null +++ b/perception/tracking_object_merger/README.md @@ -0,0 +1,114 @@ +# Tracking Object Merger + +## Purpose + +This package tries to merge tracking objects that come from two different sensors. + +## Inner-workings / Algorithms + +Merging tracking objects from different sensors is a combination of data association and state fusion algorithms. + +The detailed process depends on the merger policy. + +### decorative_tracker_merger + +In decorative_tracker_merger, we assume there are dominant tracking objects and sub tracking objects. +The name `decorative` means that the sub tracking objects are used to complement the main objects. + +Usually the dominant tracking objects come from LiDAR and the sub tracking objects come from Radar or Camera. + +The processing pipeline is shown below. + +![decorative_tracker_merger](./image/decorative_tracker_merger.drawio.svg) + +#### time sync + +Sub objects (Radar or Camera) often arrive at a higher frequency than the dominant objects (LiDAR), so the sub object timestamps need to be synchronized to the dominant object timestamps. + +![time sync](image/time_sync.drawio.svg) + +#### data association + +In the data association, we use the following rules to determine whether two tracking objects are the same object. + +- gating + - `distance gate`: distance between two tracking objects + - `angle gate`: angle between two tracking objects + - `mahalanobis_distance_gate`: Mahalanobis distance between two tracking objects + - `min_iou_gate`: minimum IoU between two tracking objects + - `max_velocity_gate`: maximum velocity difference between two tracking objects +- score + - the score used in matching is equivalent to the distance between two tracking objects + +#### tracklet update + +Sub tracking objects are merged into dominant tracking objects. + +Depending on the tracklet input sensor state, we update the tracklet state with different rules. + +| state\priority | 1st | 2nd | 3rd | +| -------------------------- | ------ | ----- | ------ | +| Kinematics except velocity | LiDAR | Radar | Camera | +| Forward velocity | Radar | LiDAR | Camera | +| Object classification | Camera | LiDAR | Radar | + +#### tracklet management + +We use the `existence_probability` to manage the tracklet lifecycle. + +- When we create a new tracklet, we set the `existence_probability` to the $p_{sensor}$ value. +- On each update by a specific sensor, we set the `existence_probability` to that sensor's $p_{sensor}$ value.
+- When a tracklet is not updated by a specific sensor, we reduce its `existence_probability` by `decay_rate` +- An object can be published if its `existence_probability` is larger than `publish_probability_threshold` +- An object will be removed if its `existence_probability` is smaller than `remove_probability_threshold` + +![tracklet_management](./image/tracklet_management.drawio.svg) + +These parameters can be set in `config/decorative_tracker_merger.param.yaml` (a minimal code sketch of this lifecycle is given at the end of this README). + +```yaml +tracker_state_parameter: + remove_probability_threshold: 0.3 + publish_probability_threshold: 0.6 + default_lidar_existence_probability: 0.7 + default_radar_existence_probability: 0.6 + default_camera_existence_probability: 0.6 + decay_rate: 0.1 + max_dt: 1.0 +``` + +#### input/parameters + +| topic name | message type | description | +| ------------------------------- | ----------------------------------------------- | -------------------------------------------------------------------------------------- | +| `~input/main_object` | `autoware_auto_perception_msgs::TrackedObjects` | Dominant tracking objects. Output is published with the dominant objects' timestamps. | +| `~input/sub_object` | `autoware_auto_perception_msgs::TrackedObjects` | Sub tracking objects. | +| `output/object` | `autoware_auto_perception_msgs::TrackedObjects` | Merged tracking objects. | +| `debug/interpolated_sub_object` | `autoware_auto_perception_msgs::TrackedObjects` | Interpolated sub tracking objects. | + +Default parameters are set in [config/decorative_tracker_merger.param.yaml](./config/decorative_tracker_merger.param.yaml). + +| parameter name | description | default value | +| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------- | +| `base_link_frame_id` | base link frame id. This is used to transform the tracking objects. | "base_link" | +| `time_sync_threshold` | time sync threshold. If the time difference between two tracking objects is smaller than this value, we consider these two tracking objects to be the same object. | 0.05 | +| `sub_object_timeout_sec` | sub object timeout. If the sub object has not been updated for this time, we consider that the object no longer exists. | 0.5 | +| `main_sensor_type` | main sensor type. This is used to determine the dominant tracking objects. | "lidar" | +| `sub_sensor_type` | sub sensor type. This is used to determine the sub tracking objects. | "radar" | +| `tracker_state_parameter` | tracker state parameters. These are used to manage the tracklets. | | + +- the details of `tracker_state_parameter` are described in [tracklet management](#tracklet-management) + +#### tuning + +As explained in [tracklet management](#tracklet-management), this tracker merger tends to maintain both input tracking objects. + +If there are many false positive tracking objects, + +- decrease `default_<sensor>_existence_probability` of that sensor +- increase `decay_rate` +- increase `publish_probability_threshold` to publish only reliable tracking objects + +### equivalent_tracker_merger + +This is future work.
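The tracklet management lifecycle above (reset on sensor update, decay when unobserved, publish/remove thresholds) can be sketched in a few lines of C++. This is only an illustrative sketch, assuming the parameter names from `decorative_tracker_merger.param.yaml`; the struct and function names are hypothetical and are not part of this package's API.

```cpp
#include <algorithm>

// Hypothetical sketch of the existence-probability lifecycle described in
// "tracklet management"; not the actual TrackerState implementation.
struct TrackletLifecycleSketch
{
  double existence_probability{0.0};

  // A sensor (LiDAR/Radar/Camera) updated this tracklet: reset to that
  // sensor's default probability, e.g. 0.7 for LiDAR.
  void onSensorUpdate(const double default_sensor_existence_probability)
  {
    existence_probability = default_sensor_existence_probability;
  }

  // No measurement from that sensor in this cycle: decay, e.g. by 0.1.
  void onMissedUpdate(const double decay_rate)
  {
    existence_probability = std::max(0.0, existence_probability - decay_rate);
  }

  bool canPublish(const double publish_probability_threshold) const
  {
    return existence_probability > publish_probability_threshold;  // e.g. 0.5
  }

  bool shouldRemove(const double remove_probability_threshold) const
  {
    return existence_probability < remove_probability_threshold;  // e.g. 0.3
  }
};
```

Resetting the probability to the per-sensor default on every update, rather than accumulating it, keeps the lifecycle simple: a tracklet survives as long as at least one sensor keeps re-confirming it, and fades out at `decay_rate` per cycle once all sensors stop seeing it.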
diff --git a/perception/tracking_object_merger/config/data_association_matrix.param.yaml b/perception/tracking_object_merger/config/data_association_matrix.param.yaml new file mode 100644 index 0000000000000..702809b3cede1 --- /dev/null +++ b/perception/tracking_object_merger/config/data_association_matrix.param.yaml @@ -0,0 +1,168 @@ +/**: + ros__parameters: + lidar-lidar: + can_assign_matrix: + #UNKNOWN, CAR, TRUCK, BUS, TRAILER, MOTORBIKE, BICYCLE,PEDESTRIAN + [0, 0, 0, 0, 0, 0, 0, 0, #UNKNOWN + 0, 1, 1, 1, 1, 0, 0, 0, #CAR + 0, 1, 1, 1, 1, 0, 0, 0, #TRUCK + 0, 1, 1, 1, 1, 0, 0, 0, #BUS + 0, 1, 1, 1, 1, 0, 0, 0, #TRAILER + 0, 0, 0, 0, 0, 1, 1, 1, #MOTORBIKE + 0, 0, 0, 0, 0, 1, 1, 1, #BICYCLE + 0, 0, 0, 0, 0, 1, 1, 1] #PEDESTRIAN + + max_dist_matrix: + #UNKNOWN, CAR, TRUCK, BUS, TRAILER, MOTORBIKE, BICYCLE, PEDESTRIAN + [4.0, 4.0, 5.0, 5.0, 5.0, 2.0, 2.0, 2.0, #UNKNOWN + 4.0, 2.0, 5.0, 5.0, 5.0, 1.0, 1.0, 1.0, #CAR + 5.0, 5.0, 5.0, 5.0, 5.0, 1.0, 1.0, 1.0, #TRUCK + 5.0, 5.0, 5.0, 5.0, 5.0, 1.0, 1.0, 1.0, #BUS + 5.0, 5.0, 5.0, 5.0, 5.0, 1.0, 1.0, 1.0, #TRAILER + 2.0, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.0, #MOTORBIKE + 2.0, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.0, #BICYCLE + 2.0, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 2.0] #PEDESTRIAN + + max_rad_matrix: # If value is greater than pi, it will be ignored. + #UNKNOWN, CAR, TRUCK, BUS, TRAILER MOTORBIKE, BICYCLE, PEDESTRIAN + [3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, #UNKNOWN + 3.150, 1.047, 1.047, 1.047, 1.047, 3.150, 3.150, 3.150, #CAR + 3.150, 1.047, 1.047, 1.047, 1.047, 3.150, 3.150, 3.150, #TRUCK + 3.150, 1.047, 1.047, 1.047, 1.047, 3.150, 3.150, 3.150, #BUS + 3.150, 1.047, 1.047, 1.047, 1.047, 3.150, 3.150, 3.150, #TRAILER + 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, #MOTORBIKE + 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, #BICYCLE + 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150] #PEDESTRIAN + + max_velocity_diff_matrix: # Ignored when value is larger than 100.0 + #UNKNOWN, CAR, TRUCK, BUS, TRAILER, MOTORBIKE, BICYCLE, PEDESTRIAN + [100.0, 100.0,100.0,100.0,100.0, 100.0, 100.0, 100.0, #UNKNOWN + 100.0, 8.0, 8.0, 8.0, 8.0, 100.0, 100.0, 100.0, #CAR + 100.0, 8.0, 8.0, 8.0, 8.0, 100.0, 100.0, 100.0, #TRUCK + 100.0, 8.0, 8.0, 8.0, 8.0, 100.0, 100.0, 100.0, #BUS + 100.0, 8.0, 8.0, 8.0, 8.0, 100.0, 100.0, 100.0, #TRAILER + 100.0, 100.0,100.0,100.0,100.0, 100.0, 100.0, 100.0, #MOTORBIKE + 100.0, 100.0,100.0,100.0,100.0, 100.0, 100.0, 100.0, #BICYCLE + 100.0, 100.0,100.0,100.0,100.0, 100.0, 100.0, 100.0] #PEDESTRIAN + + min_iou_matrix: # If value is negative, it will be ignored. 
+ #UNKNOWN, CAR, TRUCK, BUS, TRAILER, MOTORBIKE, BICYCLE, PEDESTRIAN + [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, #UNKNOWN + 0.1, 0.3, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, #CAR + 0.1, 0.2, 0.3, 0.3, 0.3, 0.1, 0.1, 0.1, #TRUCK + 0.1, 0.2, 0.3, 0.3, 0.3, 0.1, 0.1, 0.1, #BUS + 0.1, 0.2, 0.3, 0.3, 0.3, 0.1, 0.1, 0.1, #TRAILER + 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, #MOTORBIKE + 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, #BICYCLE + 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1] #PEDESTRIAN + + lidar-radar: + can_assign_matrix: + #UNKNOWN, CAR, TRUCK, BUS, TRAILER, MOTORBIKE, BICYCLE,PEDESTRIAN + [0, 0, 0, 0, 0, 0, 0, 0, #UNKNOWN + 0, 1, 1, 1, 1, 0, 0, 0, #CAR + 0, 1, 1, 1, 1, 0, 0, 0, #TRUCK + 0, 1, 1, 1, 1, 0, 0, 0, #BUS + 0, 1, 1, 1, 1, 0, 0, 0, #TRAILER + 0, 0, 0, 0, 0, 1, 1, 1, #MOTORBIKE + 0, 0, 0, 0, 0, 1, 1, 1, #BICYCLE + 0, 0, 0, 0, 0, 1, 1, 1] #PEDESTRIAN + + max_dist_matrix: + #UNKNOWN, CAR, TRUCK, BUS, TRAILER, MOTORBIKE, BICYCLE, PEDESTRIAN + [4.0, 4.0, 5.0, 5.0, 5.0, 3.5, 3.5, 3.5, #UNKNOWN + 4.0, 5.5, 6.0, 6.0, 6.0, 1.0, 1.0, 1.0, #CAR + 5.0, 6.0, 6.5, 6.5, 6.5, 1.0, 1.0, 1.0, #TRUCK + 5.0, 6.0, 6.5, 6.5, 6.5, 1.0, 1.0, 1.0, #BUS + 5.0, 6.0, 6.5, 6.5, 6.5, 1.0, 1.0, 1.0, #TRAILER + 3.5, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.0, #MOTORBIKE + 3.5, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.0, #BICYCLE + 3.5, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.5] #PEDESTRIAN + + max_rad_matrix: # If value is greater than pi, it will be ignored. + #UNKNOWN, CAR, TRUCK, BUS, TRAILER MOTORBIKE, BICYCLE, PEDESTRIAN + [3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, #UNKNOWN + 3.150, 1.047, 1.047, 1.047, 1.047, 3.150, 3.150, 3.150, #CAR + 3.150, 1.047, 1.047, 1.047, 1.047, 3.150, 3.150, 3.150, #TRUCK + 3.150, 1.047, 1.047, 1.047, 1.047, 3.150, 3.150, 3.150, #BUS + 3.150, 1.047, 1.047, 1.047, 1.047, 3.150, 3.150, 3.150, #TRAILER + 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, #MOTORBIKE + 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, #BICYCLE + 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150] #PEDESTRIAN + + max_velocity_diff_matrix: # Ignored when value is larger than 100.0 + #UNKNOWN, CAR, TRUCK, BUS, TRAILER, MOTORBIKE, BICYCLE, PEDESTRIAN + [100.0, 100.0,100.0,100.0,100.0, 100.0, 100.0, 100.0, #UNKNOWN + 100.0, 8.0, 8.0, 8.0, 8.0, 100.0, 100.0, 100.0, #CAR + 100.0, 8.0, 8.0, 8.0, 8.0, 100.0, 100.0, 100.0, #TRUCK + 100.0, 8.0, 8.0, 8.0, 8.0, 100.0, 100.0, 100.0, #BUS + 100.0, 8.0, 8.0, 8.0, 8.0, 100.0, 100.0, 100.0, #TRAILER + 100.0, 100.0,100.0,100.0,100.0, 100.0, 100.0, 100.0, #MOTORBIKE + 100.0, 100.0,100.0,100.0,100.0, 100.0, 100.0, 100.0, #BICYCLE + 100.0, 100.0,100.0,100.0,100.0, 100.0, 100.0, 100.0] #PEDESTRIAN + + min_iou_matrix: # set all value to 0.0 to disable this constraint + #UNKNOWN, CAR, TRUCK, BUS, TRAILER, MOTORBIKE, BICYCLE, PEDESTRIAN + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, #UNKNOWN + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, #CAR + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, #TRUCK + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, #BUS + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, #TRAILER + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, #MOTORBIKE + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, #BICYCLE + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] #PEDESTRIAN + + radar-radar: + can_assign_matrix: + #UNKNOWN, CAR, TRUCK, BUS, TRAILER, MOTORBIKE, BICYCLE,PEDESTRIAN + [0, 0, 0, 0, 0, 0, 0, 0, #UNKNOWN + 0, 1, 1, 1, 1, 0, 0, 0, #CAR + 0, 1, 1, 1, 1, 0, 0, 0, #TRUCK + 0, 1, 1, 1, 1, 0, 0, 0, #BUS + 0, 1, 1, 1, 1, 0, 0, 0, #TRAILER + 0, 0, 0, 0, 0, 1, 1, 1, #MOTORBIKE + 0, 0, 0, 0, 0, 1, 1, 1, #BICYCLE + 0, 0, 0, 0, 0, 
1, 1, 1] #PEDESTRIAN + + max_dist_matrix: + #UNKNOWN, CAR, TRUCK, BUS, TRAILER, MOTORBIKE, BICYCLE, PEDESTRIAN + [4.0, 4.0, 5.0, 5.0, 5.0, 3.5, 3.5, 3.5, #UNKNOWN + 4.0, 7.0, 7.5, 7.5, 7.5, 1.0, 1.0, 1.0, #CAR + 5.0, 7.5, 7.5, 7.5, 7.5, 1.0, 1.0, 1.0, #TRUCK + 5.0, 7.5, 7.5, 7.5, 7.5, 1.0, 1.0, 1.0, #BUS + 5.0, 7.5, 7.5, 7.5, 7.5, 1.0, 1.0, 1.0, #TRAILER + 3.5, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.0, #MOTORBIKE + 3.5, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.0, #BICYCLE + 3.5, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.5] #PEDESTRIAN + max_rad_matrix: # If value is greater than pi, it will be ignored. + #UNKNOWN, CAR, TRUCK, BUS, TRAILER MOTORBIKE, BICYCLE, PEDESTRIAN + [3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, #UNKNOWN + 3.150, 1.047, 1.047, 1.047, 1.047, 3.150, 3.150, 3.150, #CAR + 3.150, 1.047, 1.047, 1.047, 1.047, 3.150, 3.150, 3.150, #TRUCK + 3.150, 1.047, 1.047, 1.047, 1.047, 3.150, 3.150, 3.150, #BUS + 3.150, 1.047, 1.047, 1.047, 1.047, 3.150, 3.150, 3.150, #TRAILER + 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, #MOTORBIKE + 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, #BICYCLE + 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150, 3.150] #PEDESTRIAN + + max_velocity_diff_matrix: # Ignored when value is larger than 100.0 + #UNKNOWN, CAR, TRUCK, BUS, TRAILER, MOTORBIKE, BICYCLE, PEDESTRIAN + [100.0, 100.0,100.0,100.0,100.0, 100.0, 100.0, 100.0, #UNKNOWN + 100.0, 8.0, 8.0, 8.0, 8.0, 100.0, 100.0, 100.0, #CAR + 100.0, 8.0, 8.0, 8.0, 8.0, 100.0, 100.0, 100.0, #TRUCK + 100.0, 8.0, 8.0, 8.0, 8.0, 100.0, 100.0, 100.0, #BUS + 100.0, 8.0, 8.0, 8.0, 8.0, 100.0, 100.0, 100.0, #TRAILER + 100.0, 100.0,100.0,100.0,100.0, 100.0, 100.0, 100.0, #MOTORBIKE + 100.0, 100.0,100.0,100.0,100.0, 100.0, 100.0, 100.0, #BICYCLE + 100.0, 100.0,100.0,100.0,100.0, 100.0, 100.0, 100.0] #PEDESTRIAN + + min_iou_matrix: # set all value to 0.0 to disable this constraint + #UNKNOWN, CAR, TRUCK, BUS, TRAILER, MOTORBIKE, BICYCLE, PEDESTRIAN + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, #UNKNOWN + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, #CAR + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, #TRUCK + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, #BUS + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, #TRAILER + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, #MOTORBIKE + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, #BICYCLE + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] #PEDESTRIAN diff --git a/perception/tracking_object_merger/config/decorative_tracker_merger.param.yaml b/perception/tracking_object_merger/config/decorative_tracker_merger.param.yaml new file mode 100644 index 0000000000000..4a108be657a27 --- /dev/null +++ b/perception/tracking_object_merger/config/decorative_tracker_merger.param.yaml @@ -0,0 +1,26 @@ +# Node parameters +/**: + ros__parameters: + base_link_frame_id: "base_link" + time_sync_threshold: 0.999 + sub_object_timeout_sec: 0.8 + publish_interpolated_sub_objects: true #for debug + + # choose the input sensor type for each object type + # "lidar", "radar", "camera" are available + main_sensor_type: "lidar" + sub_sensor_type: "radar" + + # tracker settings + tracker_state_parameter: + remove_probability_threshold: 0.3 + publish_probability_threshold: 0.5 + default_lidar_existence_probability: 0.7 + default_radar_existence_probability: 0.6 + default_camera_existence_probability: 0.5 + decay_rate: 0.1 + max_dt: 1.0 + + # logging settings + enable_logging: false + log_file_path: "/tmp/decorative_tracker_merger.log" diff --git a/perception/tracking_object_merger/config/decorative_tracker_merger_policy.param.yaml 
b/perception/tracking_object_merger/config/decorative_tracker_merger_policy.param.yaml new file mode 100644 index 0000000000000..0b98e1b202957 --- /dev/null +++ b/perception/tracking_object_merger/config/decorative_tracker_merger_policy.param.yaml @@ -0,0 +1,16 @@ +# Merger policy for decorative tracker merger node +# decorative tracker merger works by merging the sub-object trackers into a main object tracker result +# There are 3 merger policies: +# 1. "skip": skip the sub-object tracker result +# 2. "overwrite": overwrite the main object tracker result with the sub-object tracker result +# 3. "fusion": merge the main object tracker result with the sub-object tracker result by using covariance based fusion +/**: + ros__parameters: + kinematics_to_be_merged: "velocity" # currently only support "velocity" + + # choose the merger policy for each object type + # : "skip", "overwrite", "fusion" + kinematics_merge_policy: "overwrite" + classification_merge_policy: "skip" + existence_prob_merge_policy: "skip" + shape_merge_policy: "skip"
diff --git a/perception/tracking_object_merger/image/decorative_tracker_merger.drawio.svg b/perception/tracking_object_merger/image/decorative_tracker_merger.drawio.svg new file mode 100644 index 0000000000000..3e35fa4d1c338 --- /dev/null +++ b/perception/tracking_object_merger/image/decorative_tracker_merger.drawio.svg @@ -0,0 +1,345 @@ [drawio diagram (SVG markup not recoverable in this extraction): pipeline of the decorative_tracker_merger_node — main_object and sub_object inputs, time-sync (delay compensation), sub object buffer, association against the inner tracklets (found: update tracklet, not found: add new tracklet), and tracklet management (publisher) that removes low-existence-probability objects and outputs the merged TrackedObjects.]
diff --git a/perception/tracking_object_merger/image/time_sync.drawio.svg b/perception/tracking_object_merger/image/time_sync.drawio.svg new file mode 100644 index 0000000000000..dad2eb1a7fd27 --- /dev/null +++ b/perception/tracking_object_merger/image/time_sync.drawio.svg @@ -0,0 +1,223 @@ [drawio diagram (SVG markup not recoverable in this extraction): timelines of the main object topic and sub object topic, the publish timing, raw and interpolated sub objects, the time sync threshold, and the time-synchronization prediction patterns (1. forward, 2. backward, 3. interpolation).]
diff --git a/perception/tracking_object_merger/image/tracklet_management.drawio.svg b/perception/tracking_object_merger/image/tracklet_management.drawio.svg new file mode 100644 index 0000000000000..4f2a00dfd4dfd --- /dev/null +++ b/perception/tracking_object_merger/image/tracklet_management.drawio.svg @@ -0,0 +1,216 @@ [drawio diagram (SVG markup not recoverable in this extraction): existence probability axis from 0.0 to 1.0 with the remove threshold and can-publish threshold; the probability is initialized or updated from each sensor and decays when the tracklet is not observed.]
diff --git a/perception/tracking_object_merger/include/tracking_object_merger/data_association/data_association.hpp b/perception/tracking_object_merger/include/tracking_object_merger/data_association/data_association.hpp new file mode 100644 index 0000000000000..05fc9f6caccb5 --- /dev/null +++ b/perception/tracking_object_merger/include/tracking_object_merger/data_association/data_association.hpp @@ -0,0 +1,73 @@ +// Copyright 2023 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+// +// +// Author: v1.0 Yukihiro Saito +// + +#ifndef TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__DATA_ASSOCIATION_HPP_ +#define TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__DATA_ASSOCIATION_HPP_ + +// #include // for debug json library + +#include +#include +#include +#include + +#define EIGEN_MPL2_ONLY +#include "tracking_object_merger/data_association/solver/gnn_solver.hpp" +#include "tracking_object_merger/utils/tracker_state.hpp" + +#include +#include + +#include +#include + +#include + +class DataAssociation +{ +private: + Eigen::MatrixXi can_assign_matrix_; + Eigen::MatrixXd max_dist_matrix_; + Eigen::MatrixXd max_rad_matrix_; + Eigen::MatrixXd min_iou_matrix_; + Eigen::MatrixXd max_velocity_diff_matrix_; + const double score_threshold_; + std::unique_ptr gnn_solver_ptr_; + +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + DataAssociation( + std::vector can_assign_vector, std::vector max_dist_vector, + std::vector max_rad_vector, std::vector min_iou_vector, + std::vector max_velocity_diff_vector); + void assign( + const Eigen::MatrixXd & src, std::unordered_map & direct_assignment, + std::unordered_map & reverse_assignment); + Eigen::MatrixXd calcScoreMatrix( + const autoware_auto_perception_msgs::msg::TrackedObjects & objects0, + const autoware_auto_perception_msgs::msg::TrackedObjects & objects1); + Eigen::MatrixXd calcScoreMatrix( + const autoware_auto_perception_msgs::msg::TrackedObjects & objects0, + const std::vector & trackers); + double calcScoreBetweenObjects( + const autoware_auto_perception_msgs::msg::TrackedObject & object0, + const autoware_auto_perception_msgs::msg::TrackedObject & object1) const; + virtual ~DataAssociation() {} +}; + +#endif // TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__DATA_ASSOCIATION_HPP_ diff --git a/perception/tracking_object_merger/include/tracking_object_merger/data_association/solver/gnn_solver.hpp b/perception/tracking_object_merger/include/tracking_object_merger/data_association/solver/gnn_solver.hpp new file mode 100644 index 0000000000000..31240848f1977 --- /dev/null +++ b/perception/tracking_object_merger/include/tracking_object_merger/data_association/solver/gnn_solver.hpp @@ -0,0 +1,22 @@ +// Copyright 2023 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__SOLVER__GNN_SOLVER_HPP_ +#define TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__SOLVER__GNN_SOLVER_HPP_ + +#include "tracking_object_merger/data_association/solver/gnn_solver_interface.hpp" +#include "tracking_object_merger/data_association/solver/mu_successive_shortest_path.hpp" +#include "tracking_object_merger/data_association/solver/successive_shortest_path.hpp" + +#endif // TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__SOLVER__GNN_SOLVER_HPP_ diff --git a/perception/tracking_object_merger/include/tracking_object_merger/data_association/solver/gnn_solver_interface.hpp b/perception/tracking_object_merger/include/tracking_object_merger/data_association/solver/gnn_solver_interface.hpp new file mode 100644 index 0000000000000..e915b5d2a9e7b --- /dev/null +++ b/perception/tracking_object_merger/include/tracking_object_merger/data_association/solver/gnn_solver_interface.hpp @@ -0,0 +1,35 @@ +// Copyright 2023 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__SOLVER__GNN_SOLVER_INTERFACE_HPP_ +#define TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__SOLVER__GNN_SOLVER_INTERFACE_HPP_ + +#include +#include + +namespace gnn_solver +{ +class GnnSolverInterface +{ +public: + GnnSolverInterface() = default; + virtual ~GnnSolverInterface() = default; + + virtual void maximizeLinearAssignment( + const std::vector> & cost, std::unordered_map * direct_assignment, + std::unordered_map * reverse_assignment) = 0; +}; +} // namespace gnn_solver + +#endif // TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__SOLVER__GNN_SOLVER_INTERFACE_HPP_ diff --git a/perception/tracking_object_merger/include/tracking_object_merger/data_association/solver/mu_successive_shortest_path.hpp b/perception/tracking_object_merger/include/tracking_object_merger/data_association/solver/mu_successive_shortest_path.hpp new file mode 100644 index 0000000000000..5c0ebc04fdde3 --- /dev/null +++ b/perception/tracking_object_merger/include/tracking_object_merger/data_association/solver/mu_successive_shortest_path.hpp @@ -0,0 +1,37 @@ +// Copyright 2023 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__SOLVER__MU_SUCCESSIVE_SHORTEST_PATH_HPP_ +#define TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__SOLVER__MU_SUCCESSIVE_SHORTEST_PATH_HPP_ + +#include "tracking_object_merger/data_association/solver/gnn_solver_interface.hpp" + +#include +#include + +namespace gnn_solver +{ +class MuSSP : public GnnSolverInterface +{ +public: + MuSSP() = default; + ~MuSSP() = default; + + void maximizeLinearAssignment( + const std::vector> & cost, std::unordered_map * direct_assignment, + std::unordered_map * reverse_assignment) override; +}; +} // namespace gnn_solver + +#endif // TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__SOLVER__MU_SUCCESSIVE_SHORTEST_PATH_HPP_ diff --git a/perception/tracking_object_merger/include/tracking_object_merger/data_association/solver/successive_shortest_path.hpp b/perception/tracking_object_merger/include/tracking_object_merger/data_association/solver/successive_shortest_path.hpp new file mode 100644 index 0000000000000..47a737cf58298 --- /dev/null +++ b/perception/tracking_object_merger/include/tracking_object_merger/data_association/solver/successive_shortest_path.hpp @@ -0,0 +1,37 @@ +// Copyright 2023 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__SOLVER__SUCCESSIVE_SHORTEST_PATH_HPP_ +#define TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__SOLVER__SUCCESSIVE_SHORTEST_PATH_HPP_ + +#include "tracking_object_merger/data_association/solver/gnn_solver_interface.hpp" + +#include +#include + +namespace gnn_solver +{ +class SSP : public GnnSolverInterface +{ +public: + SSP() = default; + ~SSP() = default; + + void maximizeLinearAssignment( + const std::vector> & cost, std::unordered_map * direct_assignment, + std::unordered_map * reverse_assignment) override; +}; +} // namespace gnn_solver + +#endif // TRACKING_OBJECT_MERGER__DATA_ASSOCIATION__SOLVER__SUCCESSIVE_SHORTEST_PATH_HPP_ diff --git a/perception/tracking_object_merger/include/tracking_object_merger/decorative_tracker_merger.hpp b/perception/tracking_object_merger/include/tracking_object_merger/decorative_tracker_merger.hpp new file mode 100644 index 0000000000000..0509fb2a07dc5 --- /dev/null +++ b/perception/tracking_object_merger/include/tracking_object_merger/decorative_tracker_merger.hpp @@ -0,0 +1,134 @@ +// Copyright 2023 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef TRACKING_OBJECT_MERGER__DECORATIVE_TRACKER_MERGER_HPP_ +#define TRACKING_OBJECT_MERGER__DECORATIVE_TRACKER_MERGER_HPP_ + +#include "tracking_object_merger/data_association/data_association.hpp" +#include "tracking_object_merger/utils/tracker_state.hpp" +#include "tracking_object_merger/utils/utils.hpp" + +#include + +#include +#include + +#ifdef ROS_DISTRO_GALACTIC +#include +#else +#include +#endif + +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace tracking_object_merger +{ + +class DecorativeTrackerMergerNode : public rclcpp::Node +{ +public: + explicit DecorativeTrackerMergerNode(const rclcpp::NodeOptions & node_options); + enum class PriorityMode : int { Object0 = 0, Object1 = 1, Confidence = 2 }; + +private: + void set3dDataAssociation( + const std::string & prefix, + std::unordered_map> & data_association_map); + + void mainObjectsCallback( + const autoware_auto_perception_msgs::msg::TrackedObjects::ConstSharedPtr & input_object_msg); + void subObjectsCallback( + const autoware_auto_perception_msgs::msg::TrackedObjects::ConstSharedPtr & input_object_msg); + + bool decorativeMerger( + const MEASUREMENT_STATE input_index, + const autoware_auto_perception_msgs::msg::TrackedObjects::ConstSharedPtr & input_object_msg); + autoware_auto_perception_msgs::msg::TrackedObjects predictFutureState( + const autoware_auto_perception_msgs::msg::TrackedObjects::ConstSharedPtr & input_object_msg, + const std_msgs::msg::Header & header); + std::optional interpolateObjectState( + const autoware_auto_perception_msgs::msg::TrackedObjects::ConstSharedPtr & input_object_msg1, + const autoware_auto_perception_msgs::msg::TrackedObjects::ConstSharedPtr & input_object_msg2, + const std_msgs::msg::Header & header); + TrackedObjects getTrackedObjects(const std_msgs::msg::Header & header); + TrackerState createNewTracker( + const MEASUREMENT_STATE input_index, rclcpp::Time current_time, + const autoware_auto_perception_msgs::msg::TrackedObject & input_object); + +private: + tf2_ros::Buffer tf_buffer_; + tf2_ros::TransformListener tf_listener_; + rclcpp::Publisher::SharedPtr + merged_object_pub_; + rclcpp::Subscription::SharedPtr + sub_main_objects_; + rclcpp::Subscription::SharedPtr + sub_sub_objects_; + // debug object publisher + rclcpp::Publisher::SharedPtr + debug_object_pub_; + bool publish_interpolated_sub_objects_; + + /* handle objects */ + std::unordered_map> + input_merger_map_; + MEASUREMENT_STATE main_sensor_type_; + MEASUREMENT_STATE sub_sensor_type_; + std::vector inner_tracker_objects_; + std::unordered_map> data_association_map_; + std::string target_frame_; + std::string base_link_frame_id_; + // buffer to save the sub objects + std::vector + sub_objects_buffer_; + double sub_object_timeout_sec_; + double time_sync_threshold_; + + // tracker default settings + TrackerStateParameter tracker_state_parameter_; + + // merge policy (currently not used) + struct + { + std::string kinematics_to_be_merged; + merger_utils::MergePolicy kinematics_merge_policy; + merger_utils::MergePolicy classification_merge_policy; + merger_utils::MergePolicy existence_prob_merge_policy; + merger_utils::MergePolicy shape_merge_policy; + } merger_policy_params_; + + std::map merger_policy_map_ = { + {"skip", merger_utils::MergePolicy::SKIP}, + {"overwrite", merger_utils::MergePolicy::OVERWRITE}, + {"fusion", merger_utils::MergePolicy::FUSION}}; + + // debug parameters + struct logging + { + bool enable = false; + std::string path; + } logging_; +}; + +} // namespace 
tracking_object_merger + +#endif // TRACKING_OBJECT_MERGER__DECORATIVE_TRACKER_MERGER_HPP_ diff --git a/perception/tracking_object_merger/include/tracking_object_merger/utils/tracker_state.hpp b/perception/tracking_object_merger/include/tracking_object_merger/utils/tracker_state.hpp new file mode 100644 index 0000000000000..6bedf7db8d727 --- /dev/null +++ b/perception/tracking_object_merger/include/tracking_object_merger/utils/tracker_state.hpp @@ -0,0 +1,148 @@ +// Copyright 2023 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef TRACKING_OBJECT_MERGER__UTILS__TRACKER_STATE_HPP_ +#define TRACKING_OBJECT_MERGER__UTILS__TRACKER_STATE_HPP_ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +using autoware_auto_perception_msgs::msg::TrackedObject; +using autoware_auto_perception_msgs::msg::TrackedObjects; + +// Enum classes to distinguish input sources +enum class MEASUREMENT_STATE : int { + NONE = 0, // 0000 + LIDAR = 1, // 0001 + RADAR = 2, // 0010 + CAMERA = 4, // 0100 + LIDAR_RADAR = 3, // 0011 + LIDAR_CAMERA = 5, // 0101 + RADAR_CAMERA = 6, // 0110 + LIDAR_RADAR_CAMERA = 7, // 0111 +}; + +// Operator overloading for MEASUREMENT_STATE +// 1. operator| for MEASUREMENT_STATE +// e.g. MEASUREMENT_STATE::LIDAR | MEASUREMENT_STATE::RADAR == MEASUREMENT_STATE::LIDAR_RADAR +// 2. operator& for MEASUREMENT_STATE +// e.g. MEASUREMENT_STATE::LIDAR_RADAR & MEASUREMENT_STATE::LIDAR == true +// e.g. 
MEASUREMENT_STATE::LIDAR_RADAR & MEASUREMENT_STATE::CAMERA == false +inline MEASUREMENT_STATE operator|(MEASUREMENT_STATE lhs, MEASUREMENT_STATE rhs) +{ + return static_cast(static_cast(lhs) | static_cast(rhs)); +} +inline bool operator&(MEASUREMENT_STATE combined, MEASUREMENT_STATE target) +{ + return (static_cast(combined) & static_cast(target)) != 0; +} + +// Struct to handle tracker state public parameter +struct TrackerStateParameter +{ + double publish_probability_threshold = 0.5; + double remove_probability_threshold = 0.3; + double default_lidar_existence_probability = 0.8; + double default_radar_existence_probability = 0.7; + double default_camera_existence_probability = 0.6; + double decay_rate = 0.1; + double max_dt = 2.0; +}; + +// Class to handle tracker state +class TrackerState +{ +private: + /* data */ + TrackedObject tracked_object_; + rclcpp::Time last_update_time_; + // Eigen::MatrixXf state_matrix_; + // Eigen::MatrixXf covariance_matrix_; + + // timer handle + std::unordered_map last_updated_time_map_; + double max_dt_ = 2.0; + +public: + TrackerState( + const MEASUREMENT_STATE input, const rclcpp::Time & last_update_time, + const TrackedObject & tracked_object); + TrackerState( + const MEASUREMENT_STATE input_source, const rclcpp::Time & last_update_time, + const autoware_auto_perception_msgs::msg::TrackedObject & tracked_object, + const unique_identifier_msgs::msg::UUID & uuid); + + ~TrackerState(); + +public: + void setParameter(const TrackerStateParameter & parameter); + bool predict(const rclcpp::Time & time); + bool predict(const double dt, std::function func); + MEASUREMENT_STATE getCurrentMeasurementState(const rclcpp::Time & current_time) const; + bool updateState( + const MEASUREMENT_STATE input, const rclcpp::Time & current_time, + const TrackedObject & tracked_object); + void updateWithLIDAR(const rclcpp::Time & current_time, const TrackedObject & tracked_object); + void updateWithRADAR(const rclcpp::Time & current_time, const TrackedObject & tracked_object); + void updateWithCAMERA(const rclcpp::Time & current_time, const TrackedObject & tracked_object); + void updateWithoutSensor(const rclcpp::Time & current_time); + bool update(const MEASUREMENT_STATE input, const TrackedObject & tracked_object); + bool updateWithFunction( + const MEASUREMENT_STATE input, const rclcpp::Time & current_time, + const TrackedObject & tracked_object, + std::function update_func); + // const functions + bool hasUUID(const MEASUREMENT_STATE input, const unique_identifier_msgs::msg::UUID & uuid) const; + bool isValid() const; + bool canPublish() const; + TrackedObject getObject() const; + +public: + // handle uuid + unique_identifier_msgs::msg::UUID const_uuid_; + // each sensor input to uuid map + std::unordered_map> + input_uuid_map_; + MEASUREMENT_STATE measurement_state_; + + // following lifecycle control parameter is overwritten by TrackerStateParameter + std::unordered_map default_existence_probability_map_ = { + {MEASUREMENT_STATE::LIDAR, 0.8}, + {MEASUREMENT_STATE::RADAR, 0.7}, + {MEASUREMENT_STATE::CAMERA, 0.6}, + }; + double existence_probability_ = 0.0; + double publish_probability_threshold_ = 0.5; + double remove_probability_threshold_ = 0.3; + double decay_rate_ = 0.1; +}; + +TrackedObjects getTrackedObjectsFromTrackerStates( + std::vector & tracker_states, const rclcpp::Time & time); + +#endif // TRACKING_OBJECT_MERGER__UTILS__TRACKER_STATE_HPP_ diff --git a/perception/tracking_object_merger/include/tracking_object_merger/utils/utils.hpp 
b/perception/tracking_object_merger/include/tracking_object_merger/utils/utils.hpp new file mode 100644 index 0000000000000..bc6dfef9b80bf --- /dev/null +++ b/perception/tracking_object_merger/include/tracking_object_merger/utils/utils.hpp @@ -0,0 +1,133 @@ +// Copyright 2023 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +#ifndef TRACKING_OBJECT_MERGER__UTILS__UTILS_HPP_ +#define TRACKING_OBJECT_MERGER__UTILS__UTILS_HPP_ + +// #include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +using autoware_auto_perception_msgs::msg::TrackedObject; +using autoware_auto_perception_msgs::msg::TrackedObjects; +namespace utils +{ +enum MSG_COV_IDX { + X_X = 0, + X_Y = 1, + X_Z = 2, + X_ROLL = 3, + X_PITCH = 4, + X_YAW = 5, + Y_X = 6, + Y_Y = 7, + Y_Z = 8, + Y_ROLL = 9, + Y_PITCH = 10, + Y_YAW = 11, + Z_X = 12, + Z_Y = 13, + Z_Z = 14, + Z_ROLL = 15, + Z_PITCH = 16, + Z_YAW = 17, + ROLL_X = 18, + ROLL_Y = 19, + ROLL_Z = 20, + ROLL_ROLL = 21, + ROLL_PITCH = 22, + ROLL_YAW = 23, + PITCH_X = 24, + PITCH_Y = 25, + PITCH_Z = 26, + PITCH_ROLL = 27, + PITCH_PITCH = 28, + PITCH_YAW = 29, + YAW_X = 30, + YAW_Y = 31, + YAW_Z = 32, + YAW_ROLL = 33, + YAW_PITCH = 34, + YAW_YAW = 35 +}; + +// linear interpolation for tracked objects +TrackedObject linearInterpolationForTrackedObject( + const TrackedObject & obj1, const TrackedObject & obj2); + +// predict tracked objects +TrackedObject predictPastOrFutureTrackedObject(const TrackedObject & obj, const double dt); + +TrackedObjects predictPastOrFutureTrackedObjects( + const TrackedObjects & obj, const std_msgs::msg::Header & header); + +// predict tracked objects +TrackedObjects interpolateTrackedObjects( + const TrackedObjects & objects1, const TrackedObjects & objects2, std_msgs::msg::Header header); + +} // namespace utils + +namespace merger_utils +{ +// merge policy +enum MergePolicy : int { SKIP = 0, OVERWRITE = 1, FUSION = 2 }; + +// object kinematics velocity merger +autoware_auto_perception_msgs::msg::TrackedObjectKinematics objectKinematicsVXMerger( + const TrackedObject & main_obj, const TrackedObject & sub_obj, const MergePolicy policy); + +// object classification merger +TrackedObject objectClassificationMerger( + const TrackedObject & main_obj, const TrackedObject & sub_obj, const MergePolicy policy); + +// probability merger +float probabilityMerger(const float main_prob, const float sub_prob, const MergePolicy policy); + +// shape merger +autoware_auto_perception_msgs::msg::Shape shapeMerger( + const TrackedObject & main_obj, const TrackedObject & sub_obj, const MergePolicy policy); + +// update tracked object +void updateExceptVelocity(TrackedObject & main_obj, const TrackedObject & sub_obj); + +void updateOnlyObjectVelocity(TrackedObject & main_obj, const TrackedObject & sub_obj); + +void updateOnlyClassification(TrackedObject & main_obj, const TrackedObject & sub_obj); + +void 
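// Hypothetical usage sketch (not part of the diff): a merger node could choose
// among the update helpers declared in this merger_utils namespace depending on
// which sensor produced sub_obj. Only the function names come from this header;
// the branching policy below is an assumption for illustration.
//   if (sub_is_radar) {
//     merger_utils::updateOnlyObjectVelocity(main_obj, sub_obj);  // adopt only the velocity estimate
//   } else {
//     merger_utils::updateWholeTrackedObject(main_obj, sub_obj);  // adopt the full sub-object estimate
//   }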
updateWholeTrackedObject(TrackedObject & main_obj, const TrackedObject & sub_obj); + +} // namespace merger_utils + +#endif // TRACKING_OBJECT_MERGER__UTILS__UTILS_HPP_ diff --git a/perception/tracking_object_merger/launch/decorative_tracker_merger.launch.xml b/perception/tracking_object_merger/launch/decorative_tracker_merger.launch.xml new file mode 100644 index 0000000000000..5affbe474e8ae --- /dev/null +++ b/perception/tracking_object_merger/launch/decorative_tracker_merger.launch.xml @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/perception/tracking_object_merger/package.xml b/perception/tracking_object_merger/package.xml new file mode 100644 index 0000000000000..d74a8449b20e6 --- /dev/null +++ b/perception/tracking_object_merger/package.xml @@ -0,0 +1,32 @@ + + + + tracking_object_merger + 0.0.0 + merge tracking object + Yukihiro Saito + Yoshi Ri + Apache License 2.0 + + ament_cmake_auto + autoware_cmake + eigen3_cmake_module + + autoware_auto_perception_msgs + eigen + mussp + object_recognition_utils + rclcpp + rclcpp_components + tf2 + tf2_ros + tier4_autoware_utils + tier4_perception_msgs + + ament_lint_auto + autoware_lint_common + + + ament_cmake + + diff --git a/perception/tracking_object_merger/src/data_association/data_association.cpp b/perception/tracking_object_merger/src/data_association/data_association.cpp new file mode 100644 index 0000000000000..e7ab6077250f8 --- /dev/null +++ b/perception/tracking_object_merger/src/data_association/data_association.cpp @@ -0,0 +1,238 @@ +// Copyright 2023 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "tracking_object_merger/data_association/data_association.hpp" + +#include "object_recognition_utils/object_recognition_utils.hpp" +#include "tier4_autoware_utils/geometry/geometry.hpp" +#include "tracking_object_merger/data_association/solver/gnn_solver.hpp" +#include "tracking_object_merger/utils/utils.hpp" + +#include +#include +#include +#include +#include +#include +namespace +{ +double getFormedYawAngle( + const geometry_msgs::msg::Quaternion & quat0, const geometry_msgs::msg::Quaternion & quat1, + const bool distinguish_front_or_back = true) +{ + const double yaw0 = tier4_autoware_utils::normalizeRadian(tf2::getYaw(quat0)); + const double yaw1 = tier4_autoware_utils::normalizeRadian(tf2::getYaw(quat1)); + const double angle_range = distinguish_front_or_back ? M_PI : M_PI_2; + const double angle_step = distinguish_front_or_back ? 
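// Illustrative note (not part of the diff): with distinguish_front_or_back ==
// true, the wrapping loops below shift fixed_yaw0 by 2*pi until it lies within
// +-pi of yaw1, so yaw0 = +170 deg vs yaw1 = -170 deg yields about 20 deg,
// while 0 deg vs 180 deg stays at 180 deg. With distinguish_front_or_back ==
// false the wrap step is pi, so 0 deg vs 180 deg collapses to 0 deg, i.e.
// front- and back-facing orientations are treated as equivalent.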
2.0 * M_PI : M_PI; + // Fixed yaw0 to be in the range of +-90 or 180 degrees of yaw1 + double fixed_yaw0 = yaw0; + while (angle_range <= yaw1 - fixed_yaw0) { + fixed_yaw0 = fixed_yaw0 + angle_step; + } + while (angle_range <= fixed_yaw0 - yaw1) { + fixed_yaw0 = fixed_yaw0 - angle_step; + } + return std::fabs(fixed_yaw0 - yaw1); +} +} // namespace + +DataAssociation::DataAssociation( + std::vector can_assign_vector, std::vector max_dist_vector, + std::vector max_rad_vector, std::vector min_iou_vector, + std::vector max_velocity_diff_vector) +: score_threshold_(0.01) +{ + { + const int assign_label_num = static_cast(std::sqrt(can_assign_vector.size())); + Eigen::Map can_assign_matrix_tmp( + can_assign_vector.data(), assign_label_num, assign_label_num); + can_assign_matrix_ = can_assign_matrix_tmp.transpose(); + } + { + const int max_dist_label_num = static_cast(std::sqrt(max_dist_vector.size())); + Eigen::Map max_dist_matrix_tmp( + max_dist_vector.data(), max_dist_label_num, max_dist_label_num); + max_dist_matrix_ = max_dist_matrix_tmp.transpose(); + } + { + const int max_rad_label_num = static_cast(std::sqrt(max_rad_vector.size())); + Eigen::Map max_rad_matrix_tmp( + max_rad_vector.data(), max_rad_label_num, max_rad_label_num); + max_rad_matrix_ = max_rad_matrix_tmp.transpose(); + } + { + const int min_iou_label_num = static_cast(std::sqrt(min_iou_vector.size())); + Eigen::Map min_iou_matrix_tmp( + min_iou_vector.data(), min_iou_label_num, min_iou_label_num); + min_iou_matrix_ = min_iou_matrix_tmp.transpose(); + } + { + const int max_velocity_diff_label_num = + static_cast(std::sqrt(max_velocity_diff_vector.size())); + Eigen::Map max_velocity_diff_matrix_tmp( + max_velocity_diff_vector.data(), max_velocity_diff_label_num, max_velocity_diff_label_num); + max_velocity_diff_matrix_ = max_velocity_diff_matrix_tmp.transpose(); + } + + gnn_solver_ptr_ = std::make_unique(); +} + +void DataAssociation::assign( + const Eigen::MatrixXd & src, std::unordered_map & direct_assignment, + std::unordered_map & reverse_assignment) +{ + std::vector
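// Note (illustrative, not part of the diff): each parameter vector passed to
// the DataAssociation constructor above is a row-major flattening of an NxN
// label-to-label table. Eigen::Map interprets the raw buffer in Eigen's default
// column-major order, so the .transpose() restores the intended row-major
// layout. For a 2x2 example with
//   max_dist_vector = {a, b, c, d}   // row 0: {a, b}, row 1: {c, d}
// the mapped matrix is [[a, c], [b, d]] and its transpose is [[a, b], [c, d]].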