From 2ca276fdb0f3d8188131318b4ee295d7f6784d77 Mon Sep 17 00:00:00 2001 From: badai nguyen <94814556+badai-nguyen@users.noreply.github.com> Date: Mon, 24 Jun 2024 10:38:28 +0900 Subject: [PATCH] feat(tensorrt yolox): inference and publish mask image from yolox model with semantic segmentation header (#5553) * add segmentation model Signed-off-by: badai-nguyen fix: add multitask for segment Signed-off-by: badai-nguyen * fix: publish mask Signed-off-by: badai-nguyen * feat: publish colorized mask Signed-off-by: badai-nguyen * fix: resize yolox mask Signed-off-by: badai-nguyen * fix: add memory allocate operations Signed-off-by: Manato HIRABAYASHI * refactor: remove underscore for a local variable Signed-off-by: Manato HIRABAYASHI * chore: add condition to check the number of subscriber for newly added topics Signed-off-by: Manato HIRABAYASHI * chore: pre-commit Signed-off-by: badai-nguyen * fix: add roi overlapping segment Signed-off-by: badai-nguyen * fix: roi overlay segment Signed-off-by: badai-nguyen * chore: refactor Signed-off-by: badai-nguyen * docs: update readme Signed-off-by: badai-nguyen * fix: update model name Signed-off-by: badai-nguyen * fix: add utils into tensorrt_yolox Signed-off-by: badai-nguyen * fix: launch file Signed-off-by: badai-nguyen * chore: remove unnecessary depend Signed-off-by: badai-nguyen * chore: fix yaml file Signed-off-by: badai-nguyen * chore: remove duplicated param in launch Signed-off-by: badai-nguyen * fix: semantic class Signed-off-by: badai-nguyen * docs: update readme Signed-off-by: badai-nguyen * chore: update default param Signed-off-by: badai-nguyen * fix: add processing time topic Signed-off-by: badai-nguyen * chore: typo Signed-off-by: badai-nguyen * style(pre-commit): autofix * chore: fix cspell error Signed-off-by: badai-nguyen * fix: yolox default param Signed-off-by: badai-nguyen * chore: rename debug topics Signed-off-by: badai-nguyen * chore: rename debug topics Signed-off-by: badai-nguyen * docs: update model description Signed-off-by: badai-nguyen * style(pre-commit): autofix * fix: launch Signed-off-by: badai-nguyen * refactor: unpublish mask for single task Signed-off-by: badai-nguyen * Update perception/tensorrt_yolox/src/tensorrt_yolox.cpp Co-authored-by: Manato Hirabayashi <3022416+manato@users.noreply.github.com> * Update perception/tensorrt_yolox/src/tensorrt_yolox.cpp Co-authored-by: Manato Hirabayashi <3022416+manato@users.noreply.github.com> * Update perception/tensorrt_yolox/src/tensorrt_yolox.cpp Co-authored-by: Manato Hirabayashi <3022416+manato@users.noreply.github.com> * style(pre-commit): autofix * docs: update reamde Signed-off-by: badai-nguyen * style(pre-commit): autofix * fix: skip mask size as yolox output Signed-off-by: badai-nguyen --------- Signed-off-by: badai-nguyen Signed-off-by: Manato HIRABAYASHI Co-authored-by: Manato HIRABAYASHI Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Manato Hirabayashi <3022416+manato@users.noreply.github.com> --- .../tensorrt_common/tensorrt_common.hpp | 4 + perception/tensorrt_yolox/README.md | 84 ++++-- .../config/yolox_s_plus_opt.param.yaml | 22 ++ .../include/tensorrt_yolox/preprocess.hpp | 17 +- .../include/tensorrt_yolox/tensorrt_yolox.hpp | 67 ++++- .../tensorrt_yolox/tensorrt_yolox_node.hpp | 47 ++- .../launch/yolox_s_plus_opt.launch.xml | 12 +- perception/tensorrt_yolox/src/preprocess.cu | 35 +++ .../tensorrt_yolox/src/tensorrt_yolox.cpp | 269 +++++++++++++++++- .../src/tensorrt_yolox_node.cpp | 96 ++++++- 
.../src/yolox_single_image_inference_node.cpp | 4 +- 11 files changed, 614 insertions(+), 43 deletions(-) diff --git a/common/tensorrt_common/include/tensorrt_common/tensorrt_common.hpp b/common/tensorrt_common/include/tensorrt_common/tensorrt_common.hpp index aabaea7ca6339..6691c1fb9e97d 100644 --- a/common/tensorrt_common/include/tensorrt_common/tensorrt_common.hpp +++ b/common/tensorrt_common/include/tensorrt_common/tensorrt_common.hpp @@ -15,7 +15,9 @@ #ifndef TENSORRT_COMMON__TENSORRT_COMMON_HPP_ #define TENSORRT_COMMON__TENSORRT_COMMON_HPP_ +#ifndef YOLOX_STANDALONE #include +#endif #include #include @@ -86,6 +88,7 @@ struct BuildConfig profile_per_layer(profile_per_layer), clip_value(clip_value) { +#ifndef YOLOX_STANDALONE if ( std::find(valid_calib_type.begin(), valid_calib_type.end(), calib_type_str) == valid_calib_type.end()) { @@ -95,6 +98,7 @@ << "Default calibration type will be used: MinMax" << std::endl; std::cerr << message.str(); } +#endif } }; diff --git a/perception/tensorrt_yolox/README.md b/perception/tensorrt_yolox/README.md index ca407b1ff6811..af88e73cc04a6 100644 --- a/perception/tensorrt_yolox/README.md +++ b/perception/tensorrt_yolox/README.md @@ -2,13 +2,13 @@ ## Purpose -This package detects target objects e.g., cars, trucks, bicycles, and pedestrians on a image based on [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) model. +This package detects target objects (e.g., cars, trucks, bicycles, and pedestrians) and segments target classes (e.g., cars, trucks, buses, pedestrians, buildings, vegetation, roads, and sidewalks) in an image, based on the [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) model with a multi-header structure. ## Inner-workings / Algorithms ### Cite - + Zheng Ge, Songtao Liu, Feng Wang, Zeming Li, Jian Sun, "YOLOX: Exceeding YOLO Series in 2021", arXiv preprint arXiv:2107.08430, 2021 [[ref](https://arxiv.org/abs/2107.08430)] @@ -22,10 +22,12 @@ Zheng Ge, Songtao Liu, Feng Wang, Zeming Li, Jian Sun, "YOLOX: Exceeding YOLO Se ### Output -| Name | Type | Description | -| ------------- | -------------------------------------------------- | -------------------------------------------------- | -| `out/objects` | `tier4_perception_msgs/DetectedObjectsWithFeature` | The detected objects with 2D bounding boxes | -| `out/image` | `sensor_msgs/Image` | The image with 2D bounding boxes for visualization | +| Name | Type | Description | +| ---------------- | -------------------------------------------------- | ------------------------------------------------------------------- | +| `out/objects` | `tier4_perception_msgs/DetectedObjectsWithFeature` | The detected objects with 2D bounding boxes | +| `out/image` | `sensor_msgs/Image` | The image with 2D bounding boxes for visualization | +| `out/mask` | `sensor_msgs/Image` | The semantic segmentation mask | +| `out/color_mask` | `sensor_msgs/Image` | The colorized image of the semantic segmentation mask for visualization | ## Parameters @@ -40,20 +42,33 @@ Zheng Ge, Songtao Liu, Feng Wang, Zeming Li, Jian Sun, "YOLOX: Exceeding YOLO Se ### Node Parameters -| Name | Type | Default Value | Description | -| ----------------------------- | ------ | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model_path` | string | "" | The onnx file name for yolox model | -| `label_path` | string | "" | The 
label file with label names for detected objects written on it | -| `precision` | string | "fp16" | The inference mode: "fp32", "fp16", "int8" | -| `build_only` | bool | false | shutdown node after TensorRT engine file is built | -| `calibration_algorithm` | string | "MinMax" | Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: ["Entropy", ("Legacy" \| "Percentile"), "MinMax"] | -| `dla_core_id` | int | -1 | If positive ID value is specified, the node assign inference task to the DLA core | -| `quantize_first_layer` | bool | false | If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8 | -| `quantize_last_layer` | bool | false | If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8 | -| `profile_per_layer` | bool | false | If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose. | -| `clip_value` | double | 0.0 | If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration | -| `preprocess_on_gpu` | bool | true | If true, pre-processing is performed on GPU | -| `calibration_image_list_path` | string | "" | Path to a file which contains path to images. Those images will be used for int8 quantization. | +| Name | Type | Default Value | Description | +| -------------------------------------- | ------ | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model_path` | string | "" | The onnx file name for yolox model | +| `model_name` | string | "" | The yolox model name: <br>
"yolox-sPlus-T4-960x960-pseudo-finetune" for detection only, which can reduce resource usage and processing time <br>
"yolox-sPlus-opt-pseudoV2-T4-960x960-T4-seg16cls" for multi-task including semantic segmentation | +| `label_path` | string | "" | The label file with label names for detected objects written on it | +| `precision` | string | "fp16" | The inference mode: "fp32", "fp16", "int8" | +| `build_only` | bool | false | shutdown node after TensorRT engine file is built | +| `calibration_algorithm` | string | "MinMax" | Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: ["Entropy", ("Legacy" \| "Percentile"), "MinMax"] | +| `dla_core_id` | int | -1 | If positive ID value is specified, the node assigns the inference task to the DLA core | +| `quantize_first_layer` | bool | false | If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8 | +| `quantize_last_layer` | bool | false | If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8 | +| `profile_per_layer` | bool | false | If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purposes. | +| `clip_value` | double | 0.0 | If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration | +| `preprocess_on_gpu` | bool | true | If true, pre-processing is performed on GPU | +| `calibration_image_list_path` | string | "" | Path to a file which contains path to images. Those images will be used for int8 quantization. | +| `yolox_s_plus_opt_param_path` | string | "" | Path to parameter file | +| `is_publish_color_mask` | bool | false | If true, publish color mask for result visualization | +| `is_roi_overlap_segment` | bool | true | If true, overlay the detected object roi onto the semantic segmentation mask to avoid over-filtering the pointcloud, especially for small objects | +| `overlap_roi_score_threshold` | float | 0.3 | Minimum existence_probability of a detected roi considered to replace the segmentation | +| `roi_overlay_segment_label.UNKNOWN` | bool | true | If true, the roi of unknown objects will be overlaid onto the semantic segmentation mask. | +| `roi_overlay_segment_label.CAR` | bool | false | If true, the roi of car objects will be overlaid onto the semantic segmentation mask. | +| `roi_overlay_segment_label.TRUCK` | bool | false | If true, the roi of truck objects will be overlaid onto the semantic segmentation mask. | +| `roi_overlay_segment_label.BUS` | bool | false | If true, the roi of bus objects will be overlaid onto the semantic segmentation mask. | +| `roi_overlay_segment_label.MOTORCYCLE` | bool | true | If true, the roi of motorcycle objects will be overlaid onto the semantic segmentation mask. | +| `roi_overlay_segment_label.BICYCLE` | bool | true | If true, the roi of bicycle objects will be overlaid onto the semantic segmentation mask. | +| `roi_overlay_segment_label.PEDESTRIAN` | bool | true | If true, the roi of pedestrian objects will be overlaid onto the semantic segmentation mask. | +| `roi_overlay_segment_label.ANIMAL` | bool | true | If true, the roi of animal objects will be overlaid onto the semantic segmentation mask. 
| ## Assumptions / Known limits @@ -69,6 +84,27 @@ The label contained in detected 2D bounding boxes (i.e., `out/objects`) will be If other labels (case insensitive) are contained in the file specified via the `label_file` parameter, those are labeled as `UNKNOWN`, while detected rectangles are drawn in the visualization result (`out/image`). +The semantic segmentation mask is a gray-scale image in which each pixel holds the index of one of the following classes: + +| index | semantic name | +| ----- | ---------------- | +| 0 | road | +| 1 | building | +| 2 | wall | +| 3 | obstacle | +| 4 | traffic_light | +| 5 | traffic_sign | +| 6 | person | +| 7 | vehicle | +| 8 | bike | +| 9 | road | +| 10 | sidewalk | +| 11 | roadPaint | +| 12 | curbstone | +| 13 | crosswalk_others | +| 14 | vegetation | +| 15 | sky | + ## Onnx model A sample model (named `yolox-tiny.onnx`) is downloaded by ansible script on env preparation stage, if not, please, follow [Manual downloading of artifacts](https://github.com/autowarefoundation/autoware/tree/main/ansible/roles/artifacts). @@ -79,11 +115,12 @@ hence these parameters are ignored when users specify ONNX models including this This package accepts both `EfficientNMS_TRT` attached ONNXs and [models published from the official YOLOX repository](https://github.com/Megvii-BaseDetection/YOLOX/tree/main/demo/ONNXRuntime#download-onnx-models) (we referred to them as "plain" models). -In addition to `yolox-tiny.onnx`, a custom model named `yolox-sPlus-opt.onnx` is either available. -This model is based on YOLOX-s and tuned to perform more accurate detection with almost comparable execution speed with `yolox-tiny`. +In addition to `yolox-tiny.onnx`, a custom model named `yolox-sPlus-opt-pseudoV2-T4-960x960-T4-seg16cls` is also available. +This model is a multi-header model based on YOLOX-s, tuned to perform more accurate detection at an execution speed almost comparable to `yolox-tiny`. To get better results with this model, users are recommended to use some specific running arguments such as `precision:=int8`, `calibration_algorithm:=Entropy`, `clip_value:=6.0`. Users can refer `launch/yolox_sPlus_opt.launch.xml` to see how this model can be used. +Besides the detection results, this model also outputs an image semantic segmentation result for pointcloud filtering purposes. All models are automatically converted to TensorRT format. These converted files will be saved in the same directory as specified ONNX files @@ -146,7 +183,7 @@ Please refer [the official document](https://github.com/Megvii-BaseDetection/YOL ## Label file -A sample label file (named `label.txt`)is also downloaded automatically during env preparation process +A sample label file (named `label.txt`) and a semantic segmentation color map file (named `semseg_color_map.csv`) are also downloaded automatically during env preparation process (**NOTE:** This file is incompatible with models that output labels for the COCO dataset (e.g., models from the official YOLOX repository)). This file represents the correspondence between class index (integer outputted from YOLOX network) and with labels according to the order in this file. 
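For illustration, a minimal sketch (not part of this patch) of how a downstream node could consume the published mask, assuming the 16-class model above; the callback name and subscription wiring are hypothetical:

```cpp
// Hypothetical consumer of `out/mask` (sketch only; node boilerplate and topic
// remapping omitted). The mask is published as MONO8, one class index per pixel.
#include <cv_bridge/cv_bridge.h>
#include <opencv2/core.hpp>
#include <sensor_msgs/image_encodings.hpp>
#include <sensor_msgs/msg/image.hpp>

void onMask(const sensor_msgs::msg::Image::ConstSharedPtr msg)
{
  const cv::Mat mask =
    cv_bridge::toCvShare(msg, sensor_msgs::image_encodings::MONO8)->image;
  // Binary mask (255 / 0) of the pixels labeled person (index 6 in the table above).
  const cv::Mat person_pixels = (mask == 6);
}
```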
- - +- diff --git a/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml b/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml index bc67173442094..57c1b40c44a4d 100644 --- a/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml +++ b/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml @@ -1,7 +1,29 @@ +# cspell: ignore semseg /**: ros__parameters: + # refine segmentation mask by overlaying the roi class + # disable when semantic segmentation accuracy is good enough + is_roi_overlap_segment: true + + # minimum existence_probability of a detected roi considered to replace the segmentation + overlap_roi_score_threshold: 0.3 + + # publish color mask for result visualization + is_publish_color_mask: false + + roi_overlay_segment_label: + UNKNOWN : true + CAR : false + TRUCK : false + BUS : false + MOTORCYCLE : true + BICYCLE : true + PEDESTRIAN : true + ANIMAL: true + model_path: "$(var data_path)/tensorrt_yolox/$(var model_name).onnx" label_path: "$(var data_path)/tensorrt_yolox/label.txt" + color_map_path: "$(var data_path)/tensorrt_yolox/semseg_color_map.csv" score_threshold: 0.35 nms_threshold: 0.7 precision: "int8" # Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]. diff --git a/perception/tensorrt_yolox/include/tensorrt_yolox/preprocess.hpp b/perception/tensorrt_yolox/include/tensorrt_yolox/preprocess.hpp index 3549ae35e70ea..faac6de4e3284 100644 --- a/perception/tensorrt_yolox/include/tensorrt_yolox/preprocess.hpp +++ b/perception/tensorrt_yolox/include/tensorrt_yolox/preprocess.hpp @@ -179,6 +179,21 @@ extern void crop_resize_bilinear_letterbox_nhwc_to_nchw32_batch_gpu( extern void multi_scale_resize_bilinear_letterbox_nhwc_to_nchw32_batch_gpu( float * dst, unsigned char * src, int d_w, int d_h, int d_c, Roi * d_roi, int s_w, int s_h, int s_c, int batch, float norm, cudaStream_t stream); -} // namespace tensorrt_yolox +/** + * @brief Argmax on GPU + * @param[out] dst processed image + * @param[in] src probability map + * @param[in] d_w width for output + * @param[in] d_h height for output + * @param[in] s_w width for input + * @param[in] s_h height for input + * @param[in] s_c channel for input + * @param[in] batch batch size + * @param[in] stream cuda stream + */ +extern void argmax_gpu( + unsigned char * dst, float * src, int d_w, int d_h, int s_w, int s_h, int s_c, int batch, + cudaStream_t stream); +} // namespace tensorrt_yolox #endif // TENSORRT_YOLOX__PREPROCESS_HPP_ diff --git a/perception/tensorrt_yolox/include/tensorrt_yolox/tensorrt_yolox.hpp b/perception/tensorrt_yolox/include/tensorrt_yolox/tensorrt_yolox.hpp index c42222a70c96b..d287c8a44d4cf 100644 --- a/perception/tensorrt_yolox/include/tensorrt_yolox/tensorrt_yolox.hpp +++ b/perception/tensorrt_yolox/include/tensorrt_yolox/tensorrt_yolox.hpp @@ -52,6 +52,13 @@ struct GridAndStride int stride; }; +typedef struct Colormap_ +{ + int id; + std::string name; + std::vector color; +} Colormap; + /** * @class TrtYoloX * @brief TensorRT YOLOX for faster inference @@ -85,7 +92,7 @@ class TrtYoloX const bool use_gpu_preprocess = false, std::string calibration_image_list_file = std::string(), const double norm_factor = 1.0, [[maybe_unused]] const std::string & cache_dir = "", const tensorrt_common::BatchConfig & batch_config = {1, 1, 1}, - const size_t max_workspace_size = (1 << 30)); + const size_t max_workspace_size = (1 << 30), const std::string & color_map_path = ""); /** * @brief Deconstruct TrtYoloX */ @@ -96,7 +103,9 @@ * @brief run inference including pre-process and post-process * @param[out] objects results for 
object detection * @param[in] images batched images */ - bool doInference(const std::vector & images, ObjectArrays & objects); + bool doInference( + const std::vector & images, ObjectArrays & objects, std::vector & masks, + std::vector & color_masks); /** * @brief run inference including pre-process and post-process * @param[out] objects results for object detection * @param[in] images batched images */ @@ -130,6 +139,22 @@ */ void printProfiling(void); + /** + * @brief get the number of multitask heads + */ + int getMultitaskNum(void); + + /** + * @brief get a colorized mask from a gray-scale mask using a specific colormap + * @param[in] colormap colormap for masks + * @param[in] mask gray-scale mask whose each pixel holds a class index + * @param[out] colorized_mask colorized mask + */ + void getColorizedMask( + const std::vector & colormap, const cv::Mat & mask, + cv::Mat & colorized_mask); + inline std::vector getColorMap() { return sematic_color_map_; } + private: /** * @brief run preprocess including resizing, letterbox, NHWC2NCHW and toFloat on CPU @@ -177,7 +202,9 @@ const cv::Mat & images, int batch_size, ObjectArrays & objects); bool feedforward(const std::vector & images, ObjectArrays & objects); - bool feedforwardAndDecode(const std::vector & images, ObjectArrays & objects); + bool feedforwardAndDecode( + const std::vector & images, ObjectArrays & objects, std::vector & masks, + std::vector & color_masks); void decodeOutputs(float * prob, ObjectArray & objects, float scale, cv::Size & img_size) const; void generateGridsAndStride( const int target_w, const int target_h, const std::vector & strides, @@ -206,6 +233,26 @@ void nmsSortedBboxes( const ObjectArray & face_objects, std::vector & picked, float nms_threshold) const; + /** + * @brief get a mask image for a segmentation head + * @param[in] prob probability map + * @param[in] dims dimension for probability map + * @param[in] out_w mask width excluding letterbox + * @param[in] out_h mask height excluding letterbox + * @return mask image whose each pixel holds an argmax class index + */ + cv::Mat getMaskImage(float * prob, nvinfer1::Dims dims, int out_w, int out_h); + + /** + * @brief get a mask image on GPUs for a segmentation head + * @param[in] d_prob probability map on device + * @param[in] dims dimension for probability map + * @param[in] out_w mask width excluding letterbox + * @param[in] out_h mask height excluding letterbox + * @param[in] b current batch + * @return mask image whose each pixel holds an argmax class index + */ + cv::Mat getMaskImageGpu(float * d_prob, nvinfer1::Dims dims, int out_w, int out_h, int b); + std::unique_ptr trt_common_; std::vector input_h_; @@ -249,6 +296,20 @@ CudaUniquePtrHost roi_h_; // device pointer for ROI CudaUniquePtr roi_d_; + + // number of multitask heads (0 when the model is detection only) + int multitask_; + // buffers and element counts for segmentation heads + CudaUniquePtr segmentation_out_prob_d_; + CudaUniquePtrHost segmentation_out_prob_h_; + size_t segmentation_out_elem_num_; + size_t segmentation_out_elem_num_per_batch_; + std::vector segmentation_masks_; + // host buffer for argmax postprocessing on GPU + CudaUniquePtrHost argmax_buf_h_; + // device buffer for argmax postprocessing on GPU + CudaUniquePtr argmax_buf_d_; + std::vector sematic_color_map_; }; } // namespace tensorrt_yolox diff --git a/perception/tensorrt_yolox/include/tensorrt_yolox/tensorrt_yolox_node.hpp b/perception/tensorrt_yolox/include/tensorrt_yolox/tensorrt_yolox_node.hpp index 6044148a932ae..48ffedb98441d 100644 --- a/perception/tensorrt_yolox/include/tensorrt_yolox/tensorrt_yolox_node.hpp +++ b/perception/tensorrt_yolox/include/tensorrt_yolox/tensorrt_yolox_node.hpp @@ -15,6 +15,8 @@ #ifndef TENSORRT_YOLOX__TENSORRT_YOLOX_NODE_HPP_ #define 
TENSORRT_YOLOX__TENSORRT_YOLOX_NODE_HPP_ +#include "object_recognition_utils/object_recognition_utils.hpp" + #include #include #include @@ -25,6 +27,7 @@ #include #include #include +#include #if __has_include() #include @@ -41,10 +44,30 @@ namespace tensorrt_yolox { +// cspell: ignore Semseg using LabelMap = std::map; - +using Label = tier4_perception_msgs::msg::Semantic; class TrtYoloXNode : public rclcpp::Node { + struct RoiOverlaySemsegLabel + { + bool UNKNOWN; + bool CAR; + bool TRUCK; + bool BUS; + bool MOTORCYCLE; + bool BICYCLE; + bool PEDESTRIAN; + bool ANIMAL; + bool isOverlay(const uint8_t label) const + { + return (label == Label::UNKNOWN && UNKNOWN) || (label == Label::CAR && CAR) || + (label == Label::TRUCK && TRUCK) || (label == Label::BUS && BUS) || + (label == Label::ANIMAL && ANIMAL) || (label == Label::MOTORBIKE && MOTORCYCLE) || + (label == Label::BICYCLE && BICYCLE) || (label == Label::PEDESTRIAN && PEDESTRIAN); + }; + }; // struct RoiOverlaySemsegLabel + public: explicit TrtYoloXNode(const rclcpp::NodeOptions & node_options); @@ -53,8 +76,13 @@ class TrtYoloXNode : public rclcpp::Node void onImage(const sensor_msgs::msg::Image::ConstSharedPtr msg); bool readLabelFile(const std::string & label_path); void replaceLabelMap(); - + void overlapSegmentByRoi( + const tensorrt_yolox::Object & object, cv::Mat & mask, const int width, const int height); + int mapRoiLabel2SegLabel(const int32_t roi_label_index); image_transport::Publisher image_pub_; + image_transport::Publisher mask_pub_; + image_transport::Publisher color_mask_pub_; + rclcpp::Publisher::SharedPtr objects_pub_; image_transport::Subscriber image_sub_; @@ -63,6 +91,21 @@ class TrtYoloXNode : public rclcpp::Node LabelMap label_map_; std::unique_ptr trt_yolox_; + bool is_roi_overlap_segment_; + bool is_publish_color_mask_; + float overlap_roi_score_threshold_; + // TODO(badai-nguyen): change to function + std::map remap_roi_to_semantic_ = { + {"UNKNOWN", 3}, // other + {"ANIMAL", 0}, // other + {"PEDESTRIAN", 6}, // person + {"CAR", 7}, // car + {"TRUCK", 7}, // truck + {"BUS", 7}, // bus + {"BICYCLE", 8}, // bicycle + {"MOTORBIKE", 8}, // motorcycle + }; + RoiOverlaySemsegLabel roi_overlay_segment_labels_; std::unique_ptr> stop_watch_ptr_; std::unique_ptr debug_publisher_; }; diff --git a/perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml b/perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml index dd15eda2913ce..e4436a0424be5 100644 --- a/perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml +++ b/perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml @@ -1,22 +1,30 @@ - + - + + + + + diff --git a/perception/tensorrt_yolox/src/preprocess.cu b/perception/tensorrt_yolox/src/preprocess.cu index 3c3087c536f10..f384de2975aa4 100644 --- a/perception/tensorrt_yolox/src/preprocess.cu +++ b/perception/tensorrt_yolox/src/preprocess.cu @@ -594,4 +594,39 @@ void multi_scale_resize_bilinear_letterbox_nhwc_to_nchw32_batch_gpu( cuda_gridsize(N), block, 0, stream>>>(N, dst, src, d_h, d_w, s_h, s_w, d_roi, norm, batch); } +__global__ void argmax_gpu_kernel( + int N, unsigned char * dst, float * src, int dst_h, int dst_w, int src_c, int src_h, int src_w, + int batch) +{ + // NHWC + int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; + + if (index >= N) return; + int c = 0; + int w = index % dst_w; + int h = index / (dst_w); + + int b; + for (b = 0; b < batch; b++) { + float max_prob = 0.0; + int max_index = 0; + int dst_index = w + dst_w * h + b * dst_h * dst_w; + for (c = 0; 
c < src_c; c++) { + int src_index = w + src_w * h + c * src_h * src_w + b * src_c * src_h * src_w; + max_index = max_prob < src[src_index] ? c : max_index; + max_prob = max_prob < src[src_index] ? src[src_index] : max_prob; + } + dst[dst_index] = max_index; + } +} + +void argmax_gpu( + unsigned char * dst, float * src, int d_w, int d_h, int s_w, int s_h, int s_c, int batch, + cudaStream_t stream) +{ + int N = d_w * d_h; + argmax_gpu_kernel<<>>( + N, dst, src, d_h, d_w, s_c, s_h, s_w, batch); +} + } // namespace tensorrt_yolox diff --git a/perception/tensorrt_yolox/src/tensorrt_yolox.cpp b/perception/tensorrt_yolox/src/tensorrt_yolox.cpp index 11f68f580acc2..af6cecd2eff7d 100644 --- a/perception/tensorrt_yolox/src/tensorrt_yolox.cpp +++ b/perception/tensorrt_yolox/src/tensorrt_yolox.cpp @@ -15,12 +15,18 @@ #include "cuda_utils/cuda_check_error.hpp" #include "cuda_utils/cuda_unique_ptr.hpp" +#include #include #include #include +#include + #include +#include #include +#include +#include #include #include #include @@ -97,6 +103,53 @@ std::vector loadImageList(const std::string & filename, const std:: } return fileList; } + +std::vector get_seg_colormap(const std::string & filename) +{ + std::vector seg_cmap; + if (filename != "not-specified") { + std::vector color_list = loadListFromTextFile(filename); + for (int i = 0; i < static_cast(color_list.size()); i++) { + if (i == 0) { + // Skip header + continue; + } + std::string colormapString = color_list[i]; + tensorrt_yolox::Colormap cmap; + std::vector rgb; + size_t npos = colormapString.find_first_of(','); + assert(npos != std::string::npos); + std::string substr = colormapString.substr(0, npos); + int id = static_cast(std::stoi(trim(substr))); + colormapString.erase(0, npos + 1); + + npos = colormapString.find_first_of(','); + assert(npos != std::string::npos); + substr = colormapString.substr(0, npos); + std::string name = (trim(substr)); + cmap.id = id; + cmap.name = name; + colormapString.erase(0, npos + 1); + while (!colormapString.empty()) { + size_t npos = colormapString.find_first_of(','); + if (npos != std::string::npos) { + substr = colormapString.substr(0, npos); + unsigned char c = (unsigned char)std::stoi(trim(substr)); + cmap.color.push_back(c); + colormapString.erase(0, npos + 1); + } else { + unsigned char c = (unsigned char)std::stoi(trim(colormapString)); + cmap.color.push_back(c); + break; + } + } + + seg_cmap.push_back(cmap); + } + } + return seg_cmap; +} + } // anonymous namespace namespace tensorrt_yolox @@ -106,12 +159,14 @@ TrtYoloX::TrtYoloX( const float score_threshold, const float nms_threshold, tensorrt_common::BuildConfig build_config, const bool use_gpu_preprocess, std::string calibration_image_list_path, const double norm_factor, [[maybe_unused]] const std::string & cache_dir, const tensorrt_common::BatchConfig & batch_config, - const size_t max_workspace_size) + const size_t max_workspace_size, const std::string & color_map_path) { src_width_ = -1; src_height_ = -1; norm_factor_ = norm_factor; batch_size_ = batch_config[2]; + multitask_ = 0; + sematic_color_map_ = get_seg_colormap(color_map_path); if (precision == "int8") { if (build_config.clip_value <= 0.0) { if (calibration_image_list_path.empty()) { @@ -196,9 +251,18 @@ TrtYoloX::TrtYoloX( needs_output_decode_ = false; break; default: + needs_output_decode_ = true; + // The following three values are considered only if the specified model is plain one + num_class_ = num_class; + score_threshold_ = score_threshold; + nms_threshold_ = nms_threshold; + // Todo 
: Support multiple segmentation heads + multitask_++; + /* std::stringstream s; s << "\"" << model_path << "\" is unsupported format"; throw std::runtime_error{s.str()}; + */ } // GPU memory allocation @@ -234,10 +298,31 @@ TrtYoloX::TrtYoloX( out_scores_d_ = cuda_utils::make_unique(batch_config[2] * max_detections_); out_classes_d_ = cuda_utils::make_unique(batch_config[2] * max_detections_); } + if (multitask_) { + // Allocate buffer for segmentation + segmentation_out_elem_num_ = 0; + for (int m = 0; m < multitask_; m++) { + const auto output_dims = + trt_common_->getBindingDimensions(m + 2); // 0 : input, 1 : output for detections + size_t out_elem_num = std::accumulate( + output_dims.d + 1, output_dims.d + output_dims.nbDims, 1, std::multiplies()); + out_elem_num = out_elem_num * batch_config[2]; + segmentation_out_elem_num_ += out_elem_num; + } + segmentation_out_elem_num_per_batch_ = + static_cast(segmentation_out_elem_num_ / batch_config[2]); + segmentation_out_prob_d_ = cuda_utils::make_unique(segmentation_out_elem_num_); + segmentation_out_prob_h_ = + cuda_utils::make_unique_host(segmentation_out_elem_num_, cudaHostAllocPortable); + } if (use_gpu_preprocess) { use_gpu_preprocess_ = true; image_buf_h_ = nullptr; image_buf_d_ = nullptr; + if (multitask_) { + argmax_buf_h_ = nullptr; + argmax_buf_d_ = nullptr; + } } else { use_gpu_preprocess_ = false; } @@ -252,6 +337,9 @@ TrtYoloX::~TrtYoloX() if (image_buf_d_) { image_buf_d_.reset(); } + if (argmax_buf_d_) { + argmax_buf_d_.reset(); + } } } @@ -294,6 +382,26 @@ void TrtYoloX::initPreprocessBuffer(int width, int height) width * height * 3 * batch_size_, cudaHostAllocWriteCombined); image_buf_d_ = cuda_utils::make_unique(width * height * 3 * batch_size_); } + if (multitask_) { + size_t argmax_out_elem_num = 0; + for (int m = 0; m < multitask_; m++) { + const auto output_dims = + trt_common_->getBindingDimensions(m + 2); // 0 : input, 1 : output for detections + const float scale = std::min( + output_dims.d[3] / static_cast(width), + output_dims.d[2] / static_cast(height)); + int out_w = static_cast(width * scale); + int out_h = static_cast(height * scale); + // size_t out_elem_num = std::accumulate( + // output_dims.d + 1, output_dims.d + output_dims.nbDims, 1, std::multiplies()); + // out_elem_num = out_elem_num * batch_size_; + size_t out_elem_num = out_w * out_h * batch_size_; + argmax_out_elem_num += out_elem_num; + } + argmax_buf_h_ = + cuda_utils::make_unique_host(argmax_out_elem_num, cudaHostAllocPortable); + argmax_buf_d_ = cuda_utils::make_unique(argmax_out_elem_num); + } } } @@ -321,6 +429,12 @@ void TrtYoloX::preprocessGpu(const std::vector & images) if (image_buf_d_) { image_buf_d_.reset(); } + if (argmax_buf_h_) { + argmax_buf_h_.reset(); + } + if (argmax_buf_d_) { + argmax_buf_d_.reset(); + } } } src_width_ = width; @@ -333,6 +447,7 @@ void TrtYoloX::preprocessGpu(const std::vector & images) const float input_height = static_cast(input_dims.d[2]); const float input_width = static_cast(input_dims.d[3]); int b = 0; + size_t argmax_out_elem_num = 0; for (const auto & image : images) { if (!image_buf_h_) { const float scale = std::min(input_width / image.cols, input_height / image.rows); @@ -348,7 +463,31 @@ void TrtYoloX::preprocessGpu(const std::vector & images) image_buf_h_.get() + index, &image.data[0], image.cols * image.rows * 3 * sizeof(unsigned char)); b++; + + if (multitask_) { + for (int m = 0; m < multitask_; m++) { + const auto output_dims = + trt_common_->getBindingDimensions(m + 2); // 0: input, 1: output for 
detections + const float scale = std::min( + output_dims.d[3] / static_cast(image.cols), + output_dims.d[2] / static_cast(image.rows)); + int out_w = static_cast(image.cols * scale); + int out_h = static_cast(image.rows * scale); + argmax_out_elem_num += out_w * out_h * batch_size; + } + } + } + + if (multitask_) { + if (!argmax_buf_h_) { + argmax_buf_h_ = + cuda_utils::make_unique_host(argmax_out_elem_num, cudaHostAllocPortable); + } + if (!argmax_buf_d_) { + argmax_buf_d_ = cuda_utils::make_unique(argmax_out_elem_num); + } } + // Copy into device memory CHECK_CUDA_ERROR(cudaMemcpyAsync( image_buf_d_.get(), image_buf_h_.get(), @@ -406,7 +545,9 @@ void TrtYoloX::preprocess(const std::vector & images) // No Need for Sync } -bool TrtYoloX::doInference(const std::vector & images, ObjectArrays & objects) +bool TrtYoloX::doInference( + const std::vector & images, ObjectArrays & objects, std::vector & masks, + [[maybe_unused]] std::vector & color_masks) { if (!trt_common_->isInitialized()) { return false; @@ -419,7 +560,7 @@ bool TrtYoloX::doInference(const std::vector & images, ObjectArrays & o } if (needs_output_decode_) { - return feedforwardAndDecode(images, objects); + return feedforwardAndDecode(images, objects, masks, color_masks); } else { return feedforward(images, objects); } @@ -659,6 +800,8 @@ void TrtYoloX::multiScalePreprocess(const cv::Mat & image, const std::vector & images, ObjectArrays & objects, const std::vector & rois) { + std::vector masks; + std::vector color_masks; if (!trt_common_->isInitialized()) { return false; } @@ -669,7 +812,7 @@ bool TrtYoloX::doInferenceWithRoi( } if (needs_output_decode_) { - return feedforwardAndDecode(images, objects); + return feedforwardAndDecode(images, objects, masks, color_masks); } else { return feedforward(images, objects); } @@ -747,10 +890,14 @@ bool TrtYoloX::feedforward(const std::vector & images, ObjectArrays & o return true; } -bool TrtYoloX::feedforwardAndDecode(const std::vector & images, ObjectArrays & objects) +bool TrtYoloX::feedforwardAndDecode( + const std::vector & images, ObjectArrays & objects, std::vector & out_masks, + [[maybe_unused]] std::vector & color_masks) { std::vector buffers = {input_d_.get(), out_prob_d_.get()}; - + if (multitask_) { + buffers = {input_d_.get(), out_prob_d_.get(), segmentation_out_prob_d_.get()}; + } trt_common_->enqueueV2(buffers.data(), *stream_, nullptr); const auto batch_size = images.size(); @@ -758,6 +905,11 @@ bool TrtYoloX::feedforwardAndDecode(const std::vector & images, ObjectA CHECK_CUDA_ERROR(cudaMemcpyAsync( out_prob_h_.get(), out_prob_d_.get(), sizeof(float) * out_elem_num_, cudaMemcpyDeviceToHost, *stream_)); + if (multitask_ && !use_gpu_preprocess_) { + CHECK_CUDA_ERROR(cudaMemcpyAsync( + segmentation_out_prob_h_.get(), segmentation_out_prob_d_.get(), + sizeof(float) * segmentation_out_elem_num_, cudaMemcpyDeviceToHost, *stream_)); + } cudaStreamSynchronize(*stream_); objects.clear(); @@ -766,7 +918,43 @@ bool TrtYoloX::feedforwardAndDecode(const std::vector & images, ObjectA float * batch_prob = out_prob_h_.get() + (i * out_elem_num_per_batch_); ObjectArray object_array; decodeOutputs(batch_prob, object_array, scales_[i], image_size); + // add refine mask using object objects.emplace_back(object_array); + if (multitask_) { + segmentation_masks_.clear(); + + size_t counter = 0; + int batch = + static_cast(segmentation_out_elem_num_ / segmentation_out_elem_num_per_batch_); + for (int m = 0; m < multitask_; m++) { + const auto output_dims = + trt_common_->getBindingDimensions(m 
+ 2); // 0 : input, 1 : output for detections + size_t out_elem_num = std::accumulate( + output_dims.d + 1, output_dims.d + output_dims.nbDims, 1, std::multiplies()); + out_elem_num = out_elem_num * batch; + const float scale = std::min( + output_dims.d[3] / static_cast(image_size.width), + output_dims.d[2] / static_cast(image_size.height)); + int out_w = static_cast(image_size.width * scale); + int out_h = static_cast(image_size.height * scale); + cv::Mat mask; + if (use_gpu_preprocess_) { + float * d_segmentation_results = + segmentation_out_prob_d_.get() + (i * segmentation_out_elem_num_per_batch_); + mask = getMaskImageGpu(&(d_segmentation_results[counter]), output_dims, out_w, out_h, i); + } else { + float * segmentation_results = + segmentation_out_prob_h_.get() + (i * segmentation_out_elem_num_per_batch_); + mask = getMaskImage(&(segmentation_results[counter]), output_dims, out_w, out_h); + } + segmentation_masks_.emplace_back(std::move(mask)); + counter += out_elem_num; + } + // the semantic segmentation result is fixed as the first task + out_masks.at(i) = segmentation_masks_.at(0); + } else { + continue; + } } return true; } @@ -1036,4 +1224,73 @@ void TrtYoloX::nmsSortedBboxes( } } +cv::Mat TrtYoloX::getMaskImageGpu(float * d_prob, nvinfer1::Dims dims, int out_w, int out_h, int b) +{ + // NCHW + int classes = dims.d[1]; + int height = dims.d[2]; + int width = dims.d[3]; + cv::Mat mask = cv::Mat::zeros(out_h, out_w, CV_8UC1); + int index = b * out_w * out_h; + argmax_gpu( + (unsigned char *)argmax_buf_d_.get() + index, d_prob, out_w, out_h, width, height, classes, 1, + *stream_); + CHECK_CUDA_ERROR(cudaMemcpyAsync( + argmax_buf_h_.get(), argmax_buf_d_.get(), sizeof(unsigned char) * 1 * out_w * out_h, + cudaMemcpyDeviceToHost, *stream_)); + cudaStreamSynchronize(*stream_); + std::memcpy(mask.data, argmax_buf_h_.get() + index, sizeof(unsigned char) * 1 * out_w * out_h); + return mask; +} + +cv::Mat TrtYoloX::getMaskImage(float * prob, nvinfer1::Dims dims, int out_w, int out_h) +{ + // NCHW + int classes = dims.d[1]; + int height = dims.d[2]; + int width = dims.d[3]; + cv::Mat mask = cv::Mat::zeros(out_h, out_w, CV_8UC1); + // argmax + // #pragma omp parallel for + for (int y = 0; y < out_h; y++) { + for (int x = 0; x < out_w; x++) { + float max = 0.0; + int index = 0; + for (int c = 0; c < classes; c++) { + float value = prob[c * height * width + y * width + x]; + if (max < value) { + max = value; + index = c; + } + } + mask.at(y, x) = index; + } + } + return mask; +} + +int TrtYoloX::getMultitaskNum(void) +{ + return multitask_; +} + +void TrtYoloX::getColorizedMask( + const std::vector & colormap, const cv::Mat & mask, cv::Mat & cmask) +{ + int width = mask.cols; + int height = mask.rows; + if ((cmask.cols != mask.cols) || (cmask.rows != mask.rows)) { + throw std::runtime_error("input and output images have different sizes."); + return; + } + for (int y = 0; y < height; y++) { + for (int x = 0; x < width; x++) { + unsigned char id = mask.at(y, x); + cmask.at(y, x)[0] = colormap[id].color[2]; + cmask.at(y, x)[1] = colormap[id].color[1]; + cmask.at(y, x)[2] = colormap[id].color[0]; + } + } +} + } // namespace tensorrt_yolox diff --git a/perception/tensorrt_yolox/src/tensorrt_yolox_node.cpp b/perception/tensorrt_yolox/src/tensorrt_yolox_node.cpp index 4ee18b99e4bc6..c13384918bd9c 100644 --- a/perception/tensorrt_yolox/src/tensorrt_yolox_node.cpp +++ b/perception/tensorrt_yolox/src/tensorrt_yolox_node.cpp @@ -33,7 +33,7 @@ TrtYoloXNode::TrtYoloXNode(const rclcpp::NodeOptions & node_options) 
stop_watch_ptr_ = std::make_unique>(); debug_publisher_ = - std::make_unique(this, "tensorrt_yolox"); + std::make_unique(this, this->get_name()); stop_watch_ptr_->tic("cyclic_time"); stop_watch_ptr_->tic("processing_time"); } @@ -96,25 +96,51 @@ TrtYoloXNode::TrtYoloXNode(const rclcpp::NodeOptions & node_options) ("Path to a file which contains path to images." "Those images will be used for int8 quantization.")); + std::string color_map_path = declare_parameter_with_description( + "color_map_path", "", ("Path to a file which contains path to color map.")); if (!readLabelFile(label_path)) { RCLCPP_ERROR(this->get_logger(), "Could not find label file"); rclcpp::shutdown(); } + + is_roi_overlap_segment_ = declare_parameter("is_roi_overlap_segment"); + is_publish_color_mask_ = declare_parameter("is_publish_color_mask"); + overlap_roi_score_threshold_ = declare_parameter("overlap_roi_score_threshold"); + roi_overlay_segment_labels_.UNKNOWN = + declare_parameter("roi_overlay_segment_label.UNKNOWN"); + roi_overlay_segment_labels_.CAR = declare_parameter("roi_overlay_segment_label.CAR"); + roi_overlay_segment_labels_.TRUCK = declare_parameter("roi_overlay_segment_label.TRUCK"); + roi_overlay_segment_labels_.BUS = declare_parameter("roi_overlay_segment_label.BUS"); + roi_overlay_segment_labels_.MOTORCYCLE = + declare_parameter("roi_overlay_segment_label.MOTORCYCLE"); + roi_overlay_segment_labels_.BICYCLE = + declare_parameter("roi_overlay_segment_label.BICYCLE"); + roi_overlay_segment_labels_.PEDESTRIAN = + declare_parameter("roi_overlay_segment_label.PEDESTRIAN"); + roi_overlay_segment_labels_.ANIMAL = declare_parameter("roi_overlay_segment_label.ANIMAL"); replaceLabelMap(); tensorrt_common::BuildConfig build_config( calibration_algorithm, dla_core_id, quantize_first_layer, quantize_last_layer, profile_per_layer, clip_value); + const double norm_factor = 1.0; + const std::string cache_dir = ""; + const tensorrt_common::BatchConfig batch_config{1, 1, 1}; + const size_t max_workspace_size = (1 << 30); + trt_yolox_ = std::make_unique( model_path, precision, label_map_.size(), score_threshold, nms_threshold, build_config, - preprocess_on_gpu, calibration_image_list_path); + preprocess_on_gpu, calibration_image_list_path, norm_factor, cache_dir, batch_config, + max_workspace_size, color_map_path); timer_ = rclcpp::create_timer(this, get_clock(), 100ms, std::bind(&TrtYoloXNode::onConnect, this)); objects_pub_ = this->create_publisher( "~/out/objects", 1); + mask_pub_ = image_transport::create_publisher(this, "~/out/mask"); + color_mask_pub_ = image_transport::create_publisher(this, "~/out/color_mask"); image_pub_ = image_transport::create_publisher(this, "~/out/image"); if (declare_parameter("build_only", false)) { @@ -129,7 +155,8 @@ void TrtYoloXNode::onConnect() if ( objects_pub_->get_subscription_count() == 0 && objects_pub_->get_intra_process_subscription_count() == 0 && - image_pub_.getNumSubscribers() == 0) { + image_pub_.getNumSubscribers() == 0 && mask_pub_.getNumSubscribers() == 0 && + color_mask_pub_.getNumSubscribers() == 0) { image_sub_.shutdown(); } else if (!image_sub_) { image_sub_ = image_transport::create_subscription( @@ -154,10 +181,16 @@ void TrtYoloXNode::onImage(const sensor_msgs::msg::Image::ConstSharedPtr msg) const auto height = in_image_ptr->image.rows; tensorrt_yolox::ObjectArrays objects; - if (!trt_yolox_->doInference({in_image_ptr->image}, objects)) { + std::vector masks = {cv::Mat(cv::Size(height, width), CV_8UC1, cv::Scalar(0))}; + std::vector color_masks = { + 
cv::Mat(cv::Size(height, width), CV_8UC3, cv::Scalar(0, 0, 0))}; + + if (!trt_yolox_->doInference({in_image_ptr->image}, objects, masks, color_masks)) { RCLCPP_WARN(this->get_logger(), "Fail to inference"); return; } + auto & mask = masks.at(0); + for (const auto & yolox_object : objects.at(0)) { tier4_perception_msgs::msg::DetectedObjectWithFeature object; object.feature.roi.x_offset = yolox_object.x_offset; @@ -177,9 +210,21 @@ cv::rectangle( in_image_ptr->image, cv::Point(left, top), cv::Point(right, bottom), cv::Scalar(0, 0, 255), 3, 8, 0); + // Refine mask: replacing segmentation mask by roi class + // This should be removed when the segmentation accuracy is high enough + if (is_roi_overlap_segment_ && trt_yolox_->getMultitaskNum() > 0) { + overlapSegmentByRoi(yolox_object, mask, width, height); + } + } + // TODO(badai-nguyen): consider changing to 4-bit data transfer + if (trt_yolox_->getMultitaskNum() > 0) { + sensor_msgs::msg::Image::SharedPtr out_mask_msg = + cv_bridge::CvImage(std_msgs::msg::Header(), sensor_msgs::image_encodings::MONO8, mask) + .toImageMsg(); + out_mask_msg->header = msg->header; + mask_pub_.publish(out_mask_msg); } image_pub_.publish(in_image_ptr->toImageMsg()); - out_objects.header = msg->header; objects_pub_->publish(out_objects); @@ -198,6 +243,16 @@ void TrtYoloXNode::onImage(const sensor_msgs::msg::Image::ConstSharedPtr msg) debug_publisher_->publish( "debug/pipeline_latency_ms", pipeline_latency_ms); } + + if (is_publish_color_mask_ && trt_yolox_->getMultitaskNum() > 0) { + cv::Mat color_mask = cv::Mat::zeros(mask.rows, mask.cols, CV_8UC3); + trt_yolox_->getColorizedMask(trt_yolox_->getColorMap(), mask, color_mask); + sensor_msgs::msg::Image::SharedPtr output_color_mask_msg = + cv_bridge::CvImage(std_msgs::msg::Header(), sensor_msgs::image_encodings::BGR8, color_mask) + .toImageMsg(); + output_color_mask_msg->header = msg->header; + color_mask_pub_.publish(output_color_mask_msg); + } } bool TrtYoloXNode::readLabelFile(const std::string & label_path) @@ -234,6 +289,37 @@ void TrtYoloXNode::replaceLabelMap() } } +int TrtYoloXNode::mapRoiLabel2SegLabel(const int32_t roi_label_index) +{ + if (roi_overlay_segment_labels_.isOverlay(static_cast(roi_label_index))) { + std::string label = label_map_[roi_label_index]; + + return remap_roi_to_semantic_[label]; + } + return -1; +} + +void TrtYoloXNode::overlapSegmentByRoi( + const tensorrt_yolox::Object & roi_object, cv::Mat & mask, const int orig_width, + const int orig_height) +{ + if (roi_object.score < overlap_roi_score_threshold_) return; + int seg_class_index = mapRoiLabel2SegLabel(roi_object.type); + if (seg_class_index < 0) return; + + const float scale_x = static_cast(mask.cols) / static_cast(orig_width); + const float scale_y = static_cast(mask.rows) / static_cast(orig_height); + const int roi_width = static_cast(roi_object.width * scale_x); + const int roi_height = static_cast(roi_object.height * scale_y); + const int roi_x_offset = static_cast(roi_object.x_offset * scale_x); + const int roi_y_offset = static_cast(roi_object.y_offset * scale_y); + + cv::Mat replace_roi( + cv::Size(roi_width, roi_height), mask.type(), static_cast(seg_class_index)); + replace_roi.copyTo(mask.colRange(roi_x_offset, roi_x_offset + roi_width) + .rowRange(roi_y_offset, roi_y_offset + roi_height)); +} + } // namespace tensorrt_yolox #include "rclcpp_components/register_node_macro.hpp" diff --git 
a/perception/tensorrt_yolox/src/yolox_single_image_inference_node.cpp b/perception/tensorrt_yolox/src/yolox_single_image_inference_node.cpp index 0657f0096b07e..360f41e470e38 100644 --- a/perception/tensorrt_yolox/src/yolox_single_image_inference_node.cpp +++ b/perception/tensorrt_yolox/src/yolox_single_image_inference_node.cpp @@ -47,7 +47,9 @@ class YoloXSingleImageInferenceNode : public rclcpp::Node auto trt_yolox = std::make_unique(model_path, precision); auto image = cv::imread(image_path); tensorrt_yolox::ObjectArrays objects; - trt_yolox->doInference({image}, objects); + std::vector masks; + std::vector color_masks; + trt_yolox->doInference({image}, objects, masks, color_masks); for (const auto & object : objects[0]) { const auto left = object.x_offset; const auto top = object.y_offset;
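For reference, `get_seg_colormap()` above skips the first CSV line as a header and then reads `id, name, r, g, b` per class, pushing the trailing values into `Colormap::color` in order. A hypothetical sketch of such a file (the actual `semseg_color_map.csv` downloaded during env preparation may use different names and colors):

```cpp
// Hypothetical color-map CSV content; embedded as a string literal purely for
// illustration. Parsing "0, road, 128, 64, 128" yields
// Colormap{id = 0, name = "road", color = {128, 64, 128}}.
const char * kExampleColorMapCsv =
  "id, name, r, g, b\n"        // header row (skipped by the parser)
  "0, road, 128, 64, 128\n"    // example colors only
  "1, building, 70, 70, 70\n";
```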