diff --git a/perception/tensorrt_yolox/README.md b/perception/tensorrt_yolox/README.md index ca407b1ff6811..659f4dbf0bfa0 100644 --- a/perception/tensorrt_yolox/README.md +++ b/perception/tensorrt_yolox/README.md @@ -29,31 +29,9 @@ Zheng Ge, Songtao Liu, Feng Wang, Zeming Li, Jian Sun, "YOLOX: Exceeding YOLO Se ## Parameters -### Core Parameters - -| Name | Type | Default Value | Description | -| ----------------- | ----- | ------------- | -------------------------------------------------------------------------------------- | -| `score_threshold` | float | 0.3 | If the objectness score is less than this value, the object is ignored in yolox layer. | -| `nms_threshold` | float | 0.7 | The IoU threshold for NMS method | - -**NOTE:** These two parameters are only valid for "plain" model (described later). - -### Node Parameters - -| Name | Type | Default Value | Description | -| ----------------------------- | ------ | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model_path` | string | "" | The onnx file name for yolox model | -| `label_path` | string | "" | The label file with label names for detected objects written on it | -| `precision` | string | "fp16" | The inference mode: "fp32", "fp16", "int8" | -| `build_only` | bool | false | shutdown node after TensorRT engine file is built | -| `calibration_algorithm` | string | "MinMax" | Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: Entropy",("Legacy" \| "Percentile"), "MinMax"] | -| `dla_core_id` | int | -1 | If positive ID value is specified, the node assign inference task to the DLA core | -| `quantize_first_layer` | bool | false | If true, set the operating precision for the first (input) layer to be fp16. 
This option is valid only when precision==int8 | -| `quantize_last_layer` | bool | false | If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8 | -| `profile_per_layer` | bool | false | If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose. | -| `clip_value` | double | 0.0 | If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration | -| `preprocess_on_gpu` | bool | true | If true, pre-processing is performed on GPU | -| `calibration_image_list_path` | string | "" | Path to a file which contains path to images. Those images will be used for int8 quantization. | +{{ json_to_markdown("perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json") }} +{{ json_to_markdown("perception/tensorrt_yolox/schema/yolox_tiny.schema.json") }} + ## Assumptions / Known limits diff --git a/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml b/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml index bc67173442094..e296289860e65 100644 --- a/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml +++ b/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml @@ -1,9 +1,9 @@ /**: ros__parameters: - model_path: "$(var data_path)/tensorrt_yolox/$(var model_name).onnx" - label_path: "$(var data_path)/tensorrt_yolox/label.txt" - score_threshold: 0.35 - nms_threshold: 0.7 + model_path: "$(var data_path)/tensorrt_yolox/$(var model_name).onnx" # The onnx file name for YOLOX model. + label_path: "$(var data_path)/tensorrt_yolox/label.txt" # The label file path for YOLOX model. + score_threshold: 0.35 # Objects with a score lower than this value will be ignored. 
This threshold will be ignored if specified model contains EfficientNMS_TRT module in it. + nms_threshold: 0.7 # Detection results will be ignored if IoU over this value. This threshold will be ignored if specified model contains EfficientNMS_TRT module in it. precision: "int8" # Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]. calibration_algorithm: "Entropy" # Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]. dla_core_id: -1 # If positive ID value is specified, the node assign inference task to the DLA core. diff --git a/perception/tensorrt_yolox/config/yolox_tiny.param.yaml b/perception/tensorrt_yolox/config/yolox_tiny.param.yaml index e45742a7afb95..051aa22f52d7d 100644 --- a/perception/tensorrt_yolox/config/yolox_tiny.param.yaml +++ b/perception/tensorrt_yolox/config/yolox_tiny.param.yaml @@ -1,9 +1,9 @@ /**: ros__parameters: - model_path: "$(var data_path)/tensorrt_yolox/$(var model_name).onnx" - label_path: "$(var data_path)/tensorrt_yolox/label.txt" - score_threshold: 0.35 - nms_threshold: 0.7 + model_path: "$(var data_path)/tensorrt_yolox/$(var model_name).onnx" # The onnx file name for YOLOX model. + label_path: "$(var data_path)/tensorrt_yolox/label.txt" # The label file path for YOLOX model. + score_threshold: 0.35 # Objects with a score lower than this value will be ignored. This threshold will be ignored if specified model contains EfficientNMS_TRT module in it. + nms_threshold: 0.7 # Detection results will be ignored if IoU over this value. This threshold will be ignored if specified model contains EfficientNMS_TRT module in it. precision: "fp16" # Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]. calibration_algorithm: "MinMax" # Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]. 
dla_core_id: -1 # If positive ID value is specified, the node assign inference task to the DLA core. diff --git a/perception/tensorrt_yolox/src/tensorrt_yolox_node.cpp b/perception/tensorrt_yolox/src/tensorrt_yolox_node.cpp index c1c4c42741cee..2167df104af19 100644 --- a/perception/tensorrt_yolox/src/tensorrt_yolox_node.cpp +++ b/perception/tensorrt_yolox/src/tensorrt_yolox_node.cpp @@ -47,54 +47,20 @@ TrtYoloXNode::TrtYoloXNode(const rclcpp::NodeOptions & node_options) return this->declare_parameter(name, default_val, param_desc); }; - std::string model_path = - declare_parameter_with_description("model_path", "", "The onnx file name for YOLOX model"); - std::string label_path = declare_parameter_with_description( - "label_path", "", - "The label file that consists of label name texts for detected object categories"); - std::string precision = declare_parameter_with_description( - "precision", "fp32", - "operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]"); - float score_threshold = declare_parameter_with_description( - "score_threshold", 0.3, - ("Objects with a score lower than this value will be ignored. " - "This threshold will be ignored if specified model contains EfficientNMS_TRT module in it")); - float nms_threshold = declare_parameter_with_description( - "nms_threshold", 0.7, - ("Detection results will be ignored if IoU over this value. " - "This threshold will be ignored if specified model contains EfficientNMS_TRT module in it")); - std::string calibration_algorithm = declare_parameter_with_description( - "calibration_algorithm", "MinMax", - ("Calibration algorithm to be used for quantization when precision==int8. 
" - "Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]")); - int dla_core_id = declare_parameter_with_description( - "dla_core_id", -1, - "If positive ID value is specified, the node assign inference task to the DLA core"); - bool quantize_first_layer = declare_parameter_with_description( - "quantize_first_layer", false, - ("If true, set the operating precision for the first (input) layer to be fp16. " - "This option is valid only when precision==int8")); - bool quantize_last_layer = declare_parameter_with_description( - "quantize_last_layer", false, - ("If true, set the operating precision for the last (output) layer to be fp16. " - "This option is valid only when precision==int8")); - bool profile_per_layer = declare_parameter_with_description( - "profile_per_layer", false, - ("If true, profiler function will be enabled. " - "Since the profile function may affect execution speed, it is recommended " - "to set this flag true only for development purpose.")); - double clip_value = declare_parameter_with_description( - "clip_value", 0.0, - ("If positive value is specified, " - "the value of each layer output will be clipped between [0.0, clip_value]. " - "This option is valid only when precision==int8 and used to manually specify " - "the dynamic range instead of using any calibration")); - bool preprocess_on_gpu = declare_parameter_with_description( - "preprocess_on_gpu", true, "If true, pre-processing is performed on GPU"); - std::string calibration_image_list_path = declare_parameter_with_description( - "calibration_image_list_path", "", - ("Path to a file which contains path to images." 
- "Those images will be used for int8 quantization.")); + + const std::string model_path = this->declare_parameter<std::string>("model_path"); + const std::string label_path = this->declare_parameter<std::string>("label_path"); + const std::string precision = this->declare_parameter<std::string>("precision"); + const float score_threshold = static_cast<float>(this->declare_parameter<double>("score_threshold")); + const float nms_threshold = static_cast<float>(this->declare_parameter<double>("nms_threshold")); + const std::string calibration_algorithm = this->declare_parameter<std::string>("calibration_algorithm"); + const int dla_core_id = static_cast<int>(this->declare_parameter<int64_t>("dla_core_id")); + const bool quantize_first_layer = this->declare_parameter<bool>("quantize_first_layer"); + const bool quantize_last_layer = this->declare_parameter<bool>("quantize_last_layer"); + const bool profile_per_layer = this->declare_parameter<bool>("profile_per_layer"); + const double clip_value = static_cast<double>(this->declare_parameter<double>("clip_value")); + const bool preprocess_on_gpu = this->declare_parameter<bool>("preprocess_on_gpu"); + const std::string calibration_image_list_path = this->declare_parameter<std::string>("calibration_image_list_path"); if (!readLabelFile(label_path)) { RCLCPP_ERROR(this->get_logger(), "Could not find label file");