
OpenPCDet LiDar Object Detection Node #40

Triggered via pull request February 9, 2024 18:52
Status Success
Total duration 22s
Artifacts

linting_auto.yml

on: pull_request
Run C++/Python linters
10s

Annotations

23 errors and 2 warnings
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py#L1
+from visualization_msgs.msg import Marker, MarkerArray
+from pcdet.utils import common_utils
+from pcdet.models import build_network, load_data_to_gpu
+from pcdet.datasets import DatasetTemplate
+from pcdet.config import cfg, cfg_from_yaml_file
 import argparse
 import glob
 from pathlib import Path
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py#L9
 import torch
 import sys
 sys.path.append('/home/bolty/OpenPCDet')
-from pcdet.config import cfg, cfg_from_yaml_file
-from pcdet.datasets import DatasetTemplate
-from pcdet.models import build_network, load_data_to_gpu
-from pcdet.utils import common_utils
-from visualization_msgs.msg import Marker, MarkerArray
 OPEN3D_FLAG = True
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py#L33
             root_path=Path(args.data_path), ext=args.ext, logger=self.logger
         )
-        self.model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=self.demo_dataset)
+        self.model = build_network(model_cfg=cfg.MODEL, num_class=len(
+            cfg.CLASS_NAMES), dataset=self.demo_dataset)
         self.model.load_params_from_file(filename=args.ckpt, logger=self.logger, to_cpu=True)
         self.model.cuda()
         self.model.eval()
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py#L61
             marker.color.g = 0.0  # Green
             marker.color.b = 0.0  # Blue
             marker_array.markers.append(marker)
-
+
         self.bbox_publisher.publish(marker_array)

     def process_data(self):
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py#L74
             # Convert and publish data
             header = self.make_header()
             points = data_dict['points'][:, :3]  # Extract the relevant columns (x, y, z)
-            point_cloud_msg = self.create_cloud_xyz32(header, points)  # Assuming first 3 columns are x, y, z
+            point_cloud_msg = self.create_cloud_xyz32(
+                header, points)  # Assuming first 3 columns are x, y, z
             self.publisher_.publish(point_cloud_msg)
             self.publish_bounding_boxes(pred_dicts, "lidar_frame")  # Use appropriate frame_id
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py#L86
                         help='specify the config for demo')
     parser.add_argument('--data_path', type=str, default='/home/bolty/data/n015-2018-11-21-19-38-26+0800__LIDAR_TOP__1542801000947820.pcd.bin',
                         help='specify the point cloud data file or directory')
-    parser.add_argument('--ckpt', type=str, default="/home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth", help='specify the pretrained model')
-    parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file')
+    parser.add_argument(
+        '--ckpt', type=str, default="/home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth", help='specify the pretrained model')
+    parser.add_argument('--ext', type=str, default='.bin',
+                        help='specify the extension of your point cloud data file')
     args, unknown = parser.parse_known_args()
     cfg_from_yaml_file(args.cfg_file, cfg)
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py#L98
         header.stamp = self.get_clock().now().to_msg()
         header.frame_id = 'lidar_frame'
         return header
-
+
     def create_cloud_xyz32(self, header, points):
         """
         Create a sensor_msgs/PointCloud2 message from an array of points.
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py#L117
         y_angle = np.deg2rad(90)
         z_angle = np.deg2rad(-90)
         rotation_matrix_y = np.array([
-            [np.cos(y_angle), 0, np.sin(y_angle)],
-            [0, 1, 0 ],
-            [-np.sin(y_angle), 0, np.cos(y_angle)]
+            [np.cos(y_angle), 0, np.sin(y_angle)],
+            [0, 1, 0],
+            [-np.sin(y_angle), 0, np.cos(y_angle)]
         ])
         rotation_matrix_z = np.array([
-            [np.cos(z_angle), -np.sin(z_angle), 0],
-            [np.sin(z_angle), np.cos(z_angle), 0],
-            [0, 0, 1]
+            [np.cos(z_angle), -np.sin(z_angle), 0],
+            [np.sin(z_angle), np.cos(z_angle), 0],
+            [0, 0, 1]
         ])
         # Apply transformation
         points_np = points.cpu().numpy()
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py#L145
         return cloud
-
 class DemoDataset(DatasetTemplate):
     def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'):
         """
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py#L161
         )
         self.root_path = root_path
         self.ext = ext
-        data_file_list = glob.glob(str(root_path / f'*{self.ext}')) if self.root_path.is_dir() else [self.root_path]
+        data_file_list = glob.glob(
+            str(root_path / f'*{self.ext}')) if self.root_path.is_dir() else [self.root_path]
         data_file_list.sort()
         self.sample_file_list = data_file_list
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py#L195
 if __name__ == '__main__':
-    main()
\ No newline at end of file
+    main()
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/test_node.py#L2
 from rclpy.node import Node
 from std_msgs.msg import String
 from time import sleep
+
 class TestNode(Node):
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/test_node.py#L18
         self.get_logger().info('Publishing: "%s"' % msg.data)
         self.count += 1
+
 def main():
     rclpy.init()
     test_node = TestNode()
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/test_node.py#L30
     test_node.destroy_node()
     rclpy.shutdown()
+
 if __name__ == '__main__':
     main()
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py#L11
""" +import torch import rospy from visualization_msgs.msg import Marker, MarkerArray from geometry_msgs.msg import Point
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py#L21
 This file is from OpenPCDet visualize_utils.py
 Check their repo: https://github.com/open-mmlab/OpenPCDet
 """
-import numpy as np
-import torch
 box_colormap = [
     [1, 1, 1],
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py#L89
     corners3d += boxes3d[:, None, 0:3]
     return corners3d.numpy() if is_numpy else corners3d
-
+
+
 class Draw3DBox:
-    def __init__(self, box3d_pub, marker_frame_id = 'velodyne', rate=10):
+    def __init__(self, box3d_pub, marker_frame_id='velodyne', rate=10):
         self.frame_id = marker_frame_id
         self.lifetime = 1.0/rate
         self.box3d_pub = box3d_pub
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py#L107
""" # (N, 8, 3) # -move_lidar_center - dt_box_lidar[:,0] = dt_box_lidar[:,0]-move_lidar_center + dt_box_lidar[:, 0] = dt_box_lidar[:, 0]-move_lidar_center corners_3d_velos = boxes_to_corners_3d(dt_box_lidar) marker_array = MarkerArray()
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py#L154
             text_marker.lifetime = rospy.Duration(self.lifetime)
             text_marker.type = Marker.TEXT_VIEW_FACING
-            p4 = corners_3d_velo[4] # upper front left corner
+            p4 = corners_3d_velo[4]  # upper front left corner
             text_marker.pose.position.x = p4[0]
             text_marker.pose.position.y = p4[1]
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py#L177
             text_marker.color.b = b/255.0
             text_marker.color.a = 1.0
             marker_array.markers.append(text_marker)
-
+
         if publish_type:
             text_marker = Marker()
             text_marker.header.frame_id = self.frame_id
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py#L188
             text_marker.lifetime = rospy.Duration(self.lifetime)
             text_marker.type = Marker.TEXT_VIEW_FACING
-            bc = (corners_3d_velo[6] + corners_3d_velo[7] + corners_3d_velo[2] + corners_3d_velo[3])/4  # back center
+            bc = (corners_3d_velo[6] + corners_3d_velo[7] +
+                  corners_3d_velo[2] + corners_3d_velo[3])/4  # back center
             text_marker.pose.position.x = bc[0]
             text_marker.pose.position.y = bc[1]
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py#L212
         Return : 3xn in cam2 coordinate
     """
     R = np.array([[np.cos(yaw), 0, np.sin(yaw)], [0, 1, 0], [-np.sin(yaw), 0, np.cos(yaw)]])
-    x_corners = [l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2]
-    y_corners = [0,0,0,0,-h,-h,-h,-h]
-    z_corners = [w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2]
-    corners_3d_cam2 = np.dot(R, np.vstack([x_corners,y_corners,z_corners]))
-    corners_3d_cam2[0,:] += x
-    corners_3d_cam2[1,:] += y
-    corners_3d_cam2[2,:] += z
-    return corners_3d_cam2
\ No newline at end of file
+    x_corners = [l/2, l/2, -l/2, -l/2, l/2, l/2, -l/2, -l/2]
+    y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
+    z_corners = [w/2, -w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2]
+    corners_3d_cam2 = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
+    corners_3d_cam2[0, :] += x
+    corners_3d_cam2[1, :] += y
+    corners_3d_cam2[2, :] += z
+    return corners_3d_cam2
src/perception/lidar_object_detection/lidar_det_py/launch/3d_object_detector.py#L1
 #!/usr/bin/env python3
 from launch import LaunchDescription
 from launch_ros.actions import Node
+
 def generate_launch_description():
     return LaunchDescription([
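
All 23 errors above are PEP 8 formatting fixes (line length, whitespace, blank lines around classes and functions) suggested by the lint job. Assuming the job applies an autopep8-style formatter over the package (the exact tool is not named in this run, so this is an assumption), the same fixes can typically be reproduced locally with autopep8 --in-place --recursive src/perception/lidar_object_detection before pushing, so the next run comes back clean.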
Run C++/Python linters
Node.js 16 actions are deprecated. Please update the following actions to use Node.js 20: actions/setup-python@v1, WATonomous/wato-lint-action@v1. For more information see: https://github.blog/changelog/2023-09-22-github-actions-transitioning-from-node-16-to-node-20/.
Run C++/Python linters
The following actions uses node12 which is deprecated and will be forced to run on node16: actions/setup-python@v1. For more info: https://github.blog/changelog/2023-06-13-github-actions-all-actions-will-run-on-node16-instead-of-node12-by-default/
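
Both warnings point at the same step: actions/setup-python@v1 predates the Node 16 and Node 20 runtimes. Below is a minimal sketch of how the setup step in linting_auto.yml could be updated, assuming the job only needs a checkout and a Python toolchain before the lint step; the checkout step, Python version, and step layout are assumptions, not taken from this run, and the WATonomous/wato-lint-action warning only clears once that action publishes a Node 20 release.

    steps:
      - uses: actions/checkout@v4           # Node 20 release
      - uses: actions/setup-python@v5       # replaces the deprecated actions/setup-python@v1
        with:
          python-version: '3.10'            # assumed version
      - uses: WATonomous/wato-lint-action@v1   # bump once a Node 20 release of this action exists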