From 093e322bde8429bdd16d4f53f29ff129aa8ccf32 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Sun, 16 Jun 2024 10:04:46 +0900 Subject: [PATCH 01/34] Output topic drop error during initialization Signed-off-by: Shunsuke Miura --- .../rosbag2/rosbag2_to_non_annotated_t4_converter.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index 9199d165..fa0840df 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -172,6 +172,10 @@ def _calc_actual_num_load_frames(self): return num_frames_in_bag = min([self._bag_reader.get_topic_count(t) for t in topic_names]) + for topic in topic_names: + count = self._bag_reader.get_topic_count(topic) + if count == 0: + raise KeyError(f"In {self._input_bag}, {topic} message is not found.") freq = 10 num_frames_to_skip = int(self._skip_timestamp * freq) max_num_frames = num_frames_in_bag - num_frames_to_skip - 1 From 9af2b62a57816ef0bf358fad2780f0de188aecb4 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Sun, 16 Jun 2024 10:07:40 +0900 Subject: [PATCH 02/34] Avoid out-of-bounds access in get_lidar_camera_synced_frame_info Signed-off-by: Shunsuke Miura --- perception_dataset/utils/misc.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/perception_dataset/utils/misc.py b/perception_dataset/utils/misc.py index a38bb304..878da79f 100644 --- a/perception_dataset/utils/misc.py +++ b/perception_dataset/utils/misc.py @@ -64,6 +64,10 @@ def get_lidar_camera_synced_frame_info( system_scan_period_sec - max_camera_jitter_sec ): current_image_index += 1 + if current_image_index >= len(image_timestamp_list): + # set dummy timestamp + image_timestamp = lidar_timestamp + lidar_to_camera_latency_sec + max_camera_jitter_sec + system_scan_period_sec + break image_timestamp = image_timestamp_list[current_image_index] if image_timestamp - lidar_timestamp > lidar_to_camera_latency_sec + ( From 9da50c210750daf2c1f69099360feb0ccfd2dd10 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Sun, 16 Jun 2024 12:22:32 +0900 Subject: [PATCH 03/34] Enhance image data handling to support CompressedImage format - Modify _generate_image_data to accept both np.ndarray and CompressedImage types - Add image_shape parameter to properly set image dimensions - Ensure correct saving of CompressedImage data Signed-off-by: Shunsuke Miura --- .../rosbag2_to_non_annotated_t4_converter.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index fa0840df..19745d01 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -672,12 +672,13 @@ def _convert_image( image_index_counter += 1 sample_data_token = self._generate_image_data( - rosbag2_utils.compressed_msg_to_numpy(image_msg), + image_msg, rosbag2_utils.stamp_to_unix_timestamp(image_msg.header.stamp), lidar_sample_token, calibrated_sensor_token, sensor_channel, lidar_frame_index, + image_shape, ) sample_data_token_list.append(sample_data_token) else: # camera only mode @@ -749,12 +750,13 @@ def get_move_distance(trans1: Dict[str, float], trans2: Dict[str, float]) -> flo def _generate_image_data( self, - image_arr: np.ndarray, + 
image_arr: Union[np.ndarray, CompressedImage], image_unix_timestamp: float, sample_token: str, calibrated_sensor_token: str, sensor_channel: str, frame_index: int, + image_shape: Tuple[int, int, int] = (0, 0, 0), output_blank_image: bool = False, is_key_frame: bool = True, ): @@ -764,6 +766,8 @@ def _generate_image_data( fileformat = EXTENSION_ENUM.JPG.value[1:] # Note: png for all images filename = misc_utils.get_sample_data_filename(sensor_channel, frame_index, fileformat) + if hasattr(image_arr, "shape"): + image_shape = image_arr.shape sample_data_token = self._sample_data_table.insert_into_table( sample_token=sample_token, ego_pose_token=ego_pose_token, @@ -772,15 +776,19 @@ def _generate_image_data( fileformat=fileformat, timestamp=misc_utils.unix_timestamp_to_nusc_timestamp(image_unix_timestamp), is_key_frame=is_key_frame, - height=image_arr.shape[0], - width=image_arr.shape[1], + height=image_shape[0], + width=image_shape[1], is_valid=is_key_frame and (not output_blank_image), ) sample_data_record: SampleDataRecord = self._sample_data_table.select_record_from_token( sample_data_token ) - cv2.imwrite(osp.join(self._output_scene_dir, sample_data_record.filename), image_arr) + if isinstance(image_arr, np.ndarray): + cv2.imwrite(osp.join(self._output_scene_dir, sample_data_record.filename), image_arr, [int(cv2.IMWRITE_JPEG_QUALITY), 95]) + elif isinstance(image_arr, CompressedImage): + with open(osp.join(self._output_scene_dir, sample_data_record.filename), "xb") as fw: + fw.write(image_arr.data) return sample_data_token From 7fb79f171dc78016152d40f2b4894e62d2129059 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Sun, 30 Jun 2024 19:36:48 +0900 Subject: [PATCH 04/34] add RX2 constants Signed-off-by: Shunsuke Miura --- perception_dataset/constants.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/perception_dataset/constants.py b/perception_dataset/constants.py index b9494de7..7527623c 100644 --- a/perception_dataset/constants.py +++ b/perception_dataset/constants.py @@ -23,6 +23,14 @@ class SENSOR_ENUM(Enum): "channel": "CAM_FRONT", "modality": SENSOR_MODALITY_ENUM.CAMERA.value, } + CAM_FRONT_NARROW = { + "channel": "CAM_FRONT_NARROW", + "modality": SENSOR_MODALITY_ENUM.CAMERA.value, + } + CAM_FRONT_WIDE = { + "channel": "CAM_FRONT_WIDE", + "modality": SENSOR_MODALITY_ENUM.CAMERA.value, + } CAM_FRONT_RIGHT = { "channel": "CAM_FRONT_RIGHT", "modality": SENSOR_MODALITY_ENUM.CAMERA.value, @@ -51,6 +59,10 @@ class SENSOR_ENUM(Enum): "channel": "LIDAR_TOP", "modality": SENSOR_MODALITY_ENUM.LIDAR.value, } + LIDAR_FRONT = { + "channel": "LIDAR_FRONT", + "modality": SENSOR_MODALITY_ENUM.LIDAR.value, + } LIDAR_CONCAT = { "channel": "LIDAR_CONCAT", "modality": SENSOR_MODALITY_ENUM.LIDAR.value, From efb96347ec5898e055ab85bf46aaff738d7bf184 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sun, 30 Jun 2024 11:35:07 +0000 Subject: [PATCH 05/34] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- perception_dataset/utils/misc.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/perception_dataset/utils/misc.py b/perception_dataset/utils/misc.py index 878da79f..eac27409 100644 --- a/perception_dataset/utils/misc.py +++ b/perception_dataset/utils/misc.py @@ -66,7 +66,12 @@ def get_lidar_camera_synced_frame_info( current_image_index += 1 if current_image_index >= len(image_timestamp_list): # set dummy timestamp - image_timestamp = lidar_timestamp 
+ lidar_to_camera_latency_sec + max_camera_jitter_sec + system_scan_period_sec + image_timestamp = ( + lidar_timestamp + + lidar_to_camera_latency_sec + + max_camera_jitter_sec + + system_scan_period_sec + ) break image_timestamp = image_timestamp_list[current_image_index] From 13445ffc1943fd6b6b0079a6d945f30a3ccff9b5 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Sun, 22 Sep 2024 21:03:15 +0900 Subject: [PATCH 06/34] feat(rosbag2_to_non_annotated_t4): support image undistortion option Signed-off-by: Shunsuke Miura --- ...rt_rosbag2_to_non_annotated_t4_sample.yaml | 1 + .../rosbag2/converter_params.py | 1 + .../rosbag2_to_non_annotated_t4_converter.py | 42 +++++++++++++++---- 3 files changed, 37 insertions(+), 7 deletions(-) diff --git a/config/convert_rosbag2_to_non_annotated_t4_sample.yaml b/config/convert_rosbag2_to_non_annotated_t4_sample.yaml index 8052cee3..4095bd8c 100644 --- a/config/convert_rosbag2_to_non_annotated_t4_sample.yaml +++ b/config/convert_rosbag2_to_non_annotated_t4_sample.yaml @@ -9,6 +9,7 @@ conversion: skip_timestamp: 2.0 # Do not load data for the first point cloud timestamp for skip_timestamp seconds. num_load_frames: 0 # Maximum number of frames to save as t4 data. Set to 0 to automatically set it based on the number of lidar topics. accept_frame_drop: False # If true, the conversion will continue even if the LiDAR frame is dropped. + undistort_image: False # If true, the camera image will be undistorted. # The following configuration is generally not modified unless there are changes to the vehicle sensor configuration. lidar_sensor: topic: /sensing/lidar/concatenated/pointcloud diff --git a/perception_dataset/rosbag2/converter_params.py b/perception_dataset/rosbag2/converter_params.py index c1176522..fa1efd75 100644 --- a/perception_dataset/rosbag2/converter_params.py +++ b/perception_dataset/rosbag2/converter_params.py @@ -26,6 +26,7 @@ class Rosbag2ConverterParams(BaseModel): with_gt_label: bool = False # whether to use gt labels scene_description: str = "" # scene description accept_frame_drop: bool = False # whether to accept frame drop + undistort_image: bool = False # whether to undistort image # rosbag data type data_type: DataType = DataType.REAL # real or synthetic diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index 25f70066..649f055c 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -5,7 +5,7 @@ import os.path as osp import shutil import sys -from typing import Dict, List, Tuple, Union +from typing import Dict, List, Optional, Tuple, Union import warnings import builtin_interfaces.msg @@ -13,7 +13,7 @@ import numpy as np from pyquaternion import Quaternion from radar_msgs.msg import RadarTracks -from sensor_msgs.msg import CompressedImage, PointCloud2 +from sensor_msgs.msg import CameraInfo, CompressedImage, PointCloud2 from perception_dataset.abstract_converter import AbstractConverter from perception_dataset.constants import ( @@ -117,6 +117,8 @@ def __init__(self, params: Rosbag2ConverterParams) -> None: self._generate_frame_every_meter: float = params.generate_frame_every_meter self._scene_description: str = params.scene_description self._accept_frame_drop: bool = params.accept_frame_drop + self._undistort_image: bool = params.undistort_image + self._exec_undistort: bool = False # frame_id of coordinate transformation 
self._ego_pose_target_frame: str = params.world_frame_id @@ -608,7 +610,7 @@ def _convert_image( # Get calibrated sensor token start_time_in_time = rosbag2_utils.unix_timestamp_to_stamp(start_timestamp) - calibrated_sensor_token = self._generate_calibrated_sensor( + calibrated_sensor_token, camera_info = self._generate_calibrated_sensor( sensor_channel, start_time_in_time, topic ) @@ -675,6 +677,7 @@ def _convert_image( sensor_channel, lidar_frame_index, image_shape, + camera_info=camera_info, ) sample_data_token_list.append(sample_data_token) else: # camera only mode @@ -755,6 +758,7 @@ def _generate_image_data( image_shape: Tuple[int, int, int] = (0, 0, 0), output_blank_image: bool = False, is_key_frame: bool = True, + camera_info: Optional[CameraInfo] = None, ): ego_pose_token = self._generate_ego_pose( rosbag2_utils.unix_timestamp_to_stamp(image_unix_timestamp) @@ -787,8 +791,23 @@ def _generate_image_data( [int(cv2.IMWRITE_JPEG_QUALITY), 95], ) elif isinstance(image_arr, CompressedImage): - with open(osp.join(self._output_scene_dir, sample_data_record.filename), "xb") as fw: - fw.write(image_arr.data) + if camera_info is None: + # save compressed image as is + with open( + osp.join(self._output_scene_dir, sample_data_record.filename), "xb" + ) as fw: + fw.write(image_arr.data) + else: + # load image and undistort + image = rosbag2_utils.compressed_msg_to_numpy(image_arr) + image = cv2.undistort( + image, + camera_info.k.reshape(3, 3), + np.array(camera_info.d), + None, + camera_info.p.reshape(3, 4)[:3], + ) + cv2.imwrite(osp.join(self._output_scene_dir, sample_data_record.filename), image) return sample_data_token @@ -818,8 +837,9 @@ def _generate_ego_pose(self, stamp: builtin_interfaces.msg.Time) -> str: def _generate_calibrated_sensor( self, sensor_channel: str, start_timestamp: builtin_interfaces.msg.Time, topic_name="" - ) -> str: + ) -> Union[str, Tuple[str, CameraInfo]]: calibrated_sensor_token = str() + camera_info = None for sensor_enum in self._sensor_enums: channel = sensor_enum.value["channel"] modality = sensor_enum.value["modality"] @@ -885,7 +905,14 @@ def _generate_calibrated_sensor( info = self._bag_reader.camera_info.get(cam_info_topic) if info is None: continue - camera_intrinsic = np.delete(info.p.reshape(3, 4), 3, 1).tolist() + if "image_rect" in topic_name: + # image is considered as already undistorted + camera_intrinsic = np.delete(info.p.reshape(3, 4), 3, 1).tolist() + elif self._undistort_image: + camera_intrinsic = np.delete(info.p.reshape(3, 4), 3, 1).tolist() + camera_info = info + else: + camera_intrinsic = info.k.reshape(3, 3).tolist() camera_distortion = info.d.tolist() calibrated_sensor_token = self._calibrated_sensor_table.insert_into_table( @@ -895,6 +922,7 @@ def _generate_calibrated_sensor( camera_intrinsic=camera_intrinsic, camera_distortion=camera_distortion, ) + return calibrated_sensor_token, camera_info else: raise ValueError(f"Unexpected sensor modality: {modality}") From 09884d29ea5d3c7017bb5c9bb168eca96f0dacfe Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Sun, 22 Sep 2024 21:36:07 +0900 Subject: [PATCH 07/34] feat(rosbag2_to_non_annotated_t4): add DRS related constants/config Signed-off-by: Shunsuke Miura --- ...nvert_rosbag2_to_non_annotated_t4_drs.yaml | 41 +++++++++++++++++++ perception_dataset/constants.py | 16 ++++++++ 2 files changed, 57 insertions(+) create mode 100644 config/convert_rosbag2_to_non_annotated_t4_drs.yaml diff --git a/config/convert_rosbag2_to_non_annotated_t4_drs.yaml 
b/config/convert_rosbag2_to_non_annotated_t4_drs.yaml new file mode 100644 index 00000000..9253e5c7 --- /dev/null +++ b/config/convert_rosbag2_to_non_annotated_t4_drs.yaml @@ -0,0 +1,41 @@ +task: convert_rosbag2_to_non_annotated_t4 +description: + scene: "" +conversion: + input_base: ./data/rosbag2 + output_base: ./data/non_annotated_t4_format + world_frame_id: "base_link" + start_timestamp_sec: 0 # Enter here if there is a timestamp for the start time. If not used, enter 0. + skip_timestamp: 2.0 # Do not load data for the first point cloud timestamp for skip_timestamp seconds. + num_load_frames: 0 # Maximum number of frames to save as t4 data. Set to 0 to automatically set it based on the number of lidar topics. + accept_frame_drop: True # If true, the conversion will continue even if the LiDAR frame is dropped. + undistort_image: True # If true, the camera image will be undistorted. + # The following configuration is generally not modified unless there are changes to the vehicle sensor configuration. + lidar_sensor: + topic: /sensing/lidar/concatenated/pointcloud + channel: LIDAR_CONCAT + camera_sensors: # Keep the same order as each camera exposure timing + - topic: /sensing/camera/camera0/image_raw/compressed + channel: CAM_FRONT_NARROW + delay_msec: 0.0 + - topic: /sensing/camera/camera1/image_raw/compressed + channel: CAM_FRONT_WIDE + delay_msec: 0.0 + - topic: /sensing/camera/camera2/image_raw/compressed + channel: CAM_FRONT_RIGHT + delay_msec: 0.0 + - topic: /sensing/camera/camera3/image_raw/compressed + channel: CAM_BACK_RIGHT + delay_msec: 0.0 + - topic: /sensing/camera/camera4/image_raw/compressed + channel: CAM_BACK_WIDE + delay_msec: 0.0 + - topic: /sensing/camera/camera5/image_raw/compressed + channel: CAM_BACK_NARROW + delay_msec: 0.0 + - topic: /sensing/camera/camera6/image_raw/compressed + channel: CAM_BACK_LEFT + delay_msec: 0.0 + - topic: /sensing/camera/camera7/image_raw/compressed + channel: CAM_FRONT_LEFT + delay_msec: 0.0 diff --git a/perception_dataset/constants.py b/perception_dataset/constants.py index b9494de7..54788943 100644 --- a/perception_dataset/constants.py +++ b/perception_dataset/constants.py @@ -23,6 +23,14 @@ class SENSOR_ENUM(Enum): "channel": "CAM_FRONT", "modality": SENSOR_MODALITY_ENUM.CAMERA.value, } + CAM_FRONT_NARROW = { + "channel": "CAM_FRONT_NARROW", + "modality": SENSOR_MODALITY_ENUM.CAMERA.value, + } + CAM_FRONT_WIDE = { + "channel": "CAM_FRONT_WIDE", + "modality": SENSOR_MODALITY_ENUM.CAMERA.value, + } CAM_FRONT_RIGHT = { "channel": "CAM_FRONT_RIGHT", "modality": SENSOR_MODALITY_ENUM.CAMERA.value, @@ -35,6 +43,14 @@ class SENSOR_ENUM(Enum): "channel": "CAM_BACK", "modality": SENSOR_MODALITY_ENUM.CAMERA.value, } + CAM_BACK_NARROW = { + "channel": "CAM_BACK_NARROW", + "modality": SENSOR_MODALITY_ENUM.CAMERA.value, + } + CAM_BACK_WIDE = { + "channel": "CAM_BACK_WIDE", + "modality": SENSOR_MODALITY_ENUM.CAMERA.value, + } CAM_FRONT_LEFT = { "channel": "CAM_FRONT_LEFT", "modality": SENSOR_MODALITY_ENUM.CAMERA.value, From 59e860b6df152361e7529f35646728ea6a9ecf28 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Sun, 22 Sep 2024 21:59:23 +0900 Subject: [PATCH 08/34] raise error when the camera_info is not found Signed-off-by: Shunsuke Miura --- .../rosbag2/rosbag2_to_non_annotated_t4_converter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index 649f055c..7803967f 100644 --- 
a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -904,7 +904,7 @@ def _generate_calibrated_sensor( cam_info_topic = "/".join(topic_name_splitted[:4]) + "/camera_info" info = self._bag_reader.camera_info.get(cam_info_topic) if info is None: - continue + raise ValueError(f"Camera info not found for {cam_info_topic}") if "image_rect" in topic_name: # image is considered as already undistorted camera_intrinsic = np.delete(info.p.reshape(3, 4), 3, 1).tolist() From 06e16a778afeb733340527441903daa9c2437ea4 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Sun, 22 Sep 2024 23:23:53 +0900 Subject: [PATCH 09/34] overwrite distortion coefficient when applying undistortion Signed-off-by: Shunsuke Miura --- .../rosbag2/rosbag2_to_non_annotated_t4_converter.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index 7803967f..af247c8c 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -908,12 +908,14 @@ def _generate_calibrated_sensor( if "image_rect" in topic_name: # image is considered as already undistorted camera_intrinsic = np.delete(info.p.reshape(3, 4), 3, 1).tolist() + camera_distortion = info.d.tolist() elif self._undistort_image: camera_intrinsic = np.delete(info.p.reshape(3, 4), 3, 1).tolist() + camera_distortion = [0.0, 0.0, 0.0, 0.0, 0.0] camera_info = info else: camera_intrinsic = info.k.reshape(3, 3).tolist() - camera_distortion = info.d.tolist() + camera_distortion = info.d.tolist() calibrated_sensor_token = self._calibrated_sensor_table.insert_into_table( sensor_token=sensor_token, From 8542a24fb0a9df1fc245a4bb52c398bc36d63ace Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Mon, 30 Sep 2024 09:10:01 +0900 Subject: [PATCH 10/34] fix config, set jpeg_quality to 100 Signed-off-by: Shunsuke Miura --- config/convert_rosbag2_to_non_annotated_t4_drs.yaml | 4 ++-- .../rosbag2/rosbag2_to_non_annotated_t4_converter.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/config/convert_rosbag2_to_non_annotated_t4_drs.yaml b/config/convert_rosbag2_to_non_annotated_t4_drs.yaml index 9253e5c7..f4398d97 100644 --- a/config/convert_rosbag2_to_non_annotated_t4_drs.yaml +++ b/config/convert_rosbag2_to_non_annotated_t4_drs.yaml @@ -28,10 +28,10 @@ conversion: channel: CAM_BACK_RIGHT delay_msec: 0.0 - topic: /sensing/camera/camera4/image_raw/compressed - channel: CAM_BACK_WIDE + channel: CAM_BACK_NARROW delay_msec: 0.0 - topic: /sensing/camera/camera5/image_raw/compressed - channel: CAM_BACK_NARROW + channel: CAM_BACK_WIDE delay_msec: 0.0 - topic: /sensing/camera/camera6/image_raw/compressed channel: CAM_BACK_LEFT diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index 01f540b6..043ecce1 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -811,7 +811,7 @@ def _generate_image_data( None, camera_info.p.reshape(3, 4)[:3], ) - cv2.imwrite(osp.join(self._output_scene_dir, sample_data_record.filename), image) + cv2.imwrite(osp.join(self._output_scene_dir, sample_data_record.filename), image, [cv2.IMWRITE_JPEG_QUALITY, 100]) return 
sample_data_token From 9f5623125b8c279c04f6c7fd79bc7153a87eb9aa Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Mon, 14 Oct 2024 21:06:09 +0900 Subject: [PATCH 11/34] update config file Signed-off-by: Shunsuke Miura --- ...onvert_rosbag2_to_non_annotated_t4_drs.yaml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/config/convert_rosbag2_to_non_annotated_t4_drs.yaml b/config/convert_rosbag2_to_non_annotated_t4_drs.yaml index f4398d97..0fadf6e7 100644 --- a/config/convert_rosbag2_to_non_annotated_t4_drs.yaml +++ b/config/convert_rosbag2_to_non_annotated_t4_drs.yaml @@ -6,7 +6,7 @@ conversion: output_base: ./data/non_annotated_t4_format world_frame_id: "base_link" start_timestamp_sec: 0 # Enter here if there is a timestamp for the start time. If not used, enter 0. - skip_timestamp: 2.0 # Do not load data for the first point cloud timestamp for skip_timestamp seconds. + skip_timestamp: 0.5 # Do not load data for the first point cloud timestamp for skip_timestamp seconds. num_load_frames: 0 # Maximum number of frames to save as t4 data. Set to 0 to automatically set it based on the number of lidar topics. accept_frame_drop: True # If true, the conversion will continue even if the LiDAR frame is dropped. undistort_image: True # If true, the camera image will be undistorted. @@ -17,25 +17,25 @@ conversion: camera_sensors: # Keep the same order as each camera exposure timing - topic: /sensing/camera/camera0/image_raw/compressed channel: CAM_FRONT_NARROW - delay_msec: 0.0 + delay_msec: 60.0 - topic: /sensing/camera/camera1/image_raw/compressed channel: CAM_FRONT_WIDE - delay_msec: 0.0 + delay_msec: 60.0 - topic: /sensing/camera/camera2/image_raw/compressed channel: CAM_FRONT_RIGHT - delay_msec: 0.0 + delay_msec: 35.0 - topic: /sensing/camera/camera3/image_raw/compressed channel: CAM_BACK_RIGHT - delay_msec: 0.0 + delay_msec: 85.0 - topic: /sensing/camera/camera4/image_raw/compressed channel: CAM_BACK_NARROW - delay_msec: 0.0 + delay_msec: 60.0 - topic: /sensing/camera/camera5/image_raw/compressed channel: CAM_BACK_WIDE - delay_msec: 0.0 + delay_msec: 60.0 - topic: /sensing/camera/camera6/image_raw/compressed channel: CAM_BACK_LEFT - delay_msec: 0.0 + delay_msec: 35.0 - topic: /sensing/camera/camera7/image_raw/compressed channel: CAM_FRONT_LEFT - delay_msec: 0.0 + delay_msec: 85.0 From a9de6aca4380bed7bc88260d93a74cb535b626c1 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Mon, 14 Oct 2024 21:20:27 +0900 Subject: [PATCH 12/34] modify overwrite logic for rosbag2_to_non_annotated_t4 converter Signed-off-by: Shunsuke Miura --- .../rosbag2/rosbag2_to_non_annotated_t4_converter.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index 043ecce1..0f57f162 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -78,18 +78,20 @@ def convert(self): if not self._overwrite_mode: dir_exist: bool = False - for bag_dir in bag_dirs: + for bag_dir in bag_dirs[:]: # copy to avoid modifying list while iterating bag_name: str = osp.basename(bag_dir) output_dir = osp.join(self._output_base, bag_name) if osp.exists(output_dir): logger.error(f"{output_dir} already exists.") dir_exist = True - - if dir_exist: + bag_dirs.remove(bag_dir) + if dir_exist and len(bag_dirs) == 0: + logger.error(f"{output_dir} already 
exists.") raise ValueError("If you want to overwrite files, use --overwrite option.") for bag_dir in bag_dirs: + logger.info(f"Start converting {bag_dir} to T4 format.") self._params.input_bag_path = bag_dir bag_converter = _Rosbag2ToNonAnnotatedT4Converter(self._params) bag_converter.convert() From cb9db4d72794bf40bbe9c5900639d9de12b0fb99 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Mon, 14 Oct 2024 21:22:13 +0900 Subject: [PATCH 13/34] Add elapsed time logging and increase message display interval Signed-off-by: Shunsuke Miura --- .../rosbag2/rosbag2_to_non_annotated_t4_converter.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index 0f57f162..8514b39b 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -5,6 +5,7 @@ import os.path as osp import shutil import sys +import time from typing import Dict, List, Optional, Tuple, Union import warnings @@ -156,7 +157,7 @@ def __init__(self, params: Rosbag2ConverterParams) -> None: self._output_data_dir = osp.join( self._output_scene_dir, T4_FORMAT_DIRECTORY_NAME.DATA.value ) - self._msg_display_interval = 10 + self._msg_display_interval = 100 shutil.rmtree(self._output_scene_dir, ignore_errors=True) self._make_directories() @@ -302,6 +303,7 @@ def _convert(self) -> None: - is_key_frame=True - fill in next/prev """ + start = time.time() sensor_channel_to_sample_data_token_list: Dict[str, List[str]] = {} self._init_tables() @@ -311,6 +313,7 @@ def _convert(self) -> None: self._connect_sample_in_scene() self._connect_sample_data_in_scene(sensor_channel_to_sample_data_token_list) self._add_scene_description(self._scene_description) + print(f"Total elapsed time: {time.time() - start:.2f} sec") def _calc_start_timestamp(self) -> float: if self._start_timestamp < sys.float_info.epsilon: @@ -329,6 +332,7 @@ def _convert_sensor_data( logger.info(f"set start_timestamp to {start_timestamp}") if self._sensor_mode == SensorMode.DEFAULT: + start = time.time() lidar_sensor_channel = self._lidar_sensor["channel"] sensor_channel_to_sample_data_token_list[lidar_sensor_channel] = ( self._convert_pointcloud( @@ -338,7 +342,7 @@ def _convert_sensor_data( scene_token=scene_token, ) ) - + print(f"LiDAR conversion. total elapsed time: {time.time() - start:.2f} sec\n") for radar_sensor in self._radar_sensors: radar_sensor_channel = radar_sensor["channel"] sensor_channel_to_sample_data_token_list[radar_sensor_channel] = ( @@ -363,6 +367,7 @@ def _convert_sensor_data( ) for camera_sensor in self._camera_sensors: + start = time.time() sensor_channel = camera_sensor["channel"] sensor_channel_to_sample_data_token_list[sensor_channel] = self._convert_image( start_timestamp=camera_start_timestamp, @@ -381,6 +386,9 @@ def _convert_sensor_data( camera_start_timestamp = misc_utils.nusc_timestamp_to_unix_timestamp( first_sample_data_record.timestamp ) + print( + f"camera {camera_sensor['channel']} conversion. 
total elapsed time: {time.time() - start:.2f} sec\n" + ) def _convert_static_data(self): # Log, Map From 268940d0437a6a066b6b2ed70d3e2046828bedfd Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Thu, 17 Oct 2024 18:16:40 +0900 Subject: [PATCH 14/34] add error handling, add print Signed-off-by: Shunsuke Miura --- .../rosbag2_to_non_annotated_t4_converter.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index 8514b39b..519b1374 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -243,17 +243,28 @@ def _init_tables(self): self._visibility_table = VisibilityTable(level_to_description={}, default_value="") def convert(self): - self._convert() + try: + self._convert() + except Exception as e: + logger.error(f"Error occurred during conversion: {e}") + return + self._save_tables() self._save_config() if not self._without_compress: self._compress_directory() def _save_tables(self): + print( + "--------------------------------------------------------------------------------------------------------------------------" + ) for cls_attr in self.__dict__.values(): if isinstance(cls_attr, AbstractTable): print(f"{cls_attr.FILENAME}: #rows {len(cls_attr)}") cls_attr.save_json(self._output_anno_dir) + print( + "--------------------------------------------------------------------------------------------------------------------------" + ) def _save_config(self): config_data = { @@ -821,7 +832,11 @@ def _generate_image_data( None, camera_info.p.reshape(3, 4)[:3], ) - cv2.imwrite(osp.join(self._output_scene_dir, sample_data_record.filename), image, [cv2.IMWRITE_JPEG_QUALITY, 100]) + cv2.imwrite( + osp.join(self._output_scene_dir, sample_data_record.filename), + image, + [cv2.IMWRITE_JPEG_QUALITY, 100], + ) return sample_data_token From 56da43a2fda710bd4c81a8929b9a194430e60119 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Sun, 20 Oct 2024 04:25:48 +0900 Subject: [PATCH 15/34] add error handling Signed-off-by: Shunsuke Miura --- .../rosbag2_to_non_annotated_t4_converter.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index 519b1374..5dc94179 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -94,8 +94,12 @@ def convert(self): for bag_dir in bag_dirs: logger.info(f"Start converting {bag_dir} to T4 format.") self._params.input_bag_path = bag_dir - bag_converter = _Rosbag2ToNonAnnotatedT4Converter(self._params) - bag_converter.convert() + try: + bag_converter = _Rosbag2ToNonAnnotatedT4Converter(self._params) + bag_converter.convert() + except Exception as e: + logger.error(f"Error occurred during conversion: {e}") + continue class _Rosbag2ToNonAnnotatedT4Converter: @@ -243,11 +247,7 @@ def _init_tables(self): self._visibility_table = VisibilityTable(level_to_description={}, default_value="") def convert(self): - try: - self._convert() - except Exception as e: - logger.error(f"Error occurred during conversion: {e}") - return + self._convert() self._save_tables() self._save_config() From d4ed3a786854b096bc9f9b07c7f45f0a93e13fe4 Mon Sep 17 00:00:00 2001 
From: Shunsuke Miura Date: Tue, 22 Oct 2024 02:06:14 +0900 Subject: [PATCH 16/34] update drs config file Signed-off-by: Shunsuke Miura --- .../convert_rosbag2_to_non_annotated_t4_drs.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/config/convert_rosbag2_to_non_annotated_t4_drs.yaml b/config/convert_rosbag2_to_non_annotated_t4_drs.yaml index 0fadf6e7..b59072d3 100644 --- a/config/convert_rosbag2_to_non_annotated_t4_drs.yaml +++ b/config/convert_rosbag2_to_non_annotated_t4_drs.yaml @@ -17,25 +17,25 @@ conversion: camera_sensors: # Keep the same order as each camera exposure timing - topic: /sensing/camera/camera0/image_raw/compressed channel: CAM_FRONT_NARROW - delay_msec: 60.0 + delay_msec: 16.0 - topic: /sensing/camera/camera1/image_raw/compressed channel: CAM_FRONT_WIDE - delay_msec: 60.0 + delay_msec: 16.0 - topic: /sensing/camera/camera2/image_raw/compressed channel: CAM_FRONT_RIGHT - delay_msec: 35.0 + delay_msec: -9.0 - topic: /sensing/camera/camera3/image_raw/compressed channel: CAM_BACK_RIGHT - delay_msec: 85.0 + delay_msec: -59.0 - topic: /sensing/camera/camera4/image_raw/compressed channel: CAM_BACK_NARROW - delay_msec: 60.0 + delay_msec: -34.0 - topic: /sensing/camera/camera5/image_raw/compressed channel: CAM_BACK_WIDE - delay_msec: 60.0 + delay_msec: -84.0 - topic: /sensing/camera/camera6/image_raw/compressed channel: CAM_BACK_LEFT - delay_msec: 35.0 + delay_msec: -59.0 - topic: /sensing/camera/camera7/image_raw/compressed channel: CAM_FRONT_LEFT - delay_msec: 85.0 + delay_msec: -59.0 From c6f06bd795e63f977d7f48b2d85d8abd3f8b51d7 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Tue, 22 Oct 2024 20:02:57 +0900 Subject: [PATCH 17/34] add output when finish conversion Signed-off-by: Shunsuke Miura --- .../rosbag2/rosbag2_to_non_annotated_t4_converter.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index 5dc94179..4a922e53 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -1,3 +1,4 @@ +import datetime import enum import glob import json @@ -100,6 +101,11 @@ def convert(self): except Exception as e: logger.error(f"Error occurred during conversion: {e}") continue + dt_now = datetime.datetime.now() + logger.info(f"Conversion of {bag_dir} is completed at {dt_now.isoformat()}") + print( + "--------------------------------------------------------------------------------------------------------------------------" + ) class _Rosbag2ToNonAnnotatedT4Converter: From 04bc398271d3717e755b4b2830b63550854053e3 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Tue, 22 Oct 2024 20:02:57 +0900 Subject: [PATCH 18/34] add output when finish conversion Signed-off-by: Shunsuke Miura --- .../rosbag2/rosbag2_to_non_annotated_t4_converter.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index 5dc94179..a315a1a0 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -91,7 +91,7 @@ def convert(self): logger.error(f"{output_dir} already exists.") raise ValueError("If you want to overwrite files, use --overwrite option.") - for bag_dir in bag_dirs: + for 
bag_dir in sorted(bag_dirs): logger.info(f"Start converting {bag_dir} to T4 format.") self._params.input_bag_path = bag_dir try: @@ -100,6 +100,10 @@ def convert(self): except Exception as e: logger.error(f"Error occurred during conversion: {e}") continue + logger.info(f"Conversion of {bag_dir} is completed") + print( + "--------------------------------------------------------------------------------------------------------------------------" + ) class _Rosbag2ToNonAnnotatedT4Converter: From d45d573f1feffc8ca1c486565fc03f4da7bbeb31 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Tue, 29 Oct 2024 13:07:18 +0900 Subject: [PATCH 19/34] [tentative] add camera info patch Signed-off-by: Shunsuke Miura --- perception_dataset/rosbag2/rosbag2_reader.py | 49 ++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/perception_dataset/rosbag2/rosbag2_reader.py b/perception_dataset/rosbag2/rosbag2_reader.py index 863839f0..898a4d3e 100644 --- a/perception_dataset/rosbag2/rosbag2_reader.py +++ b/perception_dataset/rosbag2/rosbag2_reader.py @@ -9,6 +9,7 @@ from rclpy.time import Time from rosbag2_py import StorageFilter from rosidl_runtime_py.utilities import get_message +from sensor_msgs.msg import CameraInfo import tf2_ros from perception_dataset.utils.rosbag2 import create_reader, get_topic_count, get_topic_type_dict @@ -107,6 +108,54 @@ def _set_camera_info(self): return self.camera_info[topic_name] = message + import numpy as np + + for i in range(8): + topic_name = f"/sensing/camera/camera{i}/camera_info" + camera_info_msg = CameraInfo() + camera_info_msg.header.frame_id = f"camera{i}/camera_optical_link" + camera_info_msg.header.stamp = builtin_interfaces.msg.Time( + sec=int(self.start_timestamp + 1), nanosec=0 + ) + camera_info_msg.distortion_model = "plumb_bob" + camera_info_msg.r = np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]) + camera_info_msg.width = 2880 + camera_info_msg.height = 1860 + if "camera0" in topic_name: + camera_info_msg.k = np.array([5368.25873, 0.0, 1412.70938, 0.0, 5364.46693, 958.59729, 0.0, 0.0, 1.0]) + camera_info_msg.p = np.array([5305.15088, 0.0, 1412.64275, 0.0, 0.0, 5342.61084, 958.70113, 0.0, 0.0, 0.0, 1.0, 0.0]) + camera_info_msg.d = [-0.08849, -0.90255, 0.00075, 0.00111, 0.0] + elif "camera1" in topic_name: + camera_info_msg.k = np.array([1496.73395, 0.0, 1424.70018, 0.0, 1497.10726, 945.6712, 0.0, 0.0, 1.0]) + camera_info_msg.p = np.array([1015.1003418, 0.0, 1466.52248505, 0.0, 0.0, 1284.54455566, 950.87123341, 0.0, 0.0, 0.0, 1.0, 0.0]) + camera_info_msg.d = [-0.08989, -0.1186, -0.00016, -0.00007, 0.0067, 0.30995, -0.24648, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + elif "camera2" in topic_name: + camera_info_msg.k = np.array([1502.98471, 0.0, 1422.35349, 0.0, 1504.5042, 931.99575, 0.0, 0.0, 1.0]) + camera_info_msg.p = np.array([878.42378, 0.0, 1402.49031, 0.0, 0.0, 1258.01633, 933.10245, 0.0, 0.0, 0.0, 1.0, 0.0]) + camera_info_msg.d = [0.32864, -0.03183, 2e-05, 0.0002, 0.00267, 0.73261, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + elif "camera3" in topic_name: + camera_info_msg.k = np.array([1500.05060, 0.00000, 1430.78876, 0.00000, 1499.95752, 940.95613, 0.00000, 0.00000, 1.00000]) + camera_info_msg.p = np.array([877.863525, 0.00000000, 1418.95998, 0.00000000, 0.0, 1254.34375, 945.262686, 0.00000000, 0.00000000, 0.0, 1.00000000, 0.0]) + camera_info_msg.d = [0.27430142, -0.02073177, 0.00007407, 0.00008116, 0.00128976, 0.67045534, 0.00000000, 0.00000000, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + elif "camera4" in topic_name: + camera_info_msg.k = 
np.array([5363.43736, 0.0, 1507.51202, 0.0, 5341.55785, 1076.26984, 0.0, 0.0, 1.0]) + camera_info_msg.p = np.array([5296.04052734, 0.0, 1511.62903545, 0.0, 0.0, 5311.76367188, 1077.67061308, 0.0, 0.0, 0.0, 1.0, 0.0]) + camera_info_msg.d = [-0.12858, -0.44056, 0.00123, 0.00215, 0.0] + elif "camera5" in topic_name: + camera_info_msg.k = np.array([1500.35853, 0.0, 1419.21658, 0.0, 1501.15968, 936.4509, 0.0, 0.0, 1.0]) + camera_info_msg.p = np.array([871.67853, 0.0, 1390.37965, 0.0, 0.0, 1251.62366, 939.62595, 0.0, 0.0, 0.0, 1.0, 0.0]) + camera_info_msg.d = [3.49528, 0.71004, 0.00028, 0.0001, -0.03621, 3.91361, 1.98308, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + elif "camera6" in topic_name: + camera_info_msg.k = np.array([1543.88646, 0.0, 1455.51234, 0.0, 1542.117, 955.83491, 0.0, 0.0, 1.0]) + camera_info_msg.p = np.array([940.59991455, 0.0, 1472.20666395, 0.0, 0.0, 1302.85144043, 965.17800362, 0.0, 0.0, 0.0, 1.0, 0.0]) + camera_info_msg.d = [0.45661, -0.00186, -0.00003, -0.00015, 0.00153, 0.85654, 0.08203, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + elif "camera7" in topic_name: + camera_info_msg.k = np.array([1493.89305, 0.0, 1434.05368, 0.0, 1494.11047, 938.13478, 0.0, 0.0, 1.0]) + camera_info_msg.p = np.array([870.17737, 0.0, 1421.48751, 0.0, 0.0, 1247.0332, 940.93758, 0.0, 0.0, 0.0, 1.0, 0.0]) + camera_info_msg.d = [0.45661, -0.00186, -0.00003, -0.00015, 0.00153, 0.85654, 0.08203, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + + self.camera_info[topic_name] = camera_info_msg + def get_topic_count(self, topic_name: str) -> int: return self._topic_name_to_topic_count.get(topic_name, 0) From 6d9648e1fa1e134afdbfc49770950deefb673a3e Mon Sep 17 00:00:00 2001 From: ktro2828 Date: Tue, 15 Oct 2024 14:36:40 +0900 Subject: [PATCH 20/34] feat: update record classes for CoMLOPs format Signed-off-by: ktro2828 --- README.md | 22 +- build_depends.repos | 10 + ...ps_rosbag2_to_non_annotated_t4_sample.yaml | 36 ++ perception_dataset/ros2/oxts_msgs/__init__.py | 0 .../ros2/oxts_msgs/ins_handler.py | 514 ++++++++++++++++++ .../ros2/vehicle_msgs/__init__.py | 0 .../vehicle_msgs/vehicle_status_handler.py | 197 +++++++ .../rosbag2/converter_params.py | 4 + .../rosbag2_to_non_annotated_t4_converter.py | 167 +++++- .../t4_dataset/classes/ego_pose.py | 45 +- .../t4_dataset/classes/vehicle_state.py | 75 +++ 11 files changed, 1042 insertions(+), 28 deletions(-) create mode 100644 config/convert_comlops_rosbag2_to_non_annotated_t4_sample.yaml create mode 100644 perception_dataset/ros2/oxts_msgs/__init__.py create mode 100644 perception_dataset/ros2/oxts_msgs/ins_handler.py create mode 100644 perception_dataset/ros2/vehicle_msgs/__init__.py create mode 100644 perception_dataset/ros2/vehicle_msgs/vehicle_status_handler.py create mode 100644 perception_dataset/t4_dataset/classes/vehicle_state.py diff --git a/README.md b/README.md index a6ac152a..2ea3522d 100644 --- a/README.md +++ b/README.md @@ -5,13 +5,17 @@ These tools facilitate the preparation and transformation of perception data for ## Table of Contents -1. [Dataset Overview](#dataset-overview) -2. [Usage of T4 dataset](#usage-of-t4-format-dataset) -3. 
[Usage of Conversion Tools](#usage-of-conversion-tools) - - [Conversion Tools Overview](#conversion-tools-overview) - - [Setup](#setup) - - [Test](#test) - - [Pre Commit](#pre-commit) +- [TIER IV Perception Dataset Conversion Tools](#tier-iv-perception-dataset-conversion-tools) + - [Table of Contents](#table-of-contents) + - [Dataset Overview](#dataset-overview) + - [Usage of T4 format dataset](#usage-of-t4-format-dataset) + - [Usage of Conversion Tools](#usage-of-conversion-tools) + - [Conversion Tools Overview](#conversion-tools-overview) + - [Setup](#setup) + - [Test](#test) + - [Download test data](#download-test-data) + - [Run tests](#run-tests) + - [Pre commit](#pre-commit) ## Dataset Overview @@ -42,10 +46,10 @@ Install and build ros dependencies (this step must be outside of poetry virtuale ```bash source /opt/ros/${ROS_DISTRO}/setup.sh -sudo apt install -y ros-${ROS_DISTRO}-sensor-msgs-py ros-${ROS_DISTRO}-rosbag2-storage-mcap ros-${ROS_DISTRO}-radar-msgs +sudo apt install -y ros-${ROS_DISTRO}-sensor-msgs-py ros-${ROS_DISTRO}-rosbag2-storage-mcap ros-${ROS_DISTRO}-radar-msgs ros-${ROS_DISTRO}-tf-transformations mkdir src -p && vcs import src < build_depends.repos -colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release --packages-up-to autoware_auto_perception_msgs autoware_perception_msgs tier4_perception_msgs +colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release --packages-up-to autoware_auto_perception_msgs autoware_perception_msgs tier4_perception_msgs oxts_msgs vehicle_msgs source ./install/setup.bash ``` diff --git a/build_depends.repos b/build_depends.repos index 320635b1..359b3159 100644 --- a/build_depends.repos +++ b/build_depends.repos @@ -17,3 +17,13 @@ repositories: type: git url: https://github.com/tier4/autoware_auto_msgs.git version: tier4/main + + # for Co-MLOps features + oxts_ros2_driver: + type: git + url: https://github.com/tier4/oxts_ros2_driver.git + version: feat/data_recording_system + pacmod_interface: + type: git + url: https://github.com/tier4/pacmod_interface.git + version: feat/comlops_drs diff --git a/config/convert_comlops_rosbag2_to_non_annotated_t4_sample.yaml b/config/convert_comlops_rosbag2_to_non_annotated_t4_sample.yaml new file mode 100644 index 00000000..f25c81fc --- /dev/null +++ b/config/convert_comlops_rosbag2_to_non_annotated_t4_sample.yaml @@ -0,0 +1,36 @@ +task: convert_rosbag2_to_non_annotated_t4 +description: + scene: "" +conversion: + input_base: ./data/rosbag2 + output_base: ./data/non_annotated_t4_format + world_frame_id: "map" + start_timestamp_sec: 0 # Enter here if there is a timestamp for the start time. If not used, enter 0. + skip_timestamp: 2.0 # Do not load data for the first point cloud timestamp for skip_timestamp seconds. + num_load_frames: 0 # Maximum number of frames to save as t4 data. Set to 0 to automatically set it based on the number of lidar topics. + accept_frame_drop: False # If true, the conversion will continue even if the LiDAR frame is dropped. + with_ins: true # whether to use INS messages as a ego state instead of `/tf` + with_vehicle_status: true # whether to generate `vehicle_state.json` + # The following configuration is generally not modified unless there are changes to the vehicle sensor configuration. 
+ lidar_sensor: + topic: /sensing/lidar/concatenated/pointcloud + channel: LIDAR_CONCAT + camera_sensors: # Keep the same order as each camera exposure timing + - topic: /sensing/camera/camera3/image_raw/compressed + channel: CAM_BACK_LEFT + delay_msec: 48.33 + - topic: /sensing/camera/camera2/image_raw/compressed + channel: CAM_FRONT_LEFT + delay_msec: 65.0 + - topic: /sensing/camera/camera0/image_raw/compressed + channel: CAM_FRONT + delay_msec: 81.67 + - topic: /sensing/camera/camera4/image_raw/compressed + channel: CAM_FRONT_RIGHT + delay_msec: 98.33 + - topic: /sensing/camera/camera5/image_raw/compressed + channel: CAM_BACK_RIGHT + delay_msec: 115.0 + - topic: /sensing/camera/camera1/image_raw/compressed + channel: CAM_BACK + delay_msec: 131.67 diff --git a/perception_dataset/ros2/oxts_msgs/__init__.py b/perception_dataset/ros2/oxts_msgs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/perception_dataset/ros2/oxts_msgs/ins_handler.py b/perception_dataset/ros2/oxts_msgs/ins_handler.py new file mode 100644 index 00000000..95ddc8d8 --- /dev/null +++ b/perception_dataset/ros2/oxts_msgs/ins_handler.py @@ -0,0 +1,514 @@ +from __future__ import annotations + +from dataclasses import dataclass +from enum import Enum +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +from builtin_interfaces.msg import Time as RosTime +from geometry_msgs.msg import Quaternion, Twist, Vector3 +from nav_msgs.msg import Odometry +import numpy as np +from scipy.interpolate import interp1d +from scipy.spatial.transform import Rotation, Slerp +from sensor_msgs.msg import Imu, NavSatFix +from std_msgs.msg import Header +import tf_transformations as tf + +from perception_dataset.rosbag2.rosbag2_reader import Rosbag2Reader +from perception_dataset.utils.rosbag2 import stamp_to_unix_timestamp, unix_timestamp_to_stamp + + +@dataclass +class EgoState: + """A dataclass to represent a set of ego status.""" + + header: Header + child_frame_id: str + translation: Vector3 + rotation: Quaternion + twist: Twist + accel: Vector3 + + +class EgoStateBuffer: + """A buffer class to store `EgoState`s.""" + + def __init__(self) -> None: + self._buffer: list[EgoState] = [] + + # Cache of interpolation functions + self._interp_functions: Optional[Dict[str, Callable]] = None + + def set_state(self, ego_state: EgoState) -> None: + """Set a ego state to the buffer. + + Args: + ego_state (EgoState): Ego state. + """ + self._buffer.append(ego_state) + + def _load_interpolate_functions(self) -> Dict[str, Callable]: + """Load interpolate functions for each ego state field. + + Returns: + Dict[str, Callable]: Key-value mapping of interpolations for each field. 
+ """ + timestamps: List[float] = [] + translations: List[Tuple[float, ...]] = [] + rotations: List[Tuple[float, ...]] = [] + linear_vel: List[Tuple[float, ...]] = [] + angular_vel: List[Tuple[float, ...]] = [] + accelerations: List[Tuple[float, ...]] = [] + for state in self._buffer: + timestamps.append(stamp_to_unix_timestamp(state.header.stamp)) + translations.append((state.translation.x, state.translation.y, state.translation.z)) + rotations.append( + ( + state.rotation.w, + state.rotation.x, + state.rotation.y, + state.rotation.z, + ) + ) + linear_vel.append( + ( + state.twist.linear.x, + state.twist.linear.y, + state.twist.linear.z, + ) + ) + angular_vel.append( + ( + state.twist.angular.x, + state.twist.angular.y, + state.twist.angular.z, + ) + ) + + accelerations.append( + ( + state.accel.x, + state.accel.y, + state.accel.z, + ) + ) + + translations = np.asarray(translations).reshape(-1, 3) + linear_vel = np.asarray(linear_vel).reshape(-1, 3) + angular_vel = np.asarray(angular_vel).reshape(-1, 3) + accelerations = np.asanyarray(accelerations).reshape(-1, 3) + + interp_func_x = interp1d( + timestamps, + translations[:, 0], + fill_value="extrapolate", + ) + interp_func_y = interp1d( + timestamps, + translations[:, 1], + fill_value="extrapolate", + ) + interp_func_z = interp1d( + timestamps, + translations[:, 2], + fill_value="extrapolate", + ) + + interp_func_rot = Slerp(timestamps, Rotation.from_quat(rotations)) + + interp_func_lvx = interp1d( + timestamps, + linear_vel[:, 0], + fill_value="extrapolate", + ) + interp_func_lvy = interp1d( + timestamps, + linear_vel[:, 1], + fill_value="extrapolate", + ) + interp_func_lvz = interp1d( + timestamps, + linear_vel[:, 2], + fill_value="extrapolate", + ) + + interp_func_avx = interp1d( + timestamps, + angular_vel[:, 0], + fill_value="extrapolate", + ) + interp_func_avy = interp1d( + timestamps, + angular_vel[:, 1], + fill_value="extrapolate", + ) + interp_func_avz = interp1d( + timestamps, + angular_vel[:, 2], + fill_value="extrapolate", + ) + + interp_func_ax = interp1d( + timestamps, + accelerations[:, 0], + fill_value="extrapolate", + ) + interp_func_ay = interp1d( + timestamps, + accelerations[:, 1], + fill_value="extrapolate", + ) + interp_func_az = interp1d( + timestamps, + accelerations[:, 2], + fill_value="extrapolate", + ) + + return { + "x": interp_func_x, + "y": interp_func_y, + "z": interp_func_z, + "rot": interp_func_rot, + "linear_vx": interp_func_lvx, + "linear_vy": interp_func_lvy, + "linear_vz": interp_func_lvz, + "angular_vx": interp_func_avx, + "angular_vy": interp_func_avy, + "angular_vz": interp_func_avz, + "ax": interp_func_ax, + "ay": interp_func_ay, + "az": interp_func_az, + } + + def lookup_state(self, stamp: RosTime) -> EgoState: + """Lookup a ego state corresponding to ROS timestamp. + + Args: + stamp (RosTime): _description_ + + Returns: + EgoState: _description_ + """ + return self.interpolate_state(stamp) + + def interpolate_state( + self, + query_stamp: Union[RosTime | Sequence[RosTime]], + ) -> Union[EgoState | List[EgoState]]: + """Return interpolated ego status(es). + + Args: + query_stamp (Union[RosTime | Sequence[RosTime]]): + Sequence of ROS timestamps or a single timestamp. + + Returns: + Union[EgoState | List[EgoState]]: If input stamp is a sequential, returns a list of `EgoState`s. + Otherwise returns a single `EgoState`. 
+ """ + if self._interp_functions is None: + self._interp_functions = self._load_interpolate_functions() + + if isinstance(query_stamp, RosTime): + query_timestamps = [stamp_to_unix_timestamp(query_stamp)] + is_single_query = True + else: + query_timestamps = [stamp_to_unix_timestamp(stamp) for stamp in query_stamp] + is_single_query = False + + interp_x = self._interp_functions["x"](query_timestamps) + interp_y = self._interp_functions["y"](query_timestamps) + interp_z = self._interp_functions["z"](query_timestamps) + + interp_rotations = self._interp_functions["rot"](query_timestamps).as_quat().reshape(-1, 4) + + interp_lvx = self._interp_functions["linear_vx"](query_timestamps) + interp_lvy = self._interp_functions["linear_vy"](query_timestamps) + interp_lvz = self._interp_functions["linear_vz"](query_timestamps) + + interp_avx = self._interp_functions["angular_vx"](query_timestamps) + interp_avy = self._interp_functions["angular_vy"](query_timestamps) + interp_avz = self._interp_functions["angular_vz"](query_timestamps) + + interp_ax = self._interp_functions["ax"](query_timestamps) + interp_ay = self._interp_functions["ay"](query_timestamps) + interp_az = self._interp_functions["az"](query_timestamps) + + interp_translations = np.stack([interp_x, interp_y, interp_z], axis=-1) + interp_twists = np.stack( + [interp_lvx, interp_lvy, interp_lvz, interp_avx, interp_avy, interp_avz], + axis=-1, + ) + interp_accelerations = np.stack([interp_ax, interp_ay, interp_az], axis=-1) + + output: List[EgoState] = [] + for ( + translation, + rotation, + twist, + accel, + timestamp, + ) in zip( + interp_translations, + interp_rotations, + interp_twists, + interp_accelerations, + query_timestamps, + strict=True, + ): + header = Header() + header.frame_id = "map" + header.stamp = unix_timestamp_to_stamp(timestamp) + + twist_msg = Twist() + twist_msg.linear.x = twist[0] + twist_msg.linear.y = twist[1] + twist_msg.linear.z = twist[2] + twist_msg.angular.x = twist[0] + twist_msg.angular.y = twist[1] + twist_msg.angular.z = twist[2] + + ego_state = EgoState( + header=header, + child_frame_id="base_link", + translation=Vector3( + x=translation[0], + y=translation[1], + z=translation[2], + ), + rotation=Quaternion( + w=rotation[0], + x=rotation[1], + y=rotation[2], + z=rotation[3], + ), + twist=twist_msg, + accel=Vector3( + x=accel[0], + y=accel[1], + z=accel[2], + ), + ) + + output.append(ego_state) + + return output[0] if is_single_query else output + + +class LocalizeMethod(Enum): + WITH_ODOMETRY = 0 + + +class INSHandler: + DEFAULT_TOPIC_MAPPING = { + "imu": "/ins/oxts/imu", + "nav_sat_fix": "/ins/oxts/nav_sat_fix", + "odometry": "/ins/oxts/odometry", + "velocity": "/ins/oxts/velocity", + } + + def __init__( + self, + bag_dir: str, + *, + localize_method: LocalizeMethod = LocalizeMethod.WITH_ODOMETRY, + topic_mapping: Optional[Dict[str, str]] = None, + ) -> None: + self._reader = Rosbag2Reader(bag_dir=bag_dir) + self._localize_method = localize_method + self._topic_mapping = self.get_topic_mapping(topic_mapping=topic_mapping) + + # buffer to store all messages + buffer = { + key: [msg for msg in self._reader.read_messages(topics=[topic])] # noqa + for key, topic in self._topic_mapping.items() + } + self._buffer = buffer + + # buffer to store EgoStates + self._ego_buffer = EgoStateBuffer() + + if self._localize_method == LocalizeMethod.WITH_ODOMETRY: + self._localize_with_odometry() + else: + raise ValueError(f"Unexpected localize method: {self._localize_method}") + + @classmethod + def 
get_topic_mapping(cls, topic_mapping: Optional[Dict[str, str]] = None) -> Dict[str, str]: + if topic_mapping is not None: + assert set(cls.DEFAULT_TOPIC_MAPPING) == set(topic_mapping.keys()) + return topic_mapping + return cls.DEFAULT_TOPIC_MAPPING + + def _localize_with_odometry(self) -> None: + odometries: List[Odometry] = self._buffer["odometry"] + + if len(odometries) == 0: + return + + for odometry in odometries: + assert odometry.header.frame_id == "map" + + current_pose = odometry.pose.pose + current_translation = [ + current_pose.position.x, + current_pose.position.y, + current_pose.position.z, + ] + + current_rotation = [ + current_pose.orientation.x, + current_pose.orientation.y, + current_pose.orientation.z, + current_pose.orientation.w, + ] + + cur_matrix = tf.compose_matrix( + translate=current_translation, + angles=tf.euler_from_quaternion(current_rotation), + ) + + imu: Imu = self.get_closest_msg(key="imu", stamp=odometry.header.stamp) + + # NOTE: rotate acceleration from IMU coords to map coords + b_H_m = cur_matrix # base_link -> map + i_H_b = tf.compose_matrix( + angles=tf.euler_from_quaternion( + [ + imu.orientation.x, + imu.orientation.y, + imu.orientation.z, + imu.orientation.w, + ] + ) + ) # imu -> base_link + + i_H_m = tf.concatenate_matrices(i_H_b, b_H_m) # imu -> map + # rotate acceleration + relative_acceleration = np.matmul( + i_H_m[:3, :3], + [ + imu.linear_acceleration.x, + imu.linear_acceleration.y, + imu.linear_acceleration.z, + ], + ) + + ego_state = EgoState( + header=odometry.header, + child_frame_id="base_link", + translation=Vector3( + x=current_translation[0], + y=current_translation[1], + z=current_translation[2], + ), + rotation=Quaternion( + x=current_rotation[0], + y=current_rotation[1], + z=current_rotation[2], + w=current_rotation[3], + ), + twist=odometry.twist.twist, + accel=Vector3( + x=relative_acceleration[0], + y=relative_acceleration[1], + z=relative_acceleration[2], + ), + ) + + self._ego_buffer.set_state(ego_state) + + def get_closest_msg(self, key: str, stamp: RosTime) -> Any: + assert key in self._buffer + messages: List[Any] = self._buffer[key] + timestamp = stamp_to_unix_timestamp(stamp) + return min( + messages, + key=lambda x: abs(stamp_to_unix_timestamp(x.header.stamp) - timestamp), + ) + + def get_odometries(self) -> List[Odometry]: + return self._buffer["odometry"] + + def get_imus(self) -> List[Imu]: + return self._buffer["imu"] + + def lookup_nav_sat_fixes(self, stamp: RosTime) -> NavSatFix: + return self.interpolate_nav_sat_fixes(stamp) + + def interpolate_nav_sat_fixes( + self, query_stamp: Union[RosTime, List[RosTime]] + ) -> Union[NavSatFix | List[NavSatFix]]: + """Interpolate NavSatFix. + + Args: + query_stamp (RosTime | List[RosTime]): Query stamp(s). + + Returns: + Union[NavSatFix, List[NavSatFix]]: Interpolated message(s). + + Warnings: + If the value in `query_stamps` is out of range of the observed timestamps, + the interpolation accuracy can decrease. 
+ """ + # convert Times to unix timestamps + if isinstance(query_stamp, RosTime): + query_stamps = [query_stamp] + is_single_query = True + else: + query_stamps = query_stamp + is_single_query = False + + query_timestamps = [stamp_to_unix_timestamp(stamp) for stamp in query_stamps] + + observed = self._buffer["nav_sat_fix"] + + timestamps = [] + geo_coordinates: List[Tuple[float, ...]] = [] + for msg in observed: + timestamps.append(stamp_to_unix_timestamp(msg.header.stamp)) + geo_coordinates.append((msg.latitude, msg.longitude, msg.altitude)) + + if min(query_timestamps) < min(timestamps) or max(timestamps) < max(query_timestamps): + warnings.warn( + "The value in `query_timestamps` is out of range of the observed timestamps, " + "it can decrease interpolation accuracy." + ) + + geo_coordinates = np.asarray(geo_coordinates).reshape(-1, 3) + interp_func_lat = interp1d(timestamps, geo_coordinates[:, 0], fill_value="extrapolate") + interp_func_lon = interp1d(timestamps, geo_coordinates[:, 1], fill_value="extrapolate") + interp_func_alt = interp1d(timestamps, geo_coordinates[:, 2], fill_value="extrapolate") + + # interpolate + interp_lat = interp_func_lat(query_timestamps) + interp_lon = interp_func_lon(query_timestamps) + interp_alt = interp_func_alt(query_timestamps) + interp_geo_coords = np.stack([interp_lat, interp_lon, interp_alt], axis=-1) + + output: List[NavSatFix] = [] + for coord, timestamp in zip(interp_geo_coords, query_timestamps, strict=True): + msg = NavSatFix() + msg.header.frame_id = observed[0].header.frame_id + msg.header.stamp = unix_timestamp_to_stamp(timestamp) + msg.status = observed[0].status + msg.position_covariance = observed[0].position_covariance + msg.position_covariance_type = observed[0].position_covariance_type + msg.latitude = coord[0] + msg.longitude = coord[1] + msg.altitude = coord[2] + + output.append(msg) + + return output[0] if is_single_query else output + + def get_ego_state(self, stamp: RosTime) -> EgoState: + """Return a `EgoState` interpolated by the specified `stamp`. + + Args: + stamp (RosTime): ROS timestamp. + + Returns: + EgoState: Corresponding ego state. 
+ """ + return self._ego_buffer.lookup_state(stamp) diff --git a/perception_dataset/ros2/vehicle_msgs/__init__.py b/perception_dataset/ros2/vehicle_msgs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/perception_dataset/ros2/vehicle_msgs/vehicle_status_handler.py b/perception_dataset/ros2/vehicle_msgs/vehicle_status_handler.py new file mode 100644 index 00000000..bf0c8d58 --- /dev/null +++ b/perception_dataset/ros2/vehicle_msgs/vehicle_status_handler.py @@ -0,0 +1,197 @@ +from typing import Any, Dict, List, Optional + +import builtin_interfaces.msg +from vehicle_msgs.msg import ( + ActuationStatusStamped, + GearReport, + HazardLightsReport, + SteeringReport, + SteeringWheelStatusStamped, + TurnIndicatorsReport, + VelocityReport, +) + +from perception_dataset.rosbag2.rosbag2_reader import Rosbag2Reader +from perception_dataset.utils.rosbag2 import stamp_to_unix_timestamp + + +class VehicleStatusHandler: + """A class to handle ROS2 messages related to vehicle status.""" + + # Field name[str] -> Topic name[str] + DEFAULT_TOPIC_MAPPING = { + "actuation_status": "/vehicle/status/actuation_status", + "control_mode": "/vehicle/status/control_mode", + "door_status": "/vehicle/status/door_status", + "gear_status": "/vehicle/status/gear_status", + "hazard_lights_status": "/vehicle/status/hazard_lights_status", + "steering_status": "/vehicle/status/steering_status", + "steering_wheel_status": "/vehicle/status/steering_wheel_status", + "turn_indicators_status": "/vehicle/status/turn_indicators_status", + "velocity_status": "/vehicle/status/velocity_status", + } + + # GearReport[uint8] -> Shift[str] + GEAR2SHIFT: Dict[int, str] = { + 0: "NONE", + 1: "NEUTRAL", + 2: "DRIVE", + 3: "DRIVE", + 4: "DRIVE", + 5: "DRIVE", + 6: "DRIVE", + 7: "DRIVE", + 8: "DRIVE", + 9: "DRIVE", + 10: "DRIVE", + 11: "DRIVE", + 12: "DRIVE", + 13: "DRIVE", + 14: "DRIVE", + 15: "DRIVE", + 16: "DRIVE", + 17: "DRIVE", + 18: "DRIVE", + 19: "DRIVE", + 20: "REVERSE", + 21: "REVERSE", + 22: "PARK", + 23: "LOW", + 24: "LOW", + } + + # TurnIndicatorsReport[uint8] -> Dict[str, str] + INDICATOR_MAPPING = { + 0: {"left": "off", "right": "off", "hazard": "off"}, + 1: {"left": "on", "right": "off", "hazard": "off"}, + 2: {"left": "off", "right": "on", "hazard": "off"}, + 3: {"left": "off", "right": "off", "hazard": "on"}, + } + + def __init__(self, bag_dir: str, *, topic_mapping: Optional[Dict[str, str]] = None) -> None: + """Construct a new object. + + Args: + bag_dir (str): Directory path of rosbag. + topic_mapping (Optional[Dict[str, str]], optional): Mapping of a field name to a topic name. + If `None`, `DEFAULT_TOPIC_MAPPING` will be used. Defaults to None. + """ + self._reader = Rosbag2Reader(bag_dir=bag_dir) + self._topic_mapping = self.get_topic_mapping(topic_mapping=topic_mapping) + + buffer = { + key: [msg for msg in self._reader.read_messages(topics=[topic])] # noqa + for key, topic in self._topic_mapping.items() + } + self._buffer = buffer + + @classmethod + def get_topic_mapping(cls, topic_mapping: Optional[Dict[str, str]] = None) -> Dict[str, str]: + """Return a mapping of a field name to a topic name. + If `topic_mapping` is not specified, `DEFAULT_TOPIC_MAPPING` will be returned. + + Args: + topic_mapping (Optional[Dict[str, str]], optional): Custom mapping. Defaults to None. + + Returns: + Dict[str, str]: Topic mapping. 
+ """ + if topic_mapping is not None: + assert set(cls.DEFAULT_TOPIC_MAPPING) == set(topic_mapping.keys()) + return topic_mapping + return cls.DEFAULT_TOPIC_MAPPING + + def gear_to_shift(self, gear: int) -> str: + """Convert the value of the gear report to the shift state using `GEAR2SHIFT`. + + Args: + gear (int): Value of `GearReport.report`. + + Returns: + str: Shift state. + """ + return self.GEAR2SHIFT[gear] + + def indicator_to_state(self, indicator: int) -> Dict[str, str]: + """Convert the value of the indicator report to a mapping. + + Args: + indicator (int): Value of `TurnIndicatorsReport.report`. + + Returns: + Dict[str, str]: Mapping value for each indicator as following format: + - keys: [left, right, hazard] + - values: "on" or "off" + """ + return self.INDICATOR_MAPPING[indicator] + + def get_closest_msg(self, key: str, stamp: builtin_interfaces.msg.Time) -> Any: + assert key in self._buffer + messages: List[Any] = self._buffer[key] + timestamp = stamp_to_unix_timestamp(stamp) + + def time_diff(x) -> float: + if hasattr(x, "header"): + return abs(stamp_to_unix_timestamp(x.header.stamp) - timestamp) + elif hasattr(x, "stamp"): + return abs(stamp_to_unix_timestamp(x.stamp) - timestamp) + else: + raise ValueError("Unexpected message type") + + return min(messages, key=lambda x: time_diff(x)) + + def get_actuation_statuses(self) -> List[ActuationStatusStamped]: + """Return a list of `ActuationStatusStamped`s. + + Returns: + List[ActuationStatusStamped]: List of messages. + """ + return self._buffer["actuation_status"] + + def get_gear_reports(self) -> List[GearReport]: + """Return a list of `GearReport`s. + + Returns: + List[GearReport]: List of messages. + """ + return self._buffer["gear_status"] + + def get_steering_reports(self) -> List[SteeringReport]: + """Return a list of `SteeringReport`s. + + Returns: + List[SteeringReport]: List of messages. + """ + return self._buffer["steering_status"] + + def get_steering_wheel_statuses(self) -> List[SteeringWheelStatusStamped]: + """Return a list of `SteeringWheelStatusStamped`s. + + Returns: + List[SteeringWheelStatusStamped]: List of messages. + """ + return self._buffer["steering_wheel_status"] + + def get_turn_indicators_reports(self) -> List[TurnIndicatorsReport]: + """Return a list of `TurnIndicatorsReport`s. + + Returns: + List[TurnIndicatorsReport]: List of messages. + """ + return self._buffer["turn_indicators_status"] + + def get_hazard_lights_reports(self) -> List[HazardLightsReport]: + """Return a list of `HazardLightsReport`s + + Returns: + List[HazardLightsReport]: List of messages. + """ + return self._buffer["hazard_lights_status"] + + def get_velocity_reports(self) -> List[VelocityReport]: + """Return a list of `VelocityReport`s. + + Returns: + List[VelocityReport]: List of messages. + """ + return self._buffer["velocity_status"] diff --git a/perception_dataset/rosbag2/converter_params.py b/perception_dataset/rosbag2/converter_params.py index c1176522..031e58be 100644 --- a/perception_dataset/rosbag2/converter_params.py +++ b/perception_dataset/rosbag2/converter_params.py @@ -75,6 +75,10 @@ class Rosbag2ConverterParams(BaseModel): generate_frame_every: int = 1 # pick frames out of every this number. 
generate_frame_every_meter: float = 5.0 # pick frames when ego vehicle moves certain meters + # for Co-MLOps + with_ins: bool = False # whether to convert rosbag with INS topics + with_vehicle_status: bool = False # whether to convert rosbag with vehicle status + def __init__(self, **args): if "scene_description" in args and isinstance(args["scene_description"], list): args["scene_description"] = ", ".join(args["scene_description"]) diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index c67262bc..9a17ce89 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -22,6 +22,8 @@ SENSOR_MODALITY_ENUM, T4_FORMAT_DIRECTORY_NAME, ) +from perception_dataset.ros2.oxts_msgs.ins_handler import INSHandler +from perception_dataset.ros2.vehicle_msgs.vehicle_status_handler import VehicleStatusHandler from perception_dataset.rosbag2.converter_params import DataType, Rosbag2ConverterParams from perception_dataset.rosbag2.rosbag2_reader import Rosbag2Reader from perception_dataset.t4_dataset.classes.abstract_class import AbstractTable @@ -37,6 +39,7 @@ from perception_dataset.t4_dataset.classes.sample_data import SampleDataRecord, SampleDataTable from perception_dataset.t4_dataset.classes.scene import SceneRecord, SceneTable from perception_dataset.t4_dataset.classes.sensor import SensorTable +from perception_dataset.t4_dataset.classes.vehicle_state import VehicleStateTable from perception_dataset.t4_dataset.classes.visibility import VisibilityTable from perception_dataset.utils.logger import configure_logger import perception_dataset.utils.misc as misc_utils @@ -157,10 +160,22 @@ def __init__(self, params: Rosbag2ConverterParams) -> None: shutil.rmtree(self._output_scene_dir, ignore_errors=True) self._make_directories() + # NOTE: even if `with_world_frame_conversion=True`, `/tf` topic is not needed and + # it is retrieved from INS messages. 
with_world_frame_conversion = self._ego_pose_target_frame != self._ego_pose_source_frame - self._bag_reader = Rosbag2Reader(self._input_bag, with_world_frame_conversion) + is_tf_needed = with_world_frame_conversion and not params.with_ins + self._bag_reader = Rosbag2Reader(self._input_bag, is_tf_needed) self._calc_actual_num_load_frames() + # for Co-MLOps + self._with_ins = params.with_ins + self._with_vehicle_status = params.with_vehicle_status + + self._ins_handler = INSHandler(params.input_bag_path) if self._with_ins else None + self._vehicle_status_handler = ( + VehicleStatusHandler(params.input_bag_path) if self._with_vehicle_status else None + ) + def _calc_actual_num_load_frames(self): topic_names: List[str] = [s["topic"] for s in self._camera_sensors] if self._sensor_mode == SensorMode.DEFAULT: @@ -237,6 +252,9 @@ def _init_tables(self): self._attribute_table = AttributeTable(name_to_description={}, default_value="") self._visibility_table = VisibilityTable(level_to_description={}, default_value="") + # additional (used in Co-MLops) + self._vehicle_state_table = VehicleStateTable() + def convert(self): self._convert() self._save_tables() @@ -308,6 +326,9 @@ def _convert(self) -> None: self._connect_sample_data_in_scene(sensor_channel_to_sample_data_token_list) self._add_scene_description(self._scene_description) + if self._with_vehicle_status: + self._convert_vehicle_state() + def _calc_start_timestamp(self) -> float: if self._start_timestamp < sys.float_info.epsilon: start_timestamp = self._bag_reader.start_timestamp + self._skip_timestamp @@ -748,6 +769,12 @@ def get_move_distance(trans1: Dict[str, float], trans2: Dict[str, float]) -> flo return sample_data_token_list + def _convert_vehicle_state(self) -> None: + msgs = self._vehicle_status_handler.get_actuation_statuses() + stamps = [msg.header.stamp for msg in msgs] + for stamp in stamps: + self._generate_vehicle_state(stamp) + def _generate_image_data( self, image_arr: Union[np.ndarray, CompressedImage], @@ -797,28 +824,123 @@ def _generate_image_data( return sample_data_token def _generate_ego_pose(self, stamp: builtin_interfaces.msg.Time) -> str: - transform_stamped = self._bag_reader.get_transform_stamped( - target_frame=self._ego_pose_target_frame, - source_frame=self._ego_pose_source_frame, + if self._with_ins: + ego_state = self._ins_handler.get_ego_state(stamp=stamp) + geocoordinate = self._ins_handler.lookup_nav_sat_fixes(stamp) + + ego_pose_token = self._ego_pose_table.insert_into_table( + translation={ + "x": ego_state.translation.x, + "y": ego_state.translation.y, + "z": ego_state.translation.z, + }, + rotation={ + "w": ego_state.rotation.w, + "x": ego_state.rotation.x, + "y": ego_state.rotation.y, + "z": ego_state.rotation.z, + }, + timestamp=rosbag2_utils.stamp_to_nusc_timestamp(ego_state.header.stamp), + twist={ + "vx": ego_state.twist.linear.x, + "vy": ego_state.twist.linear.y, + "vz": ego_state.twist.linear.z, + "yaw_rate": ego_state.twist.angular.z, + "pitch_rate": ego_state.twist.angular.y, + "roll_rate": ego_state.twist.angular.x, + }, + acceleration={ + "ax": ego_state.accel.x, + "ay": ego_state.accel.y, + "az": ego_state.accel.z, + }, + geocoordinate={ + "latitude": geocoordinate.latitude, + "longitude": geocoordinate.longitude, + "altitude": geocoordinate.altitude, + }, + ) + else: + transform_stamped = self._bag_reader.get_transform_stamped( + target_frame=self._ego_pose_target_frame, + source_frame=self._ego_pose_source_frame, + stamp=stamp, + ) + + ego_pose_token = 
self._ego_pose_table.insert_into_table( + translation={ + "x": transform_stamped.transform.translation.x, + "y": transform_stamped.transform.translation.y, + "z": transform_stamped.transform.translation.z, + }, + rotation={ + "w": transform_stamped.transform.rotation.w, + "x": transform_stamped.transform.rotation.x, + "y": transform_stamped.transform.rotation.y, + "z": transform_stamped.transform.rotation.z, + }, + timestamp=rosbag2_utils.stamp_to_nusc_timestamp(transform_stamped.header.stamp), + ) + + return ego_pose_token + + def _generate_vehicle_state(self, stamp: builtin_interfaces.msg.Time) -> str: + nusc_timestamp = rosbag2_utils.stamp_to_nusc_timestamp(stamp) + + # TODO(ktro2828): Implement operation to insert vehicle state into table + actuation_msg = self._vehicle_status_handler.get_closest_msg( + key="actuation_status", stamp=stamp + ) + + # steering tire + steering_tire_msg = self._vehicle_status_handler.get_closest_msg( + key="steering_status", stamp=stamp, ) - ego_pose_token = self._ego_pose_table.insert_into_table( - translation={ - "x": transform_stamped.transform.translation.x, - "y": transform_stamped.transform.translation.y, - "z": transform_stamped.transform.translation.z, - }, - rotation={ - "w": transform_stamped.transform.rotation.w, - "x": transform_stamped.transform.rotation.x, - "y": transform_stamped.transform.rotation.y, - "z": transform_stamped.transform.rotation.z, - }, - timestamp=rosbag2_utils.stamp_to_nusc_timestamp(transform_stamped.header.stamp), + # steering wheel + steering_wheel_msg = self._vehicle_status_handler.get_closest_msg( + key="steering_wheel_status", + stamp=stamp, ) - return ego_pose_token + # gear -> shift + gear_msg = self._vehicle_status_handler.get_closest_msg(key="gear_status", stamp=stamp) + shift_state = self._vehicle_status_handler.gear_to_shift(gear_msg.report) + + # indicators + turn_indicators_msg = self._vehicle_status_handler.get_closest_msg( + key="turn_indicators_status", stamp=stamp + ) + indicators_state = self._vehicle_status_handler.indicator_to_state( + turn_indicators_msg.report + ) + + # additional info + # --- speed --- + velocity_report_msg = self._vehicle_status_handler.get_closest_msg( + key="velocity_status", stamp=stamp + ) + speed = np.linalg.norm( + [ + velocity_report_msg.longitudinal_velocity, + velocity_report_msg.lateral_velocity, + ] + ) + + vehicle_state_token = self._vehicle_state_table.insert_into_table( + timestamp=nusc_timestamp, + accel_pedal=actuation_msg.status.accel_status, + brake_pedal=actuation_msg.status.brake_status, + steer_pedal=actuation_msg.status.steer_status, + steering_tire_angle=steering_tire_msg.steering_tire_angle, + steering_wheel_angle=steering_wheel_msg.data, + shift_state=shift_state, + indicators=indicators_state, + additional_info={"speed": speed}, + ) + + return vehicle_state_token def _generate_calibrated_sensor( self, sensor_channel: str, start_timestamp: builtin_interfaces.msg.Time, topic_name="" @@ -955,3 +1077,12 @@ def _connect_sample_data_in_scene( cur_rec: SampleRecord = self._sample_data_table.select_record_from_token(cur_token) cur_rec.prev = prev_token self._sample_data_table.set_record_to_table(cur_rec) + prev_rec: SampleRecord = self._sample_data_table.select_record_from_token( + prev_token + ) + prev_rec.next = cur_token + self._sample_data_table.set_record_to_table(prev_rec) + + cur_rec: SampleRecord = self._sample_data_table.select_record_from_token(cur_token) + cur_rec.prev = prev_token + self._sample_data_table.set_record_to_table(cur_rec) diff --git 
a/perception_dataset/t4_dataset/classes/ego_pose.py b/perception_dataset/t4_dataset/classes/ego_pose.py index dc4a3806..fd0a90cb 100644 --- a/perception_dataset/t4_dataset/classes/ego_pose.py +++ b/perception_dataset/t4_dataset/classes/ego_pose.py @@ -1,4 +1,4 @@ -from typing import Any, Dict +from typing import Any, Dict, Optional from perception_dataset.constants import EXTENSION_ENUM from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable @@ -10,15 +10,28 @@ def __init__( translation: Dict[str, float], rotation: Dict[str, float], timestamp: int, + twist: Optional[Dict[str, float]], + acceleration: Optional[Dict[str, float]], + geocoordinate: Optional[Dict[str, float]], ): super().__init__() assert {"x", "y", "z"} == set(translation.keys()) assert {"w", "x", "y", "z"} == set(rotation.keys()) + if twist is not None: + assert {"vx", "vy", "vz", "yaw_rate", "pitch_rate", "roll_rate"} == set(twist.keys()) + if acceleration is not None: + assert {"ax", "ay", "az"} == set(acceleration.keys()) + if geocoordinate is not None: + assert {"latitude", "longitude", "altitude"} == set(geocoordinate.keys()) + self.translation: Dict[str, float] = translation self.rotation: Dict[str, float] = rotation self.timestamp: int = timestamp + self.twist: Optional[Dict[str, float]] = twist + self.acceleration: Optional[Dict[str, float]] = acceleration + self.geocoordinate = geocoordinate def to_dict(self) -> Dict[str, Any]: d = { @@ -35,6 +48,36 @@ def to_dict(self) -> Dict[str, Any]: self.rotation["z"], ], "timestamp": self.timestamp, + "twist": ( + [ + self.twist["vx"], + self.twist["vy"], + self.twist["vz"], + self.twist["yaw_rate"], + self.twist["pitch_rate"], + self.twist["roll_rate"], + ] + if self.twist is not None + else None + ), + "acceleration": ( + [ + self.acceleration["ax"], + self.acceleration["ay"], + self.acceleration["az"], + ] + if self.acceleration is not None + else None + ), + "geocoordinate": ( + [ + self.geocoordinate["latitude"], + self.geocoordinate["longitude"], + self.geocoordinate["altitude"], + ] + if self.geocoordinate is not None + else None + ), } return d diff --git a/perception_dataset/t4_dataset/classes/vehicle_state.py b/perception_dataset/t4_dataset/classes/vehicle_state.py new file mode 100644 index 00000000..07c9ae66 --- /dev/null +++ b/perception_dataset/t4_dataset/classes/vehicle_state.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +from typing import Any, Dict, Optional + +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class VehicleStateRecord(AbstractRecord): + def __init__( + self, + timestamp: int, + accel_pedal: Optional[float], + brake_pedal: Optional[float], + steer_pedal: Optional[float], + steering_tire_angle: Optional[float], + steering_wheel_angle: Optional[float], + shift_state: Optional[str], + indicators: Optional[Dict[str, str]], + additional_info: Optional[Dict[str, Any]], + ): + super().__init__() + + if shift_state is not None: + assert shift_state in ( + "PARK", + "REVERSE", + "NEUTRAL", + "HIGH", + "FORWARD", + "LOW", + "NONE", + ), f"Got unexpected shift state: {shift_state}" + if indicators is not None: + assert {"left", "right", "hazard"} == set(indicators.keys()) + assert {"on", "off"} >= set(indicators.values()) + if additional_info is not None: + assert {"speed"} == set(additional_info.keys()) + + self.timestamp: int = timestamp + self.accel_pedal: Optional[float] = accel_pedal + 
self.brake_pedal: Optional[float] = brake_pedal + self.steer_pedal: Optional[float] = steer_pedal + self.steering_tire_angle: Optional[float] = steering_tire_angle + self.steering_wheel_angle: Optional[float] = steering_wheel_angle + self.shift_state: Optional[str] = shift_state + self.indicators: Optional[Dict[str, str]] = indicators + self.additional_info: Optional[Dict[str, Any]] = additional_info + + def to_dict(self) -> Dict[str, Any]: + d = { + "token": self.token, + "timestamp": self.timestamp, + "accel_pedal": self.accel_pedal, + "brake_pedal": self.brake_pedal, + "steer_pedal": self.steer_pedal, + "steering_tire_angle": self.steering_tire_angle, + "steering_wheel_angle": self.steering_wheel_angle, + "shift_state": self.shift_state, + "indicators": self.indicators, + "additional_info": self.additional_info, + } + return d + + +class VehicleStateTable(AbstractTable[VehicleStateRecord]): + """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#vehicle_statejson""" + + FILENAME = "vehicle_state" + EXTENSION_ENUM.JSON.value + + def __init__(self): + super().__init__() + + def _to_record(self, **kwargs) -> VehicleStateRecord: + return VehicleStateRecord(**kwargs) From c660db63df0022f7c2927d9108c4989e707d0d23 Mon Sep 17 00:00:00 2001 From: ktro2828 Date: Wed, 30 Oct 2024 00:43:00 +0900 Subject: [PATCH 21/34] ci: update test depends Signed-off-by: ktro2828 --- .github/workflows/build-and-test.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-and-test.yaml b/.github/workflows/build-and-test.yaml index 2db907ba..ad168077 100644 --- a/.github/workflows/build-and-test.yaml +++ b/.github/workflows/build-and-test.yaml @@ -72,7 +72,7 @@ jobs: uses: autowarefoundation/autoware-github-actions/colcon-build@v1 with: rosdistro: ${{ matrix.rosdistro }} - target-packages: autoware_auto_perception_msgs autoware_perception_msgs tier4_perception_msgs + target-packages: autoware_auto_perception_msgs autoware_perception_msgs tier4_perception_msgs oxts_msgs vehicle_msgs build-depends-repos: build_depends.repos - name: Install dependencies @@ -80,7 +80,7 @@ jobs: apt update -yqq apt install -yqq curl python3-venv python-is-python3 \ libgl1-mesa-dev ros-${ROS_DISTRO}-sensor-msgs-py \ - ros-humble-radar-msgs + ros-humble-radar-msgs ros-${ROS_DISTRO}-tf-transformations - name: Check python version run: | From 3c6642f9dbbce2a0a2479ef64f41e2f7b8f49f53 Mon Sep 17 00:00:00 2001 From: Kotaro Uetake <60615504+ktro2828@users.noreply.github.com> Date: Tue, 12 Nov 2024 11:56:59 +0900 Subject: [PATCH 22/34] Update perception_dataset/ros2/oxts_msgs/ins_handler.py Co-authored-by: Shunsuke Miura <37187849+miursh@users.noreply.github.com> --- perception_dataset/ros2/oxts_msgs/ins_handler.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/perception_dataset/ros2/oxts_msgs/ins_handler.py b/perception_dataset/ros2/oxts_msgs/ins_handler.py index 95ddc8d8..df9ddce0 100644 --- a/perception_dataset/ros2/oxts_msgs/ins_handler.py +++ b/perception_dataset/ros2/oxts_msgs/ins_handler.py @@ -264,9 +264,9 @@ def interpolate_state( twist_msg.linear.x = twist[0] twist_msg.linear.y = twist[1] twist_msg.linear.z = twist[2] - twist_msg.angular.x = twist[0] - twist_msg.angular.y = twist[1] - twist_msg.angular.z = twist[2] + twist_msg.angular.x = twist[3] + twist_msg.angular.y = twist[4] + twist_msg.angular.z = twist[5] ego_state = EgoState( header=header, From a093a78743d36c965728929887a0cc3e9a81d4d0 Mon Sep 17 00:00:00 2001 From: 
ktro2828 Date: Tue, 12 Nov 2024 19:21:46 +0900 Subject: [PATCH 23/34] feat: update acceleration coordinate system in base_link Signed-off-by: ktro2828 --- .../ros2/oxts_msgs/ins_handler.py | 58 +++++++++---------- 1 file changed, 27 insertions(+), 31 deletions(-) diff --git a/perception_dataset/ros2/oxts_msgs/ins_handler.py b/perception_dataset/ros2/oxts_msgs/ins_handler.py index df9ddce0..59eed1d3 100644 --- a/perception_dataset/ros2/oxts_msgs/ins_handler.py +++ b/perception_dataset/ros2/oxts_msgs/ins_handler.py @@ -9,11 +9,11 @@ from geometry_msgs.msg import Quaternion, Twist, Vector3 from nav_msgs.msg import Odometry import numpy as np +from pyquaternion import Quaternion as PyQuaternion from scipy.interpolate import interp1d from scipy.spatial.transform import Rotation, Slerp from sensor_msgs.msg import Imu, NavSatFix from std_msgs.msg import Header -import tf_transformations as tf from perception_dataset.rosbag2.rosbag2_reader import Rosbag2Reader from perception_dataset.utils.rosbag2 import stamp_to_unix_timestamp, unix_timestamp_to_stamp @@ -21,10 +21,18 @@ @dataclass class EgoState: - """A dataclass to represent a set of ego status.""" + """A dataclass to represent a set of ego status. + + Attributes: + header (Header): Message header. + translation (Vector3): 3D position in global coordinates. + rotation (Quaternion): Quaternion in global coordinates. + twist (Twist): Twist in local coordinates. + Linear velocities are in (m/s), and angular velocities in (rad/s). + accel (Vector3): Acceleration in local coordinates in (m/s2). + """ header: Header - child_frame_id: str translation: Vector3 rotation: Quaternion twist: Twist @@ -270,7 +278,6 @@ def interpolate_state( ego_state = EgoState( header=header, - child_frame_id="base_link", translation=Vector3( x=translation[0], y=translation[1], @@ -363,40 +370,29 @@ def _localize_with_odometry(self) -> None: current_pose.orientation.w, ] - cur_matrix = tf.compose_matrix( - translate=current_translation, - angles=tf.euler_from_quaternion(current_rotation), - ) - + # acceleration from imu imu: Imu = self.get_closest_msg(key="imu", stamp=odometry.header.stamp) - # NOTE: rotate acceleration from IMU coords to map coords - b_H_m = cur_matrix # base_link -> map - i_H_b = tf.compose_matrix( - angles=tf.euler_from_quaternion( - [ - imu.orientation.x, - imu.orientation.y, - imu.orientation.z, - imu.orientation.w, - ] - ) - ) # imu -> base_link - - i_H_m = tf.concatenate_matrices(i_H_b, b_H_m) # imu -> map - # rotate acceleration - relative_acceleration = np.matmul( - i_H_m[:3, :3], + # rotate acceleration from imu to base_link + b_Q_i = PyQuaternion( + [ + imu.orientation.w, + imu.orientation.x, + imu.orientation.y, + imu.orientation.z, + ] + ) # base_link -> imu + + current_acceleration = b_Q_i.inverse.rotate( [ imu.linear_acceleration.x, imu.linear_acceleration.y, imu.linear_acceleration.z, - ], + ] ) ego_state = EgoState( header=odometry.header, - child_frame_id="base_link", translation=Vector3( x=current_translation[0], y=current_translation[1], @@ -410,9 +406,9 @@ def _localize_with_odometry(self) -> None: ), twist=odometry.twist.twist, accel=Vector3( - x=relative_acceleration[0], - y=relative_acceleration[1], - z=relative_acceleration[2], + x=current_acceleration[0], + y=current_acceleration[1], + z=current_acceleration[2], ), ) From 8eb9eec61012f5a2698f3dd6c415fec5da16f96a Mon Sep 17 00:00:00 2001 From: ktro2828 Date: Wed, 13 Nov 2024 15:38:38 +0900 Subject: [PATCH 24/34] feat: update handling of velocity/omega/acceleration Signed-off-by: 
ktro2828 --- .../ros2/oxts_msgs/ins_handler.py | 29 ++++++------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/perception_dataset/ros2/oxts_msgs/ins_handler.py b/perception_dataset/ros2/oxts_msgs/ins_handler.py index 59eed1d3..ce1a478b 100644 --- a/perception_dataset/ros2/oxts_msgs/ins_handler.py +++ b/perception_dataset/ros2/oxts_msgs/ins_handler.py @@ -9,7 +9,6 @@ from geometry_msgs.msg import Quaternion, Twist, Vector3 from nav_msgs.msg import Odometry import numpy as np -from pyquaternion import Quaternion as PyQuaternion from scipy.interpolate import interp1d from scipy.spatial.transform import Rotation, Slerp from sensor_msgs.msg import Imu, NavSatFix @@ -370,26 +369,16 @@ def _localize_with_odometry(self) -> None: current_pose.orientation.w, ] + current_twist = odometry.twist.twist + # acceleration from imu + # TODO: update with imu bias imu: Imu = self.get_closest_msg(key="imu", stamp=odometry.header.stamp) - - # rotate acceleration from imu to base_link - b_Q_i = PyQuaternion( - [ - imu.orientation.w, - imu.orientation.x, - imu.orientation.y, - imu.orientation.z, - ] - ) # base_link -> imu - - current_acceleration = b_Q_i.inverse.rotate( - [ - imu.linear_acceleration.x, - imu.linear_acceleration.y, - imu.linear_acceleration.z, - ] - ) + current_acceleration = [ + imu.linear_acceleration.x, + imu.linear_acceleration.y, + imu.linear_acceleration.z, + ] ego_state = EgoState( header=odometry.header, @@ -404,7 +393,7 @@ def _localize_with_odometry(self) -> None: z=current_rotation[2], w=current_rotation[3], ), - twist=odometry.twist.twist, + twist=current_twist, accel=Vector3( x=current_acceleration[0], y=current_acceleration[1], From 2e8b0f4e40feea6910eb46277c76f1d04f5a16f7 Mon Sep 17 00:00:00 2001 From: ktro2828 Date: Wed, 13 Nov 2024 15:41:08 +0900 Subject: [PATCH 25/34] chore: remove unused tf-transformations Signed-off-by: ktro2828 --- .github/workflows/build-and-test.yaml | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-and-test.yaml b/.github/workflows/build-and-test.yaml index ad168077..a6ba311f 100644 --- a/.github/workflows/build-and-test.yaml +++ b/.github/workflows/build-and-test.yaml @@ -80,7 +80,7 @@ jobs: apt update -yqq apt install -yqq curl python3-venv python-is-python3 \ libgl1-mesa-dev ros-${ROS_DISTRO}-sensor-msgs-py \ - ros-humble-radar-msgs ros-${ROS_DISTRO}-tf-transformations + ros-humble-radar-msgs - name: Check python version run: | diff --git a/README.md b/README.md index 2ea3522d..cc452aed 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ Install and build ros dependencies (this step must be outside of poetry virtuale ```bash source /opt/ros/${ROS_DISTRO}/setup.sh -sudo apt install -y ros-${ROS_DISTRO}-sensor-msgs-py ros-${ROS_DISTRO}-rosbag2-storage-mcap ros-${ROS_DISTRO}-radar-msgs ros-${ROS_DISTRO}-tf-transformations +sudo apt install -y ros-${ROS_DISTRO}-sensor-msgs-py ros-${ROS_DISTRO}-rosbag2-storage-mcap ros-${ROS_DISTRO}-radar-msgs mkdir src -p && vcs import src < build_depends.repos colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release --packages-up-to autoware_auto_perception_msgs autoware_perception_msgs tier4_perception_msgs oxts_msgs vehicle_msgs From a79b5c39a50a693f67540a849597eb5092543c0a Mon Sep 17 00:00:00 2001 From: ktro2828 Date: Wed, 13 Nov 2024 16:05:49 +0900 Subject: [PATCH 26/34] ci: update to install pacmod3-msgs Signed-off-by: ktro2828 --- .github/workflows/build-and-test.yaml | 9 +-------- build_depends.repos | 4 ++++ 2 files 
changed, 5 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build-and-test.yaml b/.github/workflows/build-and-test.yaml index a6ba311f..c0444434 100644 --- a/.github/workflows/build-and-test.yaml +++ b/.github/workflows/build-and-test.yaml @@ -52,7 +52,7 @@ jobs: apt update -yqq apt install -yqq curl python3-venv python-is-python3 \ libgl1-mesa-dev ros-${ROS_DISTRO}-sensor-msgs-py ros-${ROS_DISTRO}-rosbag2-storage-mcap \ - unzip tree + ros-${ROS_DISTRO}-radar-msgs unzip tree - name: Download test data uses: actions/download-artifact@v3 @@ -75,13 +75,6 @@ jobs: target-packages: autoware_auto_perception_msgs autoware_perception_msgs tier4_perception_msgs oxts_msgs vehicle_msgs build-depends-repos: build_depends.repos - - name: Install dependencies - run: | - apt update -yqq - apt install -yqq curl python3-venv python-is-python3 \ - libgl1-mesa-dev ros-${ROS_DISTRO}-sensor-msgs-py \ - ros-humble-radar-msgs - - name: Check python version run: | python -V diff --git a/build_depends.repos b/build_depends.repos index 359b3159..6d5ce7db 100644 --- a/build_depends.repos +++ b/build_depends.repos @@ -23,6 +23,10 @@ repositories: type: git url: https://github.com/tier4/oxts_ros2_driver.git version: feat/data_recording_system + pacmod3_msgs: + type: git + url: https://github.com/astuff/pacmod3_msgs.git + version: 1.0.0 pacmod_interface: type: git url: https://github.com/tier4/pacmod_interface.git From eea021ddbd4bdbb252f30b2c881122068c0ea5b6 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Tue, 19 Nov 2024 23:10:11 +0900 Subject: [PATCH 27/34] [tantative] add angular velocity conversion patch Signed-off-by: Shunsuke Miura --- .../rosbag2/rosbag2_to_non_annotated_t4_converter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index f5e62490..6e7219f8 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -888,9 +888,9 @@ def _generate_ego_pose(self, stamp: builtin_interfaces.msg.Time) -> str: "vx": ego_state.twist.linear.x, "vy": ego_state.twist.linear.y, "vz": ego_state.twist.linear.z, - "yaw_rate": ego_state.twist.angular.z, - "pitch_rate": ego_state.twist.angular.y, - "roll_rate": ego_state.twist.angular.x, + "yaw_rate": -ego_state.twist.angular.z / 180 * np.pi, + "pitch_rate": ego_state.twist.angular.y / 180 * np.pi, + "roll_rate": ego_state.twist.angular.x / 180 * np.pi, }, acceleration={ "ax": ego_state.accel.x, From b4aa1865d6f13fd3828977186af651d1549c7f71 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Tue, 19 Nov 2024 23:53:20 +0900 Subject: [PATCH 28/34] update config for DRS conversion Signed-off-by: Shunsuke Miura --- config/convert_rosbag2_to_non_annotated_t4_drs.yaml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/config/convert_rosbag2_to_non_annotated_t4_drs.yaml b/config/convert_rosbag2_to_non_annotated_t4_drs.yaml index b59072d3..1347bc70 100644 --- a/config/convert_rosbag2_to_non_annotated_t4_drs.yaml +++ b/config/convert_rosbag2_to_non_annotated_t4_drs.yaml @@ -4,12 +4,14 @@ description: conversion: input_base: ./data/rosbag2 output_base: ./data/non_annotated_t4_format - world_frame_id: "base_link" + world_frame_id: "map" start_timestamp_sec: 0 # Enter here if there is a timestamp for the start time. If not used, enter 0. 
skip_timestamp: 0.5 # Do not load data for the first point cloud timestamp for skip_timestamp seconds. num_load_frames: 0 # Maximum number of frames to save as t4 data. Set to 0 to automatically set it based on the number of lidar topics. - accept_frame_drop: True # If true, the conversion will continue even if the LiDAR frame is dropped. - undistort_image: True # If true, the camera image will be undistorted. + accept_frame_drop: true # If true, the conversion will continue even if the LiDAR frame is dropped. + undistort_image: true # If true, the camera image will be undistorted. + with_ins: true # whether to use INS messages as a ego state instead of `/tf` + with_vehicle_status: false # whether to generate `vehicle_state.json` # The following configuration is generally not modified unless there are changes to the vehicle sensor configuration. lidar_sensor: topic: /sensing/lidar/concatenated/pointcloud From 6ba58e8fe9d391cf086fbd44227fc183229fd584 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Wed, 20 Nov 2024 00:08:13 +0900 Subject: [PATCH 29/34] fix: bug in pcd conversion Signed-off-by: Shunsuke Miura --- perception_dataset/utils/rosbag2.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/perception_dataset/utils/rosbag2.py b/perception_dataset/utils/rosbag2.py index 7cabe27b..1e8ed149 100644 --- a/perception_dataset/utils/rosbag2.py +++ b/perception_dataset/utils/rosbag2.py @@ -170,11 +170,13 @@ def pointcloud_msg_to_numpy(pointcloud_msg: PointCloud2) -> NDArray: # Extract the x, y, z coordinates and additional fields if available xyz = points["xyz"] + points_arr = xyz if "intensity" in points.keys(): intensity = points["intensity"].astype(np.float32) - points_arr = np.hstack((xyz, intensity)) - else: - points_arr = xyz + points_arr = np.hstack((points_arr, intensity)) + if "lidar_index" in points.keys(): + lidar_index = points["lidar_index"].astype(np.float32) + points_arr = np.hstack((points_arr, lidar_index)) # Ensure the resulting array has exactly NUM_DIMENSIONS columns if points_arr.shape[1] > NUM_DIMENSIONS: From 995767ffdfea7ef56ab19d413cd285b03bddaaba Mon Sep 17 00:00:00 2001 From: Shunsuke Miura <37187849+miursh@users.noreply.github.com> Date: Wed, 4 Dec 2024 15:46:40 +0900 Subject: [PATCH 30/34] Update convert_rosbag2_to_non_annotated_t4_drs.yaml --- config/convert_rosbag2_to_non_annotated_t4_drs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/convert_rosbag2_to_non_annotated_t4_drs.yaml b/config/convert_rosbag2_to_non_annotated_t4_drs.yaml index 1347bc70..e6e1b3c7 100644 --- a/config/convert_rosbag2_to_non_annotated_t4_drs.yaml +++ b/config/convert_rosbag2_to_non_annotated_t4_drs.yaml @@ -4,7 +4,7 @@ description: conversion: input_base: ./data/rosbag2 output_base: ./data/non_annotated_t4_format - world_frame_id: "map" + world_frame_id: "base_link" start_timestamp_sec: 0 # Enter here if there is a timestamp for the start time. If not used, enter 0. skip_timestamp: 0.5 # Do not load data for the first point cloud timestamp for skip_timestamp seconds. num_load_frames: 0 # Maximum number of frames to save as t4 data. Set to 0 to automatically set it based on the number of lidar topics. From 59881b9cd9e79158ab308e145c40c921af099ba1 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Tue, 10 Dec 2024 02:53:01 +0900 Subject: [PATCH 31/34] Revert "[tantative] add angular velocity conversion patch" This reverts commit eea021ddbd4bdbb252f30b2c881122068c0ea5b6. 
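Returning briefly to the `pointcloud_msg_to_numpy` fix in PATCH 29 above, the column stacking it performs can be illustrated with plain NumPy. Everything below is fabricated for illustration (array shapes and the `NUM_DIMENSIONS` bound are assumptions; in the converter the field arrays come from `point_cloud2_to_array`):

# Illustrative only: mimics the column stacking in pointcloud_msg_to_numpy() after
# the PATCH 29 fix. The field arrays are fabricated.
import numpy as np

NUM_DIMENSIONS = 5  # assumed column limit used by the converter

xyz = np.random.rand(4, 3).astype(np.float32)         # stands in for points["xyz"]
intensity = np.random.rand(4, 1).astype(np.float32)   # stands in for points["intensity"]
lidar_index = np.zeros((4, 1), dtype=np.float32)      # stands in for points["lidar_index"]

points_arr = xyz
points_arr = np.hstack((points_arr, intensity))    # appended only when the field exists
points_arr = np.hstack((points_arr, lidar_index))  # appended only when the field exists

# keep at most NUM_DIMENSIONS columns, as the surrounding function does
if points_arr.shape[1] > NUM_DIMENSIONS:
    points_arr = points_arr[:, :NUM_DIMENSIONS]

print(points_arr.shape)  # -> (4, 5)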
--- .../rosbag2/rosbag2_to_non_annotated_t4_converter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index c43058ca..4115c79a 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -891,9 +891,9 @@ def _generate_ego_pose(self, stamp: builtin_interfaces.msg.Time) -> str: "vx": ego_state.twist.linear.x, "vy": ego_state.twist.linear.y, "vz": ego_state.twist.linear.z, - "yaw_rate": -ego_state.twist.angular.z / 180 * np.pi, - "pitch_rate": ego_state.twist.angular.y / 180 * np.pi, - "roll_rate": ego_state.twist.angular.x / 180 * np.pi, + "yaw_rate": ego_state.twist.angular.z, + "pitch_rate": ego_state.twist.angular.y, + "roll_rate": ego_state.twist.angular.x, }, acceleration={ "ax": ego_state.accel.x, From 641e81e2bb5928fdd1db757cbbb040bd8497bfb0 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Tue, 10 Dec 2024 02:53:06 +0900 Subject: [PATCH 32/34] Revert "[tentative] add camera info patch" This reverts commit d45d573f1feffc8ca1c486565fc03f4da7bbeb31. --- perception_dataset/rosbag2/rosbag2_reader.py | 49 -------------------- 1 file changed, 49 deletions(-) diff --git a/perception_dataset/rosbag2/rosbag2_reader.py b/perception_dataset/rosbag2/rosbag2_reader.py index 898a4d3e..863839f0 100644 --- a/perception_dataset/rosbag2/rosbag2_reader.py +++ b/perception_dataset/rosbag2/rosbag2_reader.py @@ -9,7 +9,6 @@ from rclpy.time import Time from rosbag2_py import StorageFilter from rosidl_runtime_py.utilities import get_message -from sensor_msgs.msg import CameraInfo import tf2_ros from perception_dataset.utils.rosbag2 import create_reader, get_topic_count, get_topic_type_dict @@ -108,54 +107,6 @@ def _set_camera_info(self): return self.camera_info[topic_name] = message - import numpy as np - - for i in range(8): - topic_name = f"/sensing/camera/camera{i}/camera_info" - camera_info_msg = CameraInfo() - camera_info_msg.header.frame_id = f"camera{i}/camera_optical_link" - camera_info_msg.header.stamp = builtin_interfaces.msg.Time( - sec=int(self.start_timestamp + 1), nanosec=0 - ) - camera_info_msg.distortion_model = "plumb_bob" - camera_info_msg.r = np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]) - camera_info_msg.width = 2880 - camera_info_msg.height = 1860 - if "camera0" in topic_name: - camera_info_msg.k = np.array([5368.25873, 0.0, 1412.70938, 0.0, 5364.46693, 958.59729, 0.0, 0.0, 1.0]) - camera_info_msg.p = np.array([5305.15088, 0.0, 1412.64275, 0.0, 0.0, 5342.61084, 958.70113, 0.0, 0.0, 0.0, 1.0, 0.0]) - camera_info_msg.d = [-0.08849, -0.90255, 0.00075, 0.00111, 0.0] - elif "camera1" in topic_name: - camera_info_msg.k = np.array([1496.73395, 0.0, 1424.70018, 0.0, 1497.10726, 945.6712, 0.0, 0.0, 1.0]) - camera_info_msg.p = np.array([1015.1003418, 0.0, 1466.52248505, 0.0, 0.0, 1284.54455566, 950.87123341, 0.0, 0.0, 0.0, 1.0, 0.0]) - camera_info_msg.d = [-0.08989, -0.1186, -0.00016, -0.00007, 0.0067, 0.30995, -0.24648, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] - elif "camera2" in topic_name: - camera_info_msg.k = np.array([1502.98471, 0.0, 1422.35349, 0.0, 1504.5042, 931.99575, 0.0, 0.0, 1.0]) - camera_info_msg.p = np.array([878.42378, 0.0, 1402.49031, 0.0, 0.0, 1258.01633, 933.10245, 0.0, 0.0, 0.0, 1.0, 0.0]) - camera_info_msg.d = [0.32864, -0.03183, 2e-05, 0.0002, 0.00267, 0.73261, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] - elif 
"camera3" in topic_name: - camera_info_msg.k = np.array([1500.05060, 0.00000, 1430.78876, 0.00000, 1499.95752, 940.95613, 0.00000, 0.00000, 1.00000]) - camera_info_msg.p = np.array([877.863525, 0.00000000, 1418.95998, 0.00000000, 0.0, 1254.34375, 945.262686, 0.00000000, 0.00000000, 0.0, 1.00000000, 0.0]) - camera_info_msg.d = [0.27430142, -0.02073177, 0.00007407, 0.00008116, 0.00128976, 0.67045534, 0.00000000, 0.00000000, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] - elif "camera4" in topic_name: - camera_info_msg.k = np.array([5363.43736, 0.0, 1507.51202, 0.0, 5341.55785, 1076.26984, 0.0, 0.0, 1.0]) - camera_info_msg.p = np.array([5296.04052734, 0.0, 1511.62903545, 0.0, 0.0, 5311.76367188, 1077.67061308, 0.0, 0.0, 0.0, 1.0, 0.0]) - camera_info_msg.d = [-0.12858, -0.44056, 0.00123, 0.00215, 0.0] - elif "camera5" in topic_name: - camera_info_msg.k = np.array([1500.35853, 0.0, 1419.21658, 0.0, 1501.15968, 936.4509, 0.0, 0.0, 1.0]) - camera_info_msg.p = np.array([871.67853, 0.0, 1390.37965, 0.0, 0.0, 1251.62366, 939.62595, 0.0, 0.0, 0.0, 1.0, 0.0]) - camera_info_msg.d = [3.49528, 0.71004, 0.00028, 0.0001, -0.03621, 3.91361, 1.98308, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] - elif "camera6" in topic_name: - camera_info_msg.k = np.array([1543.88646, 0.0, 1455.51234, 0.0, 1542.117, 955.83491, 0.0, 0.0, 1.0]) - camera_info_msg.p = np.array([940.59991455, 0.0, 1472.20666395, 0.0, 0.0, 1302.85144043, 965.17800362, 0.0, 0.0, 0.0, 1.0, 0.0]) - camera_info_msg.d = [0.45661, -0.00186, -0.00003, -0.00015, 0.00153, 0.85654, 0.08203, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] - elif "camera7" in topic_name: - camera_info_msg.k = np.array([1493.89305, 0.0, 1434.05368, 0.0, 1494.11047, 938.13478, 0.0, 0.0, 1.0]) - camera_info_msg.p = np.array([870.17737, 0.0, 1421.48751, 0.0, 0.0, 1247.0332, 940.93758, 0.0, 0.0, 0.0, 1.0, 0.0]) - camera_info_msg.d = [0.45661, -0.00186, -0.00003, -0.00015, 0.00153, 0.85654, 0.08203, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] - - self.camera_info[topic_name] = camera_info_msg - def get_topic_count(self, topic_name: str) -> int: return self._topic_name_to_topic_count.get(topic_name, 0) From 73ee7b277cb1fd5826d01a101846cb7292af0414 Mon Sep 17 00:00:00 2001 From: Shunsuke Miura <37187849+miursh@users.noreply.github.com> Date: Tue, 10 Dec 2024 23:58:07 +0900 Subject: [PATCH 33/34] Apply suggestions from code review Co-authored-by: Kotaro Uetake <60615504+ktro2828@users.noreply.github.com> --- .../rosbag2/rosbag2_to_non_annotated_t4_converter.py | 3 --- perception_dataset/utils/rosbag2.py | 3 +-- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py index 29df6c0c..67ff9478 100644 --- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py +++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py @@ -351,9 +351,6 @@ def _convert(self) -> None: if self._with_vehicle_status: self._convert_vehicle_state() - if self._with_vehicle_status: - self._convert_vehicle_state() - def _calc_start_timestamp(self) -> float: if self._start_timestamp < sys.float_info.epsilon: start_timestamp = self._bag_reader.start_timestamp + self._skip_timestamp diff --git a/perception_dataset/utils/rosbag2.py b/perception_dataset/utils/rosbag2.py index 1e8ed149..a98fc8fc 100644 --- a/perception_dataset/utils/rosbag2.py +++ b/perception_dataset/utils/rosbag2.py @@ -169,8 +169,7 @@ def pointcloud_msg_to_numpy(pointcloud_msg: PointCloud2) -> NDArray: points = 
point_cloud2_to_array(pointcloud_msg) # Extract the x, y, z coordinates and additional fields if available - xyz = points["xyz"] - points_arr = xyz + points_arr = points["xyz"] if "intensity" in points.keys(): intensity = points["intensity"].astype(np.float32) points_arr = np.hstack((points_arr, intensity)) From 7c485e40399311c84cdd338fdc617d8165914fcd Mon Sep 17 00:00:00 2001 From: Shunsuke Miura Date: Wed, 11 Dec 2024 00:01:29 +0900 Subject: [PATCH 34/34] remove sample config yaml for Co-MLOps dataset Signed-off-by: Shunsuke Miura --- ...ps_rosbag2_to_non_annotated_t4_sample.yaml | 36 ------------------- 1 file changed, 36 deletions(-) delete mode 100644 config/convert_comlops_rosbag2_to_non_annotated_t4_sample.yaml diff --git a/config/convert_comlops_rosbag2_to_non_annotated_t4_sample.yaml b/config/convert_comlops_rosbag2_to_non_annotated_t4_sample.yaml deleted file mode 100644 index f25c81fc..00000000 --- a/config/convert_comlops_rosbag2_to_non_annotated_t4_sample.yaml +++ /dev/null @@ -1,36 +0,0 @@ -task: convert_rosbag2_to_non_annotated_t4 -description: - scene: "" -conversion: - input_base: ./data/rosbag2 - output_base: ./data/non_annotated_t4_format - world_frame_id: "map" - start_timestamp_sec: 0 # Enter here if there is a timestamp for the start time. If not used, enter 0. - skip_timestamp: 2.0 # Do not load data for the first point cloud timestamp for skip_timestamp seconds. - num_load_frames: 0 # Maximum number of frames to save as t4 data. Set to 0 to automatically set it based on the number of lidar topics. - accept_frame_drop: False # If true, the conversion will continue even if the LiDAR frame is dropped. - with_ins: true # whether to use INS messages as a ego state instead of `/tf` - with_vehicle_status: true # whether to generate `vehicle_state.json` - # The following configuration is generally not modified unless there are changes to the vehicle sensor configuration. - lidar_sensor: - topic: /sensing/lidar/concatenated/pointcloud - channel: LIDAR_CONCAT - camera_sensors: # Keep the same order as each camera exposure timing - - topic: /sensing/camera/camera3/image_raw/compressed - channel: CAM_BACK_LEFT - delay_msec: 48.33 - - topic: /sensing/camera/camera2/image_raw/compressed - channel: CAM_FRONT_LEFT - delay_msec: 65.0 - - topic: /sensing/camera/camera0/image_raw/compressed - channel: CAM_FRONT - delay_msec: 81.67 - - topic: /sensing/camera/camera4/image_raw/compressed - channel: CAM_FRONT_RIGHT - delay_msec: 98.33 - - topic: /sensing/camera/camera5/image_raw/compressed - channel: CAM_BACK_RIGHT - delay_msec: 115.0 - - topic: /sensing/camera/camera1/image_raw/compressed - channel: CAM_BACK - delay_msec: 131.67
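Taken together, the `ego_pose.json` and `vehicle_state.json` entries produced by this series take roughly the following shape. The values below are illustrative placeholders only; the keys and field ordering follow the `to_dict()` implementations added to `ego_pose.py` and `vehicle_state.py` earlier in the series.

# Illustrative entries only; values are placeholders.
ego_pose_entry = {
    "token": "<token>",
    "translation": [81271.5, 49852.3, 41.2],
    "rotation": [0.99, 0.0, 0.0, 0.12],        # [w, x, y, z]
    "timestamp": 1700000000000000,
    "twist": [5.2, 0.0, 0.0, 0.01, 0.0, 0.0],  # [vx, vy, vz, yaw_rate, pitch_rate, roll_rate]
    "acceleration": [0.3, 0.0, -0.1],          # [ax, ay, az]
    "geocoordinate": [35.68, 139.69, 40.5],    # [latitude, longitude, altitude]
}

vehicle_state_entry = {
    "token": "<token>",
    "timestamp": 1700000000000000,
    "accel_pedal": 0.1,
    "brake_pedal": 0.0,
    "steer_pedal": 0.02,
    "steering_tire_angle": 0.01,
    "steering_wheel_angle": 0.15,
    "shift_state": "NEUTRAL",
    "indicators": {"left": "off", "right": "off", "hazard": "off"},
    "additional_info": {"speed": 5.2},
}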