From 212647a961132270774c1c0e5c144dfeb065e04b Mon Sep 17 00:00:00 2001
From: Shunsuke Miura <37187849+miursh@users.noreply.github.com>
Date: Wed, 11 Dec 2024 16:15:56 +0900
Subject: [PATCH] feat: drs conversion (#177)

* Output topic drop error during initialization

Signed-off-by: Shunsuke Miura

* Avoid out-of-bounds access in get_lidar_camera_synced_frame_info

Signed-off-by: Shunsuke Miura

* Enhance image data handling to support CompressedImage format

- Modify _generate_image_data to accept both np.ndarray and CompressedImage types
- Add image_shape parameter to properly set image dimensions
- Ensure correct saving of CompressedImage data

Signed-off-by: Shunsuke Miura

* add RX2 constants

Signed-off-by: Shunsuke Miura

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* feat(rosbag2_to_non_annotated_t4): support image undistortion option

Signed-off-by: Shunsuke Miura

* feat(rosbag2_to_non_annotated_t4): add DRS related constants/config

Signed-off-by: Shunsuke Miura

* raise error when the camera_info is not found

Signed-off-by: Shunsuke Miura

* overwrite distortion coefficient when applying undistortion

Signed-off-by: Shunsuke Miura

* fix config, set jpeg_quality to 100

Signed-off-by: Shunsuke Miura

* update config file

Signed-off-by: Shunsuke Miura

* modify overwrite logic for rosbag2_to_non_annotated_t4 converter

Signed-off-by: Shunsuke Miura

* Add elapsed time logging and increase message display interval

Signed-off-by: Shunsuke Miura

* add error handling, add print

Signed-off-by: Shunsuke Miura

* add error handling

Signed-off-by: Shunsuke Miura

* update drs config file

Signed-off-by: Shunsuke Miura

* add output when finish conversion

Signed-off-by: Shunsuke Miura

* add output when finish conversion

Signed-off-by: Shunsuke Miura

* [tentative] add camera info patch

Signed-off-by: Shunsuke Miura

* feat: update record classes for CoMLOPs format

Signed-off-by: ktro2828

* ci: update test depends

Signed-off-by: ktro2828

* Update perception_dataset/ros2/oxts_msgs/ins_handler.py

Co-authored-by: Shunsuke Miura <37187849+miursh@users.noreply.github.com>

* feat: update acceleration coordinate system in base_link

Signed-off-by: ktro2828

* feat: update handling of velocity/omega/acceleration

Signed-off-by: ktro2828

* chore: remove unused tf-transformations

Signed-off-by: ktro2828

* ci: update to install pacmod3-msgs

Signed-off-by: ktro2828

* [tentative] add angular velocity conversion patch

Signed-off-by: Shunsuke Miura

* update config for DRS conversion

Signed-off-by: Shunsuke Miura

* fix: bug in pcd conversion

Signed-off-by: Shunsuke Miura

* Update convert_rosbag2_to_non_annotated_t4_drs.yaml

* Revert "[tentative] add angular velocity conversion patch"

This reverts commit eea021ddbd4bdbb252f30b2c881122068c0ea5b6.

* Revert "[tentative] add camera info patch"

This reverts commit d45d573f1feffc8ca1c486565fc03f4da7bbeb31.
* Apply suggestions from code review

Co-authored-by: Kotaro Uetake <60615504+ktro2828@users.noreply.github.com>

* remove sample config yaml for Co-MLOps dataset

Signed-off-by: Shunsuke Miura

---------

Signed-off-by: Shunsuke Miura
Signed-off-by: ktro2828
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: ktro2828
Co-authored-by: Kotaro Uetake <60615504+ktro2828@users.noreply.github.com>
---
 ...vert_rosbag2_to_non_annotated_t4_drs.yaml} | 45 +++++++++++--------
 perception_dataset/constants.py               |  8 ++++
 .../rosbag2_to_non_annotated_t4_converter.py  | 42 +++++++++++++----
 perception_dataset/utils/rosbag2.py           |  9 ++--
 4 files changed, 73 insertions(+), 31 deletions(-)
 rename config/{convert_comlops_rosbag2_to_non_annotated_t4_sample.yaml => convert_rosbag2_to_non_annotated_t4_drs.yaml} (62%)

diff --git a/config/convert_comlops_rosbag2_to_non_annotated_t4_sample.yaml b/config/convert_rosbag2_to_non_annotated_t4_drs.yaml
similarity index 62%
rename from config/convert_comlops_rosbag2_to_non_annotated_t4_sample.yaml
rename to config/convert_rosbag2_to_non_annotated_t4_drs.yaml
index f25c81fc..e6e1b3c7 100644
--- a/config/convert_comlops_rosbag2_to_non_annotated_t4_sample.yaml
+++ b/config/convert_rosbag2_to_non_annotated_t4_drs.yaml
@@ -4,33 +4,40 @@ description:
 conversion:
   input_base: ./data/rosbag2
   output_base: ./data/non_annotated_t4_format
-  world_frame_id: "map"
+  world_frame_id: "base_link"
   start_timestamp_sec: 0 # Enter here if there is a timestamp for the start time. If not used, enter 0.
-  skip_timestamp: 2.0 # Do not load data for the first point cloud timestamp for skip_timestamp seconds.
+  skip_timestamp: 0.5 # Do not load data for the first point cloud timestamp for skip_timestamp seconds.
   num_load_frames: 0 # Maximum number of frames to save as t4 data. Set to 0 to automatically set it based on the number of lidar topics.
-  accept_frame_drop: False # If true, the conversion will continue even if the LiDAR frame is dropped.
+  accept_frame_drop: true # If true, the conversion will continue even if the LiDAR frame is dropped.
+  undistort_image: true # If true, the camera image will be undistorted.
   with_ins: true # whether to use INS messages as a ego state instead of `/tf`
-  with_vehicle_status: true # whether to generate `vehicle_state.json`
+  with_vehicle_status: false # whether to generate `vehicle_state.json`
   # The following configuration is generally not modified unless there are changes to the vehicle sensor configuration.
   lidar_sensor:
     topic: /sensing/lidar/concatenated/pointcloud
     channel: LIDAR_CONCAT
   camera_sensors: # Keep the same order as each camera exposure timing
-    - topic: /sensing/camera/camera3/image_raw/compressed
-      channel: CAM_BACK_LEFT
-      delay_msec: 48.33
-    - topic: /sensing/camera/camera2/image_raw/compressed
-      channel: CAM_FRONT_LEFT
-      delay_msec: 65.0
     - topic: /sensing/camera/camera0/image_raw/compressed
-      channel: CAM_FRONT
-      delay_msec: 81.67
-    - topic: /sensing/camera/camera4/image_raw/compressed
+      channel: CAM_FRONT_NARROW
+      delay_msec: 16.0
+    - topic: /sensing/camera/camera1/image_raw/compressed
+      channel: CAM_FRONT_WIDE
+      delay_msec: 16.0
+    - topic: /sensing/camera/camera2/image_raw/compressed
       channel: CAM_FRONT_RIGHT
-      delay_msec: 98.33
-    - topic: /sensing/camera/camera5/image_raw/compressed
+      delay_msec: -9.0
+    - topic: /sensing/camera/camera3/image_raw/compressed
       channel: CAM_BACK_RIGHT
-      delay_msec: 115.0
-    - topic: /sensing/camera/camera1/image_raw/compressed
-      channel: CAM_BACK
-      delay_msec: 131.67
+      delay_msec: -59.0
+    - topic: /sensing/camera/camera4/image_raw/compressed
+      channel: CAM_BACK_NARROW
+      delay_msec: -34.0
+    - topic: /sensing/camera/camera5/image_raw/compressed
+      channel: CAM_BACK_WIDE
+      delay_msec: -84.0
+    - topic: /sensing/camera/camera6/image_raw/compressed
+      channel: CAM_BACK_LEFT
+      delay_msec: -59.0
+    - topic: /sensing/camera/camera7/image_raw/compressed
+      channel: CAM_FRONT_LEFT
+      delay_msec: -59.0
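Note on the config above: the delay_msec values appear to be per-camera exposure offsets relative to the LiDAR timestamp, with negative values meaning the camera fires before the LiDAR stamp. The following is a minimal, hypothetical sketch of how such an offset could be used when pairing a camera frame with a LiDAR frame; the function name, tolerance, and unit handling are illustrative assumptions, not code from this patch.

    # Hypothetical sketch, not part of this patch.
    from typing import List, Optional

    def find_synced_camera_stamp(
        lidar_stamp_sec: float,
        camera_stamps_sec: List[float],
        delay_msec: float,
        tolerance_msec: float = 5.0,  # assumed tolerance, not from the config
    ) -> Optional[float]:
        """Return the camera timestamp closest to the expected trigger time."""
        expected = lidar_stamp_sec + delay_msec * 1e-3
        best = min(camera_stamps_sec, key=lambda t: abs(t - expected), default=None)
        if best is not None and abs(best - expected) * 1e3 <= tolerance_msec:
            return best
        return None

    # e.g. a CAM_BACK_WIDE frame (delay_msec: -84.0) is expected ~84 ms before the LiDAR stamp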
diff --git a/perception_dataset/constants.py b/perception_dataset/constants.py
index 7527623c..0ec83229 100644
--- a/perception_dataset/constants.py
+++ b/perception_dataset/constants.py
@@ -43,6 +43,14 @@ class SENSOR_ENUM(Enum):
         "channel": "CAM_BACK",
         "modality": SENSOR_MODALITY_ENUM.CAMERA.value,
     }
+    CAM_BACK_NARROW = {
+        "channel": "CAM_BACK_NARROW",
+        "modality": SENSOR_MODALITY_ENUM.CAMERA.value,
+    }
+    CAM_BACK_WIDE = {
+        "channel": "CAM_BACK_WIDE",
+        "modality": SENSOR_MODALITY_ENUM.CAMERA.value,
+    }
     CAM_FRONT_LEFT = {
         "channel": "CAM_FRONT_LEFT",
         "modality": SENSOR_MODALITY_ENUM.CAMERA.value,
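The new CAM_BACK_NARROW / CAM_BACK_WIDE members follow the existing SENSOR_ENUM pattern: each value is a dict carrying a channel name and a modality. A self-contained sketch of that pattern and of a channel lookup follows; the from_channel helper and the modality string values are illustrative assumptions, not part of constants.py.

    # Sketch of the SENSOR_ENUM pattern; values assumed for illustration.
    from enum import Enum

    class SENSOR_MODALITY_ENUM(Enum):
        CAMERA = "camera"
        LIDAR = "lidar"

    class SENSOR_ENUM(Enum):
        CAM_BACK_NARROW = {
            "channel": "CAM_BACK_NARROW",
            "modality": SENSOR_MODALITY_ENUM.CAMERA.value,
        }
        CAM_BACK_WIDE = {
            "channel": "CAM_BACK_WIDE",
            "modality": SENSOR_MODALITY_ENUM.CAMERA.value,
        }

    def from_channel(channel: str) -> SENSOR_ENUM:
        # Linear scan over members, mirroring how the converter matches channels.
        for member in SENSOR_ENUM:
            if member.value["channel"] == channel:
                return member
        raise ValueError(f"unknown sensor channel: {channel}")

    assert from_channel("CAM_BACK_WIDE") is SENSOR_ENUM.CAM_BACK_WIDE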
diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py
index ac9f6a01..67ff9478 100644
--- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py
+++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py
@@ -5,6 +5,7 @@
 import os.path as osp
 import shutil
 import sys
+import time
 from typing import Dict, List, Optional, Tuple, Union
 import warnings
@@ -81,21 +82,31 @@ def convert(self):
         if not self._overwrite_mode:
             dir_exist: bool = False
-            for bag_dir in bag_dirs:
+            for bag_dir in bag_dirs[:]:  # copy to avoid modifying list while iterating
                 bag_name: str = osp.basename(bag_dir)
                 output_dir = osp.join(self._output_base, bag_name)
                 if osp.exists(output_dir):
                     logger.error(f"{output_dir} already exists.")
                     dir_exist = True
-
-            if dir_exist:
+                    bag_dirs.remove(bag_dir)
+            if dir_exist and len(bag_dirs) == 0:
+                logger.error(f"{output_dir} already exists.")
                 raise ValueError("If you want to overwrite files, use --overwrite option.")
-        for bag_dir in bag_dirs:
+        for bag_dir in sorted(bag_dirs):
+            logger.info(f"Start converting {bag_dir} to T4 format.")
             self._params.input_bag_path = bag_dir
-            bag_converter = _Rosbag2ToNonAnnotatedT4Converter(self._params)
-            bag_converter.convert()
+            try:
+                bag_converter = _Rosbag2ToNonAnnotatedT4Converter(self._params)
+                bag_converter.convert()
+            except Exception as e:
+                logger.error(f"Error occurred during conversion: {e}")
+                continue
+            logger.info(f"Conversion of {bag_dir} is completed")
+            print(
+                "--------------------------------------------------------------------------------------------------------------------------"
+            )


 class _Rosbag2ToNonAnnotatedT4Converter:
@@ -157,7 +168,7 @@ def __init__(self, params: Rosbag2ConverterParams) -> None:
         self._output_data_dir = osp.join(
             self._output_scene_dir, T4_FORMAT_DIRECTORY_NAME.DATA.value
         )
-        self._msg_display_interval = 10
+        self._msg_display_interval = 100

         shutil.rmtree(self._output_scene_dir, ignore_errors=True)
         self._make_directories()
@@ -259,16 +270,23 @@ def _init_tables(self):
     def convert(self):
         self._convert()
+        self._save_tables()
         self._save_config()
         if not self._without_compress:
             self._compress_directory()

     def _save_tables(self):
+        print(
+            "--------------------------------------------------------------------------------------------------------------------------"
+        )
         for cls_attr in self.__dict__.values():
             if isinstance(cls_attr, AbstractTable):
                 print(f"{cls_attr.FILENAME}: #rows {len(cls_attr)}")
                 cls_attr.save_json(self._output_anno_dir)
+        print(
+            "--------------------------------------------------------------------------------------------------------------------------"
+        )

     def _save_config(self):
         config_data = {
@@ -318,6 +336,7 @@ def _convert(self) -> None:
         - is_key_frame=True
         - fill in next/prev
         """
+        start = time.time()
         sensor_channel_to_sample_data_token_list: Dict[str, List[str]] = {}

         self._init_tables()
@@ -327,6 +346,7 @@ def _convert(self) -> None:
         self._connect_sample_in_scene()
         self._connect_sample_data_in_scene(sensor_channel_to_sample_data_token_list)
         self._add_scene_description(self._scene_description)
+        print(f"Total elapsed time: {time.time() - start:.2f} sec")

         if self._with_vehicle_status:
             self._convert_vehicle_state()
@@ -348,6 +368,7 @@ def _convert_sensor_data(
         logger.info(f"set start_timestamp to {start_timestamp}")

         if self._sensor_mode == SensorMode.DEFAULT:
+            start = time.time()
             lidar_sensor_channel = self._lidar_sensor["channel"]
             sensor_channel_to_sample_data_token_list[lidar_sensor_channel] = (
                 self._convert_pointcloud(
@@ -357,7 +378,7 @@ def _convert_sensor_data(
                     scene_token=scene_token,
                 )
             )
-
+            print(f"LiDAR conversion. total elapsed time: {time.time() - start:.2f} sec\n")
             for radar_sensor in self._radar_sensors:
                 radar_sensor_channel = radar_sensor["channel"]
                 sensor_channel_to_sample_data_token_list[radar_sensor_channel] = (
@@ -382,6 +403,7 @@ def _convert_sensor_data(
             )

         for camera_sensor in self._camera_sensors:
+            start = time.time()
             sensor_channel = camera_sensor["channel"]
             sensor_channel_to_sample_data_token_list[sensor_channel] = self._convert_image(
                 start_timestamp=camera_start_timestamp,
@@ -400,6 +422,9 @@ def _convert_sensor_data(
                 camera_start_timestamp = misc_utils.nusc_timestamp_to_unix_timestamp(
                     first_sample_data_record.timestamp
                 )
+            print(
+                f"camera {camera_sensor['channel']} conversion. total elapsed time: {time.time() - start:.2f} sec\n"
+            )

     def _convert_static_data(self):
         # Log, Map
@@ -968,6 +993,7 @@ def _generate_calibrated_sensor(
         self, sensor_channel: str, start_timestamp: builtin_interfaces.msg.Time, topic_name=""
     ) -> Union[str, Tuple[str, CameraInfo]]:
         calibrated_sensor_token = str()
+        camera_info = None
         for sensor_enum in self._sensor_enums:
             channel = sensor_enum.value["channel"]
             modality = sensor_enum.value["modality"]
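The undistort_image option added in the config and the "overwrite distortion coefficient when applying undistortion" commit are not visible in this excerpt. The following is a minimal sketch of that general approach with OpenCV: undistort using the stored intrinsics, then zero out the distortion coefficients so the saved calibration matches the saved pixels. FakeCameraInfo stands in for ROS 2 sensor_msgs/msg/CameraInfo (whose k and d field names it copies); everything else here is an assumption, not the converter's actual code.

    # Sketch only: mirrors the "undistort, then overwrite distortion" idea.
    from dataclasses import dataclass, field
    from typing import List

    import cv2
    import numpy as np

    @dataclass
    class FakeCameraInfo:
        # k: 3x3 intrinsics, row-major; d: plumb_bob distortion coefficients
        k: List[float] = field(default_factory=lambda: [500.0, 0.0, 320.0,
                                                        0.0, 500.0, 240.0,
                                                        0.0, 0.0, 1.0])
        d: List[float] = field(default_factory=lambda: [-0.3, 0.1, 0.0, 0.0, 0.0])

    def undistort_and_patch_info(image: np.ndarray, info: FakeCameraInfo) -> np.ndarray:
        """Undistort the image and zero the stored distortion coefficients."""
        k = np.asarray(info.k, dtype=np.float64).reshape(3, 3)
        d = np.asarray(info.d, dtype=np.float64)
        undistorted = cv2.undistort(image, k, d)
        info.d = [0.0] * len(info.d)  # calibration now describes the undistorted image
        return undistorted

    img = undistort_and_patch_info(np.zeros((480, 640, 3), np.uint8), FakeCameraInfo())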
diff --git a/perception_dataset/utils/rosbag2.py b/perception_dataset/utils/rosbag2.py
index 7cabe27b..a98fc8fc 100644
--- a/perception_dataset/utils/rosbag2.py
+++ b/perception_dataset/utils/rosbag2.py
@@ -169,12 +169,13 @@ def pointcloud_msg_to_numpy(pointcloud_msg: PointCloud2) -> NDArray:
     points = point_cloud2_to_array(pointcloud_msg)

     # Extract the x, y, z coordinates and additional fields if available
-    xyz = points["xyz"]
+    points_arr = points["xyz"]
     if "intensity" in points.keys():
         intensity = points["intensity"].astype(np.float32)
-        points_arr = np.hstack((xyz, intensity))
-    else:
-        points_arr = xyz
+        points_arr = np.hstack((points_arr, intensity))
+    if "lidar_index" in points.keys():
+        lidar_index = points["lidar_index"].astype(np.float32)
+        points_arr = np.hstack((points_arr, lidar_index))

     # Ensure the resulting array has exactly NUM_DIMENSIONS columns
     if points_arr.shape[1] > NUM_DIMENSIONS:
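For reference, the np.hstack calls above grow the point array column-wise: xyz is (N, 3), and each optional scalar field appends one column, giving (N, 4) with intensity and (N, 5) with intensity plus lidar_index. A standalone sketch with dummy data follows; NUM_DIMENSIONS = 5 and the (N, 1) per-field shape are assumptions about point_cloud2_to_array's output, not facts from this patch.

    # Standalone illustration of the column-wise stacking; dummy data only.
    import numpy as np

    NUM_DIMENSIONS = 5  # x, y, z, intensity, lidar_index (assumed)

    xyz = np.zeros((4, 3), dtype=np.float32)            # (N, 3) coordinates
    intensity = np.ones((4, 1), dtype=np.float32)       # (N, 1) scalar field
    lidar_index = np.full((4, 1), 2, dtype=np.float32)  # (N, 1) scalar field

    points_arr = xyz
    points_arr = np.hstack((points_arr, intensity))    # -> (N, 4)
    points_arr = np.hstack((points_arr, lidar_index))  # -> (N, 5)
    assert points_arr.shape == (4, NUM_DIMENSIONS)

    # Mirror of the guard at the end of pointcloud_msg_to_numpy:
    if points_arr.shape[1] > NUM_DIMENSIONS:
        points_arr = points_arr[:, :NUM_DIMENSIONS]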