From 188e3b672a1f29942ead405c56aafed7eee9665d Mon Sep 17 00:00:00 2001
From: kminoda <44218668+kminoda@users.noreply.github.com>
Date: Sat, 23 Dec 2023 17:21:19 +0900
Subject: [PATCH 1/2] feat(rosbag_to_non_annotated_t4): support lidar drop
 (#58)

* feat: add lidar point filtering (still not working for image fusion part)
  Signed-off-by: kminoda
* feat(rosbag_to_non_annotated_t4): support lidar drop
  Signed-off-by: kminoda
* feat: update test
  Signed-off-by: kminoda
* feat: apply pre-commit
  Signed-off-by: kminoda
* feat: add test
  Signed-off-by: kminoda
* fix: fix comment
  Signed-off-by: kminoda
* fix: reorganize arguments for latency handling
  Signed-off-by: kminoda
* test: add test
  Signed-off-by: kminoda
* fix: fix for executable script
  Signed-off-by: kminoda
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* feat: add document for get_lidar_camera_synced_frame_info
  Signed-off-by: kminoda
* feat: update args to lidar_to_camera_latency_sec
  Signed-off-by: kminoda
* feat: add lidar_latency
  Signed-off-by: kminoda
* feat: add lidar-camera latency margin
  Signed-off-by: kminoda
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* feat: add comment
  Signed-off-by: kminoda
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* feat: parametrize self._lidar_points_ratio_threshold
  Signed-off-by: kminoda
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* feat: remove image drop error and fix docs
  Signed-off-by: kminoda
* test: remove unnecessary test
  Signed-off-by: kminoda
* fix pre-commit
  Signed-off-by: kminoda
* remove magic number
  Signed-off-by: kminoda
* feat: extend range
  Signed-off-by: kminoda
* fix: remove params.timestamp_diff
  Signed-off-by: kminoda
* fix: change default param of ratio_points
  Signed-off-by: kminoda
* revert: revert unnecessary change
  Signed-off-by: kminoda
* feat: change logic so that the experiment in the description is valid
  Signed-off-by: kminoda
* feat: remove accept_frame_drop from _convert_image()
  Signed-off-by: kminoda
* fix: remove unnecessary comment
  Signed-off-by: kminoda
* feat: improve rosbag2_utils.pointcloud_msg_to_numpy
  Signed-off-by: kminoda
* feat: use lower case for timestamp_diff
  Signed-off-by: kminoda
* feat: replace matching algorithm with prior knowledge about camera latency
  Signed-off-by: kminoda
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* revert: revert unnecessary added file
  Signed-off-by: kminoda
* fix: fix default value
  Signed-off-by: kminoda
* fix: fix test config
  Signed-off-by: kminoda
* fix: update config
  Signed-off-by: kminoda
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* update config
  Signed-off-by: kminoda
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* modify a config for synthetic dataset
  Signed-off-by: Shunsuke Miura

---------

Signed-off-by: kminoda
Signed-off-by: Shunsuke Miura
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Shunsuke Miura
---
 ...rt_rosbag2_to_non_annotated_t4_sample.yaml |   7 +-
 ...osbag2_to_non_annotated_t4_tlr_sample.yaml |   3 +-
 ...to_non_annotated_t4_with_radar_sample.yaml |   7 +-
 .../convert_synthetic_data_camera.yaml        |   4 +-
 .../rosbag2/converter_params.py               |  14 ++-
 .../rosbag2_to_non_annotated_t4_converter.py  |  97 ++++++++++------
 perception_dataset/utils/misc.py              |  93 ++++++++-------
 perception_dataset/utils/rosbag2.py           |  11 +-
 .../convert_rosbag2_to_non_annotated_t4.yaml  |   7 +-
 ..._rosbag2_to_non_annotated_t4_tlr_test.yaml |   3 +-
 tests/utils/test_misc.py                      | 107 +++++++++++++++---
 11 files changed, 251 insertions(+), 102 deletions(-)

diff --git a/config/convert_rosbag2_to_non_annotated_t4_sample.yaml b/config/convert_rosbag2_to_non_annotated_t4_sample.yaml
index e17eaa77..4b09f55a 100644
--- a/config/convert_rosbag2_to_non_annotated_t4_sample.yaml
+++ b/config/convert_rosbag2_to_non_annotated_t4_sample.yaml
@@ -12,17 +12,22 @@ conversion:
   lidar_sensor:
     topic: /sensing/lidar/concatenated/pointcloud
     channel: LIDAR_CONCAT
-  camera_latency_sec: 0.02 # camera latency in seconds between the header.stamp and the exposure trigger
   camera_sensors: # Keep the same order as each camera exposure timing
     - topic: /sensing/camera/camera3/image_rect_color/compressed
       channel: CAM_BACK_LEFT
+      delay_msec: 48.33
     - topic: /sensing/camera/camera2/image_rect_color/compressed
       channel: CAM_FRONT_LEFT
+      delay_msec: 65.0
     - topic: /sensing/camera/camera0/image_rect_color/compressed
       channel: CAM_FRONT
+      delay_msec: 81.67
     - topic: /sensing/camera/camera4/image_rect_color/compressed
       channel: CAM_FRONT_RIGHT
+      delay_msec: 98.33
     - topic: /sensing/camera/camera5/image_rect_color/compressed
       channel: CAM_BACK_RIGHT
+      delay_msec: 115.0
     - topic: /sensing/camera/camera1/image_rect_color/compressed
       channel: CAM_BACK
+      delay_msec: 131.67
diff --git a/config/convert_rosbag2_to_non_annotated_t4_tlr_sample.yaml b/config/convert_rosbag2_to_non_annotated_t4_tlr_sample.yaml
index 44d3f189..1651e1fa 100644
--- a/config/convert_rosbag2_to_non_annotated_t4_tlr_sample.yaml
+++ b/config/convert_rosbag2_to_non_annotated_t4_tlr_sample.yaml
@@ -11,11 +11,12 @@ conversion:
   lidar_sensor:
     topic: ""
     channel: ""
-  camera_latency_sec: 0.1 # camera latency in seconds between the header.stamp and shutter trigger
   camera_sensors:
     - topic: /sensing/camera/camera6/image_raw/compressed
       channel: CAM_TRAFFIC_LIGHT_FAR
+      delay_msec: 0.1
     - topic: /sensing/camera/camera7/image_raw/compressed
       channel: CAM_TRAFFIC_LIGHT_NEAR
+      delay_msec: 0.1
   generate_frame_every: 1
   generate_frame_every_meter: 50
diff --git a/config/convert_rosbag2_to_non_annotated_t4_with_radar_sample.yaml b/config/convert_rosbag2_to_non_annotated_t4_with_radar_sample.yaml
index 7197b140..468059c4 100644
--- a/config/convert_rosbag2_to_non_annotated_t4_with_radar_sample.yaml
+++ b/config/convert_rosbag2_to_non_annotated_t4_with_radar_sample.yaml
@@ -24,17 +24,22 @@ conversion:
       channel: RADAR_BACK_LEFT
     - topic: /sensing/radar/rear_right/objects_raw
       channel: RADAR_BACK_RIGHT
-  camera_latency_sec: 0.02 # camera latency in seconds between the header.stamp and the exposure trigger
   camera_sensors: # Keep the same order as each camera exposure timing
     - topic: /sensing/camera/camera3/image_rect_color/compressed
       channel: CAM_BACK_LEFT
+      delay_msec: 48.33
     - topic: /sensing/camera/camera2/image_rect_color/compressed
       channel: CAM_FRONT_LEFT
+      delay_msec: 65.0
     - topic: /sensing/camera/camera0/image_rect_color/compressed
       channel: CAM_FRONT
+      delay_msec: 81.67
    - topic: /sensing/camera/camera4/image_rect_color/compressed
       channel: CAM_FRONT_RIGHT
+      delay_msec: 98.33
     - topic: /sensing/camera/camera5/image_rect_color/compressed
       channel: CAM_BACK_RIGHT
+      delay_msec: 115.0
     - topic: /sensing/camera/camera1/image_rect_color/compressed
       channel: CAM_BACK
+      delay_msec: 131.67
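Note that the six delay_msec values in these sample configs are evenly staggered across the 100 ms scan period: one sixth (about 16.67 ms) per camera, in exposure order. A minimal sketch (not part of the patch) that reproduces the table; treating the 48.33 ms base offset as exactly 145/3 ms is an assumption made here for the arithmetic to round cleanly:

    # Sketch: derive the staggered per-camera delays used in the sample configs.
    base_delay_msec = 145 / 3  # = 48.333... ms, the first camera (CAM_BACK_LEFT)
    step_msec = 100.0 / 6      # one 100 ms scan period split across six cameras

    delays = [round(base_delay_msec + i * step_msec, 2) for i in range(6)]
    print(delays)  # [48.33, 65.0, 81.67, 98.33, 115.0, 131.67]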
diff --git a/config/rosbag2_to_t4/convert_synthetic_data_camera.yaml b/config/rosbag2_to_t4/convert_synthetic_data_camera.yaml
index 665c9a31..804f2870 100644
--- a/config/rosbag2_to_t4/convert_synthetic_data_camera.yaml
+++ b/config/rosbag2_to_t4/convert_synthetic_data_camera.yaml
@@ -8,8 +8,8 @@ conversion:
   workers_number: 1
   skip_timestamp: 2.0
   num_load_frames: 50
+  accept_frame_drop: false # If true, the conversion will continue even if the LiDAR frame is dropped.
   crop_frames_unit: 1 # crop frames from the end so that the number of frames is divisible by crop_frames_unit. Set to 0 or 1 so as not to crop any frames.
-  camera_latency_sec: 0.00
   object_topic_name: /ground_truth/objects
   object_msg_type: TrackedObjects
   world_frame_id: world
@@ -19,5 +19,7 @@ conversion:
   camera_sensors: #Keep the same order as each camera exposure timing
     - topic: /sensing/camera/camera4/image_rect_color
       channel: CAM_FRONT
+      delay_msec: 0.0
     - topic: /sensing/camera/camera5/image_rect_color
      channel: CAM_BACK
+      delay_msec: 0.0
diff --git a/perception_dataset/rosbag2/converter_params.py b/perception_dataset/rosbag2/converter_params.py
index 86a23e56..ea008f08 100644
--- a/perception_dataset/rosbag2/converter_params.py
+++ b/perception_dataset/rosbag2/converter_params.py
@@ -50,12 +50,20 @@ class Rosbag2ConverterParams(BaseModel):
     skip_timestamp: float  # not read for the second after the first topic
     start_timestamp_sec: float = 0.0  # conversion start timestamp in sec
     crop_frames_unit: int = 1  # crop frames from the end so that the number of frames is divisible by crop_frames_unit. Set to 0 or 1 so as not to crop any frames.
-    camera_latency_sec: float = (
-        0.0  # camera latency in seconds between the header.stamp and shutter trigger
+
+    # Maximum camera jitter in seconds. This value MUST be set large enough, since camera jitter smaller than this value is not considered.
+    # Also, do not set this value larger than system_scan_period_sec.
+    max_camera_jitter_sec: float = 0.03
+
+    lidar_latency_sec: float = (
+        0.03  # lidar latency in seconds between the header.stamp and shutter trigger
     )
-    timestamp_diff: float = 0.15
+    system_scan_period_sec: float = 0.1  # system scan period in seconds
     topic_list: list = []  # topic list for input_bag
     mandatory_topic_list: list = []  # mandatory topic list for input_bag
+
+    lidar_points_ratio_threshold: float = 0.2  # minimum ratio of points in a frame, relative to the maximum number of points in any frame, below which the frame is treated as dropped
+
     # in synthetic data (from AWSIM) it may be the case that there is no ego transform available at the beginning of rosbag
     ignore_no_ego_transform_at_rosbag_beginning: bool = False
     generate_frame_every: int = 1  # pick frames out of every this number.
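Taken together, these parameters replace the single camera_latency_sec with an explicit tolerance model. A minimal sketch (assumptions: values taken from the sample config above; is_synced is a hypothetical helper written for illustration, not a function from this patch) of how they combine into the per-camera matching window documented in the misc.py hunk further below:

    # Sketch: combine the converter-level latencies with a per-camera delay.
    lidar_latency_sec = 0.03      # lidar latency between header.stamp and scan
    delay_msec = 81.67            # per-camera delay (CAM_FRONT in the sample config)
    system_scan_period_sec = 0.1  # 10 Hz sensor pipeline
    max_camera_jitter_sec = 0.03

    # Expected camera-minus-LiDAR timestamp offset for this camera.
    lidar_to_camera_latency_sec = lidar_latency_sec + 1e-3 * delay_msec

    # An image at t_image matches a LiDAR scan at t_lidar when the offset falls
    # inside the tolerance band described in the misc.py docstring below.
    def is_synced(t_image: float, t_lidar: float) -> bool:
        margin = system_scan_period_sec - max_camera_jitter_sec
        offset = t_image - t_lidar
        return (
            lidar_to_camera_latency_sec - margin
            <= offset
            <= lidar_to_camera_latency_sec + margin
        )

    assert is_synced(0.19, 0.1)       # 90 ms offset, inside 0.11167 +/- 0.07
    assert not is_synced(0.30, 0.1)   # 200 ms offset, image considered dropped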
diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py
index 864a8e54..f7c2e3cf 100644
--- a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py
+++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py
@@ -103,7 +103,10 @@ def __init__(self, params: Rosbag2ConverterParams) -> None:
         self._num_load_frames: int = params.num_load_frames
         self._crop_frames_unit: int = params.crop_frames_unit
         self._without_compress: bool = params.without_compress
-        self._camera_latency: float = params.camera_latency_sec
+        self._system_scan_period_sec: float = params.system_scan_period_sec
+        self._max_camera_jitter_sec: float = params.max_camera_jitter_sec
+        self._lidar_latency: float = params.lidar_latency_sec
+        self._lidar_points_ratio_threshold: float = params.lidar_points_ratio_threshold
         self._start_timestamp: float = params.start_timestamp_sec
         self._end_timestamp: float = 0
         self._data_type: DataType = params.data_type
@@ -121,8 +124,8 @@ def __init__(self, params: Rosbag2ConverterParams) -> None:
         self._calibrated_sensor_target_frame: str = "base_link"

         # Note: To determine if there is any message dropout, including a delay tolerance of 10Hz.
-
-        self._TIMESTAMP_DIFF = params.timestamp_diff
+        # Note: The delay tolerance is set to 1.5 times the system scan period.
+        self._timestamp_diff = params.system_scan_period_sec * 1.5

         self._lidar_sensor: Dict[str, str] = params.lidar_sensor
         self._radar_sensors: List[Dict[str, str]] = params.radar_sensors
@@ -346,9 +349,8 @@ def _convert_sensor_data(
             # temporaly use start_timestamp instead of recorded timestamp for non synced data
             camera_start_timestamp = start_timestamp
         else:
-            camera_start_timestamp = (
-                misc_utils.nusc_timestamp_to_unix_timestamp(first_sample_data_record.timestamp)
-                + self._camera_latency
+            camera_start_timestamp = misc_utils.nusc_timestamp_to_unix_timestamp(
+                first_sample_data_record.timestamp
             )

         for camera_sensor in self._camera_sensors:
@@ -357,6 +359,7 @@
                 start_timestamp=camera_start_timestamp,
                 sensor_channel=camera_sensor["channel"],
                 topic=camera_sensor["topic"],
+                delay_msec=float(camera_sensor["delay_msec"]),
                 scene_token=scene_token,
             )

@@ -405,6 +408,17 @@ def _convert_pointcloud(
         calibrated_sensor_token = self._generate_calibrated_sensor(
             sensor_channel, start_time_in_time, topic
         )
+
+        # Calculate the maximum number of points
+        max_num_points = 0
+        for pointcloud_msg in self._bag_reader.read_messages(
+            topics=[topic],
+            start_time=start_time_in_time,
+        ):
+            points_arr = rosbag2_utils.pointcloud_msg_to_numpy(pointcloud_msg)
+            max_num_points = max(max_num_points, len(points_arr))
+
+        # Main iteration
         for pointcloud_msg in self._bag_reader.read_messages(
             topics=[topic],
             start_time=start_time_in_time,
@@ -434,12 +448,27 @@
             )

             # Note: LiDAR Message drops are not accepted unless accept_frame_drop is True.
             if not self._accept_frame_drop and (
-                time_diff > self._TIMESTAMP_DIFF or unix_timestamp < prev_frame_unix_timestamp
+                time_diff > self._timestamp_diff or unix_timestamp < prev_frame_unix_timestamp
             ):
                 raise ValueError(
                     f"PointCloud message is dropped [{frame_index}]: cur={unix_timestamp} prev={prev_frame_unix_timestamp}"
                 )

+            points_arr = rosbag2_utils.pointcloud_msg_to_numpy(pointcloud_msg)
+            if len(points_arr) < max_num_points * self._lidar_points_ratio_threshold:
+                if not self._accept_frame_drop:
+                    raise ValueError(
+                        f"PointCloud message has much fewer points than the maximum, which is not acceptable. "
+                        f"If you would like to accept such frames, change the accept_frame_drop parameter. "
+                        f"frame_index: {frame_index}, stamp: {unix_timestamp}, # points: {len(points_arr)}"
+                    )
+                else:
+                    warnings.warn(
+                        f"PointCloud message has much fewer points than the maximum; a LiDAR message drop "
+                        f"may have occurred. Skipping frame_index: {frame_index}, stamp: {unix_timestamp}, # points: {len(points_arr)}"
+                    )
+                    continue
+
             nusc_timestamp = rosbag2_utils.stamp_to_nusc_timestamp(pointcloud_msg.header.stamp)
             sample_token = self._sample_table.insert_into_table(
                 timestamp=nusc_timestamp, scene_token=scene_token
@@ -461,10 +490,8 @@
             )
             # TODO(yukke42): Save data in the PCD file format, which allows flexible field configuration.
-            if isinstance(pointcloud_msg, PointCloud2):
-                points_arr = rosbag2_utils.pointcloud_msg_to_numpy(pointcloud_msg)
-            else:
-                points_arr = np.zeros((0, 5), dtype=np.float32)
+            points_arr = rosbag2_utils.pointcloud_msg_to_numpy(pointcloud_msg)
+
             if len(points_arr) == 0:
                 warnings.warn(
                     f"PointCloud message is empty [{frame_index}]: cur={unix_timestamp} prev={prev_frame_unix_timestamp}"
                 )
@@ -519,7 +546,7 @@ def _convert_radar_tracks(
             )
             if (
                 unix_timestamp - prev_frame_unix_timestamp
-            ) > self._TIMESTAMP_DIFF or unix_timestamp < prev_frame_unix_timestamp:
+            ) > self._timestamp_diff or unix_timestamp < prev_frame_unix_timestamp:
                 raise ValueError(
                     f"{topic} message is dropped [{frame_index}]: cur={unix_timestamp} prev={prev_frame_unix_timestamp}"
                 )
@@ -561,6 +588,7 @@ def _convert_image(
         start_timestamp: float,
         sensor_channel: str,
         topic: str,
+        delay_msec: float,
         scene_token: str,
     ):
         """convert image topic to raw image data"""
@@ -584,14 +612,16 @@ def _convert_image(
             misc_utils.nusc_timestamp_to_unix_timestamp(sample_record.timestamp)
             for sample_record in sample_records
         ]
+        lidar_to_camera_latency_sec = self._lidar_latency + 1e-3 * delay_msec
+
         synced_frame_info_list = misc_utils.get_lidar_camera_synced_frame_info(
-            image_timestamp_list,
-            lidar_timestamp_list,
-            self._camera_latency,
-            self._accept_frame_drop,
-            self._TIMESTAMP_DIFF,
-            self._num_load_frames,
-            self._msg_display_interval,
+            image_timestamp_list=image_timestamp_list,
+            lidar_timestamp_list=lidar_timestamp_list,
+            lidar_to_camera_latency_sec=lidar_to_camera_latency_sec,
+            system_scan_period_sec=self._system_scan_period_sec,
+            max_camera_jitter_sec=self._max_camera_jitter_sec,
+            num_load_frames=self._num_load_frames,
+            msg_display_interval=self._msg_display_interval,
         )

         # Get image shape
@@ -606,34 +636,35 @@ def _convert_image(
         )
         for image_index, lidar_frame_index, dummy_image_timestamp in synced_frame_info_list:
             lidar_sample_token: str = sample_records[lidar_frame_index].token
-            if dummy_image_timestamp is None:
-                image_msg = None
-                while image_index_counter < image_index:
-                    image_msg = next(image_generator)
-                    image_index_counter += 1
-
+            if image_index is None:  # Image dropped
                sample_data_token = self._generate_image_data(
-                    rosbag2_utils.compressed_msg_to_numpy(image_msg),
-                    rosbag2_utils.stamp_to_unix_timestamp(image_msg.header.stamp),
+                    np.zeros(shape=image_shape, dtype=np.uint8),  # dummy image
+                    dummy_image_timestamp,
                     lidar_sample_token,
                     calibrated_sensor_token,
                     sensor_channel,
                     lidar_frame_index,
+                    output_blank_image=True,
+                    is_key_frame=False,
                 )
                 sample_data_token_list.append(sample_data_token)
-            else:
+            elif lidar_frame_index is None:  # LiDAR dropped
+                warnings.warn(f"LiDAR message dropped at image_index: {image_index}")
+            else:  # Both messages available
+                image_msg = None
+                while image_index_counter < image_index:
+                    image_msg = next(image_generator)
+                    image_index_counter += 1
+
                 sample_data_token = self._generate_image_data(
-                    np.zeros(shape=image_shape, dtype=np.uint8),  # dummy image
-                    dummy_image_timestamp,
+                    rosbag2_utils.compressed_msg_to_numpy(image_msg),
+                    rosbag2_utils.stamp_to_unix_timestamp(image_msg.header.stamp),
                     lidar_sample_token,
                     calibrated_sensor_token,
                     sensor_channel,
                     lidar_frame_index,
-                    output_blank_image=True,
-                    is_key_frame=False,
                 )
                 sample_data_token_list.append(sample_data_token)
-
         else:  # camera only mode

 def get_move_distance(trans1: Dict[str, float], trans2: Dict[str, float]) -> float:
diff --git a/perception_dataset/utils/misc.py b/perception_dataset/utils/misc.py
index b423b8f6..029b9459 100644
--- a/perception_dataset/utils/misc.py
+++ b/perception_dataset/utils/misc.py
@@ -24,54 +24,59 @@ def get_sample_data_filename(sensor_channel: str, frame_index: int, fileformat:
 def get_lidar_camera_synced_frame_info(
     image_timestamp_list: List[float],
     lidar_timestamp_list: List[float],
-    camera_latency_sec: float = 0.0,
-    accept_frame_drop: bool = False,
-    timestamp_diff: float = 0.15,
+    lidar_to_camera_latency_sec: float = 0.0,
+    system_scan_period_sec: float = 0.1,
+    max_camera_jitter_sec: float = 0.03,
     num_load_frames: int = 0,
     msg_display_interval: int = 100,
 ):
-    synced_frame_info_list: List[
-        int, int, float
-    ] = []  # [image_index, lidar_frame_index, dummy_timestamp (None if not dummy)]
-    lidar_frame_index: int = 0
-    prev_frame_unix_timestamp = lidar_timestamp_list[0] + camera_latency_sec
-
-    for image_index, image_unix_timestamp in enumerate(image_timestamp_list):
-        if lidar_frame_index >= num_load_frames:
+    """
+    Get synced frame info list for lidar and camera.
+    LiDAR scan with t_lidar and image with t_image are synced if
+        t_image - t_lidar >= lidar_to_camera_latency_sec - (system_scan_period_sec - max_camera_jitter_sec)
+    and
+        t_image - t_lidar <= lidar_to_camera_latency_sec + (system_scan_period_sec - max_camera_jitter_sec)
+
+    Args:
+        image_timestamp_list: image timestamp list
+        lidar_timestamp_list: lidar timestamp list
+        lidar_to_camera_latency_sec: latency in seconds from the LiDAR header.stamp to the camera shutter trigger
+        system_scan_period_sec: system scan period in seconds
+        max_camera_jitter_sec: maximum camera jitter in seconds
+        num_load_frames: the number of frames to be loaded. If the value isn't positive, read all messages.
+        msg_display_interval: display interval for messages
+    Return:
+        synced_frame_info_list: synced frame info list
+            [[image_index, lidar_frame_index, dummy_image_timestamp (None if image is not dropped)]]
+    """
+    synced_frame_info_list: List[List] = []
+
+    current_image_index: int = 0
+    for lidar_index, lidar_timestamp in enumerate(lidar_timestamp_list):
+        if lidar_index >= num_load_frames:
             break
-
-        # Get LiDAR data
-        lidar_unix_timestamp = lidar_timestamp_list[lidar_frame_index]
-
-        # Address image drop
-        while (image_unix_timestamp - prev_frame_unix_timestamp) > timestamp_diff:
-            # Add dummy image timestamp in synced_frame_info_list
-            dummy_image_timestamp = image_unix_timestamp
-            while (dummy_image_timestamp - prev_frame_unix_timestamp) > timestamp_diff:
-                dummy_image_timestamp -= 0.1
-            synced_frame_info_list.append([None, lidar_frame_index, dummy_image_timestamp])
-
-            # Increment LiDAR information
-            lidar_frame_index += 1
-            prev_frame_unix_timestamp = dummy_image_timestamp
-            if lidar_frame_index >= num_load_frames:
-                return synced_frame_info_list
-
-            lidar_unix_timestamp = lidar_timestamp_list[lidar_frame_index]
-
-        time_diff_from_lidar = image_unix_timestamp - lidar_unix_timestamp
-        if not accept_frame_drop and time_diff_from_lidar > (camera_latency_sec + timestamp_diff):
-            raise ValueError(
-                f"Topic message may be dropped at [{lidar_frame_index}]: lidar_timestamp={lidar_unix_timestamp} image_timestamp={image_unix_timestamp}"
-            )
-
-        if lidar_frame_index % msg_display_interval == 0:
+        image_timestamp = image_timestamp_list[current_image_index]
+
+        while image_timestamp - lidar_timestamp < lidar_to_camera_latency_sec - (
+            system_scan_period_sec - max_camera_jitter_sec
+        ):
+            current_image_index += 1
+            image_timestamp = image_timestamp_list[current_image_index]
+
+        if image_timestamp - lidar_timestamp > lidar_to_camera_latency_sec + (
+            system_scan_period_sec - max_camera_jitter_sec
+        ):
+            # Image is dropped
+            dummy_timestamp = image_timestamp - system_scan_period_sec
+            synced_frame_info_list.append([None, lidar_index, dummy_timestamp])
+            continue
+
+        synced_frame_info_list.append([current_image_index, lidar_index, None])
+        current_image_index += 1
+
+        if lidar_index % msg_display_interval == 0:
             print(
-                f"frame{lidar_frame_index}, stamp = {image_unix_timestamp}, diff cam - lidar = {time_diff_from_lidar:0.3f} sec"
+                f"frame{lidar_index}, stamp = {image_timestamp}, diff cam - lidar = {image_timestamp - lidar_timestamp:0.3f} sec"
             )
-
-        synced_frame_info_list.append([image_index, lidar_frame_index, None])
-        lidar_frame_index += 1
-        prev_frame_unix_timestamp = image_unix_timestamp

     return synced_frame_info_list
diff --git a/perception_dataset/utils/rosbag2.py b/perception_dataset/utils/rosbag2.py
index 0fc6bbda..b3709826 100644
--- a/perception_dataset/utils/rosbag2.py
+++ b/perception_dataset/utils/rosbag2.py
@@ -106,13 +106,18 @@ def pointcloud_msg_to_numpy(
     pointcloud_msg: PointCloud2,
 ) -> NDArray:
     """numpy ver. of https://github.com/ros2/common_interfaces/blob/master/sensor_msgs_py/sensor_msgs_py/point_cloud2.py#L119"""
+    NUM_DIMENSIONS = 5
+
+    if not isinstance(pointcloud_msg, PointCloud2):
+        return np.zeros((0, NUM_DIMENSIONS), dtype=np.float32)
+
     points_arr = np.array(
         [tuple(p) for p in sensor_msgs_py.point_cloud2.read_points(pointcloud_msg)],
         dtype=np.float32,
     )
-    if len(points_arr[0]) > 5:
-        points_arr = np.delete(points_arr, np.s_[5:], axis=1)
-    while len(points_arr[0]) < 5:
+    if len(points_arr[0]) > NUM_DIMENSIONS:
+        points_arr = np.delete(points_arr, np.s_[NUM_DIMENSIONS:], axis=1)
+    while len(points_arr[0]) < NUM_DIMENSIONS:
         points_arr = np.insert(points_arr, len(points_arr[0]), -1, axis=1)

     return points_arr
diff --git a/tests/config/convert_rosbag2_to_non_annotated_t4.yaml b/tests/config/convert_rosbag2_to_non_annotated_t4.yaml
index d57b0825..766ce31f 100644
--- a/tests/config/convert_rosbag2_to_non_annotated_t4.yaml
+++ b/tests/config/convert_rosbag2_to_non_annotated_t4.yaml
@@ -11,17 +11,22 @@ conversion:
   lidar_sensor:
     topic: /sensing/lidar/concatenated/pointcloud
     channel: LIDAR_CONCAT
-  camera_latency_sec: 0.02 # camera latency in seconds between the header.stamp and the exposure trigger
   camera_sensors: # Keep the same order as each camera exposure timing
     - topic: /sensing/camera/camera3/image_rect_color/compressed
       channel: CAM_BACK_LEFT
+      delay_msec: 48.33
     - topic: /sensing/camera/camera2/image_rect_color/compressed
       channel: CAM_FRONT_LEFT
+      delay_msec: 65.0
     - topic: /sensing/camera/camera0/image_rect_color/compressed
       channel: CAM_FRONT
+      delay_msec: 81.67
     - topic: /sensing/camera/camera4/image_rect_color/compressed
       channel: CAM_FRONT_RIGHT
+      delay_msec: 98.33
     - topic: /sensing/camera/camera5/image_rect_color/compressed
       channel: CAM_BACK_RIGHT
+      delay_msec: 115.0
     - topic: /sensing/camera/camera1/image_rect_color/compressed
       channel: CAM_BACK
+      delay_msec: 131.67
diff --git a/tests/config/convert_rosbag2_to_non_annotated_t4_tlr_test.yaml b/tests/config/convert_rosbag2_to_non_annotated_t4_tlr_test.yaml
index 27bab46c..3c57324b 100644
--- a/tests/config/convert_rosbag2_to_non_annotated_t4_tlr_test.yaml
+++ b/tests/config/convert_rosbag2_to_non_annotated_t4_tlr_test.yaml
@@ -11,11 +11,12 @@ conversion:
   lidar_sensor:
     topic: ""
     channel: ""
-  camera_latency_sec: 0.1 # camera latency in seconds between the header.stamp and shutter trigger
   camera_sensors:
     - topic: /sensing/camera/camera0/image_rect_color/compressed
       channel: CAM_TRAFFIC_LIGHT_NEAR
+      delay_msec: 0.1
     - topic: /sensing/camera/camera6/image_raw/compressed
       channel: CAM_TRAFFIC_LIGHT_FAR
+      delay_msec: 0.1
   generate_frame_every: 1
   generate_frame_every_meter: 5
diff --git a/tests/utils/test_misc.py b/tests/utils/test_misc.py
index 61a5da0a..c378a7a3 100644
--- a/tests/utils/test_misc.py
+++ b/tests/utils/test_misc.py
@@ -30,56 +30,137 @@ def assert_synced_frame_info_list(expected, synced_frame_info_list):

 def test_get_lidar_camera_synced_frame_info_1():
-    image_timestamp_list = [0.0, 0.1, 0.2, 0.3, 0.4]
+    # Basic test case
+    image_timestamp_list = [0.07, 0.17, 0.27, 0.37, 0.47]
     lidar_timestamp_list = [0.0, 0.1, 0.2, 0.3, 0.4]
     expected = [[0, 0, None], [1, 1, None], [2, 2, None], [3, 3, None], [4, 4, None]]

     synced_frame_info_list = misc_utils.get_lidar_camera_synced_frame_info(
         image_timestamp_list=image_timestamp_list,
         lidar_timestamp_list=lidar_timestamp_list,
-        accept_frame_drop=True,
-        num_load_frames=5,
+        lidar_to_camera_latency_sec=0.07,
+        system_scan_period_sec=0.1,
+        max_camera_jitter_sec=0.03,
+        num_load_frames=len(lidar_timestamp_list),
     )
     assert_synced_frame_info_list(expected, synced_frame_info_list)


 def test_get_lidar_camera_synced_frame_info_2():
-    image_timestamp_list = [0.0, 0.2, 0.4]
+    # Test case with image drops
+    image_timestamp_list = [0.07, 0.27, 0.47]
     lidar_timestamp_list = [0.0, 0.1, 0.2, 0.3, 0.4]
-    expected = [[0, 0, None], [None, 1, 0.1], [1, 2, None], [None, 3, 0.3], [2, 4, None]]
+    expected = [[0, 0, None], [None, 1, 0.17], [1, 2, None], [None, 3, 0.37], [2, 4, None]]

     synced_frame_info_list = misc_utils.get_lidar_camera_synced_frame_info(
         image_timestamp_list=image_timestamp_list,
         lidar_timestamp_list=lidar_timestamp_list,
-        accept_frame_drop=True,
-        num_load_frames=5,
+        lidar_to_camera_latency_sec=0.07,
+        system_scan_period_sec=0.1,
+        max_camera_jitter_sec=0.03,
+        num_load_frames=len(lidar_timestamp_list),
     )
     assert_synced_frame_info_list(expected, synced_frame_info_list)


 def test_get_lidar_camera_synced_frame_info_3():
-    image_timestamp_list = [0.07, 0.17, 0.27, 0.37, 0.47]
+    # Test case with different latency
+    image_timestamp_list = [0.11, 0.21, 0.31, 0.41, 0.51]
     lidar_timestamp_list = [0.0, 0.1, 0.2, 0.3, 0.4]
     expected = [[0, 0, None], [1, 1, None], [2, 2, None], [3, 3, None], [4, 4, None]]

     synced_frame_info_list = misc_utils.get_lidar_camera_synced_frame_info(
         image_timestamp_list=image_timestamp_list,
         lidar_timestamp_list=lidar_timestamp_list,
-        accept_frame_drop=True,
-        num_load_frames=5,
+        lidar_to_camera_latency_sec=0.11,
+        system_scan_period_sec=0.1,
+        max_camera_jitter_sec=0.03,
+        num_load_frames=len(lidar_timestamp_list),
     )
     assert_synced_frame_info_list(expected, synced_frame_info_list)


 def test_get_lidar_camera_synced_frame_info_4():
-    image_timestamp_list = [0.12, 0.22, 0.32, 0.42, 0.52]
+    # Test case with different latency
+    image_timestamp_list = [0.14, 0.24, 0.34, 0.44, 0.54]
+    lidar_timestamp_list = [0.0, 0.1, 0.2, 0.3, 0.4]
+    expected = [[0, 0, None], [1, 1, None], [2, 2, None], [3, 3, None], [4, 4, None]]
+
+    synced_frame_info_list = misc_utils.get_lidar_camera_synced_frame_info(
+        image_timestamp_list=image_timestamp_list,
+        lidar_timestamp_list=lidar_timestamp_list,
+        lidar_to_camera_latency_sec=0.14,
+        system_scan_period_sec=0.1,
+        max_camera_jitter_sec=0.03,
+        num_load_frames=len(lidar_timestamp_list),
+    )
+    assert_synced_frame_info_list(expected, synced_frame_info_list)
+
+
+def test_get_lidar_camera_synced_frame_info_5():
+    # Test case with LiDAR drop
+    image_timestamp_list = [0.07, 0.17, 0.27, 0.37, 0.47]
+    lidar_timestamp_list = [0.0, 0.2, 0.3, 0.4]
+    expected = [[0, 0, None], [2, 1, None], [3, 2, None], [4, 3, None]]
+
+    synced_frame_info_list = misc_utils.get_lidar_camera_synced_frame_info(
+        image_timestamp_list=image_timestamp_list,
+        lidar_timestamp_list=lidar_timestamp_list,
+        lidar_to_camera_latency_sec=0.07,
+        system_scan_period_sec=0.1,
+        max_camera_jitter_sec=0.03,
+        num_load_frames=len(lidar_timestamp_list),
+    )
+    assert_synced_frame_info_list(expected, synced_frame_info_list)
+
+
+def test_get_lidar_camera_synced_frame_info_6():
+    # Test case with successive image drops
+    image_timestamp_list = [0.07, 0.37, 0.47]
+    lidar_timestamp_list = [0.0, 0.1, 0.2, 0.3, 0.4]
+    expected = [[0, 0, None], [None, 1, 0.17], [None, 2, 0.27], [1, 3, None], [2, 4, None]]
+
+    synced_frame_info_list = misc_utils.get_lidar_camera_synced_frame_info(
+        image_timestamp_list=image_timestamp_list,
+        lidar_timestamp_list=lidar_timestamp_list,
+        lidar_to_camera_latency_sec=0.07,
+        system_scan_period_sec=0.1,
+        max_camera_jitter_sec=0.03,
+        num_load_frames=len(lidar_timestamp_list),
+    )
+    assert_synced_frame_info_list(expected, synced_frame_info_list)
+
+
+def test_get_lidar_camera_synced_frame_info_7():
+    # Test case with camera jitter
+    image_timestamp_list = [0.07, 0.17, 0.289, 0.37, 0.47]
     lidar_timestamp_list = [0.0, 0.1, 0.2, 0.3, 0.4]
     expected = [[0, 0, None], [1, 1, None], [2, 2, None], [3, 3, None], [4, 4, None]]

     synced_frame_info_list = misc_utils.get_lidar_camera_synced_frame_info(
         image_timestamp_list=image_timestamp_list,
         lidar_timestamp_list=lidar_timestamp_list,
-        accept_frame_drop=True,
-        num_load_frames=5,
+        lidar_to_camera_latency_sec=0.07,
+        system_scan_period_sec=0.1,
+        max_camera_jitter_sec=0.03,
+        num_load_frames=len(lidar_timestamp_list),
     )
     assert_synced_frame_info_list(expected, synced_frame_info_list)
+
+
+def test_get_lidar_camera_synced_frame_info_num_load_frames():
+    # Test case with num_load_frames
+    image_timestamp_list = [0.07, 0.17, 0.27, 0.37, 0.47]
+    lidar_timestamp_list = [0.0, 0.1, 0.2, 0.3, 0.4]
+    expected = [[0, 0, None], [1, 1, None], [2, 2, None]]
+    num_load_frames = 3
+
+    synced_frame_info_list = misc_utils.get_lidar_camera_synced_frame_info(
+        image_timestamp_list=image_timestamp_list,
+        lidar_timestamp_list=lidar_timestamp_list,
+        lidar_to_camera_latency_sec=0.07,
+        system_scan_period_sec=0.1,
+        max_camera_jitter_sec=0.03,
+        num_load_frames=num_load_frames,
+    )
+    assert_synced_frame_info_list(expected, synced_frame_info_list)

From 2b0db0beeadfc0f1a102d5e70a70d080da4fcf36 Mon Sep 17 00:00:00 2001
From: Shunsuke Miura <37187849+miursh@users.noreply.github.com>
Date: Sat, 23 Dec 2023 17:24:09 +0900
Subject: [PATCH 2/2] increment version (#68)

Signed-off-by: Shunsuke Miura
---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 6e558eb3..feb77006 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "perception-dataset"
-version = "1.0.4"
+version = "1.0.5"
 description = "TIER IV Perception dataset has modules to convert dataset from rosbag to t4_dataset"
 authors = ["Yusuke Muramatsu ", "Shunsuke Miura "]
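For quick reference, a minimal usage sketch of the reworked matching function, reproducing test case 2 above: image frames 1 and 3 are missing from the stream, so the matcher emits [None, lidar_index, dummy_timestamp] entries for the corresponding LiDAR frames.

    from perception_dataset.utils import misc as misc_utils

    # Images at index 1 and 3 are dropped; LiDAR runs at the full 10 Hz.
    synced = misc_utils.get_lidar_camera_synced_frame_info(
        image_timestamp_list=[0.07, 0.27, 0.47],
        lidar_timestamp_list=[0.0, 0.1, 0.2, 0.3, 0.4],
        lidar_to_camera_latency_sec=0.07,
        system_scan_period_sec=0.1,
        max_camera_jitter_sec=0.03,
        num_load_frames=5,
    )
    # Each entry is [image_index, lidar_frame_index, dummy_image_timestamp]:
    # [[0, 0, None], [None, 1, 0.17], [1, 2, None], [None, 3, 0.37], [2, 4, None]]
    # (dummy timestamps equal up to float rounding)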