Skip to content

Commit

Permalink
Merge branch 'main' into feat/annotated_to_deepen/add_default_attribute
Browse files Browse the repository at this point in the history
  • Loading branch information
miursh authored Dec 27, 2023
2 parents 1482737 + 2b0db0b commit 8a66687
Show file tree
Hide file tree
Showing 12 changed files with 252 additions and 103 deletions.
7 changes: 6 additions & 1 deletion config/convert_rosbag2_to_non_annotated_t4_sample.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,17 +12,22 @@ conversion:
lidar_sensor:
topic: /sensing/lidar/concatenated/pointcloud
channel: LIDAR_CONCAT
camera_latency_sec: 0.02 # camera latency in seconds between the header.stamp and the exposure trigger
camera_sensors: # Keep the same order as each camera exposure timing
- topic: /sensing/camera/camera3/image_rect_color/compressed
channel: CAM_BACK_LEFT
delay_msec: 48.33
- topic: /sensing/camera/camera2/image_rect_color/compressed
channel: CAM_FRONT_LEFT
delay_msec: 65.0
- topic: /sensing/camera/camera0/image_rect_color/compressed
channel: CAM_FRONT
delay_msec: 81.67
- topic: /sensing/camera/camera4/image_rect_color/compressed
channel: CAM_FRONT_RIGHT
delay_msec: 98.33
- topic: /sensing/camera/camera5/image_rect_color/compressed
channel: CAM_BACK_RIGHT
delay_msec: 115.0
- topic: /sensing/camera/camera1/image_rect_color/compressed
channel: CAM_BACK
delay_msec: 131.67
3 changes: 2 additions & 1 deletion config/convert_rosbag2_to_non_annotated_t4_tlr_sample.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,12 @@ conversion:
lidar_sensor:
topic: ""
channel: ""
camera_latency_sec: 0.1 # camera latency in seconds between the header.stamp and shutter trigger
camera_sensors:
- topic: /sensing/camera/camera6/image_raw/compressed
channel: CAM_TRAFFIC_LIGHT_FAR
delay_msec: 0.1
- topic: /sensing/camera/camera7/image_raw/compressed
channel: CAM_TRAFFIC_LIGHT_NEAR
delay_msec: 0.1
generate_frame_every: 1
generate_frame_every_meter: 50
Original file line number Diff line number Diff line change
Expand Up @@ -24,17 +24,22 @@ conversion:
channel: RADAR_BACK_LEFT
- topic: /sensing/radar/rear_right/objects_raw
channel: RADAR_BACK_RIGHT
camera_latency_sec: 0.02 # camera latency in seconds between the header.stamp and the exposure trigger
camera_sensors: # Keep the same order as each camera exposure timing
- topic: /sensing/camera/camera3/image_rect_color/compressed
channel: CAM_BACK_LEFT
delay_msec: 48.33
- topic: /sensing/camera/camera2/image_rect_color/compressed
channel: CAM_FRONT_LEFT
delay_msec: 65.0
- topic: /sensing/camera/camera0/image_rect_color/compressed
channel: CAM_FRONT
delay_msec: 81.67
- topic: /sensing/camera/camera4/image_rect_color/compressed
channel: CAM_FRONT_RIGHT
delay_msec: 98.33
- topic: /sensing/camera/camera5/image_rect_color/compressed
channel: CAM_BACK_RIGHT
delay_msec: 115.0
- topic: /sensing/camera/camera1/image_rect_color/compressed
channel: CAM_BACK
delay_msec: 131.67
4 changes: 3 additions & 1 deletion config/rosbag2_to_t4/convert_synthetic_data_camera.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@ conversion:
workers_number: 1
skip_timestamp: 2.0
num_load_frames: 50
accept_frame_drop: false # If true, the conversion will continue even if the LiDAR frame is dropped.
crop_frames_unit: 1 # crop frames from the end so that the number of frames is divisible by crop_frames_unit. Set to 0 or 1 so as not to crop any frames.
camera_latency_sec: 0.00
object_topic_name: /ground_truth/objects
object_msg_type: TrackedObjects
world_frame_id: world
Expand All @@ -19,5 +19,7 @@ conversion:
camera_sensors: #Keep the same order as each camera exposure timing
- topic: /sensing/camera/camera4/image_rect_color
channel: CAM_FRONT
delay_msec: 0.0
- topic: /sensing/camera/camera5/image_rect_color
channel: CAM_BACK
delay_msec: 0.0
14 changes: 11 additions & 3 deletions perception_dataset/rosbag2/converter_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,12 +50,20 @@ class Rosbag2ConverterParams(BaseModel):
skip_timestamp: float # not read for the second after the first topic
start_timestamp_sec: float = 0.0 # conversion start timestamp in sec
crop_frames_unit: int = 1 # crop frames from the end so that the number of frames is divisible by crop_frames_unit. Set to 0 or 1 so as not to crop any frames.
camera_latency_sec: float = (
0.0 # camera latency in seconds between the header.stamp and shutter trigger

# Maximum camera jitter in seconds. This value MUST be set large enough, since camera jitter smaller than this value is not considered.
# Also, do not set this value larger than system_scan_period_sec.
max_camera_jitter_sec: float = 0.03

lidar_latency_sec: float = (
0.03 # lidar latency in seconds between the header.stamp and shutter trigger
)
timestamp_diff: float = 0.15
system_scan_period_sec: float = 0.1 # system scan period in seconds
topic_list: list = [] # topic list for input_bag
mandatory_topic_list: list = [] # mandatory topic list for input_bag

lidar_points_ratio_threshold: float = 0.2 # ratio of lidar points to be used proportion to the maximum number of lidar points in a frame

# in synthetic data (from AWSIM) it may be the case that there is no ego transform available at the beginning of rosbag
ignore_no_ego_transform_at_rosbag_beginning: bool = False
generate_frame_every: int = 1 # pick frames out of every this number.
Expand Down
97 changes: 64 additions & 33 deletions perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,10 @@ def __init__(self, params: Rosbag2ConverterParams) -> None:
self._num_load_frames: int = params.num_load_frames
self._crop_frames_unit: int = params.crop_frames_unit
self._without_compress: bool = params.without_compress
self._camera_latency: float = params.camera_latency_sec
self._system_scan_period_sec: float = params.system_scan_period_sec
self._max_camera_jitter_sec: float = params.max_camera_jitter_sec
self._lidar_latency: float = params.lidar_latency_sec
self._lidar_points_ratio_threshold: float = params.lidar_points_ratio_threshold
self._start_timestamp: float = params.start_timestamp_sec
self._end_timestamp: float = 0
self._data_type: DataType = params.data_type
Expand All @@ -121,8 +124,8 @@ def __init__(self, params: Rosbag2ConverterParams) -> None:
self._calibrated_sensor_target_frame: str = "base_link"

# Note: To determine if there is any message dropout, including a delay tolerance of 10Hz.

self._TIMESTAMP_DIFF = params.timestamp_diff
# Note: The delay tolerance is set to 1.5 times the system scan period.
self._timestamp_diff = params.system_scan_period_sec * 1.5

self._lidar_sensor: Dict[str, str] = params.lidar_sensor
self._radar_sensors: List[Dict[str, str]] = params.radar_sensors
Expand Down Expand Up @@ -346,9 +349,8 @@ def _convert_sensor_data(
# temporarily use start_timestamp instead of the recorded timestamp for non-synced data
camera_start_timestamp = start_timestamp
else:
camera_start_timestamp = (
misc_utils.nusc_timestamp_to_unix_timestamp(first_sample_data_record.timestamp)
+ self._camera_latency
camera_start_timestamp = misc_utils.nusc_timestamp_to_unix_timestamp(
first_sample_data_record.timestamp
)

for camera_sensor in self._camera_sensors:
Expand All @@ -357,6 +359,7 @@ def _convert_sensor_data(
start_timestamp=camera_start_timestamp,
sensor_channel=camera_sensor["channel"],
topic=camera_sensor["topic"],
delay_msec=float(camera_sensor["delay_msec"]),
scene_token=scene_token,
)

Expand Down Expand Up @@ -405,6 +408,17 @@ def _convert_pointcloud(
calibrated_sensor_token = self._generate_calibrated_sensor(
sensor_channel, start_time_in_time, topic
)

# Calculate the maximum number of points
max_num_points = 0
for pointcloud_msg in self._bag_reader.read_messages(
topics=[topic],
start_time=start_time_in_time,
):
points_arr = rosbag2_utils.pointcloud_msg_to_numpy(pointcloud_msg)
max_num_points = max(max_num_points, len(points_arr))

# Main iteration
for pointcloud_msg in self._bag_reader.read_messages(
topics=[topic],
start_time=start_time_in_time,
Expand Down Expand Up @@ -434,12 +448,27 @@ def _convert_pointcloud(
)
# Note: LiDAR Message drops are not accepted unless accept_frame_drop is True.
if not self._accept_frame_drop and (
time_diff > self._TIMESTAMP_DIFF or unix_timestamp < prev_frame_unix_timestamp
time_diff > self._timestamp_diff or unix_timestamp < prev_frame_unix_timestamp
):
raise ValueError(
f"PointCloud message is dropped [{frame_index}]: cur={unix_timestamp} prev={prev_frame_unix_timestamp}"
)

points_arr = rosbag2_utils.pointcloud_msg_to_numpy(pointcloud_msg)
if len(points_arr) < max_num_points * self._lidar_points_ratio_threshold:
if not self._accept_frame_drop:
raise ValueError(
f"PointCloud message is relatively lower than the maximum size, which is not acceptable. "
f"If you would like to accept, please change accept_frame_drop parameter. "
f"frame_index: {frame_index}, stamp: {unix_timestamp}, # points: {len(points_arr)}"
)
else:
warnings.warn(
f"PointCloud message is relatively lower than the maximum size. "
f"May be encountering a LiDAR message drop. Skip frame_index: {frame_index}, stamp: {unix_timestamp}, # points: {len(points_arr)}"
)
continue

nusc_timestamp = rosbag2_utils.stamp_to_nusc_timestamp(pointcloud_msg.header.stamp)
sample_token = self._sample_table.insert_into_table(
timestamp=nusc_timestamp, scene_token=scene_token
Expand All @@ -461,10 +490,8 @@ def _convert_pointcloud(
)

# TODO(yukke42): Save data in the PCD file format, which allows flexible field configuration.
if isinstance(pointcloud_msg, PointCloud2):
points_arr = rosbag2_utils.pointcloud_msg_to_numpy(pointcloud_msg)
else:
points_arr = np.zeros((0, 5), dtype=np.float32)
points_arr = rosbag2_utils.pointcloud_msg_to_numpy(pointcloud_msg)
if len(points_arr) == 0:
warnings.warn(
f"PointCloud message is empty [{frame_index}]: cur={unix_timestamp} prev={prev_frame_unix_timestamp}"
)
Expand Down Expand Up @@ -519,7 +546,7 @@ def _convert_radar_tracks(
)
if (
unix_timestamp - prev_frame_unix_timestamp
) > self._TIMESTAMP_DIFF or unix_timestamp < prev_frame_unix_timestamp:
) > self._timestamp_diff or unix_timestamp < prev_frame_unix_timestamp:
raise ValueError(
f"{topic} message is dropped [{frame_index}]: cur={unix_timestamp} prev={prev_frame_unix_timestamp}"
)
Expand Down Expand Up @@ -561,6 +588,7 @@ def _convert_image(
start_timestamp: float,
sensor_channel: str,
topic: str,
delay_msec: float,
scene_token: str,
):
"""convert image topic to raw image data"""
Expand All @@ -584,14 +612,16 @@ def _convert_image(
misc_utils.nusc_timestamp_to_unix_timestamp(sample_record.timestamp)
for sample_record in sample_records
]
lidar_to_camera_latency_sec = self._lidar_latency + 1e-3 * delay_msec

synced_frame_info_list = misc_utils.get_lidar_camera_synced_frame_info(
image_timestamp_list,
lidar_timestamp_list,
self._camera_latency,
self._accept_frame_drop,
self._TIMESTAMP_DIFF,
self._num_load_frames,
self._msg_display_interval,
image_timestamp_list=image_timestamp_list,
lidar_timestamp_list=lidar_timestamp_list,
lidar_to_camera_latency_sec=lidar_to_camera_latency_sec,
system_scan_period_sec=self._system_scan_period_sec,
max_camera_jitter_sec=self._max_camera_jitter_sec,
num_load_frames=self._num_load_frames,
msg_display_interval=self._msg_display_interval,
)

# Get image shape
Expand All @@ -606,34 +636,35 @@ def _convert_image(
)
for image_index, lidar_frame_index, dummy_image_timestamp in synced_frame_info_list:
lidar_sample_token: str = sample_records[lidar_frame_index].token
if dummy_image_timestamp is None:
image_msg = None
while image_index_counter < image_index:
image_msg = next(image_generator)
image_index_counter += 1

if image_index is None: # Image dropped
sample_data_token = self._generate_image_data(
rosbag2_utils.compressed_msg_to_numpy(image_msg),
rosbag2_utils.stamp_to_unix_timestamp(image_msg.header.stamp),
np.zeros(shape=image_shape, dtype=np.uint8), # dummy image
dummy_image_timestamp,
lidar_sample_token,
calibrated_sensor_token,
sensor_channel,
lidar_frame_index,
output_blank_image=True,
is_key_frame=False,
)
sample_data_token_list.append(sample_data_token)
else:
elif lidar_frame_index is None: # LiDAR dropped
warnings.warn(f"LiDAR message dropped at image_index: {image_index}")
else: # Both messages available
image_msg = None
while image_index_counter < image_index:
image_msg = next(image_generator)
image_index_counter += 1

sample_data_token = self._generate_image_data(
np.zeros(shape=image_shape, dtype=np.uint8), # dummy image
dummy_image_timestamp,
rosbag2_utils.compressed_msg_to_numpy(image_msg),
rosbag2_utils.stamp_to_unix_timestamp(image_msg.header.stamp),
lidar_sample_token,
calibrated_sensor_token,
sensor_channel,
lidar_frame_index,
output_blank_image=True,
is_key_frame=False,
)
sample_data_token_list.append(sample_data_token)

else: # camera only mode

def get_move_distance(trans1: Dict[str, float], trans2: Dict[str, float]) -> float:
Expand Down
Loading

0 comments on commit 8a66687

Please sign in to comment.