From 05d224756cddccd3bc9a22362b265ecf3095a7c8 Mon Sep 17 00:00:00 2001
From: Maciejeg
Date: Fri, 16 Feb 2024 11:44:45 +0100
Subject: [PATCH] fix: adjust bbox mask based on frame and sensor

---
 .../t4_dataset/annotation_files_generator.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/perception_dataset/t4_dataset/annotation_files_generator.py b/perception_dataset/t4_dataset/annotation_files_generator.py
index 00db9ece..8a9f6ba0 100644
--- a/perception_dataset/t4_dataset/annotation_files_generator.py
+++ b/perception_dataset/t4_dataset/annotation_files_generator.py
@@ -279,10 +279,12 @@ def _convert_to_t4_format(
 
                 # Object Annotation
                 if "two_d_box" in anno.keys():
-                    anno_two_d_box: List[float] = self._clip_bbox(anno["two_d_box"], mask)
                     sensor_id: int = int(anno["sensor_id"])
                     if frame_index not in frame_index_to_sample_data_token[sensor_id]:
                         continue
+                    anno_two_d_box: List[float] = self._clip_bbox(
+                        anno["two_d_box"], mask[sensor_id][frame_index]
+                    )
                     self._object_ann_table.insert_into_table(
                         sample_data_token=frame_index_to_sample_data_token[sensor_id][frame_index],
                         instance_token=instance_token,
@@ -292,10 +294,10 @@ def _convert_to_t4_format(
                         mask=mask[sensor_id][frame_index],
                     )
 
-    def _clip_bbox(self, bbox: List[float], mask: any) -> List[float]:
+    def _clip_bbox(self, bbox: List[float], mask: Dict[str, Any]) -> List[float]:
         """Clip the bbox to the image size."""
         try:
-            width, height = list(mask[0].values())[0]["size"]
+            width, height = mask["size"]
             bbox[0] = max(0, bbox[0])
             bbox[1] = max(0, bbox[1])
             bbox[2] = min(width, bbox[2])
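
Note on the change (not part of the patch): a minimal sketch of the data access this fix assumes, with purely hypothetical sensor ids, frame indices, and sizes. Previously _clip_bbox was handed the whole nested mask structure and read list(mask[0].values())[0]["size"], i.e. the size stored for the first frame of sensor 0, regardless of which sensor and frame the annotation actually belonged to. With the patch it receives only the entry for the annotation's own sensor and frame, so the "size" lookup matches the image being annotated.

    # Hypothetical nested mask layout: sensor_id -> frame_index -> mask record.
    mask = {
        0: {0: {"size": [1920, 1080]}},
        1: {0: {"size": [2880, 1860]}},
    }
    sensor_id, frame_index = 1, 0

    # Old call site: the clip size ignored sensor_id and frame_index.
    # anno_two_d_box = self._clip_bbox(anno["two_d_box"], mask)

    # New call site: the clip size comes from the matching per-sensor, per-frame entry.
    # anno_two_d_box = self._clip_bbox(anno["two_d_box"], mask[sensor_id][frame_index])

    # Inside _clip_bbox, the per-entry dict now exposes the size directly.
    width, height = mask[sensor_id][frame_index]["size"]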