
perception: ROS2 Node to convert from 2D to 3D detections #94

Open · wants to merge 17 commits into main

cleanup and add viz (fc23ea3)

GitHub Actions / Autopep8 failed Oct 4, 2024 in 0s

Autopep8 found 4 errors
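
All four findings are formatting-only: missing blank lines around top-level definitions, over-long lines that need wrapping, trailing whitespace, and spacing before inline comments. Assuming the check runs autopep8 with its default settings, the same fixes can be applied locally before pushing:

    pip install autopep8
    autopep8 --in-place src/perception/tracking/tracking_viz/tracking_viz/draw_tracks.py

This rewrites the file in place; review the result with git diff before committing.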

Annotations

Check failure on line 30 in src/perception/tracking/tracking_viz/tracking_viz/draw_tracks.py

github-actions / Autopep8

src/perception/tracking/tracking_viz/tracking_viz/draw_tracks.py#L24-L30

 from random import randint
 
 mutex = Lock()
+
 
 class DrawBasicDetections(Node):
     def __init__(self):

Check failure on line 130 in src/perception/tracking/tracking_viz/tracking_viz/draw_tracks.py

github-actions / Autopep8

src/perception/tracking/tracking_viz/tracking_viz/draw_tracks.py#L123-L130

     def try_draw(self):
         if not self.unprocessed_images or not self.unprocessed_dets or self.transform is None or self.camera_info is None:
             return
-        
+
         with mutex:
             image_msg = self.unprocessed_images.popleft()
             det_3d_msg = self.unprocessed_dets.popleft()

Check failure on line 159 in src/perception/tracking/tracking_viz/tracking_viz/draw_tracks.py

github-actions / Autopep8

src/perception/tracking/tracking_viz/tracking_viz/draw_tracks.py#L139-L159

         for det_msg in det_3d_msg.detections:
             bbox = det_msg.bbox
 
-            center = np.array([bbox.center.position.x, bbox.center.position.y, bbox.center.position.z])
-            rot = Rotation.from_quat([bbox.center.orientation.x, bbox.center.orientation.y, bbox.center.orientation.z, bbox.center.orientation.w])
+            center = np.array(
+                [bbox.center.position.x, bbox.center.position.y, bbox.center.position.z])
+            rot = Rotation.from_quat([bbox.center.orientation.x, bbox.center.orientation.y,
+                                     bbox.center.orientation.z, bbox.center.orientation.w])
             size = np.array([bbox.size.x, bbox.size.y, bbox.size.z])
 
             # get all 8 corners
-            vert = [ center + rot.apply(np.multiply(size , np.array([-1, 1, 1]))),
-                     center + rot.apply(np.multiply(size , np.array([-1, -1, 1]))),
-                     center + rot.apply(np.multiply(size , np.array([-1, -1, -1]))),
-                     center + rot.apply(np.multiply(size , np.array([-1, 1, -1]))),
-                     center + rot.apply(np.multiply(size , np.array([1, 1, 1]))),
-                     center + rot.apply(np.multiply(size , np.array([1, -1, 1]))),
-                     center + rot.apply(np.multiply(size , np.array([1, -1, -1]))),
-                     center + rot.apply(np.multiply(size , np.array([1, 1, -1]))),
-            ]
+            vert = [center + rot.apply(np.multiply(size, np.array([-1, 1, 1]))),
+                    center + rot.apply(np.multiply(size, np.array([-1, -1, 1]))),
+                    center + rot.apply(np.multiply(size, np.array([-1, -1, -1]))),
+                    center + rot.apply(np.multiply(size, np.array([-1, 1, -1]))),
+                    center + rot.apply(np.multiply(size, np.array([1, 1, 1]))),
+                    center + rot.apply(np.multiply(size, np.array([1, -1, 1]))),
+                    center + rot.apply(np.multiply(size, np.array([1, -1, -1]))),
+                    center + rot.apply(np.multiply(size, np.array([1, 1, -1]))),
+                    ]
 
             color = (randint(0, 255), randint(0, 255), randint(0, 255))
             verts_2d = []
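
For reference, the eight corners in the hunk above are the signed combinations of the box extents rotated into place. Purely as an illustration (not part of this PR), the same list can be built from a sign table while keeping the ordering the edge-drawing loop below depends on (indices 0-3 form one face, 4-7 the opposite face, each in cyclic order); center, rot, size, and np are the names from the hunk:

    # Illustrative sketch only -- equivalent to the vert list in the hunk above.
    signs = [(-1, 1, 1), (-1, -1, 1), (-1, -1, -1), (-1, 1, -1),
             (1, 1, 1), (1, -1, 1), (1, -1, -1), (1, 1, -1)]
    vert = [center + rot.apply(size * np.array(s)) for s in signs]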

Check failure on line 197 in src/perception/tracking/tracking_viz/tracking_viz/draw_tracks.py

github-actions / Autopep8

src/perception/tracking/tracking_viz/tracking_viz/draw_tracks.py#L179-L197

 
             # draw edges
             for i in range(4):
-                image = cv2.line(image, verts_2d[i], verts_2d[(i+1) % 4], color, 10) # face 1
-                image = cv2.line(image, verts_2d[i+4], verts_2d[(i+1) % 4 + 4], color, 10) # face 2
-                image = cv2.line(image, verts_2d[i], verts_2d[i+4], color, 10) # connect faces
+                image = cv2.line(image, verts_2d[i], verts_2d[(i+1) % 4], color, 10)  # face 1
+                image = cv2.line(image, verts_2d[i+4], verts_2d[(i+1) % 4 + 4], color, 10)  # face 2
+                image = cv2.line(image, verts_2d[i], verts_2d[i+4], color, 10)  # connect faces
 
         self.publish_viz(image, image_msg)
-            
+
     def publish_viz(self, cv_img, msg):
         imgmsg = self.cv_bridge.cv2_to_imgmsg(cv_img, "bgr8")
         imgmsg.header.stamp = msg.header.stamp
         imgmsg.header.frame_id = msg.header.frame_id
         self.viz_publisher.publish(imgmsg)
 
+
 def main(args=None):
     rclpy.init(args=args)
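
The hunks above draw edges between entries of verts_2d, the 2D pixel projections of the 3D box corners; the projection step itself is outside these hunks. As a minimal sketch (an assumption, not the code in this PR), a pinhole projection using the 3x3 intrinsic matrix from camera_info (e.g. K = np.array(self.camera_info.k).reshape(3, 3)) on a corner already transformed into the camera frame would look like:

    # Minimal pinhole-projection sketch (assumes `corner` is already expressed
    # in the camera frame and K is the 3x3 intrinsic matrix).
    def project_to_pixel(corner, K):
        u, v, w = K @ corner            # homogeneous pixel coordinates
        return int(u / w), int(v / w)   # integer pixel (x, y) for cv2.line

Corners with w <= 0 lie behind the camera and would normally be skipped before drawing.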