WIP Sensor fusion / batch inference node #102

Status: Open — wants to merge 1 commit into base: main
55 changes: 55 additions & 0 deletions docker/perception/sensor_fusion/sensor_fusion.Dockerfile
@@ -0,0 +1,55 @@
ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:humble-ubuntu22.04

################################ Source ################################
FROM ${BASE_IMAGE} as source

WORKDIR ${AMENT_WS}/src

# Copy in source code
COPY src/perception/sensor_fusion sensor_fusion
COPY src/wato_msgs/sample_msgs sample_msgs

# Scan for rosdeps
RUN apt-get -qq update && rosdep update && \
rosdep install --from-paths . --ignore-src -r -s \
| grep 'apt-get install' \
| awk '{print $3}' \
| sort > /tmp/colcon_install_list

################################# Dependencies ################################
FROM ${BASE_IMAGE} as dependencies

# Install Rosdep requirements
COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list
RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list)

# Copy in source code from source stage
WORKDIR ${AMENT_WS}
COPY --from=source ${AMENT_WS}/src src

# Dependency Cleanup
WORKDIR /
RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \
rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/*

################################ Build ################################
FROM dependencies as build

# Build ROS2 packages
WORKDIR ${AMENT_WS}
RUN . /opt/ros/$ROS_DISTRO/setup.sh && \
colcon build \
--cmake-args -DCMAKE_BUILD_TYPE=Release

# Entrypoint will run before any CMD on launch. Sources /opt/ros/<ROS_DISTRO>/setup.bash and ${AMENT_WS}/install/setup.bash
COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh
ENTRYPOINT ["./wato_ros_entrypoint.sh"]

################################ Prod ################################
FROM build as deploy

# Source Cleanup and Security Setup
RUN chown -R $USER:$USER ${AMENT_WS}
RUN rm -rf src/*

USER ${USER}
11 changes: 11 additions & 0 deletions modules/docker-compose.perception.yaml
@@ -89,3 +89,14 @@ services:
target: deploy
image: "${PERCEPTION_DEPTH_ESTIMATION_IMAGE}:${TAG}"
command: /bin/bash -c "ros2 launch depth_estimation eve.launch.py"

sensor_fusion:
build:
context: ..
dockerfile: docker/perception/sensor_fusion/sensor_fusion.Dockerfile
cache_from:
- "${PERCEPTION_SENSOR_FUSION_IMAGE}:build_${TAG}"
- "${PERCEPTION_SENSOR_FUSION_IMAGE}:build_main"
target: deploy
image: "${PERCEPTION_SENSOR_FUSION_IMAGE}:${TAG}"
command: /bin/bash -c "ros2 launch sensor_fusion eve.launch.py"
23 changes: 23 additions & 0 deletions src/perception/camera_object_detection/config/eve_2cam_config.yaml
@@ -0,0 +1,23 @@
left_camera_object_detection_node:
ros__parameters:
camera_topic: /camera/left/image_color
publish_vis_topic: /camera/left/camera_detections_viz
publish_detection_topic: /camera/left/camera_detections
model_path: /perception_models/yolov8m.pt
image_size: 1024

center_camera_object_detection_node:
ros__parameters:
camera_topic: /camera/center/image_color
publish_vis_topic: /camera/center/camera_detections_viz
publish_detection_topic: /camera/center/camera_detections
model_path: /perception_models/yolov8m.pt
image_size: 1024

right_camera_object_detection_node:
ros__parameters:
camera_topic: /camera/right/image_color
publish_vis_topic: /camera/right/camera_detections_viz
publish_detection_topic: /camera/right/camera_detections
model_path: /perception_models/yolov8m.pt
image_size: 1024
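For context, a parameter file like this is normally applied by launching one detection node per camera, with each node's name matching a top-level key in the YAML so that its `ros__parameters` block is picked up. A minimal launch sketch is below; the `camera_object_detection` package and executable names are assumptions, since the corresponding launch file is not part of this diff. (Note that despite the `2cam` in the filename, the file configures three cameras.)

```python
# Hypothetical launch sketch: package and executable names are assumptions;
# only the node names and the parameter file come from eve_2cam_config.yaml.
import os

from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch_ros.actions import Node


def generate_launch_description():
    config = os.path.join(
        get_package_share_directory('camera_object_detection'),
        'config',
        'eve_2cam_config.yaml',
    )

    # One detection node per camera; each node name must match a key in the YAML
    # for its ros__parameters block to be applied.
    return LaunchDescription([
        Node(
            package='camera_object_detection',
            executable='camera_object_detection_node',  # assumed executable name
            name=f'{side}_camera_object_detection_node',
            parameters=[config],
        )
        for side in ('left', 'center', 'right')
    ])
```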
23 changes: 23 additions & 0 deletions src/perception/sensor_fusion/package.xml
@@ -0,0 +1,23 @@
<?xml version="1.0"?>
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
<name>sensor_fusion</name>
<version>0.0.0</version>
<description>Sensor fusion node that subscribes to multiple sensor topics and synchronizes their output as a unified output message.</description>
<maintainer email="[email protected]">bolty</maintainer>
<license>TODO: License declaration</license>


  <depend>rclpy</depend>
  <depend>sensor_msgs</depend>
  <depend>std_msgs</depend>
  <depend>message_filters</depend>
  <depend>cv_bridge</depend>

<test_depend>ament_copyright</test_depend>
<test_depend>ament_flake8</test_depend>
<test_depend>ament_pep257</test_depend>
<test_depend>python3-pytest</test_depend>

<export>
<build_type>ament_python</build_type>
</export>

</package>
Empty file.
45 changes: 45 additions & 0 deletions src/perception/sensor_fusion/sensor_fusion/sensor_fusion_node.py
@@ -0,0 +1,45 @@
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from std_msgs.msg import String
from message_filters import Subscriber, TimeSynchronizer
from cv_bridge import CvBridge
import cv2
import json

class SensorFusionNode(Node):
def __init__(self):
super().__init__('sensor_fusion_node')

self.bridge = CvBridge()

# Subscribe to camera topics
self.subscribers = [
Subscriber(self, Image, f'/camera{i}/image_raw') for i in range(1, 9)
]

# TimeSynchronizer to synchronize messages
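        # NOTE: TimeSynchronizer does exact header-stamp matching across all eight
        # topics; for real camera streams, message_filters.ApproximateTimeSynchronizer
        # is usually the safer choice.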
self.ts = TimeSynchronizer(self.subscribers, 10)
self.ts.registerCallback(self.image_callback)

# Publisher for synchronized images
self.publisher = self.create_publisher(String, 'synchronized_images', 10)

    def image_callback(self, *images):
        # Convert each ROS image to OpenCV format and JPEG-encode it
        encoded_images = [
            cv2.imencode('.jpg', self.bridge.imgmsg_to_cv2(img, 'bgr8'))[1]
            for img in images
        ]

        # Build a JSON-serializable dict of hex-encoded JPEG buffers, keyed by camera
        images_dict = {
            f'camera{i + 1}': buf.tobytes().hex()
            for i, buf in enumerate(encoded_images)
        }

        # Publish the synchronized batch as a single JSON string
        msg = String()
        msg.data = json.dumps(images_dict)
        self.publisher.publish(msg)

def main(args=None):
rclpy.init(args=args)
node = SensorFusionNode()
rclpy.spin(node)
node.destroy_node()
rclpy.shutdown()

if __name__ == '__main__':
main()
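For reference, a downstream node could recover the batch from the JSON payload as sketched below. This consumer is purely illustrative and not part of the PR; it assumes the hex-encoded JPEG layout produced by `image_callback` above. Worth noting as a design point: hex-encoding JPEG bytes inside a `std_msgs/String` roughly doubles the payload size, so a custom message carrying `sensor_msgs/CompressedImage[]` would be a leaner transport for batch inference.

```python
# Illustrative consumer sketch (not part of this PR): decodes the JSON payload
# published on 'synchronized_images' back into a list of OpenCV BGR images.
import json

import cv2
import numpy as np
import rclpy
from rclpy.node import Node
from std_msgs.msg import String


class SynchronizedImageConsumer(Node):
    def __init__(self):
        super().__init__('synchronized_image_consumer')
        self.subscription = self.create_subscription(
            String, 'synchronized_images', self.callback, 10)

    def callback(self, msg):
        images_dict = json.loads(msg.data)
        # Rebuild each JPEG buffer from its hex string and decode it to BGR
        batch = [
            cv2.imdecode(np.frombuffer(bytes.fromhex(hex_str), dtype=np.uint8),
                         cv2.IMREAD_COLOR)
            for hex_str in images_dict.values()
        ]
        self.get_logger().info(f'Received a synchronized batch of {len(batch)} images')


def main(args=None):
    rclpy.init(args=args)
    node = SynchronizedImageConsumer()
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()
```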
31 changes: 31 additions & 0 deletions src/perception/sensor_fusion/setup.py
@@ -0,0 +1,31 @@
from setuptools import setup
import os
from glob import glob

package_name = 'sensor_fusion'

setup(
name=package_name,
version='0.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(os.path.join('share', package_name, 'launch'), glob('launch/*.launch.py')),
(os.path.join('share', package_name, 'launch', 'include'), glob('launch/include/*.launch.py')),
(os.path.join('share', package_name, 'config'), glob('config/*.yaml')),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='Justin',
maintainer_email='[email protected]',
description='TODO: Package description',
license='TODO: License declaration',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'sensor_fusion_node = sensor_fusion.sensor_fusion_node:main'
],
},
)
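With the `console_scripts` entry point registered, the node can be started after a workspace build with `ros2 run sensor_fusion sensor_fusion_node`. The compose service above instead invokes `ros2 launch sensor_fusion eve.launch.py`, but no `launch/` or `config/` files for this package are included in this diff yet, so the `glob()` entries in `data_files` currently resolve to empty lists.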
4 changes: 2 additions & 2 deletions watod-config.sh
@@ -15,7 +15,7 @@
## - simulation : starts simulation
## - samples : starts sample ROS2 pubsub nodes

# ACTIVE_MODULES=""
ACTIVE_MODULES="perception"

################################# MODE OF OPERATION #################################
## Possible modes of operation when running watod.
@@ -31,7 +31,7 @@

## Tag to use. Images are formatted as <IMAGE_NAME>:<TAG> with forward slashes replaced with dashes.
## DEFAULT = "<your_current_github_branch>"
# TAG=""
TAG="jusleung"

# Docker Registry to pull/push images. DEFAULT = "ghcr.io/watonomous/wato_monorepo"
# REGISTRY_URL=""
2 changes: 2 additions & 0 deletions watod_scripts/watod-setup-env.sh
@@ -73,6 +73,7 @@ PERCEPTION_LANE_DETECTION_IMAGE=${PERCEPTION_LANE_DETECTION_IMAGE:-"$REGISTRY_UR
PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE=${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE:-"$REGISTRY_URL/perception/semantic_segmentation"}
PERCEPTION_TRACKING_IMAGE=${PERCEPTION_TRACKING_IMAGE:-"$REGISTRY_URL/perception/tracking"}
PERCEPTION_DEPTH_ESTIMATION_IMAGE=${PERCEPTION_DEPTH_ESTIMATION_IMAGE:-"$REGISTRY_URL/perception/depth_estimation"}
PERCEPTION_SENSOR_FUSION_IMAGE=${PERCEPTION_SENSOR_FUSION_IMAGE:-"$REGISTRY_URL/perception/sensor_fusion"}

# World Modeling
WORLD_MODELING_HD_MAP_IMAGE=${WORLD_MODELING_HD_MAP_IMAGE:-"$REGISTRY_URL/world_modeling/hd_map"}
@@ -168,6 +169,7 @@ echo "PERCEPTION_LANE_DETECTION_IMAGE=$PERCEPTION_LANE_DETECTION_IMAGE" >> "$MOD
echo "PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE=$PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE" >> "$MODULES_DIR/.env"
echo "PERCEPTION_TRACKING_IMAGE=$PERCEPTION_TRACKING_IMAGE" >> "$MODULES_DIR/.env"
echo "PERCEPTION_DEPTH_ESTIMATION_IMAGE=$PERCEPTION_DEPTH_ESTIMATION_IMAGE" >> "$MODULES_DIR/.env"
echo "PERCEPTION_SENSOR_FUSION_IMAGE=$PERCEPTION_SENSOR_FUSION_IMAGE" >> "$MODULES_DIR/.env"

# World Modeling
echo "WORLD_MODELING_HD_MAP_IMAGE=$WORLD_MODELING_HD_MAP_IMAGE" >> "$MODULES_DIR/.env"