Commit

Accommodate new sdk, make tof stream show up, more or less properly. Still need to fix a few gui bugs.
zrezke committed Dec 5, 2023
1 parent e60fd28 commit e9b5f0b
Showing 11 changed files with 146 additions and 113 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -20,3 +20,4 @@ perf.data*

# Screenshots from samples etc.
screenshot*.png
+venv*
4 changes: 3 additions & 1 deletion crates/re_viewer/src/depthai/depthai.rs
@@ -324,7 +324,7 @@ pub struct DeviceConfig {
impl Default for DeviceConfig {
fn default() -> Self {
Self {
-            auto: true,
+            auto: false,
cameras: Vec::new(),
depth_enabled: true,
depth: Some(DepthConfig::default()),
@@ -771,13 +771,15 @@ impl State {
applied_device_config.depth_enabled = config.depth.is_some();
self.modified_device_config.depth_enabled =
self.modified_device_config.depth.is_some();
+                self.modified_device_config.auto = false; // Always reset auto
self.set_subscriptions(&subs);
self.set_update_in_progress(false);
}
WsMessageData::DeviceProperties(device) => {
re_log::debug!("Setting device: {device:?}");
self.set_device(device);
if !self.selected_device.id.is_empty() {
+                    self.modified_device_config.auto = true;
// Apply default pipeline
self.set_pipeline(&mut self.modified_device_config.clone(), false);
}
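The two flag flips above mirror the backend's PipelineConfiguration.auto field (see device_configuration.py below). A minimal Python sketch of the intended lifecycle, illustrative only, not code from this commit:

    from dataclasses import dataclass

    @dataclass
    class DeviceConfig:  # stand-in for the Rust DeviceConfig / Python PipelineConfiguration
        auto: bool = False  # matches the new default above

    config = DeviceConfig()

    # On WsMessageData::DeviceProperties (a device was just selected):
    # request an auto/default pipeline from the backend.
    config.auto = True

    # Once the applied pipeline comes back: always reset the flag, so that
    # later manual edits are not clobbered by another auto config.
    config.auto = False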
6 changes: 3 additions & 3 deletions crates/re_viewer/src/ui/auto_layout.rs
@@ -340,7 +340,7 @@ fn create_inner_viewport_layout(
.iter()
.filter(|space| {
if let Some(last) = space.path.as_ref().and_then(|path| path.as_slice().last()) {
-                last == &EntityPathPart::from("color_cam")
+                last == &EntityPathPart::from("color_cam") || last == &EntityPathPart::from("tof") // Treat TOF as color for now
} else {
false
}
@@ -551,8 +551,8 @@ pub(crate) fn default_tree_from_space_views(
let tree_clone = tree.clone();
let color_tabs = tree_clone.tabs().filter(|tab| {
if let Some(space_path) = tab.space_path.clone() {
-            if let Some(first_part) = space_path.as_slice().first() {
-                first_part == &EntityPathPart::from("CAM_A")
+            if let Some(first_part) = space_path.as_slice().get(space_path.len() - 2) {
+                first_part == &EntityPathPart::from("color_cam")
} else {
false
}
7 changes: 7 additions & 0 deletions crates/re_viewer/src/ui/selection_panel.rs
@@ -364,6 +364,13 @@ fn colormap_props_ui(
) {
// Color mapping picker
{
+        if entity_props.color_mapper.get() == &ColorMapper::AlbedoTexture {
+            if entity_props.albedo_texture.is_none() {
+                entity_props.color_mapper = EditableAutoValue::Auto(ColorMapper::Colormap(
+                    Colormap::Turbo, // Same default as in images.rs (scene part)
+                ));
+            }
+        }
let current = *entity_props.color_mapper.get();
ui.label("Color map");
egui::ComboBox::from_id_source("depth_color_mapper")
3 changes: 2 additions & 1 deletion crates/re_viewer/src/ui/space_view.rs
@@ -98,8 +98,9 @@ impl SpaceView {
let mut is_2d = false;
if !is_3d {
let last_part = space_path.iter().last().unwrap();
-            is_2d = (last_part == &EntityPathPart::from("mono_cam")
+            is_2d = ((last_part == &EntityPathPart::from("mono_cam")
                || last_part == &EntityPathPart::from("color_cam"))
+                || last_part == &EntityPathPart::from("tof"))
&& last_part != &EntityPathPart::from("transform");
}
if let Some(board_socket) =
2 changes: 1 addition & 1 deletion crates/re_viewer/src/ui/space_view_heuristics.rs
@@ -207,7 +207,7 @@ fn default_depthai_space_views(
.collect::<Vec<_>>();

// If a depth tensor is found, we want to find the 2D space view that has the Image + Depth tensor.
-    // We then wan't to create two separate 2D space views, one for the image and one for the depth.
+    // We then want to create two separate 2D space views, one for the image and one for the depth.
// But we only want to hide the depth (or image), not remove it from the space view.
if let Some(depth_2d) = space_views
.iter_mut()
crates/re_viewer/src/ui/view_spatial/scene/scene_part/images.rs
@@ -2,7 +2,7 @@ use egui::NumExt;
use glam::Vec3;
use itertools::Itertools;

-use re_data_store::{ query_latest_single, EntityPath, EntityProperties };
+use re_data_store::{ query_latest_single, EntityPath, EntityProperties, EditableAutoValue };
use re_log_types::{
component_types::{ ColorRGBA, InstanceKey, Tensor, TensorData, TensorDataMeaning },
Component,
@@ -344,7 +344,9 @@ impl ImagesPart {
"Albedo texture couldn't be fetched ({:?})",
properties.albedo_texture
);
-                colormap = Colormap::Grayscale;
+                colormap = Colormap::Turbo;
+                // Would need some way to update the space view blueprint properties here - to reflect the change in colormap.
+                // For now set the matching default in selection_panel.rs
}
}

99 changes: 58 additions & 41 deletions rerun_py/depthai_viewer/_backend/device.py
@@ -7,11 +7,11 @@
import numpy as np
from depthai_sdk import OakCamera
from depthai_sdk.components import CameraComponent, NNComponent, StereoComponent
-from depthai_sdk.components.tof_component import ToFComponent, Component
+from depthai_sdk.components.tof_component import Component
from depthai_sdk.components.camera_helper import (
getClosestIspScale,
)
-from depthai_sdk.classes.packet_handlers import QueuePacketHandler, ComponentOutput
+from depthai_sdk.classes.packet_handlers import ComponentOutput
from numpy.typing import NDArray

import depthai_viewer as viewer
@@ -29,19 +29,16 @@
compare_dai_camera_configs,
get_size_from_resolution,
size_to_resolution,
+    DepthConfiguration,
+    ALL_NEURAL_NETWORKS,
)
from depthai_viewer._backend.messages import (
ErrorMessage,
InfoMessage,
Message,
WarningMessage,
)
-from depthai_viewer._backend.packet_handler import (
-    AiModelCallbackArgs,
-    DepthCallbackArgs,
-    PacketHandler,
-    SyncedCallbackArgs,
-)
+from depthai_viewer._backend.packet_handler import PacketHandler
from depthai_viewer._backend.store import Store


@@ -84,9 +81,7 @@ class Device:
_xlink_statistics: Optional[XlinkStatistics] = None
_sys_info_q: Optional[Queue] = None # type: ignore[type-arg]
_pipeline_start_t: Optional[float] = None
-    _queues: Dict[
-        Component, QueuePacketHandler
-    ] = {}
+    _queues: List[Tuple[Component, ComponentOutput]] = []

# _profiler = cProfile.Profile()

@@ -285,13 +280,54 @@ def _create_auto_pipeline_config(self, config: PipelineConfiguration) -> Message:
connected_cam_features = self._oak.device.getConnectedCameraFeatures()
if not connected_cam_features:
return ErrorMessage("No camera features found, can't create auto pipeline config!")
-        n_cams = len(connected_cam_features)

print("Connected camera features: ", connected_cam_features)
+        # Step 1: Create all the cameras. Try to find RGB cam, to align depth to it later
+        # Step 2: Create stereo depth if calibration is present. Align to RGB if present, otherwise to left cam
+        # Step 3: Create YOLO
+        rgb_cam_socket = None
+        # 1. Create all the cameras
+        config.cameras = []
+        has_tof = False
+        for cam in connected_cam_features:
+            if cam.name == "rgb":  # By convention
+                rgb_cam_socket = cam.socket
+            resolution = CameraSensorResolution.THE_1080_P if cam.width >= 1920 else CameraSensorResolution.THE_720_P
+            resolution = CameraSensorResolution.THE_1200_P if cam.height == 1200 else resolution
+            preferred_type = cam.supportedTypes[0]
+            if preferred_type == dai.CameraSensorType.TOF:
+                has_tof = True
+            config.cameras.append(
+                CameraConfiguration(resolution=resolution, kind=preferred_type, board_socket=cam.socket, name=cam.name)
+            )

+        # 2. Create stereo depth
+        if not has_tof:
+            try:
+                calibration = self._oak.device.readCalibration2()
+                left_cam = calibration.getStereoLeftCameraId()
+                right_cam = calibration.getStereoRightCameraId()
+                if left_cam.value != 255 and right_cam.value != 255:
+                    config.depth = DepthConfiguration(
+                        stereo_pair=(left_cam, right_cam),
+                        align=rgb_cam_socket if rgb_cam_socket is not None else left_cam,
+                    )
+            except RuntimeError:
+                calibration = None
+        else:
+            config.depth = None
+        # 3. Create YOLO
+        nnet_cam_sock = rgb_cam_socket
+        if nnet_cam_sock is None:
+            # Try to find a color camera config
+            nnet_cam_sock = next(filter(lambda cam: cam.kind == dai.CameraSensorType.COLOR, config.cameras), None)  # type: ignore[assignment]
+            if nnet_cam_sock is not None:
+                nnet_cam_sock = nnet_cam_sock.board_socket
+        if nnet_cam_sock is not None:
+            config.ai_model = ALL_NEURAL_NETWORKS[0]
+            config.ai_model.camera = nnet_cam_sock
+        else:
+            config.ai_model = None
+        return InfoMessage("Created auto pipeline config")

def update_pipeline(self, runtime_only: bool) -> Message:
if self._oak is None:
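A worked restatement of the resolution heuristic from step 1 above. This is a runnable sketch with a stand-in enum, since the real CameraSensorResolution comes from the backend's device_configuration module:

    from enum import Enum

    class CameraSensorResolution(Enum):  # stand-in for the real enum
        THE_720_P = "720p"
        THE_1080_P = "1080p"
        THE_1200_P = "1200p"

    def pick_auto_resolution(width: int, height: int) -> CameraSensorResolution:
        # Sensors at least 1920 px wide get 1080P, everything else 720P...
        resolution = (
            CameraSensorResolution.THE_1080_P
            if width >= 1920
            else CameraSensorResolution.THE_720_P
        )
        # ...except 1200 px tall sensors, which are pinned to 1200P.
        if height == 1200:
            resolution = CameraSensorResolution.THE_1200_P
        return resolution

    print(pick_auto_resolution(1920, 1200))  # CameraSensorResolution.THE_1200_P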
@@ -313,20 +349,15 @@ def update_pipeline(self, runtime_only: bool) -> Message:
if isinstance(message, ErrorMessage):
return message

-        # if config.auto:
-        #     self._create_auto_pipeline_config(config, self._oak.device)
+        if config.auto:
+            self._create_auto_pipeline_config(config)

self._cameras = []
self._stereo = None
self._packet_handler.reset()
self._sys_info_q = None
self._pipeline_start_t = None

-        synced_outputs: Dict[
-            Component, ComponentOutput
-        ] = {}
-        synced_callback_args = SyncedCallbackArgs()

is_poe = self._oak.device.getDeviceInfo().protocol == dai.XLinkProtocol.X_LINK_TCP_IP
print("Usb speed: ", self._oak.device.getUsbSpeed())
is_usb2 = self._oak.device.getUsbSpeed() == dai.UsbSpeed.HIGH
@@ -378,9 +409,7 @@ def update_pipeline(self, runtime_only: bool) -> Message:
# Only create a camera node if it is used by stereo or AI.
if cam.stream_enabled:
if dai.CameraSensorType.TOF in camera_features.supportedTypes:
-                    sdk_cam = self._oak.create_tof(
-                        cam.board_socket
-                    )
+                    sdk_cam = self._oak.create_tof(cam.board_socket)
else:
sdk_cam = self._oak.create_camera(
cam.board_socket,
@@ -395,7 +424,7 @@ def update_pipeline(self, runtime_only: bool) -> Message:
)
)
self._cameras.append(sdk_cam)
-                synced_outputs[sdk_cam] = sdk_cam.out.main
+                self._queues.append((sdk_cam, self._oak.queue(sdk_cam.out.main)))

if config.depth:
print("Creating depth")
@@ -428,10 +457,7 @@ def update_pipeline(self, runtime_only: bool) -> Message:
aligned_camera = self._get_camera_config_by_socket(config, config.depth.align)
if not aligned_camera:
return ErrorMessage(f"{config.depth.align} is not configured. Couldn't create stereo pair.")
-            synced_callback_args.depth_args = DepthCallbackArgs(
-                alignment_camera=aligned_camera, stereo_pair=config.depth.stereo_pair
-            )
-            synced_outputs[self._stereo] = self._stereo.out.main
+            self._queues.append((self._stereo, self._oak.queue(self._stereo.out.main)))

if self._oak.device.getConnectedIMU() != "NONE":
print("Creating IMU")
@@ -465,16 +491,7 @@ def update_pipeline(self, runtime_only: bool) -> Message:
if not camera:
return ErrorMessage(f"{config.ai_model.camera} is not configured. Couldn't create NN.")

-            synced_callback_args.ai_args = AiModelCallbackArgs(
-                model_name=config.ai_model.path, camera=camera, labels=labels
-            )
-            synced_outputs[self._nnet] = self._nnet.out.main
-
-        # Create the sdk queues and finalize the packet handler
-        if synced_outputs:
-            for component, synced_out in synced_outputs.items():
-                self._queues[component] = self._oak.queue(synced_out)
-            self._packet_handler.set_synced_callback_args(synced_callback_args)
+            self._queues.append((self._nnet, self._oak.queue(self._nnet.out.main)))

sys_logger_xlink = self._oak.pipeline.createXLinkOut()
logger = self._oak.pipeline.createSystemLogger()
@@ -508,7 +525,7 @@ def update(self) -> None:
return
self._oak.poll()

-        for component, queue in self._queues.items():
+        for component, queue in self._queues:
try:
packet = queue.get_queue().get_nowait()
self._packet_handler.log_packet(component, packet)
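The structural change running through this file: the synced-callback machinery (SyncedCallbackArgs, DepthCallbackArgs, AiModelCallbackArgs) is gone, and _queues is now a plain list of (component, queue) pairs that update() drains without blocking. A simplified, runnable sketch of that polling pattern, where a plain queue.Queue stands in for the handler returned by self._oak.queue(...):

    import queue
    from typing import Any, Callable, List, Tuple

    _queues: List[Tuple[str, "queue.Queue[Any]"]] = []  # (component, its packet queue)

    def update(log_packet: Callable[[str, Any], None]) -> None:
        for component, q in _queues:
            try:
                packet = q.get_nowait()  # never block the poll loop
            except queue.Empty:
                continue
            log_packet(component, packet)

    # Usage: register one queue per component output, then poll repeatedly.
    q: "queue.Queue[Any]" = queue.Queue()
    _queues.append(("color_cam", q))
    q.put({"frame": 0})
    update(lambda component, packet: print(component, packet))  # -> color_cam {'frame': 0}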
31 changes: 28 additions & 3 deletions rerun_py/depthai_viewer/_backend/device_configuration.py
@@ -26,9 +26,9 @@ class Config:
arbitrary_types_allowed = True

def __init__(self, **v) -> None: # type: ignore[no-untyped-def]
if v.get("median", None):
if v.get("median", None) and isinstance(v["median"], str):
v["median"] = getattr(dai.MedianFilter, v["median"])
if v.get("align", None):
if v.get("align", None) and isinstance(v["align"], str):
v["align"] = getattr(dai.CameraBoardSocket, v["align"])
if v.get("stereo_pair", None) and all(isinstance(pair, str) for pair in v["stereo_pair"]):
v["stereo_pair"] = (
@@ -105,6 +105,30 @@ def dict(self, *args, **kwargs):  # type: ignore[no-untyped-def]
}


+ALL_NEURAL_NETWORKS = [
+    AiModelConfiguration(
+        path="yolov8n_coco_640x352",
+        display_name="Yolo V8",
+        camera=dai.CameraBoardSocket.CAM_A,
+    ),
+    AiModelConfiguration(
+        path="mobilenet-ssd",
+        display_name="MobileNet SSD",
+        camera=dai.CameraBoardSocket.CAM_A,
+    ),
+    AiModelConfiguration(
+        path="face-detection-retail-0004",
+        display_name="Face Detection",
+        camera=dai.CameraBoardSocket.CAM_A,
+    ),
+    AiModelConfiguration(
+        path="age-gender-recognition-retail-0013",
+        display_name="Age gender recognition",
+        camera=dai.CameraBoardSocket.CAM_A,
+    ),
+]


class ImuConfiguration(BaseModel): # type: ignore[misc]
report_rate: int = 100
batch_report_threshold: int = 5
@@ -217,7 +241,7 @@ def dict(self, *args, **kwargs) -> Dict[str, Any]:  # type: ignore[no-untyped-def]


class PipelineConfiguration(BaseModel): # type: ignore[misc]
-    auto: bool = True  # Should the backend automatically create a pipeline based on the device.
+    auto: bool = False  # Should the backend automatically create a pipeline?
cameras: List[CameraConfiguration] = []
depth: Optional[DepthConfiguration]
ai_model: Optional[AiModelConfiguration]
@@ -278,6 +302,7 @@ def dict(self, *args, **kwargs) -> Dict[str, Any]:  # type: ignore[no-untyped-def]
(640, 400): CameraSensorResolution.THE_400_P,
(640, 480): CameraSensorResolution.THE_480_P, # OV7251
(1280, 720): CameraSensorResolution.THE_720_P,
+    # (1280, 962): CameraSensorResolution.THE_1280P,  # TOF
(1280, 800): CameraSensorResolution.THE_800_P, # OV9782
(2592, 1944): CameraSensorResolution.THE_5_MP, # OV5645
(1440, 1080): CameraSensorResolution.THE_1440X1080,
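The isinstance guards added to __init__ above make the string-to-enum coercion idempotent: the config dict round-trips between frontend and backend, so "median"/"align" may already hold dai enum members, and getattr(dai.MedianFilter, <enum member>) would raise "TypeError: attribute name must be string". A runnable illustration with a stand-in enum:

    from enum import Enum

    class MedianFilter(Enum):  # stand-in for dai.MedianFilter
        MEDIAN_OFF = 0
        KERNEL_7x7 = 7

    def coerce_median(v: dict) -> None:
        # The isinstance check skips values that were already coerced.
        if v.get("median", None) and isinstance(v["median"], str):
            v["median"] = getattr(MedianFilter, v["median"])

    cfg = {"median": "KERNEL_7x7"}
    coerce_median(cfg)    # str -> MedianFilter.KERNEL_7x7
    coerce_median(cfg)    # second pass is now a no-op instead of a crash
    print(cfg["median"])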