diff --git a/Cargo.lock b/Cargo.lock index cac04b24efca..a021d8d15cc8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -140,30 +140,30 @@ checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" [[package]] name = "anstyle-parse" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +checksum = "a3a318f1f38d2418400f8209655bfd825785afd25aa30bb7ba6cc792e4596748" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.1" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" dependencies = [ "anstyle", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -177,7 +177,7 @@ name = "api_demo" version = "0.1.4" dependencies = [ "anyhow", - "clap 4.4.10", + "clap 4.4.11", "depthai-viewer", "glam", "itertools", @@ -906,9 +906,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.10" +version = "4.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fffed7514f420abec6d183b1d3acfd9099c79c3a10a06ade4f8203f1411272" +checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" dependencies = [ "clap_builder", "clap_derive", @@ -916,9 +916,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.9" +version = "4.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63361bae7eef3771745f02d8d892bec2fee5f6e34af316ba556e7f97a7069ff1" +checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" dependencies = [ "anstream", "anstyle", @@ -1394,7 +1394,7 @@ version = "0.1.4" dependencies = [ "anyhow", "backtrace", - "clap 4.4.10", + "clap 4.4.11", "ctrlc", "document-features", "itertools", @@ -1900,14 +1900,14 @@ dependencies = [ [[package]] name = "filetime" -version = "0.2.22" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4029edd3e734da6fe05b6cd7bd2960760a616bd2ddd0d59a0124746d6272af0" +checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.3.5", - "windows-sys 0.48.0", + "redox_syscall 0.4.1", + "windows-sys 0.52.0", ] [[package]] @@ -3121,7 +3121,7 @@ name = "minimal_options" version = "0.1.4" dependencies = [ "anyhow", - "clap 4.4.10", + "clap 4.4.11", "depthai-viewer", "glam", ] @@ -3602,7 +3602,7 @@ name = "objectron" version = "0.1.4" dependencies = [ "anyhow", - "clap 4.4.10", + "clap 4.4.11", "depthai-viewer", "glam", "prost", @@ -3623,9 +3623,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "openssl" -version = "0.10.60" +version = "0.10.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" +checksum = "6b8419dc8cc6d866deb801274bba2e6f8f6108c1bb7fcc10ee5ab864931dbb45" dependencies = [ 
"bitflags 2.4.1", "cfg-if", @@ -3655,9 +3655,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.96" +version = "0.9.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" +checksum = "c3eaad34cdd97d81de97964fc7f29e2d104f483840d906ef56daa1912338460b" dependencies = [ "cc", "libc", @@ -4328,7 +4328,7 @@ version = "0.1.4" dependencies = [ "anyhow", "bytes", - "clap 4.4.10", + "clap 4.4.11", "depthai-viewer", "gltf", "mimalloc", @@ -5057,9 +5057,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.6" +version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "684d5e6e18f669ccebf64a92236bb7db9a34f07be010e3627368182027180866" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" dependencies = [ "cc", "getrandom", @@ -5179,7 +5179,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" dependencies = [ "log", - "ring 0.17.6", + "ring 0.17.7", "rustls-webpki", "sct", ] @@ -5190,7 +5190,7 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.6", + "ring 0.17.7", "untrusted 0.9.0", ] @@ -5259,7 +5259,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.6", + "ring 0.17.7", "untrusted 0.9.0", ] @@ -6745,7 +6745,7 @@ version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" dependencies = [ - "ring 0.17.6", + "ring 0.17.7", "untrusted 0.9.0", ] @@ -7177,9 +7177,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.5.19" +version = "0.5.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "829846f3e3db426d4cee4510841b71a8e58aa2a76b1132579487ae430ccd9c7b" +checksum = "b7e87b8dfbe3baffbe687eef2e164e32286eff31a5ee16463ce03d991643ec94" dependencies = [ "memchr", ] @@ -7399,18 +7399,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.28" +version = "0.7.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d6f15f7ade05d2a4935e34a457b936c23dc70a05cc1d97133dc99e7a3fe0f0e" +checksum = "5d075cf85bbb114e933343e087b92f2146bac0d55b534cbb8188becf0039948e" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.28" +version = "0.7.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbbad221e3f78500350ecbd7dfa4e63ef945c05f4c61cb7f4d3f84cd0bba649b" +checksum = "86cd5ca076997b97ef09d3ad65efe811fa68c9e874cb636ccb211223a813b0c2" dependencies = [ "proc-macro2", "quote", diff --git a/crates/re_viewer/src/depthai/depthai.rs b/crates/re_viewer/src/depthai/depthai.rs index a681fb35f4a2..5e770cdcb208 100644 --- a/crates/re_viewer/src/depthai/depthai.rs +++ b/crates/re_viewer/src/depthai/depthai.rs @@ -130,6 +130,7 @@ pub enum CameraSensorResolution { THE_1440X1080, THE_1080_P, THE_1200_P, + THE_1280_P, THE_4_K, THE_4000X3000, THE_12_MP, @@ -149,6 +150,7 @@ impl fmt::Display for CameraSensorResolution { Self::THE_1440X1080 => write!(f, "1440x1080"), 
Self::THE_1080_P => write!(f, "1080p"), Self::THE_1200_P => write!(f, "1200p"), + Self::THE_1280_P => write!(f, "1280p"), Self::THE_4_K => write!(f, "4k"), Self::THE_4000X3000 => write!(f, "4000x3000"), Self::THE_12_MP => write!(f, "12MP"), @@ -243,8 +245,8 @@ impl Default for DepthMedianFilter { } } -#[derive(serde::Deserialize, serde::Serialize, Clone, Copy, PartialEq, Debug)] -pub struct DepthConfig { +#[derive(serde::Deserialize, serde::Serialize, Clone, PartialEq, Debug)] +pub struct StereoDepthConfig { pub median: DepthMedianFilter, pub lr_check: bool, pub lrc_threshold: u64, @@ -256,7 +258,7 @@ pub struct DepthConfig { pub stereo_pair: (CameraBoardSocket, CameraBoardSocket), } -impl Default for DepthConfig { +impl Default for StereoDepthConfig { fn default() -> Self { Self { median: DepthMedianFilter::default(), @@ -267,17 +269,17 @@ impl Default for DepthConfig { sigma: 0, confidence: 230, align: CameraBoardSocket::RGB, - stereo_pair: (CameraBoardSocket::CAM_A, CameraBoardSocket::CAM_C), + stereo_pair: (CameraBoardSocket::CAM_B, CameraBoardSocket::CAM_C), } } } -impl DepthConfig { +impl StereoDepthConfig { pub fn default_as_option() -> Option { Some(Self::default()) } - pub fn only_runtime_configs_differ(&self, other: &DepthConfig) -> bool { + pub fn only_runtime_configs_differ(&self, other: &StereoDepthConfig) -> bool { self.lr_check == other.lr_check && self.align == other.align && self.extended_disparity == other.extended_disparity @@ -286,12 +288,14 @@ impl DepthConfig { } } -impl From<&DeviceProperties> for Option { +impl From<&DeviceProperties> for Option { fn from(props: &DeviceProperties) -> Self { - let mut config = DepthConfig::default(); - let Some(cam_with_stereo_pair) = props.cameras + let mut config = StereoDepthConfig::default(); + let Some(cam_with_stereo_pair) = props + .cameras .iter() - .find(|feat| !feat.stereo_pairs.is_empty()) else { + .find(|feat| !feat.stereo_pairs.is_empty()) + else { return None; }; if let Some((cam_a, cam_b)) = props.default_stereo_pair { @@ -316,8 +320,8 @@ pub struct DeviceConfig { pub cameras: Vec, #[serde(default = "bool_true")] pub depth_enabled: bool, // Much easier to have an explicit bool for checkbox - #[serde(default = "DepthConfig::default_as_option")] - pub depth: Option, + #[serde(default = "StereoDepthConfig::default_as_option")] + pub depth: Option, pub ai_model: AiModel, } @@ -327,7 +331,7 @@ impl Default for DeviceConfig { auto: false, cameras: Vec::new(), depth_enabled: true, - depth: Some(DepthConfig::default()), + depth: Some(StereoDepthConfig::default()), ai_model: AiModel::default(), } } @@ -359,7 +363,7 @@ impl From<&DeviceProperties> for DeviceConfig { kind: *cam.supported_types.first().unwrap(), }) .collect(); - config.depth = Option::::from(props); + config.depth = Option::::from(props); config.ai_model = AiModel::from(props); config } @@ -505,6 +509,7 @@ impl AiModel { impl From<&DeviceProperties> for AiModel { fn from(props: &DeviceProperties) -> Self { let mut model = Self::default(); + if let Some(cam) = props.cameras.iter().find(|cam| cam.is_color_camera()) { model.camera = cam.board_socket; } else if let Some(cam) = props.cameras.first() { @@ -648,12 +653,10 @@ impl State { old_config: &DeviceConfig, new_config: &DeviceConfig, ) -> bool { - let any_runtime_conf_changed = old_config.depth.is_some() - && new_config.depth.is_some() - && old_config - .depth - .unwrap() - .only_runtime_configs_differ(&new_config.depth.unwrap()); // || others to be added + let any_runtime_conf_changed = match (&old_config.depth, 
&new_config.depth) { + (Some(old_depth), Some(new_depth)) => old_depth.only_runtime_configs_differ(new_depth), + _ => false, + }; any_runtime_conf_changed && old_config.cameras == new_config.cameras && old_config.ai_model == new_config.ai_model @@ -762,8 +765,8 @@ impl State { } self.applied_device_config.config = Some(config.clone()); self.modified_device_config = config.clone(); - let Some(applied_device_config) = - self.applied_device_config.config.as_mut() else { + let Some(applied_device_config) = self.applied_device_config.config.as_mut() + else { self.reset(); self.applied_device_config.update_in_progress = false; return; diff --git a/crates/re_viewer/src/ui/device_settings_panel.rs b/crates/re_viewer/src/ui/device_settings_panel.rs index d6fe6f9f4c62..bb58ae97fb60 100644 --- a/crates/re_viewer/src/ui/device_settings_panel.rs +++ b/crates/re_viewer/src/ui/device_settings_panel.rs @@ -1,5 +1,5 @@ use crate::{ - depthai::depthai::{self, CameraBoardSocket}, + depthai::depthai::{self}, misc::ViewerContext, }; @@ -234,11 +234,13 @@ impl DeviceSettingsPanel { ui.horizontal(|ui| { ui.vertical(|ui| { for cam in connected_cameras.clone() { - let Some(config) = device_config.cameras - .iter_mut() - .find(|conf| conf.board_socket == cam.board_socket) else { - continue; - }; + let Some(config) = device_config + .cameras + .iter_mut() + .find(|conf| conf.board_socket == cam.board_socket) + else { + continue; + }; Self::camera_config_ui(ctx, ui, &cam, config); } @@ -270,7 +272,17 @@ impl DeviceSettingsPanel { false, true, |ui| { - for cam in &connected_cameras { + let filtered_cameras: Vec<_> = connected_cameras + .iter() // iterates over references + .filter(|cam| { + !(cam.supported_types.contains( + &depthai::CameraSensorKind::THERMAL, + ) || cam.supported_types.contains( + &depthai::CameraSensorKind::TOF, + )) + }) + .collect(); + for cam in filtered_cameras { ui.selectable_value( &mut device_config.ai_model.camera, cam.board_socket, diff --git a/rerun_py/depthai_viewer/_backend/device.py b/rerun_py/depthai_viewer/_backend/device.py index 45010c0bab71..74ef15cb3b47 100644 --- a/rerun_py/depthai_viewer/_backend/device.py +++ b/rerun_py/depthai_viewer/_backend/device.py @@ -1,22 +1,24 @@ import itertools import time -from queue import Queue, Empty as QueueEmpty +from queue import Empty as QueueEmpty +from queue import Queue from typing import Dict, List, Optional, Tuple import depthai as dai import numpy as np from depthai_sdk import OakCamera +from depthai_sdk.classes.packet_handlers import ComponentOutput from depthai_sdk.components import CameraComponent, NNComponent, StereoComponent from depthai_sdk.components.tof_component import Component from depthai_sdk.components.camera_helper import ( getClosestIspScale, ) -from depthai_sdk.classes.packet_handlers import ComponentOutput +from depthai_sdk.components.tof_component import Component from numpy.typing import NDArray import depthai_viewer as viewer -from depthai_viewer._backend import classification_labels from depthai_viewer._backend.device_configuration import ( + ALL_NEURAL_NETWORKS, CameraConfiguration, CameraFeatures, CameraSensorResolution, @@ -24,12 +26,12 @@ DeviceProperties, ImuKind, PipelineConfiguration, + StereoDepthConfiguration, XLinkConnection, calculate_isp_scale, compare_dai_camera_configs, get_size_from_resolution, size_to_resolution, - DepthConfiguration, ALL_NEURAL_NETWORKS, ) from depthai_viewer._backend.messages import ( @@ -68,7 +70,8 @@ def update(self) -> None: class Device: id: str - intrinsic_matrix: 
Dict[Tuple[dai.CameraBoardSocket, int, int], NDArray[np.float32]] = {} + intrinsic_matrix: Dict[Tuple[dai.CameraBoardSocket, + int, int], NDArray[np.float32]] = {} calibration_data: Optional[dai.CalibrationHandler] = None use_encoding: bool = False store: Store @@ -89,7 +92,8 @@ def __init__(self, device_id: str, store: Store): self.id = device_id self.set_oak(OakCamera(device_id)) self.store = store - self._packet_handler = PacketHandler(self.store, self.get_intrinsic_matrix) + self._packet_handler = PacketHandler( + self.store, self.get_intrinsic_matrix) print("Oak cam: ", self._oak) # self.start = time.time() # self._profiler.enable() @@ -105,13 +109,15 @@ def is_closed(self) -> bool: def get_intrinsic_matrix(self, board_socket: dai.CameraBoardSocket, width: int, height: int) -> NDArray[np.float32]: if self.intrinsic_matrix.get((board_socket, width, height)) is not None: - return self.intrinsic_matrix.get((board_socket, width, height)) # type: ignore[return-value] + # type: ignore[return-value] + return self.intrinsic_matrix.get((board_socket, width, height)) if self.calibration_data is None: raise Exception("Missing calibration data!") M_right = self.calibration_data.getCameraIntrinsics( # type: ignore[union-attr] board_socket, dai.Size2f(width, height) ) - self.intrinsic_matrix[(board_socket, width, height)] = np.array(M_right).reshape(3, 3) + self.intrinsic_matrix[(board_socket, width, height)] = np.array( + M_right).reshape(3, 3) return self.intrinsic_matrix[(board_socket, width, height)] def _get_possible_stereo_pairs_for_cam( @@ -120,19 +126,26 @@ def _get_possible_stereo_pairs_for_cam( """Tries to find the possible stereo pairs for a camera.""" if self._oak is None: return [] - calib_data = self._oak.device.readCalibration() + try: + calib_data = self._oak.device.readCalibration2() + except RuntimeError: + print("No calibration available.") + return [] try: calib_data.getCameraIntrinsics(cam.socket) except IndexError: + print("No intrisics for cam: ", cam.socket) return [] possible_stereo_pairs = [] if cam.name == "right": possible_stereo_pairs.extend( - [features.socket for features in filter(lambda c: c.name == "left", connected_camera_features)] + [features.socket for features in filter( + lambda c: c.name == "left", connected_camera_features)] ) elif cam.name == "left": possible_stereo_pairs.extend( - [features.socket for features in filter(lambda c: c.name == "right", connected_camera_features)] + [features.socket for features in filter( + lambda c: c.name == "right", connected_camera_features)] ) else: possible_stereo_pairs.extend( @@ -142,7 +155,8 @@ def _get_possible_stereo_pairs_for_cam( if camera != cam and all( map( - lambda confs: compare_dai_camera_configs(confs[0], confs[1]), + lambda confs: compare_dai_camera_configs( + confs[0], confs[1]), zip(camera.configs, cam.configs), ) ) @@ -171,7 +185,8 @@ def get_device_properties(self) -> DeviceProperties: else XLinkConnection.USB, mxid=device_info.mxid, ) - device_properties = DeviceProperties(id=self.id, imu=imu, info=device_info) + device_properties = DeviceProperties( + id=self.id, imu=imu, info=device_info) try: calib = self._oak.device.readCalibration2() left_cam = calib.getStereoLeftCameraId() @@ -183,19 +198,28 @@ def get_device_properties(self) -> DeviceProperties: else: device_properties.default_stereo_pair = (left_cam, right_cam) except RuntimeError: - pass + print("No calibration found while trying to fetch the default stereo pair.") - ordered_resolutions = list(sorted(size_to_resolution.keys(), key=lambda 
res: res[0] * res[1])) + ordered_resolutions = list( + sorted(size_to_resolution.keys(), key=lambda res: res[0] * res[1])) for cam in connected_cam_features: - if dai.CameraSensorType.TOF not in cam.supportedTypes: - prioritized_type = cam.supportedTypes[0] - biggest_width, biggest_height = [ - (conf.width, conf.height) for conf in cam.configs[::-1] if conf.type == prioritized_type - ][0] + prioritized_type = cam.supportedTypes[0] + biggest_width_height = [ + (conf.width, conf.height) for conf in cam.configs[::-1] if conf.type == prioritized_type + ] + # Some sensors don't have configs, use the sensor width, height + if not biggest_width_height: + biggest_width, biggest_height = cam.width, cam.height + else: + biggest_width, biggest_height = biggest_width_height[0] + if cam.supportedTypes[0] == dai.CameraSensorType.TOF: + all_supported_resolutions = [ + size_to_resolution[(biggest_width, biggest_height)]] + else: all_supported_resolutions = list( filter( - lambda x: x, + lambda x: x, # type: ignore[arg-type] [ size_to_resolution.get((w, h), None) for w, h in ordered_resolutions @@ -203,8 +227,6 @@ def get_device_properties(self) -> DeviceProperties: ], ) ) - else: - all_supported_resolutions = [] # Fill in lower resolutions that can be achieved with ISP scaling device_properties.cameras.append( @@ -213,7 +235,8 @@ def get_device_properties(self) -> DeviceProperties: max_fps=60, resolutions=all_supported_resolutions, supported_types=cam.supportedTypes, - stereo_pairs=self._get_possible_stereo_pairs_for_cam(cam, connected_cam_features), + stereo_pairs=self._get_possible_stereo_pairs_for_cam( + cam, connected_cam_features), name=cam.name.capitalize(), ) ) @@ -222,7 +245,7 @@ def get_device_properties(self) -> DeviceProperties: [(cam.board_socket, pair) for pair in cam.stereo_pairs] for cam in device_properties.cameras ) ) - print("Device properties: ", device_properties.default_stereo_pair) + print("Default stereo pair: ", device_properties.default_stereo_pair) return device_properties def close_oak(self) -> None: @@ -244,7 +267,8 @@ def reconnect_to_oak(self) -> Message: timeout_start = time.time() while time.time() - timeout_start < 10: available_devices = [ - device.getMxId() for device in dai.Device.getAllAvailableDevices() # type: ignore[call-arg] + # type: ignore[call-arg] + device.getMxId() for device in dai.Device.getAllAvailableDevices() ] if self.id in available_devices: break @@ -258,7 +282,8 @@ def reconnect_to_oak(self) -> Message: return ErrorMessage("Failed to create oak camera") def _get_component_by_socket(self, socket: dai.CameraBoardSocket) -> Optional[CameraComponent]: - component = list(filter(lambda c: c.node.getBoardSocket() == socket, self._cameras)) + component = list( + filter(lambda c: c.node.getBoardSocket() == socket, self._cameras)) if not component: return None return component[0] @@ -267,7 +292,8 @@ def _get_camera_config_by_socket( self, config: PipelineConfiguration, socket: dai.CameraBoardSocket ) -> Optional[CameraConfiguration]: print("Getting cam by socket: ", socket, " Cameras: ", config.cameras) - camera = list(filter(lambda c: c.board_socket == socket, config.cameras)) + camera = list( + filter(lambda c: c.board_socket == socket, config.cameras)) if not camera: return None return camera[0] @@ -292,13 +318,17 @@ def _create_auto_pipeline_config(self, config: PipelineConfiguration) -> Message for cam in connected_cam_features: if cam.name == "rgb": # By convention rgb_cam_socket = cam.socket - resolution = CameraSensorResolution.THE_1080_P if cam.width 
>= 1920 else CameraSensorResolution.THE_720_P + resolution = ( + CameraSensorResolution.THE_1080_P if cam.width >= 1920 else size_to_resolution[( + cam.width, cam.height)] + ) resolution = CameraSensorResolution.THE_1200_P if cam.height == 1200 else resolution preferred_type = cam.supportedTypes[0] if preferred_type == dai.CameraSensorType.TOF: has_tof = True config.cameras.append( - CameraConfiguration(resolution=resolution, kind=preferred_type, board_socket=cam.socket, name=cam.name) + CameraConfiguration( + resolution=resolution, kind=preferred_type, board_socket=cam.socket, name=cam.name) ) # 2. Create stereo depth if not has_tof: @@ -307,7 +337,7 @@ def _create_auto_pipeline_config(self, config: PipelineConfiguration) -> Message left_cam = calibration.getStereoLeftCameraId() right_cam = calibration.getStereoRightCameraId() if left_cam.value != 255 and right_cam.value != 255: - config.depth = DepthConfiguration( + config.depth = StereoDepthConfiguration( stereo_pair=(left_cam, right_cam), align=rgb_cam_socket if rgb_cam_socket is not None else left_cam, ) @@ -319,11 +349,18 @@ def _create_auto_pipeline_config(self, config: PipelineConfiguration) -> Message nnet_cam_sock = rgb_cam_socket if nnet_cam_sock is None: # Try to find a color camera config - nnet_cam_sock = next(filter(lambda cam: cam.kind == dai.CameraSensorType.COLOR, config.cameras), None) # type: ignore[assignment] + nnet_cam_sock = next( + filter( + # type: ignore[arg-type,union-attr] + lambda cam: cam.kind == dai.CameraSensorType.COLOR, + config.cameras, + ), + None, + ) # type: ignore[assignment] if nnet_cam_sock is not None: nnet_cam_sock = nnet_cam_sock.board_socket if nnet_cam_sock is not None: - config.ai_model = ALL_NEURAL_NETWORKS[0] + config.ai_model = ALL_NEURAL_NETWORKS[1] # Mobilenet SSd config.ai_model.camera = nnet_cam_sock else: config.ai_model = None @@ -340,7 +377,8 @@ def update_pipeline(self, runtime_only: bool) -> Message: if self._oak.device.isPipelineRunning(): if runtime_only: if config.depth is not None: - self._stereo.control.send_controls(config.depth.to_runtime_controls()) + self._stereo.control.send_controls( + config.depth.to_runtime_controls()) return InfoMessage("") return ErrorMessage("Depth is disabled, can't send runtime controls!") print("Cam running, closing...") @@ -363,42 +401,51 @@ def update_pipeline(self, runtime_only: bool) -> Message: is_usb2 = self._oak.device.getUsbSpeed() == dai.UsbSpeed.HIGH if is_poe: self.store.send_message_to_frontend( - WarningMessage("Device is connected via PoE. This may cause performance issues.") + WarningMessage( + "Device is connected via PoE. This may cause performance issues.") ) print("Connected to a PoE device, camera streams will be JPEG encoded...") elif is_usb2: self.store.send_message_to_frontend( - WarningMessage("Device is connected in USB2 mode. This may cause performance issues.") + WarningMessage( + "Device is connected in USB2 mode. 
This may cause performance issues.") ) - print("Device is connected in USB2 mode, camera streams will be JPEG encoded...") + print( + "Device is connected in USB2 mode, camera streams will be JPEG encoded...") self.use_encoding = is_poe or is_usb2 connected_camera_features = self._oak.device.getConnectedCameraFeatures() for cam in config.cameras: print("Creating camera: ", cam) - camera_features = next(filter(lambda feat: feat.socket == cam.board_socket, connected_camera_features)) + camera_features = next( + filter(lambda feat: feat.socket == cam.board_socket, connected_camera_features)) # When the resolution is too small, the ISP needs to scale it down res_x, res_y = get_size_from_resolution(cam.resolution) - does_sensor_support_resolution = any( - [ - config.width == res_x and config.height == res_y - for config in camera_features.configs - if config.type == camera_features.supportedTypes[0] - ] - ) + does_sensor_support_resolution = ( + any( + [ + config.width == res_x and config.height == res_y + for config in camera_features.configs + if config.type == camera_features.supportedTypes[0] + ] + ) + or len(camera_features.configs) == 0 + ) # Some sensors don't have any configs... just assume the resolution is supported # In case of ISP scaling, don't change the sensor resolution in the pipeline config # to keep it logical for the user in the UI - sensor_resolution: Optional[CameraSensorResolution] = cam.resolution # None for ToF + # None for ToF + sensor_resolution: Optional[CameraSensorResolution] = cam.resolution if not does_sensor_support_resolution: smallest_supported_resolution = [ config for config in camera_features.configs if config.type == camera_features.supportedTypes[0] ][0] sensor_resolution = size_to_resolution.get( - (smallest_supported_resolution.width, smallest_supported_resolution.height), None + (smallest_supported_resolution.width, + smallest_supported_resolution.height), None ) is_used_by_depth = config.depth is not None and ( cam.board_socket == config.depth.align or cam.board_socket in config.depth.stereo_pair @@ -410,9 +457,10 @@ def update_pipeline(self, runtime_only: bool) -> Message: if cam.stream_enabled: if dai.CameraSensorType.TOF in camera_features.supportedTypes: sdk_cam = self._oak.create_tof(cam.board_socket) - else: + elif sensor_resolution is not None: sdk_cam = self._oak.create_camera( cam.board_socket, + # type: ignore[union-attr] sensor_resolution.as_sdk_resolution(), cam.fps, encode=self.use_encoding, @@ -420,11 +468,17 @@ def update_pipeline(self, runtime_only: bool) -> Message: if not does_sensor_support_resolution: sdk_cam.config_color_camera( isp_scale=getClosestIspScale( - (smallest_supported_resolution.width, smallest_supported_resolution.height), res_x + (smallest_supported_resolution.width, + smallest_supported_resolution.height), res_x ) ) self._cameras.append(sdk_cam) - self._queues.append((sdk_cam, self._oak.queue(sdk_cam.out.main))) + else: + print("Skipped creating camera:", cam.board_socket, + "because no valid sensor resolution was found.") + continue + self._queues.append( + (sdk_cam, self._oak.queue(sdk_cam.out.main))) if config.depth: print("Creating depth") @@ -436,11 +490,14 @@ def update_pipeline(self, runtime_only: bool) -> Message: if left_cam.node.getResolutionWidth() > 1280: print("Left cam width > 1280, setting isp scale to get 800") - left_cam.config_color_camera(isp_scale=calculate_isp_scale(left_cam.node.getResolutionWidth())) + left_cam.config_color_camera(isp_scale=calculate_isp_scale( + 
left_cam.node.getResolutionWidth())) if right_cam.node.getResolutionWidth() > 1280: print("Right cam width > 1280, setting isp scale to get 800") - right_cam.config_color_camera(isp_scale=calculate_isp_scale(right_cam.node.getResolutionWidth())) - self._stereo = self._oak.create_stereo(left=left_cam, right=right_cam, name="depth") + right_cam.config_color_camera(isp_scale=calculate_isp_scale( + right_cam.node.getResolutionWidth())) + self._stereo = self._oak.create_stereo( + left=left_cam, right=right_cam, name="depth") align_component = self._get_component_by_socket(config.depth.align) if not align_component: @@ -454,13 +511,17 @@ def update_pipeline(self, runtime_only: bool) -> Message: median=config.depth.median, ) - aligned_camera = self._get_camera_config_by_socket(config, config.depth.align) + aligned_camera = self._get_camera_config_by_socket( + config, config.depth.align) if not aligned_camera: return ErrorMessage(f"{config.depth.align} is not configured. Couldn't create stereo pair.") - self._queues.append((self._stereo, self._oak.queue(self._stereo.out.main))) + self._queues.append( + (self._stereo, self._oak.queue(self._stereo.out.main))) - if self._oak.device.getConnectedIMU() != "NONE": - print("Creating IMU") + if self._oak.device.getConnectedIMU() != "NONE" and self._oak.device.getConnectedIMU() != "": + print("Creating IMU, connected IMU: ", + self._oak.device.getConnectedIMU()) + # TODO(someone): Handle IMU updates imu = self._oak.create_imu() sensors = [ dai.IMUSensor.ACCELEROMETER_RAW, @@ -476,22 +537,26 @@ def update_pipeline(self, runtime_only: bool) -> Message: print("Connected cam doesn't have IMU, skipping IMU creation...") if config.ai_model and config.ai_model.path: - cam_component = self._get_component_by_socket(config.ai_model.camera) + cam_component = self._get_component_by_socket( + config.ai_model.camera) if not cam_component: return ErrorMessage(f"{config.ai_model.camera} is not configured. Couldn't create NN.") - labels: Optional[List[str]] = None if config.ai_model.path == "age-gender-recognition-retail-0013": - face_detection = self._oak.create_nn("face-detection-retail-0004", cam_component) - self._nnet = self._oak.create_nn("age-gender-recognition-retail-0013", input=face_detection) + face_detection = self._oak.create_nn( + "face-detection-retail-0004", cam_component) + self._nnet = self._oak.create_nn( + "age-gender-recognition-retail-0013", input=face_detection) else: - self._nnet = self._oak.create_nn(config.ai_model.path, cam_component) - labels = getattr(classification_labels, config.ai_model.path.upper().replace("-", "_"), None) + self._nnet = self._oak.create_nn( + config.ai_model.path, cam_component) - camera = self._get_camera_config_by_socket(config, config.ai_model.camera) + camera = self._get_camera_config_by_socket( + config, config.ai_model.camera) if not camera: return ErrorMessage(f"{config.ai_model.camera} is not configured. 
Couldn't create NN.") - self._queues.append((self._nnet, self._oak.queue(self._nnet.out.main))) + self._queues.append( + (self._nnet, self._oak.queue(self._nnet.out.main))) sys_logger_xlink = self._oak.pipeline.createXLinkOut() logger = self._oak.pipeline.createSystemLogger() @@ -508,8 +573,10 @@ def update_pipeline(self, runtime_only: bool) -> Message: running = self._oak.running() if running: self._pipeline_start_t = time.time() - self._sys_info_q = self._oak.device.getOutputQueue("sys_logger", 1, False) - self.store.set_pipeline_config(config) # We might have modified the config, so store it + self._sys_info_q = self._oak.device.getOutputQueue( + "sys_logger", 1, False) + # We might have modified the config, so store it + self.store.set_pipeline_config(config) try: self._oak.poll() except RuntimeError: @@ -540,7 +607,8 @@ def update(self) -> None: sys_info = self._sys_info_q.tryGet() # type: ignore[attr-defined] if sys_info is not None and self._pipeline_start_t is not None: print("----------------------------------------") - print(f"[{int(time.time() - self._pipeline_start_t)}s] System information") + print( + f"[{int(time.time() - self._pipeline_start_t)}s] System information") print("----------------------------------------") print_system_information(sys_info) # if time.time() - self.start > 10: diff --git a/rerun_py/depthai_viewer/_backend/device_configuration.py b/rerun_py/depthai_viewer/_backend/device_configuration.py index 19f1316736fd..0343c6bedbf6 100644 --- a/rerun_py/depthai_viewer/_backend/device_configuration.py +++ b/rerun_py/depthai_viewer/_backend/device_configuration.py @@ -10,7 +10,7 @@ # enabled: bool = True -class DepthConfiguration(BaseModel): # type: ignore[misc] +class StereoDepthConfiguration(BaseModel): # type: ignore[misc] median: Optional[dai.MedianFilter] = dai.MedianFilter.KERNEL_7x7 lr_check: Optional[bool] = True lrc_threshold: int = 5 # 0..10 @@ -37,7 +37,8 @@ def __init__(self, **v) -> None: # type: ignore[no-untyped-def] ) return super().__init__(**v) # type: ignore[no-any-return] - def dict(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-def] + # type: ignore[no-untyped-def] + def dict(self, *args, **kwargs) -> Dict[str, Any]: return { "median": self.median.name if self.median else None, "lr_check": self.lr_check, @@ -143,6 +144,7 @@ class CameraSensorResolution(Enum): THE_1440X1080: str = "THE_1440X1080" THE_1080_P: str = "THE_1080_P" THE_1200_P: str = "THE_1200_P" + THE_1280_P: str = "THE_1280_P" THE_4_K: str = "THE_4_K" THE_4000X3000: str = "THE_4000X3000" THE_12_MP: str = "THE_12_MP" @@ -175,13 +177,15 @@ class Config: def __init__(self, **v) -> None: # type: ignore[no-untyped-def] if v.get("board_socket", None): if isinstance(v["board_socket"], str): - v["board_socket"] = getattr(dai.CameraBoardSocket, v["board_socket"]) + v["board_socket"] = getattr( + dai.CameraBoardSocket, v["board_socket"]) if v.get("kind", None): if isinstance(v["kind"], str): v["kind"] = getattr(dai.CameraSensorType, v["kind"]) return super().__init__(**v) # type: ignore[no-any-return] - def dict(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-def] + # type: ignore[no-untyped-def] + def dict(self, *args, **kwargs) -> Dict[str, Any]: return { "fps": self.fps, "resolution": self.resolution.dict(), @@ -192,7 +196,8 @@ def dict(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-de } @classmethod - def create_left(cls, **kwargs) -> "CameraConfiguration": # type: ignore[no-untyped-def] + # type: ignore[no-untyped-def] + def 
create_left(cls, **kwargs) -> "CameraConfiguration": if not kwargs.get("kind", None): kwargs["kind"] = dai.CameraSensorType.MONO if not kwargs.get("resolution", None): @@ -200,7 +205,8 @@ def create_left(cls, **kwargs) -> "CameraConfiguration": # type: ignore[no-unty return cls(board_socket="LEFT", **kwargs) @classmethod - def create_right(cls, **kwargs) -> "CameraConfiguration": # type: ignore[no-untyped-def] + # type: ignore[no-untyped-def] + def create_right(cls, **kwargs) -> "CameraConfiguration": if not kwargs.get("kind", None): kwargs["kind"] = dai.CameraSensorType.MONO if not kwargs.get("resolution", None): @@ -208,7 +214,8 @@ def create_right(cls, **kwargs) -> "CameraConfiguration": # type: ignore[no-unt return cls(board_socket="RIGHT", **kwargs) @classmethod - def create_color(cls, **kwargs) -> "CameraConfiguration": # type: ignore[no-untyped-def] + # type: ignore[no-untyped-def] + def create_color(cls, **kwargs) -> "CameraConfiguration": if not kwargs.get("kind", None): kwargs["kind"] = dai.CameraSensorType.COLOR if not kwargs.get("resolution", None): @@ -229,7 +236,8 @@ class Config: arbitrary_types_allowed = True use_enum_values = True - def dict(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-def] + # type: ignore[no-untyped-def] + def dict(self, *args, **kwargs) -> Dict[str, Any]: return { "resolutions": [r for r in self.resolutions], "max_fps": self.max_fps, @@ -243,7 +251,7 @@ def dict(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-de class PipelineConfiguration(BaseModel): # type: ignore[misc] auto: bool = False # Should the backend automatically create a pipeline? cameras: List[CameraConfiguration] = [] - depth: Optional[DepthConfiguration] + depth: Optional[StereoDepthConfiguration] ai_model: Optional[AiModelConfiguration] imu: ImuConfiguration = ImuConfiguration() @@ -266,22 +274,26 @@ class DeviceProperties(BaseModel): # type: ignore[misc] stereo_pairs: List[ Tuple[dai.CameraBoardSocket, dai.CameraBoardSocket] ] = [] # Which cameras can be paired for stereo - default_stereo_pair: Optional[Tuple[dai.CameraBoardSocket, dai.CameraBoardSocket]] = None + default_stereo_pair: Optional[Tuple[dai.CameraBoardSocket, + dai.CameraBoardSocket]] = None info: DeviceInfo = DeviceInfo() class Config: arbitrary_types_allowed = True use_enum_values = True - def __init__(self, *args, **kwargs) -> None: # type: ignore[no-untyped-def] + # type: ignore[no-untyped-def] + def __init__(self, *args, **kwargs) -> None: if kwargs.get("stereo_pairs", None) and all(isinstance(pair[0], str) for pair in kwargs["stereo_pairs"]): kwargs["stereo_pairs"] = [ - (getattr(dai.CameraBoardSocket, pair[0]), getattr(dai.CameraBoardSocket, pair[1])) + (getattr(dai.CameraBoardSocket, pair[0]), getattr( + dai.CameraBoardSocket, pair[1])) for pair in kwargs["stereo_pairs"] ] return super().__init__(*args, **kwargs) # type: ignore[no-any-return] - def dict(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-def] + # type: ignore[no-untyped-def] + def dict(self, *args, **kwargs) -> Dict[str, Any]: return { "id": self.id, "cameras": [cam.dict() for cam in self.cameras], @@ -302,14 +314,15 @@ def dict(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-de (640, 400): CameraSensorResolution.THE_400_P, (640, 480): CameraSensorResolution.THE_480_P, # OV7251 (1280, 720): CameraSensorResolution.THE_720_P, - # (1280, 962): CameraSensorResolution.THE_1280P, # TOF + (1280, 962): CameraSensorResolution.THE_1280_P, # TOF (1280, 800): 
CameraSensorResolution.THE_800_P, # OV9782 (2592, 1944): CameraSensorResolution.THE_5_MP, # OV5645 (1440, 1080): CameraSensorResolution.THE_1440X1080, (1920, 1080): CameraSensorResolution.THE_1080_P, (1920, 1200): CameraSensorResolution.THE_1200_P, # AR0234 (3840, 2160): CameraSensorResolution.THE_4_K, - (4000, 3000): CameraSensorResolution.THE_4000X3000, # IMX582 with binning enabled + # IMX582 with binning enabled + (4000, 3000): CameraSensorResolution.THE_4000X3000, (4056, 3040): CameraSensorResolution.THE_12_MP, # IMX378, IMX477, IMX577 (4208, 3120): CameraSensorResolution.THE_13_MP, # AR214 (5312, 6000): CameraSensorResolution.THE_5312X6000, # IMX582 cropped diff --git a/rerun_py/depthai_viewer/_backend/main.py b/rerun_py/depthai_viewer/_backend/main.py index c68f22b9dc71..7b7274b55685 100644 --- a/rerun_py/depthai_viewer/_backend/main.py +++ b/rerun_py/depthai_viewer/_backend/main.py @@ -1,4 +1,5 @@ import threading +import traceback from multiprocessing import Queue from queue import Empty as QueueEmptyException from typing import Optional @@ -96,7 +97,11 @@ def on_update_pipeline(self, runtime_only: bool) -> Message: print("No device selected, can't update pipeline!") return ErrorMessage("No device selected, can't update pipeline!") print("Updating pipeline...") - message = self._device.update_pipeline(runtime_only) + try: + message = self._device.update_pipeline(runtime_only) + except RuntimeError as e: + print("Failed to update pipeline:", e) + return ErrorMessage(str(e)) if isinstance(message, InfoMessage): return PipelineMessage(self.store.pipeline_config) return message diff --git a/rerun_py/depthai_viewer/_backend/packet_handler.py b/rerun_py/depthai_viewer/_backend/packet_handler.py index eba4fbdaa14c..14164489553a 100644 --- a/rerun_py/depthai_viewer/_backend/packet_handler.py +++ b/rerun_py/depthai_viewer/_backend/packet_handler.py @@ -1,26 +1,24 @@ -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, List, Optional, Tuple, Union import cv2 import depthai as dai import numpy as np from ahrs.filters import Mahony from depthai_sdk.classes.packets import ( # PointcloudPacket, + BasePacket, DepthPacket, + Detection, DetectionPacket, + DisparityDepthPacket, FramePacket, IMUPacket, TwoStagePacket, - Detection, - BasePacket, - DisparityDepthPacket, ) -from depthai_sdk.components import Component, CameraComponent, StereoComponent, NNComponent +from depthai_sdk.components import CameraComponent, Component, NNComponent, StereoComponent from depthai_sdk.components.tof_component import ToFComponent from numpy.typing import NDArray -from pydantic import BaseModel import depthai_viewer as viewer -from depthai_viewer._backend.device_configuration import CameraConfiguration from depthai_viewer._backend.store import Store from depthai_viewer._backend.topic import Topic from depthai_viewer.components.rect2d import RectFormat @@ -29,7 +27,8 @@ class PacketHandler: store: Store _ahrs: Mahony - _get_camera_intrinsics: Callable[[dai.CameraBoardSocket, int, int], NDArray[np.float32]] + _get_camera_intrinsics: Callable[[ + dai.CameraBoardSocket, int, int], NDArray[np.float32]] def __init__( self, store: Store, intrinsics_getter: Callable[[dai.CameraBoardSocket, int, int], NDArray[np.float32]] @@ -45,12 +44,11 @@ def reset(self) -> None: self._ahrs = Mahony(frequency=100) self._ahrs.Q = np.array([1, 0, 0, 0], dtype=np.float64) - def set_camera_intrinsics_getter( self, camera_intrinsics_getter: Callable[[dai.CameraBoardSocket, int, int], 
NDArray[np.float32]] ) -> None: - self._get_camera_intrinsics = camera_intrinsics_getter # type: ignore[assignment, misc] - + # type: ignore[assignment, misc] + self._get_camera_intrinsics = camera_intrinsics_getter def log_packet( self, @@ -61,7 +59,8 @@ def log_packet( if isinstance(component, CameraComponent): self._on_camera_frame(packet, component._socket) else: - print("Unknown component type:", type(component), "for packet:", type(packet)) + print("Unknown component type:", type( + component), "for packet:", type(packet)) # Create dai.CameraBoardSocket from descriptor elif type(packet) is DepthPacket: if isinstance(component, StereoComponent): @@ -72,7 +71,8 @@ def log_packet( elif isinstance(component, StereoComponent): self._on_stereo_frame(packet, component) else: - print("Unknown component type:", type(component), "for packet:", type(packet)) + print("Unknown component type:", type( + component), "for packet:", type(packet)) elif type(packet) is DetectionPacket: self._on_detections(packet, component) elif type(packet) is TwoStagePacket: @@ -85,10 +85,12 @@ def _on_camera_frame(self, packet: FramePacket, board_socket: dai.CameraBoardSoc f"{board_socket.name}/transform", child_from_parent=([0, 0, 0], [1, 0, 0, 0]), xyz="RDF" ) # TODO(filip): Enable the user to lock the camera rotation in the UI - img_frame = packet.frame if packet.msg.getType() == dai.RawImgFrame.Type.RAW8 else packet.msg.getData() + img_frame = packet.frame if packet.msg.getType( + ) == dai.RawImgFrame.Type.RAW8 else packet.msg.getData() h, w = packet.msg.getHeight(), packet.msg.getWidth() if packet.msg.getType() == dai.ImgFrame.Type.BITSTREAM: - img_frame = cv2.cvtColor(cv2.imdecode(img_frame, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB) + img_frame = cv2.cvtColor(cv2.imdecode( + img_frame, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB) h, w = img_frame.shape[:2] child_from_parent: NDArray[np.float32] @@ -98,7 +100,8 @@ def _on_camera_frame(self, packet: FramePacket, board_socket: dai.CameraBoardSoc ) except Exception: f_len = (w * h) ** 0.5 - child_from_parent = np.array([[f_len, 0, w / 2], [0, f_len, h / 2], [0, 0, 1]]) + child_from_parent = np.array( + [[f_len, 0, w / 2], [0, f_len, h / 2], [0, 0, 1]]) cam = cam_kind_from_frame_type(packet.msg.getType()) viewer.log_pinhole( f"{board_socket.name}/transform/{cam}/", @@ -125,11 +128,13 @@ def on_imu(self, packet: IMUPacket) -> None: mag: dai.IMUReportMagneticField = packet.magneticField # TODO(filip): Move coordinate mapping to sdk self._ahrs.Q = self._ahrs.updateIMU( - self._ahrs.Q, np.array([gyro.z, gyro.x, gyro.y]), np.array([accel.z, accel.x, accel.y]) + self._ahrs.Q, np.array([gyro.z, gyro.x, gyro.y]), np.array( + [accel.z, accel.x, accel.y]) ) if Topic.ImuData not in self.store.subscriptions: return - viewer.log_imu([accel.z, accel.x, accel.y], [gyro.z, gyro.x, gyro.y], self._ahrs.Q, [mag.x, mag.y, mag.z]) + viewer.log_imu([accel.z, accel.x, accel.y], [ + gyro.z, gyro.x, gyro.y], self._ahrs.Q, [mag.x, mag.y, mag.z]) def _on_stereo_frame(self, packet: Union[DepthPacket, DisparityDepthPacket], component: StereoComponent) -> None: depth_frame = packet.frame @@ -149,7 +154,8 @@ def _on_tof_packet( viewer.log_rigid3( f"{component.camera_socket.name}/transform", child_from_parent=([0, 0, 0], [1, 0, 0, 0]), xyz="RDF" ) - intrinsics = np.array([[471.451, 0.0, 317.897], [0.0, 471.539, 245.027], [0.0, 0.0, 1.0]]) + intrinsics = np.array( + [[471.451, 0.0, 317.897], [0.0, 471.539, 245.027], [0.0, 0.0, 1.0]]) viewer.log_pinhole( 
f"{component.camera_socket.name}/transform/tof", child_from_parent=intrinsics, @@ -161,7 +167,8 @@ def _on_tof_packet( viewer.log_depth_image(path, depth_frame, meter=1e3) def _on_detections(self, packet: DetectionPacket, component: NNComponent) -> None: - rects, colors, labels = self._detections_to_rects_colors_labels(packet, component.get_labels()) + rects, colors, labels = self._detections_to_rects_colors_labels( + packet, component.get_labels()) cam = "color_cam" if component._get_camera_comp().is_color() else "mono_cam" viewer.log_rects( f"{component._get_camera_comp()._socket.name}/transform/{cam}/Detections", @@ -178,19 +185,22 @@ def _detections_to_rects_colors_labels( colors = [] labels = [] for detection in packet.detections: - rects.append(self._rect_from_detection(detection, packet.frame.shape[0], packet.frame.shape[1])) + rects.append(self._rect_from_detection( + detection, packet.frame.shape[0], packet.frame.shape[1])) colors.append([0, 255, 0]) label: str = detection.label_str # Open model zoo models output label index if omz_labels is not None and isinstance(label, int): label += omz_labels[label] - label += ", " + str(int(detection.img_detection.confidence * 100)) + "%" + label += ", " + \ + str(int(detection.img_detection.confidence * 100)) + "%" labels.append(label) return rects, colors, labels def _on_age_gender_packet(self, packet: TwoStagePacket, component: NNComponent) -> None: for det, rec in zip(packet.detections, packet.nnData): - age = int(float(np.squeeze(np.array(rec.getLayerFp16("age_conv3")))) * 100) + age = int( + float(np.squeeze(np.array(rec.getLayerFp16("age_conv3")))) * 100) gender = np.squeeze(np.array(rec.getLayerFp16("prob"))) gender_str = "Woman" if gender[0] > gender[1] else "Man" label = f"{gender_str}, {age}" @@ -200,7 +210,8 @@ def _on_age_gender_packet(self, packet: TwoStagePacket, component: NNComponent) cam = "color_cam" if component._get_camera_comp().is_color() else "mono_cam" viewer.log_rect( f"{component._get_camera_comp()._socket.name}/transform/{cam}/Detection", - self._rect_from_detection(det, packet.frame.shape[0], packet.frame.shape[1]), + self._rect_from_detection( + det, packet.frame.shape[0], packet.frame.shape[1]), rect_format=RectFormat.XYXY, color=color, label=label, @@ -208,10 +219,10 @@ def _on_age_gender_packet(self, packet: TwoStagePacket, component: NNComponent) def _rect_from_detection(self, detection: Detection, max_height: int, max_width: int) -> List[int]: return [ - max(min(detection.bottom_right[0], max_width), 0), - max(min(detection.bottom_right[1], max_height), 0), - max(min(detection.top_left[0], max_width), 0), - max(min(detection.top_left[1], max_height), 0), + max(min(detection.bottom_right[0], max_width), 0) * max_width, + max(min(detection.bottom_right[1], max_height), 0) * max_height, + max(min(detection.top_left[0], max_width), 0) * max_width, + max(min(detection.top_left[1], max_height), 0) * max_height, ] diff --git a/rerun_py/depthai_viewer/_backend/topic.py b/rerun_py/depthai_viewer/_backend/topic.py index 3d7562ce6db2..2413337fbbff 100644 --- a/rerun_py/depthai_viewer/_backend/topic.py +++ b/rerun_py/depthai_viewer/_backend/topic.py @@ -16,9 +16,9 @@ class Topic(Enum): @classmethod def create(cls, name_or_id: Union[str, int]) -> "Topic": - if type(name_or_id) == str: + if isinstance(name_or_id, str): return Topic[name_or_id] - elif type(name_or_id) == int: + elif isinstance(name_or_id, int): return Topic(name_or_id) else: raise ValueError("Invalid topic name or id: ", name_or_id) diff --git 
a/rerun_py/depthai_viewer/install_requirements.py b/rerun_py/depthai_viewer/install_requirements.py index 1349b6046e31..1ace066628ca 100644 --- a/rerun_py/depthai_viewer/install_requirements.py +++ b/rerun_py/depthai_viewer/install_requirements.py @@ -94,8 +94,10 @@ def create_venv_and_install_dependencies() -> None: "-m", "pip", "install", - # "depthai-sdk==1.11.0" - "git+https://github.com/luxonis/depthai@develop#subdirectory=depthai_sdk", + "depthai-sdk==1.13.1.dev0+b0340e0c4ad869711d7d5fff48e41c46fe41f475", + "--extra-index-url", + "https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local/", + # "git+https://github.com/luxonis/depthai@develop#subdirectory=depthai_sdk", ], check=True, )
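
The runtime-update path in this patch has two halves: the Rust `State` helper that compares `old_config` and `new_config` now matches on `(&old_config.depth, &new_config.depth)` instead of unwrapping the options, and `Device.update_pipeline` on the Python side only calls `self._stereo.control.send_controls(config.depth.to_runtime_controls())` when `runtime_only` is set, rather than rebuilding the pipeline. A minimal Python sketch of that decision, assuming the field names carried over from the Rust `StereoDepthConfig::only_runtime_configs_differ` (only `lr_check`, `align`, `extended_disparity` and `stereo_pair` are visible in the patch; the helper name is invented for illustration):

    from typing import Optional

    def can_update_at_runtime(
        old_depth: Optional["StereoDepthConfiguration"],
        new_depth: Optional["StereoDepthConfiguration"],
    ) -> bool:
        # Both configs must exist; otherwise a full pipeline rebuild is required.
        if old_depth is None or new_depth is None:
            return False
        # Non-runtime fields must be unchanged, mirroring the Rust
        # StereoDepthConfig::only_runtime_configs_differ check; runtime-tunable
        # settings (median, confidence, etc.) are allowed to differ.
        return (
            old_depth.lr_check == new_depth.lr_check
            and old_depth.align == new_depth.align
            and old_depth.extended_disparity == new_depth.extended_disparity
            and old_depth.stereo_pair == new_depth.stereo_pair
        )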
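
`Device.get_device_properties` now tolerates sensors that report no configs (falling back to `cam.width`/`cam.height`) and restricts ToF sensors to their native resolution via the new `(1280, 962) -> THE_1280_P` entry in `size_to_resolution`. A standalone sketch of that selection logic, with plain strings standing in for `CameraSensorResolution` members and the function name invented for illustration:

    from typing import Dict, List, Tuple

    # Subset of the size_to_resolution map from device_configuration.py.
    size_to_resolution: Dict[Tuple[int, int], str] = {
        (1280, 800): "THE_800_P",
        (1280, 962): "THE_1280_P",  # ToF
        (1920, 1080): "THE_1080_P",
        (1920, 1200): "THE_1200_P",
    }

    def supported_resolutions(
        config_sizes: List[Tuple[int, int]],  # (width, height) of cam.configs
        sensor_size: Tuple[int, int],         # (cam.width, cam.height)
        is_tof: bool,
    ) -> List[str]:
        # Some sensors don't expose any configs; fall back to the sensor's
        # own width/height as the largest achievable size.
        biggest = config_sizes[-1] if config_sizes else sensor_size
        if is_tof:
            # ToF sensors are limited to their single native resolution.
            res = size_to_resolution.get(biggest)
            return [res] if res is not None else []
        # Everything else gets all known resolutions that fit the sensor,
        # ordered smallest to largest by pixel count.
        ordered = sorted(size_to_resolution, key=lambda wh: wh[0] * wh[1])
        return [
            size_to_resolution[wh]
            for wh in ordered
            if wh[0] <= biggest[0] and wh[1] <= biggest[1]
        ]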
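
`install_requirements.py` switches from installing `depthai_sdk` off the `develop` branch to a pinned dev build served from the Luxonis snapshot index. The equivalent standalone pip invocation looks like the sketch below; the version string and index URL are taken from the patch, while running it against the current interpreter instead of the viewer's managed venv is an assumption made for brevity:

    import subprocess
    import sys

    # Same dependency pin and extra index as install_requirements.py now uses.
    subprocess.run(
        [
            sys.executable,
            "-m",
            "pip",
            "install",
            "depthai-sdk==1.13.1.dev0+b0340e0c4ad869711d7d5fff48e41c46fe41f475",
            "--extra-index-url",
            "https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local/",
        ],
        check=True,
    )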