diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml
index aedba554dbef..5133d225a277 100644
--- a/.github/workflows/python.yml
+++ b/.github/workflows/python.yml
@@ -86,10 +86,7 @@ jobs:
         run: |
           matrix=()
-          if [[ $TAGGED_OR_MAIN ]]; then
-            # MacOS build is really slow (>30 mins); uses up a lot of CI minutes
-            matrix+=('{"platform": "macos", "runs_on": "macos-latest", "pip": "pip", "python": "python3"},')
-          fi
+          matrix+=('{"platform": "macos", "runs_on": "macos-latest", "pip": "pip", "python": "python3"},')
           matrix+=('{"platform": "windows", "runs_on": "windows-latest", "pip": "pip", "python": "python"},')
           matrix+=('{"platform": "linux", "runs_on": "ubuntu-latest", container: {"image": "rerunio/ci_docker:0.5"}, "pip": "pip", "python": "python3"},')
           matrix+=('{"platform": "aarch64", "runs_on": ["self-hosted", "linux", "ARM64"], container: {"image": "quay.io/pypa/manylinux_2_28_aarch64"}, "pip": "python3.8 -m pip", "python": "python3.8"}')
diff --git a/crates/re_log_types/src/component_types/tensor.rs b/crates/re_log_types/src/component_types/tensor.rs
index bdfff6de3ca0..7ccdc8bf695f 100644
--- a/crates/re_log_types/src/component_types/tensor.rs
+++ b/crates/re_log_types/src/component_types/tensor.rs
@@ -1,11 +1,11 @@
-use arrow2::array::{ FixedSizeBinaryArray, MutableFixedSizeBinaryArray };
+use arrow2::array::{FixedSizeBinaryArray, MutableFixedSizeBinaryArray};
 use arrow2::buffer::Buffer;
 use arrow2_convert::deserialize::ArrowDeserialize;
 use arrow2_convert::field::ArrowField;
-use arrow2_convert::{ serialize::ArrowSerialize, ArrowDeserialize, ArrowField, ArrowSerialize };
+use arrow2_convert::{serialize::ArrowSerialize, ArrowDeserialize, ArrowField, ArrowSerialize};
 
 use crate::Component;
-use crate::{ TensorDataType, TensorElement };
+use crate::{TensorDataType, TensorElement};
 
 use super::arrow_convert_shims::BinaryBuffer;
@@ -59,7 +59,7 @@ impl ArrowSerialize for TensorId {
     #[inline]
     fn arrow_serialize(
         v: &<Self as ArrowField>::Type,
-        array: &mut Self::MutableArrayType
+        array: &mut Self::MutableArrayType,
     ) -> arrow2::error::Result<()> {
         array.try_push(Some(v.0.as_bytes()))
     }
@@ -70,9 +70,10 @@ impl ArrowDeserialize for TensorId {
     #[inline]
     fn arrow_deserialize(
-        v: <&Self::ArrayType as IntoIterator>::Item
+        v: <&Self::ArrayType as IntoIterator>::Item,
     ) -> Option<<Self as ArrowField>::Type> {
-        v.and_then(|bytes| uuid::Uuid::from_slice(bytes).ok()).map(Self)
+        v.and_then(|bytes| uuid::Uuid::from_slice(bytes).ok())
+            .map(Self)
     }
 }
@@ -210,7 +211,7 @@ impl TensorData {
 
     pub fn is_compressed_image(&self) -> bool {
         match self {
-            | Self::U8(_)
+            Self::U8(_)
             | Self::U16(_)
             | Self::U32(_)
             | Self::U64(_)
@@ -342,7 +343,9 @@ pub enum TensorDataMeaning {
     Depth,
 }
 
-#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, ArrowField, ArrowSerialize, ArrowDeserialize)]
+#[derive(
+    Clone, Copy, Debug, Default, PartialEq, Eq, ArrowField, ArrowSerialize, ArrowDeserialize,
+)]
 #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
 #[arrow_field(type = "dense")]
 pub enum TensorColormap {
@@ -353,7 +356,7 @@ pub enum TensorColormap {
     Plasma,
     #[default]
     Turbo,
-    Viridis
+    Viridis,
 }
 
 /// A Multi-dimensional Tensor.
@@ -418,6 +421,9 @@ pub struct Tensor {
     pub colormap: TensorColormap,
 
     pub unit: Option<String>,
+
+    pub depth_min: Option<f64>,
+    pub depth_max: Option<f64>,
 }
 
 impl Tensor {
@@ -441,7 +447,7 @@ impl Tensor {
             [y, x] => {
                 vec![
                     TensorDimension::height((((y.size as f64) * 2.0) / 3.0) as u64),
-                    TensorDimension::width(x.size)
+                    TensorDimension::width(x.size),
                 ]
             }
             _ => panic!("Invalid shape for NV12 encoding: {:?}", shape),
@@ -487,9 +493,9 @@ impl Tensor {
     }
 
     pub fn is_shaped_like_an_image(&self) -> bool {
-        self.num_dim() == 2 ||
-            (self.num_dim() == 3 &&
-                ({
+        self.num_dim() == 2
+            || (self.num_dim() == 3
+                && ({
                     matches!(
                         self.shape.last().unwrap().size,
                         // gray, rgb, rgba
@@ -542,32 +548,28 @@ impl Tensor {
     pub fn get_nv12_pixel(&self, index: &[u64; 2]) -> Option<[TensorElement; 3]> {
         let [row, col] = index;
         match self.real_shape().as_slice() {
-            [h, w] => {
-                match &self.data {
-                    TensorData::NV12(buf) => {
-                        let uv_offset = (w.size * h.size) as u64;
-                        let y = ((buf[(*row * w.size + *col) as usize] as f64) - 16.0) / 216.0;
-                        let u =
-                            ((buf[(uv_offset + (*row / 2) * w.size + *col) as usize] as f64) -
-                                128.0) /
-                            224.0;
-                        let v =
-                            ((buf[((uv_offset + (*row / 2) * w.size + *col) as usize) + 1] as f64) -
-                                128.0) /
-                            224.0;
-                        let r = y + 1.402 * v;
-                        let g = y - 0.344 * u + 0.714 * v;
-                        let b = y + 1.772 * u;
-
-                        Some([
-                            TensorElement::U8(f64::clamp(r * 255.0, 0.0, 255.0) as u8),
-                            TensorElement::U8(f64::clamp(g * 255.0, 0.0, 255.0) as u8),
-                            TensorElement::U8(f64::clamp(b * 255.0, 0.0, 255.0) as u8),
-                        ])
-                    }
-                    _ => None,
+            [h, w] => match &self.data {
+                TensorData::NV12(buf) => {
+                    let uv_offset = (w.size * h.size) as u64;
+                    let y = ((buf[(*row * w.size + *col) as usize] as f64) - 16.0) / 216.0;
+                    let u = ((buf[(uv_offset + (*row / 2) * w.size + *col) as usize] as f64)
+                        - 128.0)
+                        / 224.0;
+                    let v = ((buf[((uv_offset + (*row / 2) * w.size + *col) as usize) + 1] as f64)
+                        - 128.0)
+                        / 224.0;
+                    let r = y + 1.402 * v;
+                    let g = y - 0.344 * u + 0.714 * v;
+                    let b = y + 1.772 * u;
+
+                    Some([
+                        TensorElement::U8(f64::clamp(r * 255.0, 0.0, 255.0) as u8),
+                        TensorElement::U8(f64::clamp(g * 255.0, 0.0, 255.0) as u8),
+                        TensorElement::U8(f64::clamp(b * 255.0, 0.0, 255.0) as u8),
+                    ])
                 }
-            }
+                _ => None,
+            },
             _ => None,
         }
     }
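For reviewers unfamiliar with NV12: `get_nv12_pixel` above samples the full-resolution Y plane, then the interleaved half-vertical-resolution UV plane that follows it, and converts to RGB with BT.601-style coefficients. A minimal Python mirror of the same lookup (constants and indexing copied from the hunk; the helper name is hypothetical, and note that the U/V index assumes an even `col`, exactly as in the Rust code):

```python
import numpy as np

def nv12_pixel_to_rgb(buf: np.ndarray, w: int, h: int, row: int, col: int):
    # `buf` is a flat u8 NV12 buffer: w*h luma bytes, then interleaved UV pairs
    # at half vertical resolution. Same math as Tensor::get_nv12_pixel above.
    uv_offset = w * h
    y = (float(buf[row * w + col]) - 16.0) / 216.0
    u = (float(buf[uv_offset + (row // 2) * w + col]) - 128.0) / 224.0
    v = (float(buf[uv_offset + (row // 2) * w + col + 1]) - 128.0) / 224.0
    r = y + 1.402 * v
    g = y - 0.344 * u + 0.714 * v
    b = y + 1.772 * u
    return tuple(int(np.clip(c * 255.0, 0.0, 255.0)) for c in (r, g, b))
```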
@@ -593,7 +595,8 @@ pub enum TensorCastError {
     #[error("ndarray type mismatch with tensor storage")]
     TypeMismatch,
 
-    #[error("tensor shape did not match storage length")] BadTensorShape {
+    #[error("tensor shape did not match storage length")]
+    BadTensorShape {
         #[from]
         source: ndarray::ShapeError,
     },
@@ -646,6 +649,8 @@ macro_rules! tensor_type {
                     meter: None,
                     colormap: TensorColormap::None,
                     unit: None,
+                    depth_min: None,
+                    depth_max: None,
                 }),
                 None => Ok(Tensor {
                     tensor_id: TensorId::random(),
@@ -655,6 +660,8 @@ macro_rules! tensor_type {
                     meter: None,
                     colormap: TensorColormap::None,
                     unit: None,
+                    depth_min: None,
+                    depth_max: None,
                 }),
             }
         }
@@ -682,6 +689,8 @@ macro_rules! tensor_type {
                     meter: None,
                     colormap: TensorColormap::None,
                     unit: None,
+                    depth_min: None,
+                    depth_max: None,
                 })
                 .ok_or(TensorCastError::NotContiguousStdOrder)
         }
@@ -717,21 +726,22 @@ impl<'a> TryFrom<&'a Tensor> for ::ndarray::ArrayViewD<'a, half::f16> {
 
 #[cfg(feature = "image")]
 #[derive(thiserror::Error, Clone, Debug)]
 pub enum TensorImageLoadError {
-    #[error(transparent)] Image(std::sync::Arc<image::ImageError>),
+    #[error(transparent)]
+    Image(std::sync::Arc<image::ImageError>),
 
-    #[error(
-        "Unsupported JPEG color type: {0:?}. Only RGB Jpegs are supported"
-    )] UnsupportedJpegColorType(image::ColorType),
+    #[error("Unsupported JPEG color type: {0:?}. Only RGB Jpegs are supported")]
+    UnsupportedJpegColorType(image::ColorType),
 
     #[error(
         "Unsupported color type: {0:?}. We support 8-bit, 16-bit, and f32 images, and RGB, RGBA, Luminance, and Luminance-Alpha."
-    )] UnsupportedImageColorType(image::ColorType),
+    )]
+    UnsupportedImageColorType(image::ColorType),
 
-    #[error("Failed to load file: {0}")] ReadError(std::sync::Arc<std::io::Error>),
+    #[error("Failed to load file: {0}")]
+    ReadError(std::sync::Arc<std::io::Error>),
 
-    #[error(
-        "The encoded tensor did not match its metadata {expected:?} != {found:?}"
-    )] InvalidMetaData {
+    #[error("The encoded tensor did not match its metadata {expected:?} != {found:?}")]
+    InvalidMetaData {
         expected: Vec<TensorDimension>,
         found: Vec<TensorDimension>,
     },
@@ -757,11 +767,11 @@ impl From<image::ImageError> for TensorImageLoadError {
 #[cfg(feature = "image")]
 #[derive(thiserror::Error, Debug)]
 pub enum TensorImageSaveError {
-    #[error("Expected image-shaped tensor, got {0:?}")] ShapeNotAnImage(Vec<TensorDimension>),
+    #[error("Expected image-shaped tensor, got {0:?}")]
+    ShapeNotAnImage(Vec<TensorDimension>),
 
-    #[error(
-        "Cannot convert tensor with {0} channels and datatype {1} to an image"
-    )] UnsupportedChannelsDtype(u64, TensorDataType),
+    #[error("Cannot convert tensor with {0} channels and datatype {1} to an image")]
+    UnsupportedChannelsDtype(u64, TensorDataType),
 
     #[error("The tensor data did not match tensor dimensions")]
     BadData,
@@ -776,6 +786,8 @@ impl Tensor {
         meter: Option<f32>,
         colormap: TensorColormap,
         unit: Option<String>,
+        depth_min: Option<f64>,
+        depth_max: Option<f64>,
     ) -> Self {
         Self {
             tensor_id,
@@ -785,6 +797,8 @@ impl Tensor {
             meter,
             colormap,
             unit,
+            depth_min,
+            depth_max,
         }
     }
 }
@@ -796,7 +810,7 @@ impl Tensor {
     /// Requires the `image` feature.
     #[cfg(not(target_arch = "wasm32"))]
     pub fn tensor_from_jpeg_file(
-        image_path: impl AsRef<std::path::Path>
+        image_path: impl AsRef<std::path::Path>,
     ) -> Result<Self, TensorImageLoadError> {
         let jpeg_bytes = std::fs::read(image_path)?;
         Self::tensor_from_jpeg_bytes(jpeg_bytes)
@@ -810,7 +824,9 @@ impl Tensor {
         let jpeg = image::codecs::jpeg::JpegDecoder::new(std::io::Cursor::new(&jpeg_bytes))?;
         if jpeg.color_type() != image::ColorType::Rgb8 {
             // TODO(emilk): support gray-scale jpeg as well
-            return Err(TensorImageLoadError::UnsupportedJpegColorType(jpeg.color_type()));
+            return Err(TensorImageLoadError::UnsupportedJpegColorType(
+                jpeg.color_type(),
+            ));
         }
 
         let (w, h) = jpeg.dimensions();
@@ -819,13 +835,15 @@ impl Tensor {
             shape: vec![
                 TensorDimension::height(h as _),
                 TensorDimension::width(w as _),
-                TensorDimension::depth(3)
+                TensorDimension::depth(3),
             ],
             data: TensorData::JPEG(jpeg_bytes.into()),
             meaning: TensorDataMeaning::Unknown,
             meter: None,
             colormap: TensorColormap::None,
             unit: None,
+            depth_min: None,
+            depth_max: None,
         })
     }
 
@@ -835,7 +853,7 @@ impl Tensor {
     ///
     /// This is a convenience function that calls [`DecodedTensor::from_image`].
     pub fn from_image(
-        image: impl Into<image::DynamicImage>
+        image: impl Into<image::DynamicImage>,
     ) -> Result<Self, TensorImageLoadError> {
         Self::from_dynamic_image(image.into())
     }
@@ -851,21 +869,21 @@ impl Tensor {
 
     /// Predicts if [`Self::to_dynamic_image`] is likely to succeed, without doing anything expensive
     pub fn could_be_dynamic_image(&self) -> bool {
-        self.is_shaped_like_an_image() &&
-            matches!(
+        self.is_shaped_like_an_image()
+            && matches!(
                 self.dtype(),
-                TensorDataType::U8 |
-                    TensorDataType::U16 |
-                    TensorDataType::F16 |
-                    TensorDataType::F32 |
-                    TensorDataType::F64
+                TensorDataType::U8
+                    | TensorDataType::U16
+                    | TensorDataType::F16
+                    | TensorDataType::F32
+                    | TensorDataType::F64
             )
     }
 
     /// Try to convert an image-like tensor into an [`image::DynamicImage`].
    pub fn to_dynamic_image(&self) -> Result<image::DynamicImage, TensorImageSaveError> {
-        use ecolor::{ gamma_u8_from_linear_f32, linear_u8_from_linear_f32 };
-        use image::{ DynamicImage, GrayImage, RgbImage, RgbaImage };
+        use ecolor::{gamma_u8_from_linear_f32, linear_u8_from_linear_f32};
+        use image::{DynamicImage, GrayImage, RgbImage, RgbaImage};
 
         type Rgb16Image = image::ImageBuffer<image::Rgb<u16>, Vec<u16>>;
         type Rgba16Image = image::ImageBuffer<image::Rgba<u16>, Vec<u16>>;
@@ -877,85 +895,87 @@ impl Tensor {
         let w = w as u32;
         let h = h as u32;
 
-        let dyn_img_result = match (channels, &self.data) {
-            (1, TensorData::U8(buf)) => {
-                GrayImage::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageLuma8)
-            }
-            (1, TensorData::U16(buf)) =>
-                Gray16Image::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageLuma16),
-            // TODO(emilk) f16
-            (1, TensorData::F32(buf)) => {
-                let pixels = buf
-                    .iter()
-                    .map(|pixel| gamma_u8_from_linear_f32(*pixel))
-                    .collect();
-                GrayImage::from_raw(w, h, pixels).map(DynamicImage::ImageLuma8)
-            }
-            (1, TensorData::F64(buf)) => {
-                let pixels = buf
-                    .iter()
-                    .map(|&pixel| gamma_u8_from_linear_f32(pixel as f32))
-                    .collect();
-                GrayImage::from_raw(w, h, pixels).map(DynamicImage::ImageLuma8)
-            }
+        let dyn_img_result =
+            match (channels, &self.data) {
+                (1, TensorData::U8(buf)) => {
+                    GrayImage::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageLuma8)
+                }
+                (1, TensorData::U16(buf)) => Gray16Image::from_raw(w, h, buf.as_slice().to_vec())
+                    .map(DynamicImage::ImageLuma16),
+                // TODO(emilk) f16
+                (1, TensorData::F32(buf)) => {
+                    let pixels = buf
+                        .iter()
+                        .map(|pixel| gamma_u8_from_linear_f32(*pixel))
+                        .collect();
+                    GrayImage::from_raw(w, h, pixels).map(DynamicImage::ImageLuma8)
+                }
+                (1, TensorData::F64(buf)) => {
+                    let pixels = buf
+                        .iter()
+                        .map(|&pixel| gamma_u8_from_linear_f32(pixel as f32))
+                        .collect();
+                    GrayImage::from_raw(w, h, pixels).map(DynamicImage::ImageLuma8)
+                }
 
-            (3, TensorData::U8(buf)) => {
-                RgbImage::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageRgb8)
-            }
-            (3, TensorData::U16(buf)) =>
-                Rgb16Image::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageRgb16),
-            (3, TensorData::F32(buf)) => {
-                let pixels = buf.iter().copied().map(gamma_u8_from_linear_f32).collect();
-                RgbImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgb8)
-            }
-            (3, TensorData::F64(buf)) => {
-                let pixels = buf
-                    .iter()
-                    .map(|&comp| gamma_u8_from_linear_f32(comp as f32))
-                    .collect();
-                RgbImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgb8)
-            }
+                (3, TensorData::U8(buf)) => {
+                    RgbImage::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageRgb8)
+                }
+                (3, TensorData::U16(buf)) => Rgb16Image::from_raw(w, h, buf.as_slice().to_vec())
+                    .map(DynamicImage::ImageRgb16),
+                (3, TensorData::F32(buf)) => {
+                    let pixels = buf.iter().copied().map(gamma_u8_from_linear_f32).collect();
+                    RgbImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgb8)
+                }
+                (3, TensorData::F64(buf)) => {
+                    let pixels = buf
+                        .iter()
+                        .map(|&comp| gamma_u8_from_linear_f32(comp as f32))
+                        .collect();
+                    RgbImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgb8)
+                }
 
-            (4, TensorData::U8(buf)) => {
-                RgbaImage::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageRgba8)
-            }
-            (4, TensorData::U16(buf)) =>
-                Rgba16Image::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageRgba16),
-            (4, TensorData::F32(buf)) => {
-                let rgba: &[[f32; 4]] = bytemuck::cast_slice(buf.as_slice());
-                let pixels: Vec<u8> = rgba
-                    .iter()
-                    .flat_map(|&[r, g, b, a]| {
-                        let r = gamma_u8_from_linear_f32(r);
-                        let g = gamma_u8_from_linear_f32(g);
-                        let b = gamma_u8_from_linear_f32(b);
-                        let a = linear_u8_from_linear_f32(a);
-                        [r, g, b, a]
-                    })
-                    .collect();
-                RgbaImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgba8)
-            }
-            (4, TensorData::F64(buf)) => {
-                let rgba: &[[f64; 4]] = bytemuck::cast_slice(buf.as_slice());
-                let pixels: Vec<u8> = rgba
-                    .iter()
-                    .flat_map(|&[r, g, b, a]| {
-                        let r = gamma_u8_from_linear_f32(r as _);
-                        let g = gamma_u8_from_linear_f32(g as _);
-                        let b = gamma_u8_from_linear_f32(b as _);
-                        let a = linear_u8_from_linear_f32(a as _);
-                        [r, g, b, a]
-                    })
-                    .collect();
-                RgbaImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgba8)
-            }
-
-            (_, _) => {
-                return Err(
-                    TensorImageSaveError::UnsupportedChannelsDtype(channels, self.data.dtype())
-                );
-            }
-        };
+                (4, TensorData::U8(buf)) => {
+                    RgbaImage::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageRgba8)
+                }
+                (4, TensorData::U16(buf)) => Rgba16Image::from_raw(w, h, buf.as_slice().to_vec())
+                    .map(DynamicImage::ImageRgba16),
+                (4, TensorData::F32(buf)) => {
+                    let rgba: &[[f32; 4]] = bytemuck::cast_slice(buf.as_slice());
+                    let pixels: Vec<u8> = rgba
+                        .iter()
+                        .flat_map(|&[r, g, b, a]| {
+                            let r = gamma_u8_from_linear_f32(r);
+                            let g = gamma_u8_from_linear_f32(g);
+                            let b = gamma_u8_from_linear_f32(b);
+                            let a = linear_u8_from_linear_f32(a);
+                            [r, g, b, a]
+                        })
+                        .collect();
+                    RgbaImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgba8)
+                }
+                (4, TensorData::F64(buf)) => {
+                    let rgba: &[[f64; 4]] = bytemuck::cast_slice(buf.as_slice());
+                    let pixels: Vec<u8> = rgba
+                        .iter()
+                        .flat_map(|&[r, g, b, a]| {
+                            let r = gamma_u8_from_linear_f32(r as _);
+                            let g = gamma_u8_from_linear_f32(g as _);
+                            let b = gamma_u8_from_linear_f32(b as _);
+                            let a = linear_u8_from_linear_f32(a as _);
+                            [r, g, b, a]
+                        })
+                        .collect();
+                    RgbaImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgba8)
+                }
+
+                (_, _) => {
+                    return Err(TensorImageSaveError::UnsupportedChannelsDtype(
+                        channels,
+                        self.data.dtype(),
+                    ));
+                }
+            };
 
         dyn_img_result.ok_or(TensorImageSaveError::BadData)
     }
@@ -986,7 +1006,7 @@ impl TryFrom<Tensor> for DecodedTensor {
 
     fn try_from(tensor: Tensor) -> Result<Self, Tensor> {
         match &tensor.data {
-            | TensorData::U8(_)
+            TensorData::U8(_)
             | TensorData::U16(_)
            | TensorData::U32(_)
             | TensorData::U64(_)
@@ -1009,7 +1029,7 @@ impl DecodedTensor {
     ///
     /// Requires the `image` feature.
     pub fn from_image(
-        image: impl Into<image::DynamicImage>
+        image: impl Into<image::DynamicImage>,
     ) -> Result<DecodedTensor, TensorImageLoadError> {
         Self::from_dynamic_image(image.into())
     }
@@ -1018,7 +1038,7 @@ impl DecodedTensor {
     ///
     /// Requires the `image` feature.
     pub fn from_dynamic_image(
-        image: image::DynamicImage
+        image: image::DynamicImage,
     ) -> Result<DecodedTensor, TensorImageLoadError> {
         let (w, h) = (image.width(), image.height());
 
@@ -1053,7 +1073,9 @@ impl DecodedTensor {
             }
             _ => {
                 // It is very annoying that DynamicImage is #[non_exhaustive]
-                return Err(TensorImageLoadError::UnsupportedImageColorType(image.color()));
+                return Err(TensorImageLoadError::UnsupportedImageColorType(
+                    image.color(),
+                ));
             }
         };
         let tensor = Tensor {
@@ -1061,13 +1083,15 @@ impl DecodedTensor {
             shape: vec![
                 TensorDimension::height(h as _),
                 TensorDimension::width(w as _),
-                TensorDimension::depth(depth)
+                TensorDimension::depth(depth),
             ],
             data,
             meaning: TensorDataMeaning::Unknown,
             meter: None,
             colormap: TensorColormap::None,
             unit: None,
+            depth_min: None,
+            depth_max: None,
         };
         Ok(DecodedTensor(tensor))
     }
@@ -1076,7 +1100,7 @@ impl DecodedTensor {
         crate::profile_function!();
         // NV12 get's decoded in the shader, so we don't need to do anything here.
         match &maybe_encoded_tensor.data {
-            | TensorData::U8(_)
+            TensorData::U8(_)
             | TensorData::U16(_)
             | TensorData::U32(_)
             | TensorData::U64(_)
@@ -1148,7 +1172,7 @@ fn test_ndarray() {
             TensorDimension {
                 size: 2,
                 name: None,
-            }
+            },
         ],
         data: TensorData::U16(vec![1, 2, 3, 4].into()),
         meaning: TensorDataMeaning::Unknown,
@@ -1166,7 +1190,7 @@ fn test_ndarray() {
 
 #[test]
 fn test_arrow() {
-    use arrow2_convert::{ deserialize::TryIntoCollection, serialize::TryIntoArrow };
+    use arrow2_convert::{deserialize::TryIntoCollection, serialize::TryIntoArrow};
 
     let tensors_in = vec![
         Tensor {
@@ -1192,7 +1216,7 @@ fn test_arrow() {
             meter: None,
             colormap: TensorColormap::None,
             unit: None,
-        }
+        },
     ];
 
     let array: Box<dyn arrow2::array::Array> = tensors_in.iter().try_into_arrow().unwrap();
diff --git a/crates/re_renderer/shader/depth_cloud.wgsl b/crates/re_renderer/shader/depth_cloud.wgsl
index 34d7dd7cbc7a..da3bfd2476d3 100644
--- a/crates/re_renderer/shader/depth_cloud.wgsl
+++ b/crates/re_renderer/shader/depth_cloud.wgsl
@@ -182,7 +182,7 @@ fn compute_point_data(quad_idx: u32) -> PointData {
             }
         }
     } else {
-        color = Vec4(colormap_srgb(depth_cloud_info.colormap, world_space_depth), 1.0);
+        color = Vec4(colormap_srgb(depth_cloud_info.colormap, world_space_depth / depth_cloud_info.max_depth_in_world), 1.0);
     }
     // TODO(cmc): This assumes a pinhole camera; need to support other kinds at some point.
     let intrinsics = depth_cloud_info.depth_camera_intrinsics;
diff --git a/crates/re_viewer/src/depthai/depthai.rs b/crates/re_viewer/src/depthai/depthai.rs
index 3cefbb35d294..ed531a727535 100644
--- a/crates/re_viewer/src/depthai/depthai.rs
+++ b/crates/re_viewer/src/depthai/depthai.rs
@@ -735,7 +735,7 @@ fn default_neural_networks() -> Vec<Option<AiModel>> {
             camera: CameraBoardSocket::CAM_A,
         }),
         Some(AiModel {
-            path: String::from("yolov6nr3_coco_640x352d"),
+            path: String::from("yolov6nr3_coco_640x352"),
             display_name: String::from("Yolo V6"),
             camera: CameraBoardSocket::CAM_A,
         }),
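The `depth_cloud.wgsl` change above is a one-liner but easy to misread in diff form: the colormap is now sampled with the depth normalized by `max_depth_in_world` instead of the raw metric depth, so the whole working range spreads over the colormap ramp rather than saturating past 1.0. A rough Python sketch of the idea (`turbo` is a hypothetical stand-in for the shader's `colormap_srgb`):

```python
def turbo(t: float):
    # Stand-in for colormap_srgb(): any map from [0, 1] to an RGB triple works here.
    t = min(max(t, 0.0), 1.0)
    return (t, 1.0 - abs(2.0 * t - 1.0), 1.0 - t)

def point_color(world_space_depth: float, max_depth_in_world: float):
    # Before: turbo(world_space_depth), so everything beyond 1.0 m hit the end of the ramp.
    # After: normalize by the expected maximum first, giving distinct colors across the range.
    return turbo(world_space_depth / max_depth_in_world)
```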
diff --git a/crates/re_viewer/src/gpu_bridge/tensor_to_gpu.rs b/crates/re_viewer/src/gpu_bridge/tensor_to_gpu.rs
index 0bc97d33f450..bbd8af16fa7d 100644
--- a/crates/re_viewer/src/gpu_bridge/tensor_to_gpu.rs
+++ b/crates/re_viewer/src/gpu_bridge/tensor_to_gpu.rs
@@ -4,18 +4,18 @@ use anyhow::Context;
 use std::borrow::Cow;
 use std::mem;
 
-use bytemuck::{ allocation::pod_collect_to_vec, cast_slice, Pod };
+use bytemuck::{allocation::pod_collect_to_vec, cast_slice, Pod};
 use egui::util::hash;
 use wgpu::TextureFormat;
 
-use re_log_types::component_types::{ DecodedTensor, Tensor, TensorData, TensorColormap };
+use re_log_types::component_types::{DecodedTensor, Tensor, TensorColormap, TensorData};
 use re_renderer::{
-    renderer::{ ColorMapper, ColormappedTexture, TextureEncoding },
+    renderer::{ColorMapper, ColormappedTexture, TextureEncoding},
     resource_managers::Texture2DCreationDesc,
     RenderContext,
 };
 
-use crate::{ gpu_bridge::get_or_create_texture, misc::caches::TensorStats };
+use crate::{gpu_bridge::get_or_create_texture, misc::caches::TensorStats};
 
 use super::try_get_or_create_texture;
 
@@ -32,16 +32,14 @@ pub fn tensor_to_gpu(
     debug_name: &str,
     tensor: &DecodedTensor,
     tensor_stats: &TensorStats,
-    annotations: &crate::ui::Annotations
+    annotations: &crate::ui::Annotations,
 ) -> anyhow::Result<ColormappedTexture> {
-    crate::profile_function!(
-        format!(
-            "meaning: {:?}, dtype: {}, shape: {:?}",
-            tensor.meaning,
-            tensor.dtype(),
-            tensor.shape()
-        )
-    );
+    crate::profile_function!(format!(
+        "meaning: {:?}, dtype: {}, shape: {:?}",
+        tensor.meaning,
+        tensor.dtype(),
+        tensor.shape()
+    ));
 
     use re_log_types::component_types::TensorDataMeaning;
 
@@ -61,25 +59,24 @@ pub fn tensor_to_gpu(
 /// Pad and cast a slice of RGB values to RGBA with only one copy.
 fn pad_and_cast_rgb(data: &[u8], alpha: u8) -> Cow<'static, [u8]> {
     crate::profile_function!();
-    (
-        if cfg!(debug_assertions) {
-            // fastest version in debug builds.
-            // 5x faster in debug builds, but 2x slower in release
-            let mut padded = vec![alpha; data.len() / 3 * 4];
-            for i in 0..data.len() / 3 {
-                padded[4 * i] = data[3 * i];
-                padded[4 * i + 1] = data[3 * i + 1];
-                padded[4 * i + 2] = data[3 * i + 2];
-            }
-            padded
-        } else {
-            // fastest version in optimized builds
-            data.chunks_exact(3)
-                .flat_map(|chunk| [chunk[0], chunk[1], chunk[2], alpha])
-                .collect::<Vec<u8>>()
-                .into()
+    (if cfg!(debug_assertions) {
+        // fastest version in debug builds.
+        // 5x faster in debug builds, but 2x slower in release
+        let mut padded = vec![alpha; data.len() / 3 * 4];
+        for i in 0..data.len() / 3 {
+            padded[4 * i] = data[3 * i];
+            padded[4 * i + 1] = data[3 * i + 1];
+            padded[4 * i + 2] = data[3 * i + 2];
         }
-    ).into()
+        padded
+    } else {
+        // fastest version in optimized builds
+        data.chunks_exact(3)
+            .flat_map(|chunk| [chunk[0], chunk[1], chunk[2], alpha])
+            .collect::<Vec<u8>>()
+            .into()
+    })
+    .into()
 }
 
 // ----------------------------------------------------------------------------
 // Color textures:
 
@@ -89,7 +86,7 @@ fn color_tensor_to_gpu(
     render_ctx: &mut RenderContext,
     debug_name: &str,
     tensor: &DecodedTensor,
-    tensor_stats: &TensorStats
+    tensor_stats: &TensorStats,
 ) -> anyhow::Result<ColormappedTexture> {
     let [height, width, depth] = height_width_depth(tensor)?;
     let texture_handle = try_get_or_create_texture(render_ctx, hash(tensor.id()), || {
@@ -105,14 +102,15 @@ fn color_tensor_to_gpu(
         (1, TensorData::I8(buf)) => (cast_slice_to_cow(buf), TextureFormat::R8Snorm),
 
         // Special handling for sRGB(A) textures:
-        (3, TensorData::U8(buf)) =>
-            (pad_and_cast_rgb(buf.as_slice(), 255), TextureFormat::Rgba8UnormSrgb),
-        (4, TensorData::U8(buf)) =>
-            (
-                // TODO(emilk): premultiply alpha
-                cast_slice_to_cow(buf.as_slice()),
-                TextureFormat::Rgba8UnormSrgb,
-            ),
+        (3, TensorData::U8(buf)) => (
+            pad_and_cast_rgb(buf.as_slice(), 255),
+            TextureFormat::Rgba8UnormSrgb,
+        ),
+        (4, TensorData::U8(buf)) => (
+            // TODO(emilk): premultiply alpha
+            cast_slice_to_cow(buf.as_slice()),
+            TextureFormat::Rgba8UnormSrgb,
+        ),
 
         _ => {
             // Fallback to general case:
@@ -128,13 +126,17 @@ fn color_tensor_to_gpu(
             width,
             height,
         })
-    }).map_err(|err| anyhow::anyhow!("Failed to create texture for color tensor: {err}"))?;
+    })
+    .map_err(|err| anyhow::anyhow!("Failed to create texture for color tensor: {err}"))?;
 
     let texture_format = texture_handle.format();
     let encoding: Option<TextureEncoding> = (&tensor.data).into();
 
     // Special casing for normalized textures used above:
-    let range = if matches!(texture_format, TextureFormat::R8Unorm | TextureFormat::Rgba8UnormSrgb) {
+    let range = if matches!(
+        texture_format,
+        TextureFormat::R8Unorm | TextureFormat::Rgba8UnormSrgb
+    ) {
         [0.0, 1.0]
     } else if texture_format == TextureFormat::R8Snorm {
         [-1.0, 1.0]
     } else {
         crate::gpu_bridge::range(tensor_stats)?
     };
 
-    let color_mapper = if
-        encoding != Some(TextureEncoding::Nv12) &&
-        re_renderer::texture_info::num_texture_components(texture_format) == 1
+    let color_mapper = if encoding != Some(TextureEncoding::Nv12)
+        && re_renderer::texture_info::num_texture_components(texture_format) == 1
     {
         match tensor.inner().colormap {
-            TensorColormap::Grayscale => Some(ColorMapper::Function(re_renderer::Colormap::Grayscale)),
+            TensorColormap::Grayscale => {
+                Some(ColorMapper::Function(re_renderer::Colormap::Grayscale))
+            }
             TensorColormap::Viridis => Some(ColorMapper::Function(re_renderer::Colormap::Viridis)),
             TensorColormap::Plasma => Some(ColorMapper::Function(re_renderer::Colormap::Plasma)),
             TensorColormap::Inferno => Some(ColorMapper::Function(re_renderer::Colormap::Inferno)),
@@ -179,13 +182,22 @@ fn class_id_tensor_to_gpu(
     debug_name: &str,
     tensor: &DecodedTensor,
     tensor_stats: &TensorStats,
-    annotations: &crate::ui::Annotations
+    annotations: &crate::ui::Annotations,
 ) -> anyhow::Result<ColormappedTexture> {
     let [_height, _width, depth] = height_width_depth(tensor)?;
-    anyhow::ensure!(depth == 1, "Cannot apply annotations to tensor of shape {:?}", tensor.shape);
-    anyhow::ensure!(tensor.dtype().is_integer(), "Only integer tensors can be annotated");
+    anyhow::ensure!(
+        depth == 1,
+        "Cannot apply annotations to tensor of shape {:?}",
+        tensor.shape
+    );
+    anyhow::ensure!(
+        tensor.dtype().is_integer(),
+        "Only integer tensors can be annotated"
+    );
 
-    let (min, max) = tensor_stats.range.ok_or_else(|| anyhow::anyhow!("compressed_tensor!?"))?;
+    let (min, max) = tensor_stats
+        .range
+        .ok_or_else(|| anyhow::anyhow!("compressed_tensor!?"))?;
     anyhow::ensure!(0.0 <= min, "Negative class id");
     anyhow::ensure!(max <= 65535.0, "Too many class ids"); // we only support u8 and u16 tensors
 
@@ -196,29 +208,32 @@ fn class_id_tensor_to_gpu(
     let colormap_width = 256;
     let colormap_height = (num_colors + colormap_width - 1) / colormap_width;
 
-    let colormap_texture_handle = get_or_create_texture(render_ctx, hash(annotations.row_id), || {
-        let data: Vec<u8> = (0..colormap_width * colormap_height)
-            .flat_map(|id| {
-                let color = annotations
-                    .class_description(Some(re_log_types::component_types::ClassId(id as u16)))
-                    .annotation_info()
-                    .color(None, crate::ui::DefaultColor::TransparentBlack);
-                color.to_array() // premultiplied!
-            })
-            .collect();
-
-        Texture2DCreationDesc {
-            label: "class_id_colormap".into(),
-            data: data.into(),
-            format: TextureFormat::Rgba8UnormSrgb,
-            width: colormap_width as u32,
-            height: colormap_height as u32,
-        }
-    }).context("Failed to create class_id_colormap.")?;
+    let colormap_texture_handle =
+        get_or_create_texture(render_ctx, hash(annotations.row_id), || {
+            let data: Vec<u8> = (0..colormap_width * colormap_height)
+                .flat_map(|id| {
+                    let color = annotations
+                        .class_description(Some(re_log_types::component_types::ClassId(id as u16)))
+                        .annotation_info()
+                        .color(None, crate::ui::DefaultColor::TransparentBlack);
+                    color.to_array() // premultiplied!
+                })
+                .collect();
+
+            Texture2DCreationDesc {
+                label: "class_id_colormap".into(),
+                data: data.into(),
+                format: TextureFormat::Rgba8UnormSrgb,
+                width: colormap_width as u32,
+                height: colormap_height as u32,
+            }
+        })
+        .context("Failed to create class_id_colormap.")?;
 
     let main_texture_handle = try_get_or_create_texture(render_ctx, hash(tensor.id()), || {
         general_texture_creation_desc_from_tensor(debug_name, tensor)
-    }).map_err(|err| anyhow::anyhow!("Failed to create texture for class id tensor: {err}"))?;
+    })
+    .map_err(|err| anyhow::anyhow!("Failed to create texture for class id tensor: {err}"))?;
 
     Ok(ColormappedTexture {
         texture: main_texture_handle,
@@ -237,15 +252,20 @@ fn depth_tensor_to_gpu(
     render_ctx: &mut RenderContext,
     debug_name: &str,
     tensor: &DecodedTensor,
-    tensor_stats: &TensorStats
+    tensor_stats: &TensorStats,
 ) -> anyhow::Result<ColormappedTexture> {
     let [_height, _width, depth] = height_width_depth(tensor)?;
-    anyhow::ensure!(depth == 1, "Depth tensor of weird shape: {:?}", tensor.shape);
-    let (min, max) = depth_tensor_range(tensor, tensor_stats)?;
+    anyhow::ensure!(
+        depth == 1,
+        "Depth tensor of weird shape: {:?}",
+        tensor.shape
+    );
+    let (mut min, mut max) = depth_tensor_range(tensor, tensor_stats)?;
 
     let texture = try_get_or_create_texture(render_ctx, hash(tensor.id()), || {
         general_texture_creation_desc_from_tensor(debug_name, tensor)
-    }).map_err(|err| anyhow::anyhow!("Failed to create depth tensor texture: {err}"))?;
+    })
+    .map_err(|err| anyhow::anyhow!("Failed to create depth tensor texture: {err}"))?;
 
     Ok(ColormappedTexture {
         texture,
@@ -258,21 +278,34 @@ fn depth_tensor_to_gpu(
 
 fn depth_tensor_range(
     tensor: &DecodedTensor,
-    tensor_stats: &TensorStats
+    tensor_stats: &TensorStats,
 ) -> anyhow::Result<(f64, f64)> {
-    let range = tensor_stats.range.ok_or(
-        anyhow::anyhow!("Tensor has no range!? Was this compressed?")
-    )?;
+    let range = tensor_stats.range.ok_or(anyhow::anyhow!(
+        "Tensor has no range!? Was this compressed?"
+    ))?;
     let (mut min, mut max) = range;
 
-    anyhow::ensure!(min.is_finite() && max.is_finite(), "Tensor has non-finite values");
+    anyhow::ensure!(
+        min.is_finite() && max.is_finite(),
+        "Tensor has non-finite values"
+    );
 
     min = min.min(0.0); // Depth usually start at zero.
 
     if min == max {
         // Uniform image. We can't remap it to a 0-1 range, so do whatever:
         min = 0.0;
-        max = if tensor.dtype().is_float() { 1.0 } else { tensor.dtype().max_value() };
+        max = if tensor.dtype().is_float() {
+            1.0
+        } else {
+            tensor.dtype().max_value()
+        };
+    }
+    if let Some(depth_min) = tensor.inner().depth_min {
+        min = depth_min;
+    }
+    if let Some(depth_max) = tensor.inner().depth_max {
+        max = depth_max;
     }
 
     Ok((min, max))
 }
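The new `depth_min`/`depth_max` fields come together in `depth_tensor_range` above: start from the tensor's measured min/max, fall back to a sane range for uniform images, then let explicit overrides from the tensor win. A minimal Python mirror of that selection logic (all names here are hypothetical):

```python
def depth_display_range(stats_range, dtype_is_float, dtype_max, depth_min=None, depth_max=None):
    # Mirror of depth_tensor_range above: measured stats first, then overrides.
    lo, hi = stats_range
    lo = min(lo, 0.0)  # depth usually starts at zero
    if lo == hi:
        # Uniform image: can't remap to a 0-1 range, so pick something sane.
        lo, hi = 0.0, (1.0 if dtype_is_float else dtype_max)
    if depth_min is not None:
        lo = depth_min
    if depth_max is not None:
        hi = depth_max
    return lo, hi

# A u16 frame spanning 0..4000 mm, clamped to an explicitly trusted window:
assert depth_display_range((0.0, 4000.0), False, 65535.0, 200.0, 9370.0) == (200.0, 9370.0)
```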
@@ -284,7 +317,7 @@ fn depth_tensor_range(
 /// Uses no `Unorm/Snorm` formats.
 fn general_texture_creation_desc_from_tensor<'a>(
     debug_name: &str,
-    tensor: &'a DecodedTensor
+    tensor: &'a DecodedTensor,
 ) -> anyhow::Result<Texture2DCreationDesc<'a>> {
     let [height, width, depth] = height_width_depth(tensor)?;
@@ -305,7 +338,9 @@ fn general_texture_creation_desc_from_tensor<'a>(
                 TensorData::F32(buf) => (cast_slice_to_cow(buf), TextureFormat::R32Float),
                 TensorData::F64(buf) => (narrow_f64_to_f32s(buf), TextureFormat::R32Float), // narrowing to f32!
 
-                TensorData::JPEG(_) => { unreachable!("DecodedTensor cannot contain a JPEG") }
+                TensorData::JPEG(_) => {
+                    unreachable!("DecodedTensor cannot contain a JPEG")
+                }
                 TensorData::NV12(buf) => {
                     (cast_slice_to_cow(buf.as_slice()), TextureFormat::R8Unorm)
                 }
@@ -328,7 +363,9 @@ fn general_texture_creation_desc_from_tensor<'a>(
                 TensorData::F32(buf) => (cast_slice_to_cow(buf), TextureFormat::Rg32Float),
                 TensorData::F64(buf) => (narrow_f64_to_f32s(buf), TextureFormat::Rg32Float), // narrowing to f32!
 
-                TensorData::JPEG(_) => { unreachable!("DecodedTensor cannot contain a JPEG") }
+                TensorData::JPEG(_) => {
+                    unreachable!("DecodedTensor cannot contain a JPEG")
+                }
                 TensorData::NV12(_) => {
                     panic!("NV12 cannot be a two channel tensor!");
                 }
@@ -340,34 +377,35 @@ fn general_texture_creation_desc_from_tensor<'a>(
             // To be safe, we pad with the MAX value of integers, and with 1.0 for floats.
             // TODO(emilk): tell the shader to ignore the alpha channel instead!
             match &tensor.data {
-                TensorData::U8(buf) =>
-                    (pad_and_cast(buf.as_slice(), u8::MAX), TextureFormat::Rgba8Uint),
+                TensorData::U8(buf) => (
+                    pad_and_cast(buf.as_slice(), u8::MAX),
+                    TextureFormat::Rgba8Uint,
+                ),
                 TensorData::U16(buf) => (pad_and_cast(buf, u16::MAX), TextureFormat::Rgba16Uint),
                 TensorData::U32(buf) => (pad_and_cast(buf, u32::MAX), TextureFormat::Rgba32Uint),
-                TensorData::U64(buf) =>
-                    (
-                        pad_and_narrow_and_cast(buf, 1.0, |x: u64| x as f32),
-                        TextureFormat::Rgba32Float,
-                    ),
+                TensorData::U64(buf) => (
+                    pad_and_narrow_and_cast(buf, 1.0, |x: u64| x as f32),
+                    TextureFormat::Rgba32Float,
+                ),
 
                 TensorData::I8(buf) => (pad_and_cast(buf, i8::MAX), TextureFormat::Rgba8Sint),
                 TensorData::I16(buf) => (pad_and_cast(buf, i16::MAX), TextureFormat::Rgba16Sint),
                 TensorData::I32(buf) => (pad_and_cast(buf, i32::MAX), TextureFormat::Rgba32Sint),
-                TensorData::I64(buf) =>
-                    (
-                        pad_and_narrow_and_cast(buf, 1.0, |x: i64| x as f32),
-                        TextureFormat::Rgba32Float,
-                    ),
+                TensorData::I64(buf) => (
+                    pad_and_narrow_and_cast(buf, 1.0, |x: i64| x as f32),
+                    TextureFormat::Rgba32Float,
+                ),
 
                 // TensorData::F16(buf) => (pad_and_cast(buf, 1.0), TextureFormat::Rgba16Float), TODO(#854)
                 TensorData::F32(buf) => (pad_and_cast(buf, 1.0), TextureFormat::Rgba32Float),
-                TensorData::F64(buf) =>
-                    (
-                        pad_and_narrow_and_cast(buf, 1.0, |x: f64| x as f32),
-                        TextureFormat::Rgba32Float,
-                    ),
+                TensorData::F64(buf) => (
+                    pad_and_narrow_and_cast(buf, 1.0, |x: f64| x as f32),
+                    TextureFormat::Rgba32Float,
+                ),
 
-                TensorData::JPEG(_) => { unreachable!("DecodedTensor cannot contain a JPEG") }
+                TensorData::JPEG(_) => {
+                    unreachable!("DecodedTensor cannot contain a JPEG")
+                }
                 TensorData::NV12(_) => {
                     panic!("NV12 cannot be a three channel tensor!");
                 }
@@ -392,7 +430,9 @@ fn general_texture_creation_desc_from_tensor<'a>(
                 TensorData::F32(buf) => (cast_slice_to_cow(buf), TextureFormat::Rgba32Float),
                 TensorData::F64(buf) => (narrow_f64_to_f32s(buf), TextureFormat::Rgba32Float), // narrowing to f32!
-                TensorData::JPEG(_) => { unreachable!("DecodedTensor cannot contain a JPEG") }
+                TensorData::JPEG(_) => {
+                    unreachable!("DecodedTensor cannot contain a JPEG")
+                }
                 TensorData::NV12(_) => {
                     panic!("NV12 cannot be a four channel tensor!");
                 }
@@ -476,7 +516,7 @@ fn pad_and_cast<T: Pod>(data: &[T], pad: T) -> Cow<'static, [u8]> {
 fn pad_and_narrow_and_cast<T: Pod>(
     data: &[T],
     pad: f32,
-    narrow: impl Fn(T) -> f32
+    narrow: impl Fn(T) -> f32,
 ) -> Cow<'static, [u8]> {
     crate::profile_function!();
diff --git a/crates/re_viewer/src/ui/data_ui/image.rs b/crates/re_viewer/src/ui/data_ui/image.rs
index 89b1610f2a03..78b83197c5a0 100644
--- a/crates/re_viewer/src/ui/data_ui/image.rs
+++ b/crates/re_viewer/src/ui/data_ui/image.rs
@@ -220,7 +220,7 @@ pub fn tensor_summary_ui_grid_contents(
     tensor: &Tensor,
     tensor_stats: &TensorStats
 ) {
-    let Tensor { tensor_id: _, shape, data, meaning, meter, colormap: _, unit: _ } = tensor;
+    let Tensor { tensor_id: _, shape, data, meaning, meter, colormap: _, unit: _ , depth_min: _, depth_max: _} = tensor;
 
     re_ui
         .grid_left_hand_label(ui, "Data type")
diff --git a/crates/re_viewer/src/ui/view_spatial/scene/scene_part/images.rs b/crates/re_viewer/src/ui/view_spatial/scene/scene_part/images.rs
index 2e00c22fbb24..5836b54d073c 100644
--- a/crates/re_viewer/src/ui/view_spatial/scene/scene_part/images.rs
+++ b/crates/re_viewer/src/ui/view_spatial/scene/scene_part/images.rs
@@ -2,24 +2,25 @@ use egui::NumExt;
 use glam::Vec3;
 use itertools::Itertools;
 
-use re_data_store::{ query_latest_single, EntityPath, EntityProperties, EditableAutoValue };
+use re_data_store::{query_latest_single, EditableAutoValue, EntityPath, EntityProperties};
 use re_log_types::{
-    component_types::{ ColorRGBA, InstanceKey, Tensor, TensorData, TensorDataMeaning },
-    Component,
-    DecodedTensor,
-    Transform,
+    component_types::{ColorRGBA, InstanceKey, Tensor, TensorData, TensorDataMeaning},
+    Component, DecodedTensor, Transform,
 };
-use re_query::{ query_primary_with_history, EntityView, QueryError };
+use re_query::{query_primary_with_history, EntityView, QueryError};
 use re_renderer::{
-    renderer::{ DepthCloud, RectangleOptions, ColormappedTexture },
+    renderer::{ColormappedTexture, DepthCloud, RectangleOptions},
     resource_managers::Texture2DCreationDesc,
-    Colormap,
-    OutlineMaskPreference,
+    Colormap, OutlineMaskPreference,
 };
 
 use crate::{
-    misc::{ SpaceViewHighlights, SpaceViewOutlineMasks, TransformCache, ViewerContext },
-    ui::{ scene::SceneQuery, view_spatial::{ Image, SceneSpatial }, Annotations, DefaultColor },
+    misc::{SpaceViewHighlights, SpaceViewOutlineMasks, TransformCache, ViewerContext},
+    ui::{
+        scene::SceneQuery,
+        view_spatial::{Image, SceneSpatial},
+        Annotations, DefaultColor,
+    },
 };
 
 use super::ScenePart;
@@ -31,7 +32,7 @@ fn to_textured_rect(
     ent_path: &EntityPath,
     tensor: &DecodedTensor,
     multiplicative_tint: egui::Rgba,
-    outline_mask: OutlineMaskPreference
+    outline_mask: OutlineMaskPreference,
 ) -> Option<re_renderer::renderer::TexturedRect> {
     crate::profile_function!();
 
@@ -42,15 +43,13 @@ fn to_textured_rect(
     let debug_name = ent_path.to_string();
     let tensor_stats = ctx.cache.tensor_stats(tensor);
 
-    match
-        crate::gpu_bridge::tensor_to_gpu(
-            ctx.render_ctx,
-            &debug_name,
-            tensor,
-            tensor_stats,
-            annotations
-        )
-    {
+    match crate::gpu_bridge::tensor_to_gpu(
+        ctx.render_ctx,
+        &debug_name,
+        tensor,
+        tensor_stats,
+        annotations,
+    ) {
         Ok(colormapped_texture) => {
             // TODO(emilk): let users pick texture filtering.
             // Always use nearest for magnification: let users see crisp individual pixels when they zoom
@@ -95,44 +94,41 @@ fn handle_image_layering(scene: &mut SceneSpatial) {
     // Handle layered rectangles that are on (roughly) the same plane and were logged in sequence.
     // First, group by similar plane.
     // TODO(andreas): Need planes later for picking as well!
-    let images_grouped_by_plane = (
-        {
-            let mut cur_plane = macaw::Plane3::from_normal_dist(Vec3::NAN, std::f32::NAN);
-            let mut rectangle_group = Vec::new();
-            scene.primitives.images
-                .drain(..) // We rebuild the list as we might reorder as well!
-                .batching(move |it| {
-                    for image in it {
-                        let rect = &image.textured_rect;
-
-                        let prev_plane = cur_plane;
-                        cur_plane = macaw::Plane3::from_normal_point(
-                            rect.extent_u.cross(rect.extent_v).normalize(),
-                            rect.top_left_corner_position
-                        );
-
-                        // Are the image planes too unsimilar? Then this is a new group.
-                        if
-                            !rectangle_group.is_empty() &&
-                            prev_plane.normal.dot(cur_plane.normal) < 0.99 &&
-                            prev_plane.d - cur_plane.d < 0.01
-                        {
-                            let previous_group = std::mem::replace(
-                                &mut rectangle_group,
-                                vec![image]
-                            );
-                            return Some(previous_group);
-                        }
-                        rectangle_group.push(image);
-                    }
-                    if !rectangle_group.is_empty() {
-                        Some(rectangle_group.drain(..).collect())
-                    } else {
-                        None
+    let images_grouped_by_plane = ({
+        let mut cur_plane = macaw::Plane3::from_normal_dist(Vec3::NAN, std::f32::NAN);
+        let mut rectangle_group = Vec::new();
+        scene
+            .primitives
+            .images
+            .drain(..) // We rebuild the list as we might reorder as well!
+            .batching(move |it| {
+                for image in it {
+                    let rect = &image.textured_rect;
+
+                    let prev_plane = cur_plane;
+                    cur_plane = macaw::Plane3::from_normal_point(
+                        rect.extent_u.cross(rect.extent_v).normalize(),
+                        rect.top_left_corner_position,
+                    );
+
+                    // Are the image planes too unsimilar? Then this is a new group.
+                    if !rectangle_group.is_empty()
+                        && prev_plane.normal.dot(cur_plane.normal) < 0.99
+                        && prev_plane.d - cur_plane.d < 0.01
+                    {
+                        let previous_group = std::mem::replace(&mut rectangle_group, vec![image]);
+                        return Some(previous_group);
                     }
-                })
-        }
-    ).collect_vec();
+                    rectangle_group.push(image);
+                }
+                if !rectangle_group.is_empty() {
+                    Some(rectangle_group.drain(..).collect())
+                } else {
+                    None
+                }
+            })
+    })
+    .collect_vec();
 
     // Then, for each planar group do resorting and change transparency.
     for mut grouped_images in images_grouped_by_plane {
@@ -143,13 +139,20 @@ fn handle_image_layering(scene: &mut SceneSpatial) {
         for (idx, image) in grouped_images.iter_mut().enumerate() {
             // Set depth offset for correct order and avoid z fighting when there is a 3d camera.
             // Keep behind depth offset 0 for correct picking order.
-            image.textured_rect.options.depth_offset = ((idx as isize) -
-                (total_num_images as isize)) as re_renderer::DepthOffset;
+            image.textured_rect.options.depth_offset =
+                ((idx as isize) - (total_num_images as isize)) as re_renderer::DepthOffset;
 
             // make top images transparent
-            let opacity = if idx == 0 { 1.0 } else { 1.0 / (total_num_images.at_most(20) as f32) }; // avoid precision problems in framebuffer
-            image.textured_rect.options.multiplicative_tint =
-                image.textured_rect.options.multiplicative_tint.multiply(opacity);
+            let opacity = if idx == 0 {
+                1.0
+            } else {
+                1.0 / (total_num_images.at_most(20) as f32)
+            }; // avoid precision problems in framebuffer
+            image.textured_rect.options.multiplicative_tint = image
+                .textured_rect
+                .options
+                .multiplicative_tint
+                .multiply(opacity);
         }
 
         scene.primitives.images.extend(grouped_images);
@@ -168,7 +171,7 @@ impl ImagesPart {
         properties: &mut EntityProperties,
         ent_path: &EntityPath,
         world_from_obj: glam::Mat4,
-        highlights: &SpaceViewHighlights
+        highlights: &SpaceViewHighlights,
     ) -> Result<(), QueryError> {
         crate::profile_function!();
 
@@ -201,28 +204,23 @@ impl ImagesPart {
         if *properties.backproject_depth.get() && tensor.meaning == TensorDataMeaning::Depth {
             let query = ctx.current_query();
 
-            let pinhole_ent_path = crate::misc::queries::closest_pinhole_transform(
-                ctx,
-                ent_path,
-                &query
-            );
+            let pinhole_ent_path =
+                crate::misc::queries::closest_pinhole_transform(ctx, ent_path, &query);
 
             if let Some(pinhole_ent_path) = pinhole_ent_path {
                 // NOTE: we don't pass in `world_from_obj` because this corresponds to the
                 // transform of the projection plane, which is of no use to us here.
                 // What we want are the extrinsics of the depth camera!
-                match
-                    Self::process_entity_view_as_depth_cloud(
-                        scene,
-                        ctx,
-                        transforms,
-                        properties,
-                        &tensor,
-                        ent_path,
-                        &pinhole_ent_path,
-                        entity_highlight
-                    )
-                {
+                match Self::process_entity_view_as_depth_cloud(
+                    scene,
+                    ctx,
+                    transforms,
+                    properties,
+                    &tensor,
+                    ent_path,
+                    &pinhole_ent_path,
+                    entity_highlight,
+                ) {
                    Ok(()) => {
                         return Ok(());
                     }
@@ -233,22 +231,20 @@ impl ImagesPart {
             }
         }
 
-        let color = annotations
-            .class_description(None)
-            .annotation_info()
-            .color(color.map(|c| c.to_array()).as_ref(), DefaultColor::OpaqueWhite);
-
-        if
-            let Some(textured_rect) = to_textured_rect(
-                ctx,
-                &annotations,
-                world_from_obj,
-                ent_path,
-                &tensor,
-                color.into(),
-                entity_highlight.overall
-            )
-        {
+        let color = annotations.class_description(None).annotation_info().color(
+            color.map(|c| c.to_array()).as_ref(),
+            DefaultColor::OpaqueWhite,
+        );
+
+        if let Some(textured_rect) = to_textured_rect(
+            ctx,
+            &annotations,
+            world_from_obj,
+            ent_path,
+            &tensor,
+            color.into(),
+            entity_highlight.overall,
+        ) {
             scene.primitives.images.push(Image {
                 ent_path: ent_path.clone(),
                 tensor,
@@ -269,16 +265,18 @@ impl ImagesPart {
         tensor: &DecodedTensor,
         ent_path: &EntityPath,
         pinhole_ent_path: &EntityPath,
-        entity_highlight: &SpaceViewOutlineMasks
+        entity_highlight: &SpaceViewOutlineMasks,
     ) -> Result<(), String> {
         crate::profile_function!();
 
         let Some(re_log_types::Transform::Pinhole(intrinsics)) = query_latest_single::<Transform>(
             &ctx.log_db.entity_db,
             pinhole_ent_path,
-            &ctx.current_query()
+            &ctx.current_query(),
         ) else {
-            return Err(format!("Couldn't fetch pinhole intrinsics at {pinhole_ent_path:?}"));
+            return Err(format!(
+                "Couldn't fetch pinhole intrinsics at {pinhole_ent_path:?}"
+            ));
         };
 
         // TODO(cmc): getting to those extrinsics is no easy task :|
         let world_from_obj = pinhole_ent_path
             .parent()
             .and_then(|ent_path| transforms.reference_from_entity(&ent_path));
         let Some(world_from_obj) = world_from_obj else {
-            return Err(format!("Couldn't fetch pinhole extrinsics at {pinhole_ent_path:?}"));
+            return Err(format!(
+                "Couldn't fetch pinhole extrinsics at {pinhole_ent_path:?}"
+            ));
         };
 
         let Some([height, width, _]) = tensor.image_height_width_channels() else {
@@ -297,47 +297,47 @@ impl ImagesPart {
         let tensor_stats = ctx.cache.tensor_stats(tensor).clone();
         let debug_name = ent_path.to_string();
-        let depth_texture = crate::gpu_bridge
-            ::tensor_to_gpu(ctx.render_ctx, &debug_name, tensor, &tensor_stats, &annotations)
-            .map_err(|_| format!("Couldn't create depth texture"))?;
+        let depth_texture = crate::gpu_bridge::tensor_to_gpu(
+            ctx.render_ctx,
+            &debug_name,
+            tensor,
+            &tensor_stats,
+            &annotations,
+        )
+        .map_err(|_| format!("Couldn't create depth texture"))?;
 
         let depth_from_world_scale = *properties.depth_from_world_scale.get();
         let world_depth_from_texture_depth = 1.0 / depth_from_world_scale;
 
         let mut colormap = match *properties.color_mapper.get() {
-            re_data_store::ColorMapper::Colormap(colormap) =>
-                match colormap {
-                    re_data_store::Colormap::Grayscale => Colormap::Grayscale,
-                    re_data_store::Colormap::Turbo => Colormap::Turbo,
-                    re_data_store::Colormap::Viridis => Colormap::Viridis,
-                    re_data_store::Colormap::Plasma => Colormap::Plasma,
-                    re_data_store::Colormap::Magma => Colormap::Magma,
-                    re_data_store::Colormap::Inferno => Colormap::Inferno,
-                }
+            re_data_store::ColorMapper::Colormap(colormap) => match colormap {
+                re_data_store::Colormap::Grayscale => Colormap::Grayscale,
+                re_data_store::Colormap::Turbo => Colormap::Turbo,
+                re_data_store::Colormap::Viridis => Colormap::Viridis,
+                re_data_store::Colormap::Plasma => Colormap::Plasma,
+                re_data_store::Colormap::Magma => Colormap::Magma,
+                re_data_store::Colormap::Inferno => Colormap::Inferno,
+            },
             re_data_store::ColorMapper::AlbedoTexture => Colormap::AlbedoTexture,
         };
         let mut albedo_texture: Option<ColormappedTexture> = None;
         if colormap == Colormap::AlbedoTexture {
-            let tensor = properties.albedo_texture
-                .as_ref()
-                .and_then(|path| {
-                    query_latest_single::<Tensor>(&ctx.log_db.entity_db, path, &ctx.current_query())
-                });
+            let tensor = properties.albedo_texture.as_ref().and_then(|path| {
+                query_latest_single::<Tensor>(&ctx.log_db.entity_db, path, &ctx.current_query())
+            });
             if let Some(tensor) = tensor {
-                albedo_texture = match
-                    crate::gpu_bridge::tensor_to_gpu(
-                        ctx.render_ctx,
-                        &debug_name,
-                        &tensor
-                            .try_into()
-                            .map_err(|_| format!("Couldn't convert albedo texture to RGB"))?,
-                        &tensor_stats,
-                        &annotations
-                    )
-                {
+                albedo_texture = match crate::gpu_bridge::tensor_to_gpu(
+                    ctx.render_ctx,
+                    &debug_name,
+                    &tensor
+                        .try_into()
+                        .map_err(|_| format!("Couldn't convert albedo texture to RGB"))?,
+                    &tensor_stats,
+                    &annotations,
+                ) {
                    anyhow::Result::Ok(texture) => Some(texture),
-                    anyhow::Result::Err(_) => { None }
+                    anyhow::Result::Err(_) => None,
                 };
             } else {
                 re_log::debug_once!(
                     "Albedo texture not found. Falling back to Turbo colormap."
                 );
                 colormap = Colormap::Turbo;
                 // Would need some way to update the space view blueprint properties here - to reflect the change in colormap.
-                // For now set the matching default in selection_panel.rs 
+                // For now set the matching default in selection_panel.rs
             }
         }
 
@@ -358,7 +358,7 @@ impl ImagesPart {
         let radius_scale = *properties.backproject_radius_scale.get();
         let point_radius_from_world_depth = radius_scale * pixel_width_from_depth;
 
-        let max_data_value = if let Some((_min, max)) = ctx.cache.tensor_stats(tensor).range {
+        let mut max_data_value = if let Some((_min, max)) = ctx.cache.tensor_stats(tensor).range {
             max as f32
         } else {
             // This could only happen for Jpegs, and we should never get here.
@@ -369,6 +369,9 @@ impl ImagesPart {
                 _ => 10.0,
             }
         };
+        if let Some(depth_max) = tensor.inner().depth_max {
+            max_data_value = depth_max as f32;
+        }
 
         scene.primitives.depth_clouds.clouds.push(DepthCloud {
             world_from_obj,
@@ -395,7 +398,7 @@ impl ScenePart for ImagesPart {
         ctx: &mut ViewerContext<'_>,
         query: &SceneQuery<'_>,
         transforms: &TransformCache,
-        highlights: &SpaceViewHighlights
+        highlights: &SpaceViewHighlights,
     ) {
         crate::profile_scope!("ImagesPart");
 
@@ -404,30 +407,29 @@ impl ScenePart for ImagesPart {
             continue;
         };
 
-            match
-                query_primary_with_history::<Tensor, 3>(
-                    &ctx.log_db.entity_db.data_store,
-                    &query.timeline,
-                    &query.latest_at,
-                    &props.visible_history,
-                    ent_path,
-                    [Tensor::name(), InstanceKey::name(), ColorRGBA::name()]
-                ).and_then(|entities| {
-                    for entity in entities {
-                        Self::process_entity_view(
-                            &entity,
-                            scene,
-                            ctx,
-                            transforms,
-                            &mut props,
-                            ent_path,
-                            world_from_obj,
-                            highlights
-                        )?;
-                    }
-                    Ok(())
-                })
-            {
+            match query_primary_with_history::<Tensor, 3>(
+                &ctx.log_db.entity_db.data_store,
+                &query.timeline,
+                &query.latest_at,
+                &props.visible_history,
+                ent_path,
+                [Tensor::name(), InstanceKey::name(), ColorRGBA::name()],
+            )
+            .and_then(|entities| {
+                for entity in entities {
+                    Self::process_entity_view(
+                        &entity,
+                        scene,
+                        ctx,
+                        transforms,
+                        &mut props,
+                        ent_path,
+                        world_from_obj,
+                        highlights,
+                    )?;
+                }
+                Ok(())
+            }) {
                 Ok(_) | Err(QueryError::PrimaryNotFound) => {}
                 Err(err) => {
                     re_log::error_once!("Unexpected error querying {ent_path:?}: {err}");
diff --git a/rerun_py/depthai_viewer/_backend/device.py b/rerun_py/depthai_viewer/_backend/device.py
index 47dc8880130e..5dd3615e1fc7 100644
--- a/rerun_py/depthai_viewer/_backend/device.py
+++ b/rerun_py/depthai_viewer/_backend/device.py
@@ -32,7 +32,7 @@
     get_size_from_resolution,
     size_to_resolution,
 )
-from depthai_viewer._backend.device_defaults import oak_t_default
+from depthai_viewer._backend.device_defaults import oak_d_sr_poe_default, oak_t_default
 from depthai_viewer._backend.messages import (
     ErrorMessage,
     InfoMessage,
@@ -223,7 +223,8 @@ def get_device_properties(self) -> DeviceProperties:
                     [
                         size_to_resolution.get((w, h), None)
                         for w, h in ordered_resolutions
-                        if (w * h) <= (biggest_height * biggest_width)
+                        if (w, h)
+                        in [(conf.width, conf.height) for conf in cam.configs if conf.type == prioritized_type]
                     ],
                 )
             )
@@ -394,6 +395,8 @@ def update_pipeline(self, runtime_only: bool) -> Message:
         if config.auto:
             if self._oak.device.getDeviceName() == "OAK-T":
                 config = oak_t_default.config
+            elif self._oak.device.getDeviceName() == "OAK-D-SR-POE":
+                config = oak_d_sr_poe_default.config
             else:
                 self._create_auto_pipeline_config(config)
diff --git a/rerun_py/depthai_viewer/_backend/device_configuration.py b/rerun_py/depthai_viewer/_backend/device_configuration.py
index 1ae0d63b0eb7..4fc43ac4b255 100644
--- a/rerun_py/depthai_viewer/_backend/device_configuration.py
+++ b/rerun_py/depthai_viewer/_backend/device_configuration.py
@@ -235,10 +235,10 @@ class ToFConfig(BaseModel):  # type: ignore[misc]
     phase_unwrapping_level: int = 4
     phase_unwrap_error_threshold: int = 100
     enable_phase_unwrapping: Optional[bool] = True
-    enable_fppn_correction: Optional[bool] = None
-    enable_optical_correction: Optional[bool] = None
-    enable_temperature_correction: Optional[bool] = None
-    enable_wiggle_correction: Optional[bool] = None
+    enable_fppn_correction: Optional[bool] = True
+    enable_optical_correction: Optional[bool] = True
+    enable_temperature_correction: Optional[bool] = False
+    enable_wiggle_correction: Optional[bool] = True
 
     class Config:
         arbitrary_types_allowed = True
diff --git a/rerun_py/depthai_viewer/_backend/device_defaults/oak_d_sr_poe_default.py b/rerun_py/depthai_viewer/_backend/device_defaults/oak_d_sr_poe_default.py
new file mode 100644
index 000000000000..2ba35f721d96
--- /dev/null
+++ b/rerun_py/depthai_viewer/_backend/device_defaults/oak_d_sr_poe_default.py
@@ -0,0 +1,36 @@
+import depthai as dai
+from depthai_viewer._backend.device_configuration import (
+    CameraConfiguration,
+    CameraSensorResolution,
+    PipelineConfiguration,
+)
+
+config = PipelineConfiguration(
+    cameras=[
+        CameraConfiguration(
+            fps=15,
+            resolution=CameraSensorResolution.THE_480_P,
+            kind=dai.CameraSensorType.TOF,
+            board_socket=dai.CameraBoardSocket.CAM_A,
+            name="ToF",
+        ),
+        CameraConfiguration(
+            fps=15,
+            resolution=CameraSensorResolution.THE_720_P,
+            kind=dai.CameraSensorType.COLOR,
+            board_socket=dai.CameraBoardSocket.CAM_B,
+            stream_enabled=True,
+            name="Left",
+        ),
+        CameraConfiguration(
+            fps=15,
+            resolution=CameraSensorResolution.THE_720_P,
+            kind=dai.CameraSensorType.COLOR,
+            board_socket=dai.CameraBoardSocket.CAM_C,
+            stream_enabled=True,
+            name="Right",
+        ),
+    ],
+    depth=None,
+    ai_model=None,
+)
diff --git a/rerun_py/depthai_viewer/_backend/main.py b/rerun_py/depthai_viewer/_backend/main.py
index 31052ec52821..7a876bf51d68 100644
--- a/rerun_py/depthai_viewer/_backend/main.py
+++ b/rerun_py/depthai_viewer/_backend/main.py
@@ -151,7 +151,9 @@ def handle_action(self, action: Action, **kwargs) -> Message:  # type: ignore[no-untyped-def]
         if self._device and self._device._oak:
             if tof_component := self._device.get_tof_component():
                 if tof_config := kwargs.get("tof_config", None):
-                    tof_component.control.send_controls(tof_config.to_dai())
+                    tof_cfg = tof_config.to_dai()
+                    self.store.set_tof_config(tof_cfg)
+                    tof_component.control.send_controls(tof_cfg)
                     return InfoMessage("ToF config updated successfully")
                 return ErrorMessage("ToF config not provided")
             return ErrorMessage("Failed to update ToF config. ToF node wasn't found.")
diff --git a/rerun_py/depthai_viewer/_backend/packet_handler.py b/rerun_py/depthai_viewer/_backend/packet_handler.py
index c3145536a263..201f607c8fba 100644
--- a/rerun_py/depthai_viewer/_backend/packet_handler.py
+++ b/rerun_py/depthai_viewer/_backend/packet_handler.py
@@ -219,7 +219,10 @@ def _on_tof_packet(
         viewer.log_rigid3(
             f"{component.camera_socket.name}/transform", child_from_parent=([0, 0, 0], [1, 0, 0, 0]), xyz="RDF"
         )
-        intrinsics = np.array([[471.451, 0.0, 317.897], [0.0, 471.539, 245.027], [0.0, 0.0, 1.0]])
+        try:
+            intrinsics = self._get_camera_intrinsics(component.camera_socket, 640, 480)
+        except Exception:
+            intrinsics = np.array([[471.451, 0.0, 317.897], [0.0, 471.539, 245.027], [0.0, 0.0, 1.0]])
         viewer.log_pinhole(
             f"{component.camera_socket.name}/transform/tof",
             child_from_parent=intrinsics,
@@ -228,7 +231,21 @@ def _on_tof_packet(
         )
 
         path = f"{component.camera_socket.name}/transform/tof/Depth"
-        viewer.log_depth_image(path, depth_frame, meter=1e3)
+        viewer.log_depth_image(
+            path,
+            depth_frame,
+            meter=1e3,
+            min=200.0,
+            max=1874
+            * (
+                (
+                    self.store.tof_config.phaseUnwrappingLevel  # type: ignore[attr-defined]
+                    if self.store.tof_config
+                    else 4.0
+                )
+                + 1
+            ),
+        )
 
     def _on_detections(self, packet: DetectionPacket, component: NNComponent) -> None:
         rects, colors, labels = self._detections_to_rects_colors_labels(packet, component.get_labels())
diff --git a/rerun_py/depthai_viewer/_backend/store.py b/rerun_py/depthai_viewer/_backend/store.py
index f2fe3944f227..392e4fb68a1e 100644
--- a/rerun_py/depthai_viewer/_backend/store.py
+++ b/rerun_py/depthai_viewer/_backend/store.py
@@ -1,6 +1,8 @@
 from multiprocessing import Queue
 from typing import List, Optional
 
+import depthai as dai
+
 from depthai_viewer._backend.device_configuration import PipelineConfiguration
 from depthai_viewer._backend.messages import Message
 from depthai_viewer._backend.topic import Topic
@@ -15,6 +17,7 @@ class Store:
     _send_message_queue: Queue  # type: ignore[type-arg]
     _dot_brightness: int = 0
     _flood_brightness: int = 0
+    _tof_config: Optional[dai.RawToFConfig] = None
 
     def __init__(self) -> None:
         self._send_message_queue = Queue()
@@ -31,6 +34,9 @@ def set_dot_brightness(self, brightness: int) -> None:
     def set_flood_brightness(self, brightness: int) -> None:
         self._flood_brightness = brightness
 
+    def set_tof_config(self, tof_config: dai.RawToFConfig) -> None:
+        self._tof_config = tof_config
+
     def reset(self) -> None:
         self._pipeline_config = None
         self._subscriptions = []
@@ -51,5 +57,9 @@ def dot_brightness(self) -> int:
     def flood_brightness(self) -> int:
         return self._flood_brightness
 
+    @property
+    def tof_config(self) -> Optional[dai.RawToFConfig]:
+        return self._tof_config
+
     def send_message_to_frontend(self, message: Message) -> None:
         self._send_message_queue.put(message)
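The magic numbers in the `log_depth_image` call above encode the ToF working range: 1874 appears to be the sensor's single-wrap range in millimeters (an assumption read off the constant; the patch doesn't state its physical meaning), and each phase-unwrapping level adds one more wrap. A sketch of what the call boils down to with the default config (the `depthai_viewer` import name mirrors this file's `viewer` usage and is otherwise assumed):

```python
import numpy as np
import depthai_viewer as viewer  # assumed top-level import name


def tof_max_depth_mm(phase_unwrapping_level: float = 4.0) -> float:
    # 1874 mm per phase wrap (assumption); each unwrapping level adds one wrap.
    return 1874 * (phase_unwrapping_level + 1)


depth_frame = (np.random.rand(480, 640) * 9370).astype(np.uint16)  # fake depth, in mm

viewer.log_depth_image(
    "CAM_A/transform/tof/Depth",
    depth_frame,
    meter=1e3,                  # uint16 values are millimeters
    min=200.0,                  # near clip: ignore readings closer than 20 cm
    max=tof_max_depth_mm(4.0),  # far clip: 1874 * (4 + 1) = 9370 mm, about 9.4 m
)
```

With the range pinned this way, the viewer's depth colormap and backprojection no longer jump around with per-frame min/max statistics.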
diff --git a/rerun_py/depthai_viewer/components/tensor.py b/rerun_py/depthai_viewer/components/tensor.py
index a525aa8040ac..4367bcb48ab0 100644
--- a/rerun_py/depthai_viewer/components/tensor.py
+++ b/rerun_py/depthai_viewer/components/tensor.py
@@ -75,6 +75,8 @@ def from_numpy(
         meaning: bindings.TensorDataMeaning = None,
         meter: float | None = None,
         unit: str | None = None,
+        depth_min: float | None = None,
+        depth_max: float | None = None,
     ) -> TensorArray:
         """Build a `TensorArray` from an numpy array."""
         # Build a random tensor_id
@@ -122,9 +124,17 @@ def from_numpy(
         meter = pa.array([meter], mask=[False], type=pa.float32())
         unit = pa.array([unit if unit is not None else ""], type=pa.string())
 
+        if depth_min is not None:
+            depth_min = pa.array([depth_min], type=pa.float64())
+        else:
+            depth_min = pa.array([0.0], mask=[True], type=pa.float64())
+        if depth_max is not None:
+            depth_max = pa.array([depth_max], type=pa.float64())
+        else:
+            depth_max = pa.array([0.0], mask=[True], type=pa.float64())
         storage = pa.StructArray.from_arrays(
-            [tensor_id, shape, data, meaning, meter, colormap, unit],
+            [tensor_id, shape, data, meaning, meter, colormap, unit, depth_min, depth_max],
            fields=list(TensorType.storage_type),
         ).cast(TensorType.storage_type)
         storage.validate(full=True)
diff --git a/rerun_py/depthai_viewer/log/image.py b/rerun_py/depthai_viewer/log/image.py
index a2ba0c32e04f..8040dad29fe0 100644
--- a/rerun_py/depthai_viewer/log/image.py
+++ b/rerun_py/depthai_viewer/log/image.py
@@ -89,6 +89,8 @@ def log_depth_image(
     entity_path: str,
     image: Tensor,
     *,
+    min: Optional[float] = None,
+    max: Optional[float] = None,
     meter: Optional[float] = None,
     ext: Optional[Dict[str, Any]] = None,
     timeless: bool = False,
@@ -108,6 +110,10 @@ def log_depth_image(
         Path to the image in the space hierarchy.
     image:
         A [Tensor][rerun.log.tensor.Tensor] representing the depth image to log.
+    min:
+        Optional minimum depth value.
+    max:
+        Optional maximum depth value.
     meter:
         How long is a meter in the given dtype?
         For instance: with uint16, perhaps meter=1000 which would mean
@@ -144,6 +150,8 @@ def log_depth_image(
         ext=ext,
         timeless=timeless,
         meaning=bindings.TensorDataMeaning.Depth,
+        depth_min=min,
+        depth_max=max,
     )
diff --git a/rerun_py/depthai_viewer/log/tensor.py b/rerun_py/depthai_viewer/log/tensor.py
index e24e46a9a999..d5a5db90cd97 100644
--- a/rerun_py/depthai_viewer/log/tensor.py
+++ b/rerun_py/depthai_viewer/log/tensor.py
@@ -88,6 +88,8 @@ def _log_tensor(
     encoding: Optional[ImageEncoding] = None,
     colormap: Optional[Colormap] = None,
     unit: Optional[str] = None,
+    depth_min: Optional[float] = None,
+    depth_max: Optional[float] = None,
 ) -> None:
     """Log a general tensor, perhaps with named dimensions."""
 
@@ -130,7 +132,9 @@ def _log_tensor(
     instanced: Dict[str, Any] = {}
     splats: Dict[str, Any] = {}
 
-    instanced["rerun.tensor"] = TensorArray.from_numpy(tensor, encoding, colormap, names, meaning, meter, unit)
+    instanced["rerun.tensor"] = TensorArray.from_numpy(
+        tensor, encoding, colormap, names, meaning, meter, unit, depth_min, depth_max
+    )
 
     if ext:
         _add_extension_components(instanced, splats, ext, None)
diff --git a/rerun_py/depthai_viewer/requirements.txt b/rerun_py/depthai_viewer/requirements.txt
index 431a5f5235b2..259ae947a792 100644
--- a/rerun_py/depthai_viewer/requirements.txt
+++ b/rerun_py/depthai_viewer/requirements.txt
@@ -1,10 +1,10 @@
 numpy>=1.23
-pyarrow==10.0.1
+pyarrow==16.0.0
 setuptools
 ahrs
 # depthai_sdk conflicts with depthai, so it's installed seperatelly in __main__.py
 --extra-index-url https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local
-depthai==2.25.0.0.dev0+dca8245b0b1c52349b73e5a0bf23589b7c4ac3e8
+depthai==2.25.0.0.dev0+f1cd4d974e041f1b3ea84480afcdc5a8e3975299
 websockets
 pydantic==1.9
 deprecated
diff --git a/rerun_py/pyproject.toml b/rerun_py/pyproject.toml
index c679bf18914e..e958106cb131 100644
--- a/rerun_py/pyproject.toml
+++ b/rerun_py/pyproject.toml
@@ -3,7 +3,7 @@ build-backend = "maturin"
 requires = ["maturin>=0.14.0,<0.15"]
 
 [project]
-dependencies = ["deprecated", "numpy>=1.23", "pyarrow==10.0.1", "setuptools"]
+dependencies = ["deprecated", "numpy>=1.23", "pyarrow==16.0.0", "setuptools"]
 classifiers = [
     "Programming Language :: Rust",
     "Programming Language :: Python :: Implementation :: CPython",
diff --git a/rerun_py/src/python_bridge.rs b/rerun_py/src/python_bridge.rs
index ed1e0387fa9c..54d6fce55392 100644
--- a/rerun_py/src/python_bridge.rs
+++ b/rerun_py/src/python_bridge.rs
@@ -945,6 +945,8 @@ fn log_image_file(
         meter: None,
         colormap: TensorColormap::None,
         unit: None,
+        depth_min: None,
+        depth_max: None,
     };
 
     let row = DataRow::from_cells1(
diff --git a/scripts/fix_py_lint.sh b/scripts/fix_py_lint.sh
new file mode 100755
index 000000000000..0de6c2cd78a9
--- /dev/null
+++ b/scripts/fix_py_lint.sh
@@ -0,0 +1,2 @@
+black --config rerun_py/pyproject.toml examples rerun_py scripts
+ruff check --config rerun_py/pyproject.toml examples rerun_py scripts --fix