diff --git a/CHANGELOG.md b/CHANGELOG.md index 0993a62ea6bd..0f7a42b815fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ # Depthai Viewer changelog +## 0.2.1 + +- Fixed point cloud coloring for RGB albedo textures. +- Better device defaults. +- Improved OAK-D-SR-POE support. +- Fixed temperature readings on OAK-T. + ## 0.2.0 - Fixed point cloud colormap normalization diff --git a/Cargo.lock b/Cargo.lock index 7a795a15c322..dea60f8edfb6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -174,7 +174,7 @@ checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "api_demo" -version = "0.2.0" +version = "0.2.1" dependencies = [ "anyhow", "clap 4.4.11", @@ -1390,7 +1390,7 @@ dependencies = [ [[package]] name = "depthai-viewer" -version = "0.2.0" +version = "0.2.1" dependencies = [ "anyhow", "backtrace", @@ -1495,7 +1495,7 @@ dependencies = [ [[package]] name = "dna" -version = "0.2.0" +version = "0.2.1" dependencies = [ "depthai-viewer", "itertools", @@ -3105,7 +3105,7 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "minimal" -version = "0.2.0" +version = "0.2.1" dependencies = [ "depthai-viewer", ] @@ -3118,7 +3118,7 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "minimal_options" -version = "0.2.0" +version = "0.2.1" dependencies = [ "anyhow", "clap 4.4.11", @@ -3599,7 +3599,7 @@ dependencies = [ [[package]] name = "objectron" -version = "0.2.0" +version = "0.2.1" dependencies = [ "anyhow", "clap 4.4.11", @@ -4324,7 +4324,7 @@ checksum = "f2ff9a1f06a88b01621b7ae906ef0211290d1c8a168a15542486a8f61c0833b9" [[package]] name = "raw_mesh" -version = "0.2.0" +version = "0.2.1" dependencies = [ "anyhow", "bytes", @@ -4371,7 +4371,7 @@ dependencies = [ [[package]] name = "re_analytics" -version = "0.2.0" +version = "0.2.1" dependencies = [ "anyhow", "crossbeam", @@ -4392,7 +4392,7 @@ dependencies = [ [[package]] name = 
"re_arrow_store" -version = "0.2.0" +version = "0.2.1" dependencies = [ "ahash", "anyhow", @@ -4419,7 +4419,7 @@ dependencies = [ [[package]] name = "re_build_build_info" -version = "0.2.0" +version = "0.2.1" dependencies = [ "anyhow", "time", @@ -4427,18 +4427,18 @@ dependencies = [ [[package]] name = "re_build_info" -version = "0.2.0" +version = "0.2.1" [[package]] name = "re_build_web_viewer" -version = "0.2.0" +version = "0.2.1" dependencies = [ "cargo_metadata", ] [[package]] name = "re_data_store" -version = "0.2.0" +version = "0.2.1" dependencies = [ "ahash", "criterion", @@ -4461,14 +4461,14 @@ dependencies = [ [[package]] name = "re_error" -version = "0.2.0" +version = "0.2.1" dependencies = [ "anyhow", ] [[package]] name = "re_format" -version = "0.2.0" +version = "0.2.1" dependencies = [ "arrow2", "arrow2_convert", @@ -4478,7 +4478,7 @@ dependencies = [ [[package]] name = "re_int_histogram" -version = "0.2.0" +version = "0.2.1" dependencies = [ "criterion", "insta", @@ -4489,7 +4489,7 @@ dependencies = [ [[package]] name = "re_log" -version = "0.2.0" +version = "0.2.1" dependencies = [ "env_logger", "js-sys", @@ -4502,7 +4502,7 @@ dependencies = [ [[package]] name = "re_log_encoding" -version = "0.2.0" +version = "0.2.1" dependencies = [ "criterion", "ehttp", @@ -4527,7 +4527,7 @@ dependencies = [ [[package]] name = "re_log_types" -version = "0.2.0" +version = "0.2.1" dependencies = [ "ahash", "array-init", @@ -4566,7 +4566,7 @@ dependencies = [ [[package]] name = "re_memory" -version = "0.2.0" +version = "0.2.1" dependencies = [ "ahash", "backtrace", @@ -4586,7 +4586,7 @@ dependencies = [ [[package]] name = "re_query" -version = "0.2.0" +version = "0.2.1" dependencies = [ "arrow2", "criterion", @@ -4604,7 +4604,7 @@ dependencies = [ [[package]] name = "re_renderer" -version = "0.2.0" +version = "0.2.1" dependencies = [ "ahash", "anyhow", @@ -4657,7 +4657,7 @@ dependencies = [ [[package]] name = "re_sdk" -version = "0.2.0" +version = "0.2.1" dependencies 
= [ "arrow2_convert", "document-features", @@ -4677,7 +4677,7 @@ dependencies = [ [[package]] name = "re_sdk_comms" -version = "0.2.0" +version = "0.2.1" dependencies = [ "ahash", "anyhow", @@ -4693,7 +4693,7 @@ dependencies = [ [[package]] name = "re_smart_channel" -version = "0.2.0" +version = "0.2.1" dependencies = [ "crossbeam", "instant", @@ -4701,7 +4701,7 @@ dependencies = [ [[package]] name = "re_string_interner" -version = "0.2.0" +version = "0.2.1" dependencies = [ "ahash", "nohash-hasher", @@ -4712,7 +4712,7 @@ dependencies = [ [[package]] name = "re_tensor_ops" -version = "0.2.0" +version = "0.2.1" dependencies = [ "ahash", "ndarray", @@ -4722,7 +4722,7 @@ dependencies = [ [[package]] name = "re_tuid" -version = "0.2.0" +version = "0.2.1" dependencies = [ "arrow2", "arrow2_convert", @@ -4736,7 +4736,7 @@ dependencies = [ [[package]] name = "re_ui" -version = "0.2.0" +version = "0.2.1" dependencies = [ "eframe", "egui", @@ -4755,7 +4755,7 @@ dependencies = [ [[package]] name = "re_viewer" -version = "0.2.0" +version = "0.2.1" dependencies = [ "ahash", "anyhow", @@ -4830,7 +4830,7 @@ dependencies = [ [[package]] name = "re_web_viewer_server" -version = "0.2.0" +version = "0.2.1" dependencies = [ "cargo_metadata", "ctrlc", @@ -4847,7 +4847,7 @@ dependencies = [ [[package]] name = "re_ws_comms" -version = "0.2.0" +version = "0.2.1" dependencies = [ "anyhow", "bincode", @@ -4987,7 +4987,7 @@ dependencies = [ [[package]] name = "rerun_py" -version = "0.2.0" +version = "0.2.1" dependencies = [ "arrow2", "depthai-viewer", @@ -5106,7 +5106,7 @@ dependencies = [ [[package]] name = "run_wasm" -version = "0.2.0" +version = "0.2.1" dependencies = [ "cargo-run-wasm", "pico-args", @@ -5876,7 +5876,7 @@ dependencies = [ [[package]] name = "test_image_memory" -version = "0.2.0" +version = "0.2.1" dependencies = [ "depthai-viewer", "mimalloc", diff --git a/Cargo.toml b/Cargo.toml index bf847640db09..526b9d20f581 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,39 +16,39 
@@ include = ["../../LICENSE-APACHE", "../../LICENSE-MIT", "**/*.rs", "Cargo.toml"] license = "MIT OR Apache-2.0" repository = "https://github.com/rerun-io/rerun" rust-version = "1.74" -version = "0.2.0" +version = "0.2.1" [workspace.dependencies] # When using alpha-release, always use exact version, e.g. `version = "=0.x.y-alpha.z" # This is because we treat alpha-releases as incompatible, but semver doesn't. # In particular: if we compile rerun 0.3.0-alpha.0 we only want it to use # re_log_types 0.3.0-alpha.0, NOT 0.3.0-alpha.4 even though it is newer and semver-compatible. -re_sdk_comms = { path = "crates/re_sdk_comms", version = "0.2.0" } -re_analytics = { path = "crates/re_analytics", version = "0.2.0" } -re_arrow_store = { path = "crates/re_arrow_store", version = "0.2.0" } -re_build_build_info = { path = "crates/re_build_build_info", version = "0.2.0" } -re_build_info = { path = "crates/re_build_info", version = "0.2.0" } -re_build_web_viewer = { path = "crates/re_build_web_viewer", version = "0.2.0", default-features = false } -re_data_store = { path = "crates/re_data_store", version = "0.2.0" } -re_error = { path = "crates/re_error", version = "0.2.0" } -re_format = { path = "crates/re_format", version = "0.2.0" } -re_int_histogram = { path = "crates/re_int_histogram", version = "0.2.0" } -re_log = { path = "crates/re_log", version = "0.2.0" } -re_log_encoding = { path = "crates/re_log_encoding", version = "0.2.0" } -re_log_types = { path = "crates/re_log_types", version = "0.2.0" } -re_memory = { path = "crates/re_memory", version = "0.2.0" } -re_query = { path = "crates/re_query", version = "0.2.0" } -re_renderer = { path = "crates/re_renderer", version = "0.2.0", default-features = false } -re_sdk = { path = "crates/re_sdk", version = "0.2.0" } -re_smart_channel = { path = "crates/re_smart_channel", version = "0.2.0" } -re_string_interner = { path = "crates/re_string_interner", version = "0.2.0" } -re_tensor_ops = { path = "crates/re_tensor_ops", 
version = "0.2.0" } -re_tuid = { path = "crates/re_tuid", version = "0.2.0" } -re_ui = { path = "crates/re_ui", version = "0.2.0" } -re_viewer = { path = "crates/re_viewer", version = "0.2.0", default-features = false } -re_web_viewer_server = { path = "crates/re_web_viewer_server", version = "0.2.0" } -re_ws_comms = { path = "crates/re_ws_comms", version = "0.2.0" } -depthai-viewer = { path = "crates/rerun", version = "0.2.0" } +re_sdk_comms = { path = "crates/re_sdk_comms", version = "0.2.1" } +re_analytics = { path = "crates/re_analytics", version = "0.2.1" } +re_arrow_store = { path = "crates/re_arrow_store", version = "0.2.1" } +re_build_build_info = { path = "crates/re_build_build_info", version = "0.2.1" } +re_build_info = { path = "crates/re_build_info", version = "0.2.1" } +re_build_web_viewer = { path = "crates/re_build_web_viewer", version = "0.2.1", default-features = false } +re_data_store = { path = "crates/re_data_store", version = "0.2.1" } +re_error = { path = "crates/re_error", version = "0.2.1" } +re_format = { path = "crates/re_format", version = "0.2.1" } +re_int_histogram = { path = "crates/re_int_histogram", version = "0.2.1" } +re_log = { path = "crates/re_log", version = "0.2.1" } +re_log_encoding = { path = "crates/re_log_encoding", version = "0.2.1" } +re_log_types = { path = "crates/re_log_types", version = "0.2.1" } +re_memory = { path = "crates/re_memory", version = "0.2.1" } +re_query = { path = "crates/re_query", version = "0.2.1" } +re_renderer = { path = "crates/re_renderer", version = "0.2.1", default-features = false } +re_sdk = { path = "crates/re_sdk", version = "0.2.1" } +re_smart_channel = { path = "crates/re_smart_channel", version = "0.2.1" } +re_string_interner = { path = "crates/re_string_interner", version = "0.2.1" } +re_tensor_ops = { path = "crates/re_tensor_ops", version = "0.2.1" } +re_tuid = { path = "crates/re_tuid", version = "0.2.1" } +re_ui = { path = "crates/re_ui", version = "0.2.1" } +re_viewer = { path = 
"crates/re_viewer", version = "0.2.1", default-features = false } +re_web_viewer_server = { path = "crates/re_web_viewer_server", version = "0.2.1" } +re_ws_comms = { path = "crates/re_ws_comms", version = "0.2.1" } +depthai-viewer = { path = "crates/rerun", version = "0.2.1" } ahash = "0.8" anyhow = "1.0" diff --git a/crates/re_log_types/src/component_types/tensor.rs b/crates/re_log_types/src/component_types/tensor.rs index 7ccdc8bf695f..abff328385cc 100644 --- a/crates/re_log_types/src/component_types/tensor.rs +++ b/crates/re_log_types/src/component_types/tensor.rs @@ -163,12 +163,14 @@ pub enum TensorData { F64(Buffer), JPEG(BinaryBuffer), NV12(BinaryBuffer), + Yuv420p(BinaryBuffer), } impl Into> for &TensorData { fn into(self) -> Option { match self { &TensorData::NV12(_) => Some(TextureEncoding::Nv12), + &TensorData::Yuv420p(_) => Some(TextureEncoding::Yuv420p), _ => None, } } @@ -177,7 +179,7 @@ impl Into> for &TensorData { impl TensorData { pub fn dtype(&self) -> TensorDataType { match self { - Self::U8(_) | Self::JPEG(_) | Self::NV12(_) => TensorDataType::U8, + Self::U8(_) | Self::JPEG(_) | Self::NV12(_) | Self::Yuv420p(_) => TensorDataType::U8, Self::U16(_) => TensorDataType::U16, Self::U32(_) => TensorDataType::U32, Self::U64(_) => TensorDataType::U64, @@ -192,7 +194,7 @@ impl TensorData { pub fn size_in_bytes(&self) -> usize { match self { - Self::U8(buf) | Self::JPEG(buf) | Self::NV12(buf) => buf.0.len(), + Self::U8(buf) | Self::JPEG(buf) | Self::NV12(buf) | Self::Yuv420p(buf) => buf.0.len(), Self::U16(buf) => buf.len(), Self::U32(buf) => buf.len(), Self::U64(buf) => buf.len(), @@ -222,7 +224,7 @@ impl TensorData { | Self::F32(_) | Self::F64(_) => false, - Self::JPEG(_) | Self::NV12(_) => true, + Self::JPEG(_) | Self::NV12(_) | Self::Yuv420p(_) => true, } } } @@ -242,6 +244,7 @@ impl std::fmt::Debug for TensorData { Self::F64(_) => write!(f, "F64({} bytes)", self.size_in_bytes()), Self::JPEG(_) => write!(f, "JPEG({} bytes)", self.size_in_bytes()), 
Self::NV12(_) => write!(f, "NV12({} bytes)", self.size_in_bytes()), + Self::Yuv420p(_) => write!(f, "Yuv420p({} bytes)", self.size_in_bytes()), } } } @@ -441,7 +444,7 @@ impl Tensor { #[inline] pub fn real_shape(&self) -> Vec { match &self.data { - &TensorData::NV12(_) => { + &TensorData::NV12(_) | &TensorData::Yuv420p(_) => { let shape = self.shape.as_slice(); match shape { [y, x] => { @@ -466,7 +469,7 @@ impl Tensor { /// Takes into account the encoding pub fn image_height_width_channels(&self) -> Option<[u64; 3]> { match &self.data { - &TensorData::NV12(_) => { + &TensorData::NV12(_) | &TensorData::Yuv420p(_) => { let shape = self.real_shape(); if let [y, x] = shape.as_slice() { Some([y.size, x.size, 1]) @@ -541,6 +544,7 @@ impl Tensor { // You would need to call get once for each channel. // That would meant that you have to manually supply the channel, so using get_nv12_pixel is easier. TensorData::NV12(_) => None, + TensorData::Yuv420p(_) => None, TensorData::JPEG(_) => None, // Too expensive to unpack here. 
} } @@ -574,6 +578,36 @@ impl Tensor { } } + pub fn get_yuv420p_pixel(&self, index: &[u64; 2]) -> Option<[TensorElement; 3]> { + let [row, col] = index; + match self.real_shape().as_slice() { + [h, w] => match &self.data { + TensorData::Yuv420p(buf) => { + let uv_offset = (w.size * h.size) as u64; + let y = ((buf[(*row * w.size + *col) as usize] as f64) - 16.0) / 219.0; + let u = ((buf[(uv_offset + (*row / 2) * w.size + *col / 2) as usize] as f64) + - 128.0) + / 224.0; + let v = ((buf[((uv_offset + (*row / 2) * w.size + *col / 2) as usize) + 1] + as f64) + - 128.0) + / 224.0; + let r = y + 1.402 * v; + let g = y - 0.344 * u + 0.714 * v; + let b = y + 1.772 * u; + + Some([ + TensorElement::U8(f64::clamp(r * 255.0, 0.0, 255.0) as u8), + TensorElement::U8(f64::clamp(g * 255.0, 0.0, 255.0) as u8), + TensorElement::U8(f64::clamp(b * 255.0, 0.0, 255.0) as u8), + ]) + } + _ => None, + }, + _ => None, + } + } + pub fn dtype(&self) -> TensorDataType { self.data.dtype() } @@ -1016,7 +1050,7 @@ impl TryFrom for DecodedTensor { | TensorData::I64(_) | TensorData::F32(_) | TensorData::F64(_) - | TensorData::NV12(_) => Ok(Self(tensor)), + | TensorData::NV12(_) | TensorData::Yuv420p(_) => Ok(Self(tensor)), TensorData::JPEG(_) => Err(tensor), } @@ -1110,7 +1144,7 @@ impl DecodedTensor { | TensorData::I64(_) | TensorData::F32(_) | TensorData::F64(_) - | TensorData::NV12(_) => Ok(Self(maybe_encoded_tensor)), + | TensorData::NV12(_) | TensorData::Yuv420p(_) => Ok(Self(maybe_encoded_tensor)), TensorData::JPEG(buf) => { use image::io::Reader as ImageReader; diff --git a/crates/re_renderer/shader/decodings.wgsl b/crates/re_renderer/shader/decodings.wgsl index 4e774a135f9a..e46164ed1921 100644 --- a/crates/re_renderer/shader/decodings.wgsl +++ b/crates/re_renderer/shader/decodings.wgsl @@ -17,3 +17,22 @@ fn decode_nv12(texture: texture_2d, in_tex_coords: Vec2) -> Vec4 { let b = pow(y + 1.772 * u, 2.2); return Vec4(r, g, b, 1.0); } + +fn decode_yuv420p(texture: texture_2d, in_tex_coords: 
Vec2) -> Vec4 { + let texture_dim = Vec2(textureDimensions(texture).xy); + let uv_offset = u32(floor(texture_dim.y / 1.5)); + let uv_row = u32(floor(in_tex_coords.y * texture_dim.y) / 2.0) * 2u; + var uv_col = u32(floor(in_tex_coords.x * texture_dim.x / 2.0)); + + let coords = UVec2(in_tex_coords * Vec2(texture_dim.x, texture_dim.y)); + let y = (f32(textureLoad(texture, coords, 0).r) - 16.0) / 219.0; + let u = (f32(textureLoad(texture, UVec2(u32(uv_col), uv_offset + uv_row), 0).r) - 128.0) / 224.0; + let v = (f32(textureLoad(texture, UVec2((u32(uv_col)), uv_offset + uv_row + 1u), 0).r) - 128.0) / 224.0; + + // Get RGB values and apply reverse gamma correction since we are rendering to sRGB framebuffer + let r = pow(y + 1.402 * v, 2.2); + let g = pow(y - (0.344 * u + 0.714 * v), 2.2); + let b = pow(y + 1.772 * u, 2.2); + + return Vec4(r, g, b, 1.0); +} diff --git a/crates/re_renderer/shader/depth_cloud.wgsl b/crates/re_renderer/shader/depth_cloud.wgsl index da3bfd2476d3..d61814648a39 100644 --- a/crates/re_renderer/shader/depth_cloud.wgsl +++ b/crates/re_renderer/shader/depth_cloud.wgsl @@ -20,6 +20,10 @@ const SAMPLE_TYPE_UINT_NOFILTER = 4u; // Encoded textures // ------------------- const SAMPLE_TYPE_NV12 = 5u; +const SAMPLE_TYPE_YUV420P = 6u; + +const ALBEDO_COLOR_RGB = 0u; +const ALBEDO_COLOR_MONO = 1u; // --- @@ -52,6 +56,9 @@ struct DepthCloudInfo { /// Configures color mapping mode, see `colormap.wgsl`. colormap: u32, + + /// Either ALBEDO_COLOR_RGB or ALBEDO_COLOR_MONO. Governs how to interpret the albedo texture. + albedo_color: u32, /// Configures the sample type for the albedo texture.
albedo_sample_type: u32, @@ -63,9 +70,6 @@ struct DepthCloudInfo { radius_boost_in_ui_points: f32, }; -const ALBEDO_COLOR_RGB: u32 = 0u; -const ALBEDO_COLOR_MONO: u32 = 1u; - @group(1) @binding(0) var depth_cloud_info: DepthCloudInfo; @@ -145,6 +149,8 @@ fn compute_point_data(quad_idx: u32) -> PointData { if depth_cloud_info.colormap == ALBEDO_TEXTURE { if depth_cloud_info.albedo_sample_type == SAMPLE_TYPE_NV12 { color = decode_nv12(albedo_texture_uint, Vec2(f32(texcoords.x), f32(texcoords.y)) / Vec2(f32(wh.x), f32(wh.x))); + } else if depth_cloud_info.albedo_sample_type == SAMPLE_TYPE_YUV420P { + color = decode_yuv420p(albedo_texture_uint, Vec2(f32(texcoords.x), f32(texcoords.y)) / Vec2(f32(wh.x), f32(wh.x))); } else { // TODO(filip): Support all sample types like in rectangle_fs.wgsl if depth_cloud_info.albedo_sample_type == SAMPLE_TYPE_FLOAT_FILTER { color = textureSampleLevel( @@ -153,23 +159,15 @@ fn compute_point_data(quad_idx: u32) -> PointData { Vec2(texcoords) / Vec2(wh), 0.0 ); - color = Vec4(Vec3(color.r), 1.0); } else if depth_cloud_info.albedo_sample_type == SAMPLE_TYPE_FLOAT_NOFILTER { let coord = Vec2(texcoords) / Vec2(textureDimensions(depth_texture_float)); - let v00 = textureLoad(albedo_texture_float_nofilter, IVec2(coord) + IVec2(0, 0), 0); - let v01 = textureLoad(albedo_texture_float_nofilter, IVec2(coord) + IVec2(0, 1), 0); - let v10 = textureLoad(albedo_texture_float_nofilter, IVec2(coord) + IVec2(1, 0), 0); - let v11 = textureLoad(albedo_texture_float_nofilter, IVec2(coord) + IVec2(1, 1), 0); - let top = mix(v00, v10, fract(coord.x)); - let bottom = mix(v01, v11, fract(coord.x)); - color = mix(top, bottom, fract(coord.y)); + color = Vec4(textureLoad(albedo_texture_float_nofilter, IVec2(coord + vec2(0.5)), 0)); } else if depth_cloud_info.albedo_sample_type == SAMPLE_TYPE_UINT_NOFILTER { color = Vec4(textureLoad( albedo_texture_uint, texcoords, 0 )) / 255.0; - color = Vec4(linear_from_srgb(Vec3(color.r)), 1.0); } else if 
depth_cloud_info.albedo_sample_type == SAMPLE_TYPE_SINT_NOFILTER { color = Vec4(textureLoad( albedo_texture_sint, @@ -181,6 +179,9 @@ fn compute_point_data(quad_idx: u32) -> PointData { color = ERROR_RGBA; } } + if depth_cloud_info.albedo_color == ALBEDO_COLOR_MONO { + color = Vec4(Vec3(color.r), 1.0); + } } else { color = Vec4(colormap_srgb(depth_cloud_info.colormap, world_space_depth / depth_cloud_info.max_depth_in_world), 1.0); } diff --git a/crates/re_renderer/shader/rectangle.wgsl b/crates/re_renderer/shader/rectangle.wgsl index 8459532fd753..42465702d9d0 100644 --- a/crates/re_renderer/shader/rectangle.wgsl +++ b/crates/re_renderer/shader/rectangle.wgsl @@ -14,7 +14,7 @@ const SAMPLE_TYPE_UINT_NOFILTER = 4u; // Encoded textures // ------------------ const SAMPLE_TYPE_NV12 = 5u; - +const SAMPLE_TYPE_YUV420P = 6u; // How do we do colormapping? const COLOR_MAPPER_OFF = 1u; diff --git a/crates/re_renderer/shader/rectangle_fs.wgsl b/crates/re_renderer/shader/rectangle_fs.wgsl index 8cb214285d93..ea91fffa2229 100644 --- a/crates/re_renderer/shader/rectangle_fs.wgsl +++ b/crates/re_renderer/shader/rectangle_fs.wgsl @@ -67,7 +67,10 @@ fn fs_main(in: VertexOut) -> @location(0) Vec4 { } } else if rect_info.sample_type == SAMPLE_TYPE_NV12 { sampled_value = decode_nv12(texture_uint, in.texcoord); - } else { + } else if rect_info.sample_type == SAMPLE_TYPE_YUV420P { + sampled_value = decode_yuv420p(texture_uint, in.texcoord); + } + else { return ERROR_RGBA; // unknown sample type } diff --git a/crates/re_renderer/shader/rectangle_vs.wgsl b/crates/re_renderer/shader/rectangle_vs.wgsl index 3d30c441ba0a..791520f95f85 100644 --- a/crates/re_renderer/shader/rectangle_vs.wgsl +++ b/crates/re_renderer/shader/rectangle_vs.wgsl @@ -10,7 +10,7 @@ fn vs_main(@builtin(vertex_index) v_idx: u32) -> VertexOut { out.position = apply_depth_offset(frame.projection_from_world * Vec4(pos, 1.0), rect_info.depth_offset); // out.texcoord = (texcoord.x * rect_info.extent_u + texcoord.y * 
rect_info.extent_v).xy; out.texcoord = texcoord; - if rect_info.sample_type == SAMPLE_TYPE_NV12 { + if rect_info.sample_type == SAMPLE_TYPE_NV12 || rect_info.sample_type == SAMPLE_TYPE_YUV420P { out.texcoord.y *= (2.0 / 3.0); } return out; diff --git a/crates/re_renderer/src/renderer/depth_cloud.rs b/crates/re_renderer/src/renderer/depth_cloud.rs index f84b92b4b049..a209aa4bce01 100644 --- a/crates/re_renderer/src/renderer/depth_cloud.rs +++ b/crates/re_renderer/src/renderer/depth_cloud.rs @@ -16,27 +16,16 @@ use smallvec::smallvec; use crate::{ allocator::create_and_fill_uniform_buffer_batch, - draw_phases::{ DrawPhase, OutlineMaskProcessor }, + draw_phases::{DrawPhase, OutlineMaskProcessor}, include_shader_module, resource_managers::{GpuTexture2D, ResourceManagerError}, texture_info, view_builder::ViewBuilder, wgpu_resources::{ - BindGroupDesc, - BindGroupEntry, - BindGroupLayoutDesc, - GpuBindGroup, - GpuBindGroupLayoutHandle, - GpuRenderPipelineHandle, - GpuTexture, - PipelineLayoutDesc, - RenderPipelineDesc, - TextureDesc, + BindGroupDesc, BindGroupEntry, BindGroupLayoutDesc, GpuBindGroup, GpuBindGroupLayoutHandle, + GpuRenderPipelineHandle, GpuTexture, PipelineLayoutDesc, RenderPipelineDesc, TextureDesc, }, - Colormap, - OutlineMaskPreference, - PickingLayerObjectId, - PickingLayerProcessor, + Colormap, OutlineMaskPreference, PickingLayerObjectId, PickingLayerProcessor, }; use super::{ @@ -64,6 +53,10 @@ mod gpu_data { // Encoded textures // ------------------ const SAMPLE_TYPE_NV12: u32 = 5; + const SAMPLE_TYPE_YUV420P: u32 = 6; + + const ALBEDO_COLOR_RGB: u32 = 0; + const ALBEDO_COLOR_MONO: u32 = 1; /// Keep in sync with mirror in `depth_cloud.wgsl.` #[repr(C, align(256))] @@ -89,16 +82,20 @@ mod gpu_data { /// Which colormap should be used. pub colormap: u32, + /// Which color encoding to use. 
+ /// 0: RGB, 1: MONO + pub albedo_color: u32, + /// Which texture sample to use - pub albedo_sample_type: U32RowPadded, + pub albedo_sample_type: u32, /// Which texture sample to use - pub depth_sample_type: U32RowPadded, + pub depth_sample_type: u32, /// Changes over different draw-phases. - pub radius_boost_in_ui_points: wgpu_buffer_types::F32RowPadded, + pub radius_boost_in_ui_points: f32, - pub end_padding: [wgpu_buffer_types::PaddingRow; 16 - 4 - 3 - 1 - 1 - 1 - 1 - 1], + pub end_padding: [wgpu_buffer_types::PaddingRow; 16 - 4 - 3 - 1 - 1 - 1], } impl DepthCloudInfoUBO { @@ -136,6 +133,7 @@ mod gpu_data { } Some(wgpu::TextureSampleType::Uint) => match colormapped_texture.encoding { Some(TextureEncoding::Nv12) => SAMPLE_TYPE_NV12, + Some(TextureEncoding::Yuv420p) => SAMPLE_TYPE_YUV420P, _ => SAMPLE_TYPE_UINT_NOFILTER, }, Some(wgpu::TextureSampleType::Sint) => SAMPLE_TYPE_SINT_NOFILTER, @@ -144,7 +142,21 @@ mod gpu_data { } _ => 0, }; - + let albedo_color = albedo_texture + .as_ref() + .and_then(|albedo_texture| { + if albedo_texture.encoding.is_some() { + return Some(ALBEDO_COLOR_RGB); + } + Some( + match texture_info::num_texture_components(albedo_texture.texture.format()) + { + 1 => ALBEDO_COLOR_MONO, + _ => ALBEDO_COLOR_RGB, + }, + ) + }) + .unwrap_or(ALBEDO_COLOR_RGB); let depth_sample_type = match depth_texture.texture.format().sample_type(None) { Some(wgpu::TextureSampleType::Float { .. 
}) => { if texture_info::is_float_filterable( @@ -164,14 +176,15 @@ mod gpu_data { world_from_obj: (*world_from_obj).into(), depth_camera_intrinsics: (*depth_camera_intrinsics).into(), outline_mask_id: outline_mask_id.0.unwrap_or_default().into(), - world_depth_from_texture_depth: *world_depth_from_texture_depth, - point_radius_from_world_depth: *point_radius_from_world_depth, - max_depth_in_world: *max_depth_in_world, - colormap: *colormap as u32, + world_depth_from_texture_depth: (*world_depth_from_texture_depth).into(), + point_radius_from_world_depth: (*point_radius_from_world_depth).into(), + max_depth_in_world: (*max_depth_in_world).into(), + colormap: (*colormap as u32).into(), radius_boost_in_ui_points: radius_boost_in_ui_points.into(), picking_layer_object_id: *picking_object_id, albedo_sample_type: albedo_sample_type.into(), depth_sample_type: depth_sample_type.into(), + albedo_color: albedo_color.into(), end_padding: Default::default(), } } @@ -271,9 +284,8 @@ impl DrawData for DepthCloudDrawData { #[derive(thiserror::Error, Debug)] pub enum DepthCloudDrawDataError { - #[error( - "Depth texture format was {0:?}, only formats with sample type float are supported" - )] InvalidDepthTextureFormat(wgpu::TextureFormat), + #[error("Depth texture format was {0:?}, only formats with sample type float are supported")] + InvalidDepthTextureFormat(wgpu::TextureFormat), #[error("Invalid albedo texture format {0:?}")] InvalidAlbedoTextureFormat(wgpu::TextureFormat), @@ -285,21 +297,25 @@ pub enum DepthCloudDrawDataError { impl DepthCloudDrawData { pub fn new( ctx: &mut RenderContext, - depth_clouds: &DepthClouds + depth_clouds: &DepthClouds, ) -> Result { crate::profile_function!(); - let DepthClouds { clouds: depth_clouds, radius_boost_in_ui_points_for_outlines } = - depth_clouds; + let DepthClouds { + clouds: depth_clouds, + radius_boost_in_ui_points_for_outlines, + } = depth_clouds; - let bg_layout = ctx.renderers + let bg_layout = ctx + .renderers .write() 
.get_or_create::<_, DepthCloudRenderer>( &ctx.shared_renderer_data, &mut ctx.gpu_resources, &ctx.device, - &mut ctx.resolver - ).bind_group_layout; + &mut ctx.resolver, + ) + .bind_group_layout; if depth_clouds.is_empty() { return Ok(DepthCloudDrawData { @@ -402,7 +418,7 @@ impl DepthCloudDrawData { BindGroupEntry::DefaultTextureView(albedo_texture_float_filterable) ], layout: bg_layout, - }) + }), ) }; @@ -432,14 +448,18 @@ impl Renderer for DepthCloudRenderer { type RendererDrawData = DepthCloudDrawData; fn participated_phases() -> &'static [DrawPhase] { - &[DrawPhase::Opaque, DrawPhase::PickingLayer, DrawPhase::OutlineMask] + &[ + DrawPhase::Opaque, + DrawPhase::PickingLayer, + DrawPhase::OutlineMask, + ] } fn create_renderer( shared_data: &SharedRendererData, pools: &mut WgpuResourcePools, device: &wgpu::Device, - resolver: &mut FileResolver + resolver: &mut FileResolver, ) -> Self { crate::profile_function!(); @@ -454,9 +474,8 @@ impl Renderer for DepthCloudRenderer { ty: wgpu::BindingType::Buffer { ty: wgpu::BufferBindingType::Uniform, has_dynamic_offset: false, - min_binding_size: ( - std::mem::size_of::() as u64 - ) + min_binding_size: (std::mem::size_of::() + as u64) .try_into() .ok(), }, @@ -523,7 +542,7 @@ impl Renderer for DepthCloudRenderer { count: None, }, ], - }) + }), ); let pipeline_layout = pools.pipeline_layouts.get_or_create( @@ -532,13 +551,13 @@ impl Renderer for DepthCloudRenderer { label: "depth_cloud_rp_layout".into(), entries: vec![shared_data.global_bindings.layout, bind_group_layout], }), - &pools.bind_group_layouts + &pools.bind_group_layouts, ); let shader_module = pools.shader_modules.get_or_create( device, resolver, - &include_shader_module!("../../shader/depth_cloud.wgsl") + &include_shader_module!("../../shader/depth_cloud.wgsl"), ); let render_pipeline_desc_color = RenderPipelineDesc { @@ -566,7 +585,7 @@ impl Renderer for DepthCloudRenderer { device, &render_pipeline_desc_color, &pools.pipeline_layouts, - &pools.shader_modules + 
&pools.shader_modules, ); let render_pipeline_picking_layer = pools.render_pipelines.get_or_create( device, @@ -579,7 +598,7 @@ impl Renderer for DepthCloudRenderer { ..render_pipeline_desc_color.clone() }), &pools.pipeline_layouts, - &pools.shader_modules + &pools.shader_modules, ); let render_pipeline_outline_mask = pools.render_pipelines.get_or_create( device, @@ -590,12 +609,12 @@ impl Renderer for DepthCloudRenderer { depth_stencil: OutlineMaskProcessor::MASK_DEPTH_STATE, // Alpha to coverage doesn't work with the mask integer target. multisample: OutlineMaskProcessor::mask_default_msaa_state( - shared_data.config.hardware_tier + shared_data.config.hardware_tier, ), ..render_pipeline_desc_color }), &pools.pipeline_layouts, - &pools.shader_modules + &pools.shader_modules, ); DepthCloudRenderer { @@ -611,7 +630,7 @@ impl Renderer for DepthCloudRenderer { pools: &'a WgpuResourcePools, phase: DrawPhase, pass: &mut wgpu::RenderPass<'a>, - draw_data: &'a Self::RendererDrawData + draw_data: &'a Self::RendererDrawData, ) -> anyhow::Result<()> { crate::profile_function!(); if draw_data.instances.is_empty() { diff --git a/crates/re_renderer/src/renderer/rectangles.rs b/crates/re_renderer/src/renderer/rectangles.rs index 3a84ab0057ca..f32fd4e8222b 100644 --- a/crates/re_renderer/src/renderer/rectangles.rs +++ b/crates/re_renderer/src/renderer/rectangles.rs @@ -10,41 +10,26 @@ //! Since we're not allowed to bind many textures at once (no widespread bindless support!), //! we are forced to have individual bind groups per rectangle and thus a draw call per rectangle. 
-use itertools::{ izip, Itertools as _ }; +use itertools::{izip, Itertools as _}; use smallvec::smallvec; use crate::{ allocator::create_and_fill_uniform_buffer_batch, depth_offset::DepthOffset, - draw_phases::{ DrawPhase, OutlineMaskProcessor }, + draw_phases::{DrawPhase, OutlineMaskProcessor}, include_shader_module, - resource_managers::{ GpuTexture2D, ResourceManagerError }, + resource_managers::{GpuTexture2D, ResourceManagerError}, texture_info, view_builder::ViewBuilder, wgpu_resources::{ - BindGroupDesc, - BindGroupEntry, - BindGroupLayoutDesc, - GpuBindGroup, - GpuBindGroupLayoutHandle, - GpuRenderPipelineHandle, - PipelineLayoutDesc, - RenderPipelineDesc, - SamplerDesc, + BindGroupDesc, BindGroupEntry, BindGroupLayoutDesc, GpuBindGroup, GpuBindGroupLayoutHandle, + GpuRenderPipelineHandle, PipelineLayoutDesc, RenderPipelineDesc, SamplerDesc, }, - Colormap, - OutlineMaskPreference, - PickingLayerProcessor, - Rgba, + Colormap, OutlineMaskPreference, PickingLayerProcessor, Rgba, }; use super::{ - DrawData, - FileResolver, - FileSystem, - RenderContext, - Renderer, - SharedRendererData, + DrawData, FileResolver, FileSystem, RenderContext, Renderer, SharedRendererData, WgpuResourcePools, }; @@ -70,6 +55,7 @@ pub enum TextureEncoding { Rgb, Rgba, Nv12, + Yuv420p, } /// Describes a texture and how to map it to a color. 
@@ -129,10 +115,9 @@ impl ColormappedTexture { pub fn width_height(&self) -> [u32; 2] { let texture_dim = self.texture.width_height(); match &self.encoding { - &Some(TextureEncoding::Nv12) => { - let real_dim = - glam::Vec2::new(texture_dim[0] as f32, texture_dim[1] as f32) * - glam::Vec2::new(1.0, 2.0 / 3.0); + &Some(TextureEncoding::Nv12) | &Some(TextureEncoding::Yuv420p) => { + let real_dim = glam::Vec2::new(texture_dim[0] as f32, texture_dim[1] as f32) + * glam::Vec2::new(1.0, 2.0 / 3.0); [real_dim.x as u32, real_dim.y as u32] } _ => texture_dim, @@ -185,9 +170,11 @@ impl Default for RectangleOptions { #[derive(thiserror::Error, Debug)] pub enum RectangleError { - #[error(transparent)] ResourceManagerError(#[from] ResourceManagerError), + #[error(transparent)] + ResourceManagerError(#[from] ResourceManagerError), - #[error("Texture required special features: {0:?}")] SpecialFeatures(wgpu::Features), + #[error("Texture required special features: {0:?}")] + SpecialFeatures(wgpu::Features), // There's really no need for users to be able to sample depth textures. // We don't get filtering of depth textures any way. 
@@ -197,21 +184,19 @@ pub enum RectangleError { #[error("Color mapping is being applied to a four-component RGBA texture")] ColormappingRgbaTexture, - #[error( - "Only 1 and 4 component textures are supported, got {0} components" - )] UnsupportedComponentCount(u8), + #[error("Only 1 and 4 component textures are supported, got {0} components")] + UnsupportedComponentCount(u8), #[error("No color mapper was supplied for this 1-component texture")] MissingColorMapper, - #[error("Invalid color map texture format: {0:?}")] UnsupportedColormapTextureFormat( - wgpu::TextureFormat, - ), + #[error("Invalid color map texture format: {0:?}")] + UnsupportedColormapTextureFormat(wgpu::TextureFormat), } mod gpu_data { - use super::{ ColorMapper, RectangleError, TextureEncoding, TexturedRect }; - use crate::{ texture_info, wgpu_buffer_types }; + use super::{ColorMapper, RectangleError, TextureEncoding, TexturedRect}; + use crate::{texture_info, wgpu_buffer_types}; // Keep in sync with mirror in rectangle.wgsl @@ -224,6 +209,7 @@ mod gpu_data { // Encoded textures // ------------------ const SAMPLE_TYPE_NV12: u32 = 5; + const SAMPLE_TYPE_YUV420P: u32 = 6; // How do we do colormapping? const COLOR_MAPPER_OFF: u32 = 1; @@ -263,7 +249,7 @@ mod gpu_data { impl UniformBuffer { pub fn from_textured_rect( rectangle: &super::TexturedRect, - device_features: wgpu::Features + device_features: wgpu::Features, ) -> Result { let texture_format = rectangle.colormapped_texture.texture.format(); @@ -292,20 +278,19 @@ mod gpu_data { } = options; let sample_type = match texture_format.sample_type(None) { - Some(wgpu::TextureSampleType::Float { .. 
}) => if - texture_info::is_float_filterable(texture_format, device_features) - { - SAMPLE_TYPE_FLOAT_FILTER - } else { - SAMPLE_TYPE_FLOAT_NOFILTER - } - Some(wgpu::TextureSampleType::Sint) => SAMPLE_TYPE_SINT_NOFILTER, - Some(wgpu::TextureSampleType::Uint) => { - match encoding { - Some(TextureEncoding::Nv12) => SAMPLE_TYPE_NV12, - _ => SAMPLE_TYPE_UINT_NOFILTER, + Some(wgpu::TextureSampleType::Float { .. }) => { + if texture_info::is_float_filterable(texture_format, device_features) { + SAMPLE_TYPE_FLOAT_FILTER + } else { + SAMPLE_TYPE_FLOAT_NOFILTER } } + Some(wgpu::TextureSampleType::Sint) => SAMPLE_TYPE_SINT_NOFILTER, + Some(wgpu::TextureSampleType::Uint) => match encoding { + Some(TextureEncoding::Nv12) => SAMPLE_TYPE_NV12, + Some(TextureEncoding::Yuv420p) => SAMPLE_TYPE_YUV420P, + _ => SAMPLE_TYPE_UINT_NOFILTER, + }, _ => { return Err(RectangleError::DepthTexturesNotSupported); } @@ -315,22 +300,23 @@ mod gpu_data { let color_mapper_int; match texture_info::num_texture_components(texture_format) { - 1 => - match color_mapper { - Some(ColorMapper::Function(colormap)) => { - color_mapper_int = COLOR_MAPPER_FUNCTION; - colormap_function = *colormap as u32; - } - Some(ColorMapper::Texture(_)) => { - color_mapper_int = COLOR_MAPPER_TEXTURE; - } - None => { - if encoding != &Some(TextureEncoding::Nv12) { - return Err(RectangleError::MissingColorMapper); - } - color_mapper_int = COLOR_MAPPER_OFF; + 1 => match color_mapper { + Some(ColorMapper::Function(colormap)) => { + color_mapper_int = COLOR_MAPPER_FUNCTION; + colormap_function = *colormap as u32; + } + Some(ColorMapper::Texture(_)) => { + color_mapper_int = COLOR_MAPPER_TEXTURE; + } + None => { + if encoding != &Some(TextureEncoding::Nv12) + && encoding != &Some(TextureEncoding::Yuv420p) + { + return Err(RectangleError::MissingColorMapper); } + color_mapper_int = COLOR_MAPPER_OFF; } + }, 4 => { if color_mapper.is_some() { return Err(RectangleError::ColormappingRgbaTexture); @@ -389,7 +375,7 @@ impl DrawData 
for RectangleDrawData { impl RectangleDrawData { pub fn new( ctx: &mut RenderContext, - rectangles: &[TexturedRect] + rectangles: &[TexturedRect], ) -> Result { crate::profile_function!(); @@ -398,7 +384,7 @@ impl RectangleDrawData { &ctx.shared_renderer_data, &mut ctx.gpu_resources, &ctx.device, - &mut ctx.resolver + &mut ctx.resolver, ); if rectangles.is_empty() { @@ -416,7 +402,7 @@ impl RectangleDrawData { let uniform_buffer_bindings = create_and_fill_uniform_buffer_batch( ctx, "rectangle uniform buffers".into(), - uniform_buffers.into_iter() + uniform_buffers.into_iter(), ); let mut instances = Vec::with_capacity(rectangles.len()); @@ -427,9 +413,9 @@ impl RectangleDrawData { &(SamplerDesc { label: format!( "rectangle sampler mag {:?} min {:?}", - options.texture_filter_magnification, - options.texture_filter_minification - ).into(), + options.texture_filter_magnification, options.texture_filter_minification + ) + .into(), mag_filter: match options.texture_filter_magnification { TextureFilterMag::Linear => wgpu::FilterMode::Linear, TextureFilterMag::Nearest => wgpu::FilterMode::Nearest, @@ -440,13 +426,15 @@ impl RectangleDrawData { }, mipmap_filter: wgpu::FilterMode::Nearest, ..Default::default() - }) + }), ); let texture = &rectangle.colormapped_texture.texture; let texture_format = texture.creation_desc.format; if texture_format.required_features() != Default::default() { - return Err(RectangleError::SpecialFeatures(texture_format.required_features())); + return Err(RectangleError::SpecialFeatures( + texture_format.required_features(), + )); } // We set up several texture sources, then instruct the shader to read from at most one of them. @@ -475,8 +463,8 @@ impl RectangleDrawData { } // We also set up an optional colormap texture. 
- let colormap_texture = if - let Some(ColorMapper::Texture(handle)) = &rectangle.colormapped_texture.color_mapper + let colormap_texture = if let Some(ColorMapper::Texture(handle)) = + &rectangle.colormapped_texture.color_mapper { let format = handle.format(); if format != wgpu::TextureFormat::Rgba8UnormSrgb { @@ -503,7 +491,7 @@ impl RectangleDrawData { BindGroupEntry::DefaultTextureView(texture_float_filterable) ], layout: rectangle_renderer.bind_group_layout, - }) + }), ), draw_outline_mask: rectangle.options.outline_mask.is_some(), }); @@ -527,7 +515,7 @@ impl Renderer for RectangleRenderer { shared_data: &SharedRendererData, pools: &mut WgpuResourcePools, device: &wgpu::Device, - resolver: &mut FileResolver + resolver: &mut FileResolver, ) -> Self { crate::profile_function!(); @@ -544,9 +532,8 @@ impl Renderer for RectangleRenderer { // We could use dynamic offset here into a single large buffer. // But we have to set a new texture anyways and its doubtful that splitting the bind group is of any use. 
has_dynamic_offset: false, - min_binding_size: ( - std::mem::size_of::() as u64 - ) + min_binding_size: (std::mem::size_of::() + as u64) .try_into() .ok(), }, @@ -613,9 +600,9 @@ impl Renderer for RectangleRenderer { multisampled: false, }, count: None, - } + }, ], - }) + }), ); let pipeline_layout = pools.pipeline_layouts.get_or_create( @@ -624,18 +611,18 @@ impl Renderer for RectangleRenderer { label: "RectangleRenderer::pipeline_layout".into(), entries: vec![shared_data.global_bindings.layout, bind_group_layout], }), - &pools.bind_group_layouts + &pools.bind_group_layouts, ); let shader_module_vs = pools.shader_modules.get_or_create( device, resolver, - &include_shader_module!("../../shader/rectangle_vs.wgsl") + &include_shader_module!("../../shader/rectangle_vs.wgsl"), ); let shader_module_fs = pools.shader_modules.get_or_create( device, resolver, - &include_shader_module!("../../shader/rectangle_fs.wgsl") + &include_shader_module!("../../shader/rectangle_fs.wgsl"), ); let render_pipeline_desc_color = RenderPipelineDesc { @@ -646,14 +633,12 @@ impl Renderer for RectangleRenderer { fragment_entrypoint: "fs_main".into(), fragment_handle: shader_module_fs, vertex_buffers: smallvec![], - render_targets: smallvec![ - Some(wgpu::ColorTargetState { - format: ViewBuilder::MAIN_TARGET_COLOR_FORMAT, - // TODO(andreas): have two render pipelines, an opaque one and a transparent one. Transparent shouldn't write depth! - blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING), - write_mask: wgpu::ColorWrites::ALL, - }) - ], + render_targets: smallvec![Some(wgpu::ColorTargetState { + format: ViewBuilder::MAIN_TARGET_COLOR_FORMAT, + // TODO(andreas): have two render pipelines, an opaque one and a transparent one. Transparent shouldn't write depth! 
+ blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING), + write_mask: wgpu::ColorWrites::ALL, + })], primitive: wgpu::PrimitiveState { topology: wgpu::PrimitiveTopology::TriangleStrip, cull_mode: None, @@ -666,7 +651,7 @@ impl Renderer for RectangleRenderer { device, &render_pipeline_desc_color, &pools.pipeline_layouts, - &pools.shader_modules + &pools.shader_modules, ); let render_pipeline_picking_layer = pools.render_pipelines.get_or_create( device, @@ -679,7 +664,7 @@ impl Renderer for RectangleRenderer { ..render_pipeline_desc_color.clone() }), &pools.pipeline_layouts, - &pools.shader_modules + &pools.shader_modules, ); let render_pipeline_outline_mask = pools.render_pipelines.get_or_create( device, @@ -689,12 +674,12 @@ impl Renderer for RectangleRenderer { render_targets: smallvec![Some(OutlineMaskProcessor::MASK_FORMAT.into())], depth_stencil: OutlineMaskProcessor::MASK_DEPTH_STATE, multisample: OutlineMaskProcessor::mask_default_msaa_state( - shared_data.config.hardware_tier + shared_data.config.hardware_tier, ), ..render_pipeline_desc_color }), &pools.pipeline_layouts, - &pools.shader_modules + &pools.shader_modules, ); RectangleRenderer { @@ -710,7 +695,7 @@ impl Renderer for RectangleRenderer { pools: &'a WgpuResourcePools, phase: DrawPhase, pass: &mut wgpu::RenderPass<'a>, - draw_data: &'a Self::RendererDrawData + draw_data: &'a Self::RendererDrawData, ) -> anyhow::Result<()> { crate::profile_function!(); if draw_data.instances.is_empty() { @@ -740,6 +725,10 @@ impl Renderer for RectangleRenderer { fn participated_phases() -> &'static [DrawPhase] { // TODO(andreas): This a hack. We have both opaque and transparent. 
- &[DrawPhase::OutlineMask, DrawPhase::Opaque, DrawPhase::PickingLayer] + &[ + DrawPhase::OutlineMask, + DrawPhase::Opaque, + DrawPhase::PickingLayer, + ] } } diff --git a/crates/re_viewer/src/app.rs b/crates/re_viewer/src/app.rs index 73db692a8308..7616b577ab9e 100644 --- a/crates/re_viewer/src/app.rs +++ b/crates/re_viewer/src/app.rs @@ -22,7 +22,7 @@ use crate::{ depthai::{dependency_installer::DependencyInstaller, depthai}, misc::{AppOptions, Caches, RecordingConfig, ViewerContext}, ui::{data_ui::ComponentUiRegistry, Blueprint}, - viewer_analytics::ViewerAnalytics, + // viewer_analytics::ViewerAnalytics, AppEnvironment, }; @@ -113,7 +113,7 @@ pub struct App { pending_commands: Vec, cmd_palette: re_ui::CommandPalette, - analytics: ViewerAnalytics, + // analytics: ViewerAnalytics, icon_status: AppIconStatus, @@ -210,8 +210,8 @@ impl App { AppState::default() }; - let mut analytics = ViewerAnalytics::new(); - analytics.on_viewer_started(&build_info, app_env); + // let mut analytics = ViewerAnalytics::new(); + // analytics.on_viewer_started(&build_info, app_env); #[cfg(not(target_arch = "wasm32"))] let backend_environment = match app_env { @@ -247,7 +247,7 @@ impl App { pending_commands: Default::default(), cmd_palette: Default::default(), - analytics, + // analytics, icon_status: AppIconStatus::NotSetTryAgain, @@ -905,7 +905,7 @@ impl App { // Do analytics after ingesting the new message, // because thats when the `log_db.recording_info` is set, // which we use in the analytics call. 
- self.analytics.on_open_recording(log_db); + // self.analytics.on_open_recording(log_db); } if start.elapsed() > instant::Duration::from_millis(10) { @@ -1037,7 +1037,7 @@ impl App { } fn show_log_db(&mut self, log_db: LogDb) { - self.analytics.on_open_recording(&log_db); + // self.analytics.on_open_recording(&log_db); self.state.selected_rec_id = log_db.recording_id(); self.log_dbs.insert(log_db.recording_id(), log_db); } diff --git a/crates/re_viewer/src/depthai/depthai.rs b/crates/re_viewer/src/depthai/depthai.rs index ed531a727535..acd1b39b2b9b 100644 --- a/crates/re_viewer/src/depthai/depthai.rs +++ b/crates/re_viewer/src/depthai/depthai.rs @@ -131,6 +131,7 @@ pub enum CameraSensorResolution { THE_1080_P, THE_1200_P, THE_1280_P, + THE_1280X3848, THE_4_K, THE_4000X3000, THE_12_MP, @@ -152,6 +153,7 @@ impl fmt::Display for CameraSensorResolution { Self::THE_1080_P => write!(f, "1080p"), Self::THE_1200_P => write!(f, "1200p"), Self::THE_1280_P => write!(f, "1280p"), + Self::THE_1280X3848 => write!(f, "1280x3848"), Self::THE_4_K => write!(f, "4k"), Self::THE_4000X3000 => write!(f, "4000x3000"), Self::THE_12_MP => write!(f, "12MP"), @@ -355,6 +357,8 @@ pub struct ToFConfig { enable_temperature_correction: Option, enable_wiggle_correction: Option, enable_phase_unwrapping: Option, + enable_phase_shuffle_temporal_filter: Option, + enable_burst_mode: Option, #[serde(skip)] modified: bool, } @@ -370,6 +374,8 @@ impl Default for ToFConfig { enable_temperature_correction: None, enable_wiggle_correction: None, enable_phase_unwrapping: None, + enable_phase_shuffle_temporal_filter: Some(true), + enable_burst_mode: Some(false), modified: false, } } @@ -408,6 +414,14 @@ impl ToFConfig { self.enable_phase_unwrapping } + pub fn get_enable_phase_shuffle_temporal_filter(&self) -> Option { + self.enable_phase_shuffle_temporal_filter + } + + pub fn get_enable_burst_mode(&self) -> Option { + self.enable_burst_mode + } + pub fn set_median_filter(&mut self, median: MedianFilter) { if 
self.median != median { self.modified = true; @@ -464,6 +478,20 @@ impl ToFConfig { self.enable_phase_unwrapping = enable; } + pub fn set_enable_phase_shuffle_temporal_filter(&mut self, enable: Option) { + if self.enable_phase_shuffle_temporal_filter != enable { + self.modified = true; + } + self.enable_phase_shuffle_temporal_filter = enable; + } + + pub fn set_enable_burst_mode(&mut self, enable: Option) { + if self.enable_burst_mode != enable { + self.modified = true; + } + self.enable_burst_mode = enable; + } + pub fn is_modified(&self) -> bool { self.modified } diff --git a/crates/re_viewer/src/gpu_bridge/tensor_to_gpu.rs b/crates/re_viewer/src/gpu_bridge/tensor_to_gpu.rs index bbd8af16fa7d..4e3a19a150a7 100644 --- a/crates/re_viewer/src/gpu_bridge/tensor_to_gpu.rs +++ b/crates/re_viewer/src/gpu_bridge/tensor_to_gpu.rs @@ -98,6 +98,9 @@ fn color_tensor_to_gpu( } (1, TensorData::NV12(buf)) => { (cast_slice_to_cow(buf.as_slice()), TextureFormat::R8Uint) + }, + (1, TensorData::Yuv420p(buf)) => { + (cast_slice_to_cow(buf.as_slice()), TextureFormat::R8Uint) } (1, TensorData::I8(buf)) => (cast_slice_to_cow(buf), TextureFormat::R8Snorm), @@ -140,7 +143,7 @@ fn color_tensor_to_gpu( [0.0, 1.0] } else if texture_format == TextureFormat::R8Snorm { [-1.0, 1.0] - } else if encoding == Some(TextureEncoding::Nv12) { + } else if encoding == Some(TextureEncoding::Nv12) || encoding == Some(TextureEncoding::Yuv420p) { [0.0, 1.0] } else { crate::gpu_bridge::range(tensor_stats)? 
@@ -343,6 +346,9 @@ fn general_texture_creation_desc_from_tensor<'a>( } TensorData::NV12(buf) => { (cast_slice_to_cow(buf.as_slice()), TextureFormat::R8Unorm) + }, + TensorData::Yuv420p(buf) => { + (cast_slice_to_cow(buf.as_slice()), TextureFormat::R8Unorm) } } } @@ -369,6 +375,9 @@ fn general_texture_creation_desc_from_tensor<'a>( TensorData::NV12(_) => { panic!("NV12 cannot be a two channel tensor!"); } + TensorData::Yuv420p(_) => { + panic!("Yuv420p cannot be a two channel tensor!"); + } } } 3 => { @@ -409,6 +418,9 @@ fn general_texture_creation_desc_from_tensor<'a>( TensorData::NV12(_) => { panic!("NV12 cannot be a three channel tensor!"); } + TensorData::Yuv420p(_) => { + panic!("Yuv420p cannot be a three channel tensor!"); + } } } 4 => { @@ -436,6 +448,9 @@ fn general_texture_creation_desc_from_tensor<'a>( TensorData::NV12(_) => { panic!("NV12 cannot be a four channel tensor!"); } + TensorData::Yuv420p(_) => { + panic!("Yuv420p cannot be a four channel tensor!"); + } } } depth => { diff --git a/crates/re_viewer/src/ui/data_ui/image.rs b/crates/re_viewer/src/ui/data_ui/image.rs index 78b83197c5a0..0280737f6923 100644 --- a/crates/re_viewer/src/ui/data_ui/image.rs +++ b/crates/re_viewer/src/ui/data_ui/image.rs @@ -277,7 +277,7 @@ pub fn tensor_summary_ui_grid_contents( | re_log_types::component_types::TensorData::I64(_) | re_log_types::component_types::TensorData::F32(_) | re_log_types::component_types::TensorData::F64(_) - | re_log_types::component_types::TensorData::NV12(_) => {} + | re_log_types::component_types::TensorData::NV12(_) | re_log_types::component_types::TensorData::Yuv420p(_) => {} re_log_types::component_types::TensorData::JPEG(jpeg_bytes) => { re_ui.grid_left_hand_label(ui, "Encoding"); ui.label(format!("{} JPEG", re_format::format_bytes(jpeg_bytes.num_bytes() as _))); @@ -565,6 +565,15 @@ fn tensor_pixel_value_ui( _ => unreachable!("NV12 should only contain u8"), } }), + TensorData::Yuv420p(_) => + tensor.get_yuv420p_pixel(&[y, x]).map(|[r, g, 
b]| { + match (r, g, b) { + (TensorElement::U8(r), TensorElement::U8(g), TensorElement::U8(b)) => { + format!("R: {r}, G: {g}, B: {b}, #{r:02X}{g:02X}{b:02X}") + } + _ => unreachable!("YUV420 should only contain u8"), + } + }), _ => { match &tensor.unit { Some(unit) => { diff --git a/crates/re_viewer/src/ui/device_settings_panel.rs b/crates/re_viewer/src/ui/device_settings_panel.rs index ccb573d65ddf..5fa2a28dd296 100644 --- a/crates/re_viewer/src/ui/device_settings_panel.rs +++ b/crates/re_viewer/src/ui/device_settings_panel.rs @@ -257,7 +257,7 @@ impl DeviceSettingsPanel { None, "Phase unwrap level", &mut phase_unwrapping_level, - 0..=100, + 0..=6, ) .changed() { @@ -355,6 +355,35 @@ impl DeviceSettingsPanel { enable_wiggle_correction, )); } + let mut enable_phase_shuffle_temporal_filter = tof_config + .get_enable_phase_shuffle_temporal_filter() + .unwrap_or(false); + if ctx + .re_ui + .labeled_toggle_switch( + ui, + "Enable phase shuffle temporal filter", + &mut enable_phase_shuffle_temporal_filter, + ) + .changed() + { + tof_config.set_enable_phase_shuffle_temporal_filter(Some( + enable_phase_shuffle_temporal_filter, + )); + } + let mut enable_burst_mode = tof_config.get_enable_burst_mode().unwrap_or(false); + if ctx + .re_ui + .labeled_toggle_switch( + ui, + "Enable burst mode", + &mut enable_burst_mode, + ) + .changed() + { + tof_config.set_enable_burst_mode(Some(enable_burst_mode)); + } + }) }); } diff --git a/crates/re_viewer/src/ui/view_bar_chart/ui.rs b/crates/re_viewer/src/ui/view_bar_chart/ui.rs index 0fd8d42bad0f..493467ab5654 100644 --- a/crates/re_viewer/src/ui/view_bar_chart/ui.rs +++ b/crates/re_viewer/src/ui/view_bar_chart/ui.rs @@ -100,7 +100,7 @@ pub(crate) fn view_bar_chart( component_types::TensorData::F64(data) => { create_bar_chart(ent_path, instance_key, data.iter().copied()) } - component_types::TensorData::JPEG(_) | component_types::TensorData::NV12(_) => { + component_types::TensorData::JPEG(_) | component_types::TensorData::NV12(_) | 
component_types::TensorData::Yuv420p(_) => { warn_once!( "trying to display {:?} data as a bar chart ({:?})", tensor.data, diff --git a/crates/rerun/Cargo.toml b/crates/rerun/Cargo.toml index 1ab2ddad2591..20b1582e3107 100644 --- a/crates/rerun/Cargo.toml +++ b/crates/rerun/Cargo.toml @@ -22,7 +22,7 @@ name = "depthai-viewer" path = "src/main.rs" [features] -default = ["analytics", "glam", "image", "native_viewer", "server", "sdk"] +default = ["glam", "image", "native_viewer", "server", "sdk"] ## Enable telemetry using our analytics SDK. analytics = [ diff --git a/rerun_py/depthai_viewer/_backend/device.py b/rerun_py/depthai_viewer/_backend/device.py index 5dd3615e1fc7..fd7df925f4ef 100644 --- a/rerun_py/depthai_viewer/_backend/device.py +++ b/rerun_py/depthai_viewer/_backend/device.py @@ -32,7 +32,12 @@ get_size_from_resolution, size_to_resolution, ) -from depthai_viewer._backend.device_defaults import oak_d_sr_poe_default, oak_t_default +from depthai_viewer._backend.device_defaults import ( + oak_d_generic_default, + oak_d_lr_default, + oak_d_sr_poe_default, + oak_t_default, +) from depthai_viewer._backend.messages import ( ErrorMessage, InfoMessage, @@ -397,6 +402,10 @@ def update_pipeline(self, runtime_only: bool) -> Message: config = oak_t_default.config elif self._oak.device.getDeviceName() == "OAK-D-SR-POE": config = oak_d_sr_poe_default.config + elif self._oak.device.getDeviceName() == "OAK-D-LR": + config = oak_d_lr_default.config + elif "OAK-D" in self._oak.device.getDeviceName(): + config = oak_d_generic_default.config else: self._create_auto_pipeline_config(config) @@ -422,6 +431,7 @@ def update_pipeline(self, runtime_only: bool) -> Message: self.use_encoding = is_poe or is_usb2 connected_camera_features = self._oak.device.getConnectedCameraFeatures() + create_tof_on = None for cam in config.cameras: print("Creating camera: ", cam) @@ -461,9 +471,7 @@ def update_pipeline(self, runtime_only: bool) -> Message: # Only create a camera node if it is used by 
stereo or AI. if cam.stream_enabled: if dai.CameraSensorType.TOF in camera_features.supportedTypes: - sdk_cam = self._oak.create_tof(cam.board_socket) - self._tof_component = sdk_cam - self._queues.append((sdk_cam, self._oak.queue(sdk_cam.out.main))) + create_tof_on = cam.board_socket elif dai.CameraSensorType.THERMAL in camera_features.supportedTypes: thermal_cam = self._oak.pipeline.create(dai.node.Camera) # Hardcoded for OAK-T. The correct size is needed for correct detection parsing @@ -492,6 +500,28 @@ def update_pipeline(self, runtime_only: bool) -> Message: print("Skipped creating camera:", cam.board_socket, "because no valid sensor resolution was found.") continue + if create_tof_on is not None: + cam_cfg = list(filter(lambda c: c.board_socket == create_tof_on, config.cameras)) + if len(cam_cfg) != 0: + cam_cfg = cam_cfg[0] + cam_cfg.tof_align = dai.CameraBoardSocket.CAM_B # OAK-D-SR-PoE default. + tof_align = list( + filter( + lambda comp_and_q: ( # type: ignore[arg-type] + comp_and_q[0].node.getBoardSocket() == cam_cfg.tof_align + if isinstance(comp_and_q[0], CameraComponent) + else False + ), + self._queues, + ) + ) + tof_align = tof_align[0][0] if tof_align else None + sdk_cam = self._oak.create_tof(create_tof_on, tof_align) + self._tof_component = sdk_cam + self._queues.append((sdk_cam, self._oak.queue(sdk_cam.out.main))) + else: # Should never happen + print("Couldn't find camera config for ToF, can't create ToF.") + if config.stereo: print("Creating depth") stereo_pair = config.stereo.stereo_pair @@ -601,7 +631,7 @@ def update_pipeline(self, runtime_only: bool) -> Message: try: print("Starting pipeline") - self._oak.start(blocking=False) + self._oak.start() except RuntimeError as e: print("Couldn't start pipeline: ", e) return ErrorMessage("Couldn't start pipeline") diff --git a/rerun_py/depthai_viewer/_backend/device_configuration.py b/rerun_py/depthai_viewer/_backend/device_configuration.py index 4fc43ac4b255..0982454ef3db 100644 --- 
a/rerun_py/depthai_viewer/_backend/device_configuration.py +++ b/rerun_py/depthai_viewer/_backend/device_configuration.py @@ -154,6 +154,7 @@ class CameraSensorResolution(Enum): THE_1080_P: str = "THE_1080_P" THE_1200_P: str = "THE_1200_P" THE_1280_P: str = "THE_1280_P" + THE_1280X3848: str = "THE_1280X3848" THE_4_K: str = "THE_4_K" THE_4000X3000: str = "THE_4000X3000" THE_12_MP: str = "THE_12_MP" @@ -180,6 +181,8 @@ class CameraConfiguration(BaseModel): # type: ignore[misc] stream_enabled: bool = True name: str = "" + tof_align: Optional[dai.CameraBoardSocket] = None + class Config: arbitrary_types_allowed = True @@ -239,6 +242,8 @@ class ToFConfig(BaseModel): # type: ignore[misc] enable_optical_correction: Optional[bool] = True enable_temperature_correction: Optional[bool] = False enable_wiggle_correction: Optional[bool] = True + enable_phase_shuffle_temporal_filter: Optional[bool] = True + enable_burst_mode: Optional[bool] = False class Config: arbitrary_types_allowed = True @@ -259,18 +264,24 @@ def dict(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-de "enable_temperature_correction": self.enable_temperature_correction, "enable_wiggle_correction": self.enable_wiggle_correction, "enable_phase_unwrapping": self.enable_phase_unwrapping, + "enable_phase_shuffle_temporal_filter": self.enable_phase_shuffle_temporal_filter, + "enable_burst_mode": self.enable_burst_mode, } def to_dai(self) -> dai.RawToFConfig: cfg = dai.RawToFConfig() - cfg.median = self.median # type: ignore[attr-defined] + cfg.median = self.median # type: ignore[attr-defined, assignment] cfg.phaseUnwrappingLevel = self.phase_unwrapping_level # type: ignore[attr-defined] cfg.phaseUnwrapErrorThreshold = self.phase_unwrap_error_threshold # type: ignore[attr-defined] - cfg.enableFPPNCorrection = self.enable_fppn_correction # type: ignore[attr-defined] - cfg.enableOpticalCorrection = self.enable_optical_correction # type: ignore[attr-defined] - cfg.enableTemperatureCorrection = 
self.enable_temperature_correction # type: ignore[attr-defined] - cfg.enableWiggleCorrection = self.enable_wiggle_correction # type: ignore[attr-defined] - cfg.enablePhaseUnwrapping = self.enable_phase_unwrapping # type: ignore[attr-defined] + cfg.enableFPPNCorrection = self.enable_fppn_correction # type: ignore[attr-defined, assignment] + cfg.enableOpticalCorrection = self.enable_optical_correction # type: ignore[attr-defined, assignment] + cfg.enableTemperatureCorrection = self.enable_temperature_correction # type: ignore[attr-defined, assignment] + cfg.enableWiggleCorrection = self.enable_wiggle_correction # type: ignore[attr-defined, assignment] + cfg.enablePhaseUnwrapping = self.enable_phase_unwrapping # type: ignore[attr-defined, assignment] + cfg.enablePhaseShuffleTemporalFilter = ( + self.enable_phase_shuffle_temporal_filter # type: ignore[attr-defined, assignment] + ) + cfg.enableBurstMode = self.enable_burst_mode # type: ignore[attr-defined, assignment] return cfg @@ -367,6 +378,7 @@ def dict(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-de (1280, 720): CameraSensorResolution.THE_720_P, (1280, 962): CameraSensorResolution.THE_1280_P, # TOF (1280, 800): CameraSensorResolution.THE_800_P, # OV9782 + (1280, 3848): CameraSensorResolution.THE_1280X3848, # TOF (2592, 1944): CameraSensorResolution.THE_5_MP, # OV5645 (1440, 1080): CameraSensorResolution.THE_1440X1080, (1920, 1080): CameraSensorResolution.THE_1080_P, diff --git a/rerun_py/depthai_viewer/_backend/device_defaults/oak_d_generic_default.py b/rerun_py/depthai_viewer/_backend/device_defaults/oak_d_generic_default.py new file mode 100644 index 000000000000..f13c1d546353 --- /dev/null +++ b/rerun_py/depthai_viewer/_backend/device_defaults/oak_d_generic_default.py @@ -0,0 +1,40 @@ +import depthai as dai +from depthai_viewer._backend.device_configuration import ( + AiModelConfiguration, + CameraConfiguration, + CameraSensorResolution, + PipelineConfiguration, + StereoDepthConfiguration, 
+) + +config = PipelineConfiguration( + cameras=[ + CameraConfiguration( + fps=15, + resolution=CameraSensorResolution.THE_1080_P, + kind=dai.CameraSensorType.COLOR, + board_socket=dai.CameraBoardSocket.CAM_A, + name="Color", + ), + CameraConfiguration( + fps=15, + resolution=CameraSensorResolution.THE_480_P, + kind=dai.CameraSensorType.COLOR, + board_socket=dai.CameraBoardSocket.CAM_B, + stream_enabled=True, + name="Left", + ), + CameraConfiguration( + fps=15, + resolution=CameraSensorResolution.THE_480_P, + kind=dai.CameraSensorType.COLOR, + board_socket=dai.CameraBoardSocket.CAM_C, + stream_enabled=True, + name="Right", + ), + ], + stereo=StereoDepthConfiguration( + align=dai.CameraBoardSocket.CAM_A, stereo_pair=(dai.CameraBoardSocket.CAM_B, dai.CameraBoardSocket.CAM_C) + ), + ai_model=AiModelConfiguration(camera=dai.CameraBoardSocket.CAM_A), +) diff --git a/rerun_py/depthai_viewer/_backend/device_defaults/oak_d_lr_default.py b/rerun_py/depthai_viewer/_backend/device_defaults/oak_d_lr_default.py new file mode 100644 index 000000000000..e4bec40e671b --- /dev/null +++ b/rerun_py/depthai_viewer/_backend/device_defaults/oak_d_lr_default.py @@ -0,0 +1,39 @@ +import depthai as dai +from depthai_viewer._backend.device_configuration import ( + CameraConfiguration, + CameraSensorResolution, + PipelineConfiguration, + StereoDepthConfiguration, +) + +config = PipelineConfiguration( + cameras=[ + CameraConfiguration( + fps=10, + resolution=CameraSensorResolution.THE_1200_P, + kind=dai.CameraSensorType.COLOR, + board_socket=dai.CameraBoardSocket.CAM_A, + name="Color", + ), + CameraConfiguration( + fps=10, + resolution=CameraSensorResolution.THE_1200_P, + kind=dai.CameraSensorType.COLOR, + board_socket=dai.CameraBoardSocket.CAM_B, + stream_enabled=True, + name="Left", + ), + CameraConfiguration( + fps=10, + resolution=CameraSensorResolution.THE_1200_P, + kind=dai.CameraSensorType.COLOR, + board_socket=dai.CameraBoardSocket.CAM_C, + stream_enabled=True, + name="Right", + ), + 
], + stereo=StereoDepthConfiguration( + align=dai.CameraBoardSocket.CAM_A, stereo_pair=(dai.CameraBoardSocket.CAM_B, dai.CameraBoardSocket.CAM_C) + ), + ai_model=None, +) diff --git a/rerun_py/depthai_viewer/_backend/main.py b/rerun_py/depthai_viewer/_backend/main.py index 7a876bf51d68..936f2d11611e 100644 --- a/rerun_py/depthai_viewer/_backend/main.py +++ b/rerun_py/depthai_viewer/_backend/main.py @@ -20,14 +20,17 @@ ) from depthai_viewer._backend.store import Store -sentry_sdk.init( # type: ignore[abstract] - dsn="https://37decdc44d584dca906e43ebd7fd1508@sentry.luxonis.com/16", - # Set traces_sample_rate to 1.0 to capture 100% - # of transactions for performance monitoring. - # We recommend adjusting this value in production. - traces_sample_rate=1.0, - release=f"depthai-viewer@{depthai_viewer_version()}", -) +try: + sentry_sdk.init( # type: ignore[abstract] + dsn="https://37decdc44d584dca906e43ebd7fd1508@sentry.luxonis.com/16", + # Set traces_sample_rate to 1.0 to capture 100% + # of transactions for performance monitoring. + # We recommend adjusting this value in production. 
+ traces_sample_rate=1.0, + release=f"depthai-viewer@{depthai_viewer_version()}", + ) +except Exception: # Be generic, a sentry failure should not crash the app + print("Failed to initialize sentry") class DepthaiViewerBack: diff --git a/rerun_py/depthai_viewer/_backend/packet_handler.py b/rerun_py/depthai_viewer/_backend/packet_handler.py index 201f607c8fba..c8957ee05d43 100644 --- a/rerun_py/depthai_viewer/_backend/packet_handler.py +++ b/rerun_py/depthai_viewer/_backend/packet_handler.py @@ -147,14 +147,18 @@ def log_packet( else: print("Unknown packet type:", type(packet)) - def _on_camera_frame(self, packet: FramePacket, board_socket: dai.CameraBoardSocket) -> None: + def _log_img_frame(self, frame: dai.ImgFrame, board_socket: dai.CameraBoardSocket) -> None: viewer.log_rigid3( f"{board_socket.name}/transform", child_from_parent=([0, 0, 0], [1, 0, 0, 0]), xyz="RDF" ) # TODO(filip): Enable the user to lock the camera rotation in the UI - img_frame = packet.frame if packet.msg.getType() == dai.RawImgFrame.Type.RAW8 else packet.msg.getData() - h, w = packet.msg.getHeight(), packet.msg.getWidth() - if packet.msg.getType() == dai.ImgFrame.Type.BITSTREAM: + img_frame = ( + frame.getCvFrame() + if frame.getType() == dai.RawImgFrame.Type.RAW8 or frame.getType() == dai.RawImgFrame.Type.YUV420p + else frame.getData() + ) + h, w = frame.getHeight(), frame.getWidth() + if frame.getType() == dai.ImgFrame.Type.BITSTREAM: img_frame = cv2.cvtColor(cv2.imdecode(img_frame, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB) h, w = img_frame.shape[:2] @@ -166,7 +170,7 @@ def _on_camera_frame(self, packet: FramePacket, board_socket: dai.CameraBoardSoc except Exception: f_len = (w * h) ** 0.5 child_from_parent = np.array([[f_len, 0, w / 2], [0, f_len, h / 2], [0, 0, 1]]) - cam = cam_kind_from_frame_type(packet.msg.getType()) + cam = cam_kind_from_frame_type(frame.getType()) viewer.log_pinhole( f"{board_socket.name}/transform/{cam}/", child_from_parent=child_from_parent, @@ -175,20 +179,31 @@ 
def _on_camera_frame(self, packet: FramePacket, board_socket: dai.CameraBoardSoc ) entity_path = f"{board_socket.name}/transform/{cam}/Image" - if packet.msg.getType() == dai.RawImgFrame.Type.NV12: + if frame.getType() == dai.RawImgFrame.Type.NV12: # or frame.getType() == dai.RawImgFrame.Type.YUV420p + encoding = viewer.ImageEncoding.NV12 viewer.log_encoded_image( entity_path, img_frame, width=w, height=h, - encoding=viewer.ImageEncoding.NV12, + encoding=encoding, ) - elif packet.msg.getType() == dai.RawImgFrame.Type.GRAYF16: + elif frame.getType() == dai.RawImgFrame.Type.YUV420p: + viewer.log_image(entity_path, cv2.cvtColor(img_frame, cv2.COLOR_BGR2RGB)) + elif frame.getType() == dai.RawImgFrame.Type.RAW16: img = img_frame.view(np.float16).reshape(h, w) viewer.log_image(entity_path, img, colormap=viewer.Colormap.Magma, unit="°C") else: viewer.log_image(entity_path, img_frame) + def _on_camera_frame(self, packet: FramePacket, board_socket: dai.CameraBoardSocket) -> None: + if board_socket in list( + map(lambda cam: cam.tof_align, self.store.pipeline_config.cameras if self.store.pipeline_config else []) + ): + # Skip tof aligned frames - they will be logged in _on_tof_packet + return + self._log_img_frame(packet.msg, board_socket) + def on_imu(self, packet: IMUPacket) -> None: gyro: dai.IMUReportGyroscope = packet.gyroscope accel: dai.IMUReportAccelerometer = packet.acceleroMeter @@ -215,24 +230,33 @@ def _on_tof_packet( packet: DisparityDepthPacket, component: ToFComponent, ) -> None: + if packet.aligned_frame: + self._log_img_frame(packet.aligned_frame, dai.CameraBoardSocket(packet.aligned_frame.getInstanceNum())) depth_frame = packet.frame - viewer.log_rigid3( - f"{component.camera_socket.name}/transform", child_from_parent=([0, 0, 0], [1, 0, 0, 0]), xyz="RDF" - ) - try: - intrinsics = self._get_camera_intrinsics(component.camera_socket, 640, 480) - except Exception: - intrinsics = np.array([[471.451, 0.0, 317.897], [0.0, 471.539, 245.027], [0.0, 0.0, 1.0]]) - 
viewer.log_pinhole( - f"{component.camera_socket.name}/transform/tof", - child_from_parent=intrinsics, - width=component.camera_node.getVideoWidth(), - height=component.camera_node.getVideoHeight(), - ) - path = f"{component.camera_socket.name}/transform/tof/Depth" + if packet.aligned_frame: + ent_path_root = dai.CameraBoardSocket(packet.aligned_frame.getInstanceNum()).name + else: + ent_path_root = component.camera_socket.name + ent_path_depth = f"{ent_path_root}/transform" + if not packet.aligned_frame: + viewer.log_rigid3(f"{ent_path_root}/transform", child_from_parent=([0, 0, 0], [1, 0, 0, 0]), xyz="RDF") + try: + intrinsics = self._get_camera_intrinsics(component.camera_socket, 640, 480) + except Exception: + intrinsics = np.array([[471.451, 0.0, 317.897], [0.0, 471.539, 245.027], [0.0, 0.0, 1.0]]) + viewer.log_pinhole( + f"{ent_path_root}/transform/tof", + child_from_parent=intrinsics, + width=component.camera_node.getVideoWidth(), + height=component.camera_node.getVideoHeight(), + ) + ent_path_depth += "/tof/Depth" + else: + cam_kind = cam_kind_from_frame_type(packet.aligned_frame.getType()) + ent_path_depth += f"/{cam_kind}/Depth" viewer.log_depth_image( - path, + ent_path_depth, depth_frame, meter=1e3, min=200.0, diff --git a/rerun_py/depthai_viewer/components/tensor.py b/rerun_py/depthai_viewer/components/tensor.py index 4367bcb48ab0..39a17b36fd50 100644 --- a/rerun_py/depthai_viewer/components/tensor.py +++ b/rerun_py/depthai_viewer/components/tensor.py @@ -25,6 +25,7 @@ class ImageEncoding(Enum): NV12 = "NV12" + # Yuv420p = "Yuv420p" class Colormap(Enum): diff --git a/rerun_py/depthai_viewer/install_requirements.py b/rerun_py/depthai_viewer/install_requirements.py index 7acc644a1f7a..7f9529ce0a58 100644 --- a/rerun_py/depthai_viewer/install_requirements.py +++ b/rerun_py/depthai_viewer/install_requirements.py @@ -91,9 +91,10 @@ def create_venv_and_install_dependencies() -> None: "-m", "pip", "install", - 
"depthai-sdk==1.13.1.dev0+dd1a6d8a797107d24b9b91b7b63c3fcffb932712", - "--extra-index-url", - "https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local/", + "depthai-sdk==1.14.0", + # "depthai-sdk==1.13.1.dev0+dd1a6d8a797107d24b9b91b7b63c3fcffb932712", + # "--extra-index-url", + # "https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local/", # "git+https://github.com/luxonis/depthai@tof_decoding#subdirectory=depthai_sdk", ], check=True, diff --git a/rerun_py/depthai_viewer/log/image.py b/rerun_py/depthai_viewer/log/image.py index 8040dad29fe0..1efb29a73ec7 100644 --- a/rerun_py/depthai_viewer/log/image.py +++ b/rerun_py/depthai_viewer/log/image.py @@ -254,7 +254,7 @@ def log_encoded_image( """ image = np.array(image, copy=False) tensor_height = height - if encoding == ImageEncoding.NV12: + if encoding == ImageEncoding.NV12: # or encoding == ImageEncoding.Yuv420p: # TODO(filip): This doesn't seem to be completely true as the image is still displayed correctly # tmp_height = int(height * 1.5) # if tmp_height % 2 != 0: diff --git a/rerun_py/depthai_viewer/requirements.txt b/rerun_py/depthai_viewer/requirements.txt index 259ae947a792..793536b93c9a 100644 --- a/rerun_py/depthai_viewer/requirements.txt +++ b/rerun_py/depthai_viewer/requirements.txt @@ -4,7 +4,7 @@ setuptools ahrs # depthai_sdk conflicts with depthai, so it's installed seperatelly in __main__.py --extra-index-url https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local -depthai==2.25.0.0.dev0+f1cd4d974e041f1b3ea84480afcdc5a8e3975299 +depthai==2.26 websockets pydantic==1.9 deprecated