From c3af10530b98bd8beff4a7ac0b4f6dfdee9b46c0 Mon Sep 17 00:00:00 2001 From: Andreas Reich Date: Wed, 9 Oct 2024 10:59:01 +0200 Subject: [PATCH] Introduce image data conversion pipeline, taking over existing YUV conversions (#7640) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### What * Major part of https://github.com/rerun-io/rerun/issues/7608 * Related to https://github.com/rerun-io/rerun/issues/3931 * since it makes the rectangle shader smaller, which kept acting up in the past Tested via `pixi run -e py python ./tests/python/chroma_downsample_image/main.py`: https://rerun.io/viewer/pr/7640?url=https%3A%2F%2Fstatic.rerun.io%2Frrd%2F0.19%2Fchroma_formats_ad9697d05933f4104fbbe66af23073ad4e9e4a58.rrd ![image](https://github.com/user-attachments/assets/76756c5f-75db-44b7-8db0-f04256f72e3a) The new functionality is tightly integrated into `TextureManager2D`, which now ingests all incoming data via a new internal method `transfer_image_data_to_texture`. This in turn takes care of data upload via `GpuReadCpuWriteBelt` (new! previously, we would use `queue.write_texture` in this case) and, if needed, runs a gpu-based conversion. Gpu-based conversion is for convenience implemented like a `Renderer` - while it is not _used_ like a typical `Renderer`, the lifecycle (create lazily once, store on context, feed with data bundles [...]) was so similar that it made sense to me to use this existing pattern even though it's not a perfect match. **A curious side effect of this is that you can now put chroma downsampled textures on custom meshes!** Next steps: * support formats required for AV1 * while at it, check whether things can be simplified along the way * move BGR handling into the new pipeline * revisit the re_renderer-side input data description. Not sure if I'm happy about it drifting away from the user-facing one! * once we're there, we're also very close to getting rid of `SourceImageDataFormat::WgpuCompatible`. It's not strictly necessary though; this might yet prove a convenient loophole, although its presence makes me a little bit nervous (see code comments for details) 🤔 * consider exposing color primaries in the public API * the only thing keeping us from it is adding a new enum to the color format! * should it be called "primaries" or "color space" 🤔 ### Checklist * [x] test on web Mac Chrome/Firefox/Safari * [x] test on web Windows Chrome/Firefox * [ ] test on web Linux Chrome/Firefox (@jleibs plz 🥺 ) * [x] I have read and agree to [Contributor Guide](https://github.com/rerun-io/rerun/blob/main/CONTRIBUTING.md) and the [Code of Conduct](https://github.com/rerun-io/rerun/blob/main/CODE_OF_CONDUCT.md) * [x] I've included a screenshot or gif (if applicable) * [x] I have tested the web demo (if applicable): * Using examples from latest `main` build: [rerun.io/viewer](https://rerun.io/viewer/pr/7640?manifest_url=https://app.rerun.io/version/main/examples_manifest.json) * Using full set of examples from `nightly` build: [rerun.io/viewer](https://rerun.io/viewer/pr/7640?manifest_url=https://app.rerun.io/version/nightly/examples_manifest.json) * [x] The PR title and labels are set such as to maximize their usefulness for the next release's CHANGELOG * [x] If applicable, add a new check to the [release checklist](https://github.com/rerun-io/rerun/blob/main/tests/python/release_checklist)! 
* [x] I have noted any breaking changes to the log API in `CHANGELOG.md` and the migration guide - [PR Build Summary](https://build.rerun.io/pr/7640) - [Recent benchmark results](https://build.rerun.io/graphs/crates.html) - [Wasm size tracking](https://build.rerun.io/graphs/sizes.html) To run all checks from `main`, comment on the PR with `@rerun-bot full-check`. --- .../shader/conversions/yuv_converter.wgsl | 112 ++++++ .../viewer/re_renderer/shader/decodings.wgsl | 70 ---- .../re_renderer/shader/rectangle_fs.wgsl | 17 - crates/viewer/re_renderer/src/context.rs | 3 +- .../viewer/re_renderer/src/importer/gltf.rs | 28 +- crates/viewer/re_renderer/src/renderer/mod.rs | 2 + .../re_renderer/src/renderer/rectangles.rs | 28 +- .../image_data_to_texture.rs | 334 ++++++++++++++++++ .../re_renderer/src/resource_managers/mod.rs | 10 +- .../src/resource_managers/texture_manager.rs | 281 ++++----------- .../src/resource_managers/yuv_converter.rs | 333 +++++++++++++++++ .../re_renderer/src/workspace_shaders.rs | 12 +- crates/viewer/re_renderer_examples/2d.rs | 11 +- .../re_renderer_examples/depth_cloud.rs | 20 +- .../src/picking_ui_pixel.rs | 2 +- .../src/visualizers/videos.rs | 11 +- .../src/tensor_slice_to_gpu.rs | 13 +- .../src/gpu_bridge/colormap.rs | 7 +- .../src/gpu_bridge/image_to_gpu.rs | 143 ++++---- .../re_viewer_context/src/gpu_bridge/mod.rs | 18 +- .../re_viewer_context/src/image_info.rs | 4 + 21 files changed, 984 insertions(+), 475 deletions(-) create mode 100644 crates/viewer/re_renderer/shader/conversions/yuv_converter.wgsl delete mode 100644 crates/viewer/re_renderer/shader/decodings.wgsl create mode 100644 crates/viewer/re_renderer/src/resource_managers/image_data_to_texture.rs create mode 100644 crates/viewer/re_renderer/src/resource_managers/yuv_converter.rs diff --git a/crates/viewer/re_renderer/shader/conversions/yuv_converter.wgsl b/crates/viewer/re_renderer/shader/conversions/yuv_converter.wgsl new file mode 100644 index 000000000000..1f6d0701be20 --- /dev/null +++ b/crates/viewer/re_renderer/shader/conversions/yuv_converter.wgsl @@ -0,0 +1,112 @@ +#import <../types.wgsl> +#import <../screen_triangle_vertex.wgsl> + +struct UniformBuffer { + format: u32, + primaries: u32, + target_texture_size: vec2u, +}; + +@group(0) @binding(0) +var<uniform> uniform_buffer: UniformBuffer; + +@group(0) @binding(1) +var input_texture: texture_2d<u32>; + + +const YUV_LAYOUT_Y_UV = 0u; +const YUV_LAYOUT_YUYV16 = 1u; + +const PRIMARIES_BT601 = 0u; +const PRIMARIES_BT709 = 1u; + + +/// Returns sRGB from YUV color. +/// +/// This conversion mirrors the function in `crates/store/re_types/src/datatypes/tensor_data_ext.rs` +/// +/// Specifying the color standard should be exposed in the future [#3541](https://github.com/rerun-io/rerun/pull/3541) +fn srgb_from_yuv(yuv: vec3f, primaries: u32) -> vec3f { + // rescale YUV values + let y = (yuv[0] - 16.0) / 219.0; + let u = (yuv[1] - 128.0) / 224.0; + let v = (yuv[2] - 128.0) / 224.0; + + var rgb: vec3f; + + switch (primaries) { + // BT.601 (aka. SDTV, aka. Rec.601). wiki: https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion + // Also note according to https://en.wikipedia.org/wiki/SRGB#sYCC_extended-gamut_transformation + // > Although the RGB color primaries are based on BT.709, + // > the equations for transformation from sRGB to sYCC and vice versa are based on BT.601. + case PRIMARIES_BT601: { + rgb.r = y + 1.402 * v; + rgb.g = y - 0.344 * u - 0.714 * v; + rgb.b = y + 1.772 * u; + } + + // BT.709 (aka. HDTV, aka. Rec.709). 
wiki: https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.709_conversion + case PRIMARIES_BT709: { + rgb.r = y + 1.575 * v; + rgb.g = y - 0.187 * u - 0.468 * v; + rgb.b = y + 1.856 * u; + } + + default: { + rgb = ERROR_RGBA.rgb; + } + } + + return clamp(rgb, vec3f(0.0), vec3f(1.0)); +} + +/// Extracts YUV data from a chroma subsampling encoded texture at specific coordinates. +/// +/// See also `enum YuvPixelLayout` in `yuv_converter.rs` for a specification of +/// the expected data layout. +fn sample_yuv(yuv_layout: u32, texture: texture_2d<u32>, coords: vec2f) -> vec3f { + let texture_dim = vec2f(textureDimensions(texture).xy); + var yuv: vec3f; + + switch (yuv_layout) { + case YUV_LAYOUT_Y_UV: { + let uv_offset = u32(floor(texture_dim.y / 1.5)); + let uv_row = u32(coords.y / 2); + var uv_col = u32(coords.x / 2) * 2u; + + yuv[0] = f32(textureLoad(texture, vec2u(coords), 0).r); + yuv[1] = f32(textureLoad(texture, vec2u(u32(uv_col), uv_offset + uv_row), 0).r); + yuv[2] = f32(textureLoad(texture, vec2u((u32(uv_col) + 1u), uv_offset + uv_row), 0).r); + } + + case YUV_LAYOUT_YUYV16: { + // texture is 2 * width * height + // every 4 bytes is 2 pixels + let uv_row = u32(coords.y); + // multiply by 2 because the width is multiplied by 2 + let y_col = u32(coords.x) * 2u; + yuv[0] = f32(textureLoad(texture, vec2u(y_col, uv_row), 0).r); + + // at odd pixels we're in the second half of the yuyv block, offset back by 2 + let uv_col = y_col - u32(coords.x % 2) * 2u; + yuv[1] = f32(textureLoad(texture, vec2u(uv_col + 1u, uv_row), 0).r); + yuv[2] = f32(textureLoad(texture, vec2u(uv_col + 3u, uv_row), 0).r); + } + + default: { + yuv = vec3f(0.0, 0.0, 0.0); // ERROR_RGBA doesn't apply here. + } + } + + return yuv; +} + +@fragment +fn fs_main(in: FragmentInput) -> @location(0) vec4f { + let coords = vec2f(uniform_buffer.target_texture_size) * in.texcoord; + + let yuv = sample_yuv(uniform_buffer.format, input_texture, coords); + let rgb = srgb_from_yuv(yuv, uniform_buffer.primaries); + + return vec4f(rgb, 1.0); +} diff --git a/crates/viewer/re_renderer/shader/decodings.wgsl b/crates/viewer/re_renderer/shader/decodings.wgsl deleted file mode 100644 index 2fa1079b5756..000000000000 --- a/crates/viewer/re_renderer/shader/decodings.wgsl +++ /dev/null @@ -1,70 +0,0 @@ -#import <./types.wgsl> - - -/// Loads an RGBA texel from a texture holding an NV12 or YUY2 encoded image at the given screen space coordinates. -fn decode_nv12_or_yuy2(sample_type: u32, texture: texture_2d<u32>, coords: vec2i) -> vec4f { - let texture_dim = vec2f(textureDimensions(texture).xy); - var y: f32; - var u: f32; - var v: f32; - - // WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! - // NO MORE SAMPLE TYPES CAN BE ADDED TO THIS SHADER! - // The shader is already too large and adding more sample types will push us over the size limit. 
- // See: https://github.com/rerun-io/rerun/issues/3931, https://github.com/rerun-io/rerun/issues/5073 - if sample_type == SAMPLE_TYPE_NV12 { - let uv_offset = u32(floor(texture_dim.y / 1.5)); - let uv_row = u32(coords.y / 2); - var uv_col = u32(coords.x / 2) * 2u; - - y = f32(textureLoad(texture, vec2u(coords), 0).r); - u = f32(textureLoad(texture, vec2u(u32(uv_col), uv_offset + uv_row), 0).r); - v = f32(textureLoad(texture, vec2u((u32(uv_col) + 1u), uv_offset + uv_row), 0).r); - } else if sample_type == SAMPLE_TYPE_YUY2 { - // texture is 2 * width * height - // every 4 bytes is 2 pixels - let uv_row = u32(coords.y); - // multiply by 2 because the width is multiplied by 2 - let y_col = u32(coords.x) * 2u; - y = f32(textureLoad(texture, vec2u(y_col, uv_row), 0).r); - - // at odd pixels we're in the second half of the yuyu block, offset back by 2 - let uv_col = y_col - u32(coords.x % 2) * 2u; - u = f32(textureLoad(texture, vec2u(uv_col + 1u, uv_row), 0).r); - v = f32(textureLoad(texture, vec2u(uv_col + 3u, uv_row), 0).r); - } - // WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! - - let rgb = rgb_from_yuv(vec3f(y, u, v)); - - return vec4f(rgb, 1.0); -} - - -/// Returns sRGB from YUV color. -/// -/// This conversion mirrors the function in `crates/store/re_types/src/datatypes/tensor_data_ext.rs` -/// -/// Specifying the color standard should be exposed in the future [#3541](https://github.com/rerun-io/rerun/pull/3541) -fn rgb_from_yuv(yuv: vec3f) -> vec3f { - // rescale YUV values - let y = (yuv.x - 16.0) / 219.0; - let u = (yuv.y - 128.0) / 224.0; - let v = (yuv.z - 128.0) / 224.0; - - // BT.601 (aka. SDTV, aka. Rec.601). wiki: https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion - let r = y + 1.402 * v; - let g = y - 0.344 * u - 0.714 * v; - let b = y + 1.772 * u; - - // BT.709 (aka. HDTV, aka. Rec.709). wiki: https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.709_conversion - // let r = y + 1.575 * v; - // let g = y - 0.187 * u - 0.468 * v; - // let b = y + 1.856 * u; - - return vec3f( - clamp(r, 0.0, 1.0), - clamp(g, 0.0, 1.0), - clamp(b, 0.0, 1.0) - ); -} diff --git a/crates/viewer/re_renderer/shader/rectangle_fs.wgsl b/crates/viewer/re_renderer/shader/rectangle_fs.wgsl index a670524199a3..e3dcc97b7e95 100644 --- a/crates/viewer/re_renderer/shader/rectangle_fs.wgsl +++ b/crates/viewer/re_renderer/shader/rectangle_fs.wgsl @@ -1,11 +1,6 @@ #import <./colormap.wgsl> #import <./rectangle.wgsl> #import <./utils/srgb.wgsl> -#import <./decodings.wgsl> - -// WARNING! Adding anything else to this shader is very likely to push us over a size threshold that -// causes the failure reported in: https://github.com/rerun-io/rerun/issues/3931 -// Make sure any changes are tested in Chrome on Linux using the Intel Mesa driver. 
fn is_magnifying(pixel_coord: vec2f) -> bool { return fwidth(pixel_coord.x) < 1.0; @@ -86,10 +81,6 @@ fn fs_main(in: VertexOut) -> @location(0) vec4f { texture_dimensions = vec2f(textureDimensions(texture_sint).xy); } else if rect_info.sample_type == SAMPLE_TYPE_UINT { texture_dimensions = vec2f(textureDimensions(texture_uint).xy); - } else if rect_info.sample_type == SAMPLE_TYPE_NV12 { - texture_dimensions = vec2f(textureDimensions(texture_uint).xy); - } else if rect_info.sample_type == SAMPLE_TYPE_YUY2 { - texture_dimensions = vec2f(textureDimensions(texture_uint).xy); } let coord = in.texcoord * texture_dimensions; @@ -141,14 +132,6 @@ fn fs_main(in: VertexOut) -> @location(0) vec4f { vec4f(textureLoad(texture_uint, v01_coord, 0)), vec4f(textureLoad(texture_uint, v10_coord, 0)), vec4f(textureLoad(texture_uint, v11_coord, 0))); - } else if rect_info.sample_type == SAMPLE_TYPE_NV12 || rect_info.sample_type == SAMPLE_TYPE_YUY2{ - normalized_value = decode_color_and_filter_nearest_or_bilinear( - filter_nearest, - coord, - decode_nv12_or_yuy2(rect_info.sample_type, texture_uint, v00_coord), - decode_nv12_or_yuy2(rect_info.sample_type, texture_uint, v01_coord), - decode_nv12_or_yuy2(rect_info.sample_type, texture_uint, v10_coord), - decode_nv12_or_yuy2(rect_info.sample_type, texture_uint, v11_coord)); } else { return ERROR_RGBA; // unknown sample type } diff --git a/crates/viewer/re_renderer/src/context.rs b/crates/viewer/re_renderer/src/context.rs index 648389abb5ce..53a38ec3e27c 100644 --- a/crates/viewer/re_renderer/src/context.rs +++ b/crates/viewer/re_renderer/src/context.rs @@ -217,8 +217,7 @@ impl RenderContext { } let resolver = crate::new_recommended_file_resolver(); - let texture_manager_2d = - TextureManager2D::new(device.clone(), queue.clone(), &gpu_resources.textures); + let texture_manager_2d = TextureManager2D::new(&device, &queue, &gpu_resources.textures); let active_frame = ActiveFrameContext { before_view_builder_encoder: Mutex::new(FrameGlobalCommandEncoder::new(&device)), diff --git a/crates/viewer/re_renderer/src/importer/gltf.rs b/crates/viewer/re_renderer/src/importer/gltf.rs index 9098a1522411..c4bcd41b4bc2 100644 --- a/crates/viewer/re_renderer/src/importer/gltf.rs +++ b/crates/viewer/re_renderer/src/importer/gltf.rs @@ -8,7 +8,7 @@ use smallvec::SmallVec; use crate::{ mesh::{GpuMesh, Material, Mesh, MeshError}, renderer::MeshInstance, - resource_managers::{GpuTexture2D, Texture2DCreationDesc, TextureManager2D}, + resource_managers::{GpuTexture2D, ImageDataDesc, TextureManager2D}, RenderContext, Rgba32Unmul, }; @@ -84,7 +84,7 @@ pub fn load_gltf_from_buffer( #[cfg(not(debug_assertions))] let texture_names = ""; - let texture = Texture2DCreationDesc { + let texture = ImageDataDesc { label: if texture_names.is_empty() { format!("unnamed gltf image in {mesh_name}") } else { @@ -92,23 +92,17 @@ pub fn load_gltf_from_buffer( } .into(), data: data.into(), - format, - width: image.width, - height: image.height, + format: format.into(), + width_height: [image.width, image.height], }; - images_as_textures.push( - match ctx - .texture_manager_2d - .create(&ctx.gpu_resources.textures, &texture) - { - Ok(texture) => texture, - Err(err) => { - re_log::error!("Failed to create texture: {err}"); - ctx.texture_manager_2d.white_texture_unorm_handle().clone() - } - }, - ); + images_as_textures.push(match ctx.texture_manager_2d.create(ctx, texture) { + Ok(texture) => texture, + Err(err) => { + re_log::error!("Failed to create texture: {err}"); + 
ctx.texture_manager_2d.white_texture_unorm_handle().clone() + } + }); } let mut meshes = HashMap::with_capacity(doc.meshes().len()); diff --git a/crates/viewer/re_renderer/src/renderer/mod.rs b/crates/viewer/re_renderer/src/renderer/mod.rs index 24f770821c9f..6ab0caec0d25 100644 --- a/crates/viewer/re_renderer/src/renderer/mod.rs +++ b/crates/viewer/re_renderer/src/renderer/mod.rs @@ -82,6 +82,8 @@ pub trait Renderer { } /// Gets or creates a vertex shader module for drawing a screen filling triangle. +/// +/// The entry point of this shader is `main`. pub fn screen_triangle_vertex_shader( ctx: &RenderContext, ) -> crate::wgpu_resources::GpuShaderModuleHandle { diff --git a/crates/viewer/re_renderer/src/renderer/rectangles.rs b/crates/viewer/re_renderer/src/renderer/rectangles.rs index 2b3c60dd2f51..595da865b018 100644 --- a/crates/viewer/re_renderer/src/renderer/rectangles.rs +++ b/crates/viewer/re_renderer/src/renderer/rectangles.rs @@ -47,11 +47,9 @@ pub enum TextureFilterMin { } /// Describes how the color information is encoded in the texture. +// TODO(#7608): to be replaced by re_renderer based on-input conversion. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum ShaderDecoding { - Nv12, - Yuy2, - /// Do BGR(A)->RGB(A) conversion in the shader. Bgr, } @@ -145,17 +143,7 @@ impl ColormappedTexture { } pub fn width_height(&self) -> [u32; 2] { - match self.shader_decoding { - Some(ShaderDecoding::Nv12) => { - let [width, height] = self.texture.width_height(); - [width, height * 2 / 3] - } - Some(ShaderDecoding::Yuy2) => { - let [width, height] = self.texture.width_height(); - [width / 2, height] - } - Some(ShaderDecoding::Bgr) | None => self.texture.width_height(), - } + self.texture.width_height() } } @@ -237,8 +225,6 @@ mod gpu_data { const SAMPLE_TYPE_FLOAT: u32 = 1; const SAMPLE_TYPE_SINT: u32 = 2; const SAMPLE_TYPE_UINT: u32 = 3; - const SAMPLE_TYPE_NV12: u32 = 4; - const SAMPLE_TYPE_YUY2: u32 = 5; // How do we do colormapping? const COLOR_MAPPER_OFF_GRAYSCALE: u32 = 1; @@ -318,15 +304,7 @@ mod gpu_data { let sample_type = match texture_format.sample_type(None, None) { Some(wgpu::TextureSampleType::Float { .. }) => SAMPLE_TYPE_FLOAT, Some(wgpu::TextureSampleType::Sint) => SAMPLE_TYPE_SINT, - Some(wgpu::TextureSampleType::Uint) => { - if shader_decoding == &Some(super::ShaderDecoding::Nv12) { - SAMPLE_TYPE_NV12 - } else if shader_decoding == &Some(super::ShaderDecoding::Yuy2) { - SAMPLE_TYPE_YUY2 - } else { - SAMPLE_TYPE_UINT - } - } + Some(wgpu::TextureSampleType::Uint) => SAMPLE_TYPE_UINT, _ => { return Err(RectangleError::TextureFormatNotSupported(texture_format)); } diff --git a/crates/viewer/re_renderer/src/resource_managers/image_data_to_texture.rs b/crates/viewer/re_renderer/src/resource_managers/image_data_to_texture.rs new file mode 100644 index 000000000000..42d79c78ecf7 --- /dev/null +++ b/crates/viewer/re_renderer/src/resource_managers/image_data_to_texture.rs @@ -0,0 +1,334 @@ +use super::yuv_converter::{YuvFormatConversionTask, YuvPixelLayout}; +use crate::{ + renderer::DrawError, + wgpu_resources::{GpuTexture, TextureDesc}, + DebugLabel, RenderContext, Texture2DBufferInfo, +}; + +/// Type of color primaries a given image is in. +/// +/// This applies both to YUV and RGB formats, but if not specified otherwise +/// we assume BT.709 primaries for all RGB(A) 8 bits per channel content (details below on [`ColorPrimaries::Bt709`]). +/// Since with YUV content the color space is often less clear, we always explicitly +/// specify it. 
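+///
+/// As a rule of thumb: 8-bit JPEG-style YCbCr content is conventionally interpreted
+/// as BT.601, while most HD video content uses BT.709.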
+/// +/// Ffmpeg's documentation has a short & good overview of these relationships: +/// <https://trac.ffmpeg.org/wiki/colorspace> +/// +/// Values need to be kept in sync with `yuv_converter.wgsl` +#[derive(Clone, Copy, Debug)] +pub enum ColorPrimaries { + /// BT.601 (aka. SDTV, aka. Rec.601) + /// + /// Wiki: <https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion> + Bt601 = 0, + + /// BT.709 (aka. HDTV, aka. Rec.709) + /// + /// Wiki: <https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.709_conversion> + /// + /// These are the same primaries we usually assume and use for all our rendering + /// since they are the same primaries used by sRGB. + /// + /// The OETF/EOTF function is different, + /// but for all other purposes they are the same. + /// (The only reason for us to convert to optical units ("linear" instead of "gamma") is for + /// lighting & tonemapping where we typically start out with an sRGB image!) + Bt709 = 1, + // + // Not yet supported. These vary a lot more from the other two! + // + // /// BT.2020 (aka. PQ, aka. Rec.2020) + // /// + // /// Wiki: <https://en.wikipedia.org/wiki/Rec._2020> + // BT2020_ConstantLuminance, + // BT2020_NonConstantLuminance, +} + +/// Image data format that can be converted to a wgpu texture. +/// +/// Names follow a similar convention as Facebook's Ocean library. +// TODO(andreas): Right now this combines both color space and pixel format. Consider separating them similar to how we do on user facing APIs. +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, Debug)] +pub enum SourceImageDataFormat { + /// The source format is already in a wgpu compatible format. + /// + /// ⚠️ Just because a format is listed in `wgpu::TextureFormat` doesn't mean we can use it on the currently active backend. + /// TODO(andreas): This is a temporary measure until we cover what rerun covers. + /// We'd really like incoming data to not reason with [`wgpu::TextureFormat`] since it's so hard to know + /// what's appropriate & available for a given device. + WgpuCompatible(wgpu::TextureFormat), + + /// YUV (== `YCbCr`) formats, typically using chroma downsampling. + Yuv { + format: YuvPixelLayout, + primaries: ColorPrimaries, + }, + // + // TODO(#7608): Add rgb (3 channels!) formats. +} + +impl From<wgpu::TextureFormat> for SourceImageDataFormat { + fn from(format: wgpu::TextureFormat) -> Self { + Self::WgpuCompatible(format) + } +} + +/// Error that can occur when converting image data to a texture. +#[derive(thiserror::Error, Debug)] +pub enum ImageDataToTextureError { + #[error("Texture {0:?} has zero width or height!")] + ZeroSize(DebugLabel), + + #[error( + "Texture {label:?} was {width}x{height}, larger than the max of {max_texture_dimension_2d}" + )] + TooLarge { + label: DebugLabel, + width: u32, + height: u32, + max_texture_dimension_2d: u32, + }, + + #[error( + "Invalid data length for texture {label:?}. Expected {expected} bytes, got {actual} bytes" + )] + InvalidDataLength { + label: DebugLabel, + expected: usize, + actual: usize, + }, + + #[error(transparent)] + CpuWriteGpuReadError(#[from] crate::allocator::CpuWriteGpuReadError), + + #[error("Texture {label:?} has a format {format:?} that data can't be transferred to!")] + UnsupportedFormatForTransfer { + label: DebugLabel, + format: wgpu::TextureFormat, + }, + + #[error("Gpu-based conversion for texture {label:?} did not succeed: {err}")] + GpuBasedConversionError { label: DebugLabel, err: DrawError }, + + // TODO(andreas): As we stop using `wgpu::TextureFormat` for input, this should become obsolete. + #[error("Unsupported texture format {0:?}")] + UnsupportedTextureFormat(wgpu::TextureFormat), +} + +/// Describes image data for the purpose of creating a 2D texture. 
+/// +/// Arbitrary (potentially gpu based) conversions may be performed to upload the data to the GPU. +pub struct ImageDataDesc<'a> { + pub label: DebugLabel, + + /// Data for the highest mipmap level. + /// + /// Data is expected to be tightly packed. + /// I.e. it is *not* padded according to wgpu buffer->texture transfer rules, padding will happen on the fly if necessary. + /// TODO(andreas): This should be a kind of factory function/builder instead which gets target memory passed in. + pub data: std::borrow::Cow<'a, [u8]>, + pub format: SourceImageDataFormat, + + /// The size of the resulting output texture / the semantic size of the image data. + /// + /// The distinction is particularly important for planar formats, + /// which may be represented as a larger texture than the image they represent. + /// With the output always being a ("mainstream" gpu readable) texture format, the output texture's + /// width/height is the semantic width/height of the image data! + pub width_height: [u32; 2], + //generate_mip_maps: bool, // TODO(andreas): generate mipmaps! +} + +impl<'a> ImageDataDesc<'a> { + fn validate(&self, limits: &wgpu::Limits) -> Result<(), ImageDataToTextureError> { + let Self { + label, + data, + format, + width_height, + } = self; + + if width_height[0] == 0 || width_height[1] == 0 { + return Err(ImageDataToTextureError::ZeroSize(label.clone())); + } + + let max_texture_dimension_2d = limits.max_texture_dimension_2d; + if width_height[0] > max_texture_dimension_2d || width_height[1] > max_texture_dimension_2d + { + return Err(ImageDataToTextureError::TooLarge { + label: label.clone(), + width: width_height[0], + height: width_height[1], + max_texture_dimension_2d, + }); + } + + let num_pixels = width_height[0] as usize * width_height[1] as usize; + let expected_num_bytes = match format { + SourceImageDataFormat::WgpuCompatible(format) => { + num_pixels + * format + .block_copy_size(None) + .ok_or(ImageDataToTextureError::UnsupportedTextureFormat(*format))? + as usize + } + SourceImageDataFormat::Yuv { format, .. } => { + format.num_data_buffer_bytes(*width_height) + } + }; + + // TODO(andreas): Nv12 needs height divisible by 2? + if data.len() != expected_num_bytes { + return Err(ImageDataToTextureError::InvalidDataLength { + label: label.clone(), + expected: expected_num_bytes, + actual: data.len(), + }); + } + + Ok(()) + } +} + +/// Takes raw image data and transfers & converts it to a GPU texture. +/// +/// Schedules render passes to convert the data to a samplable texture if needed. +/// +/// Generally, we currently do *not* use sRGB converting formats like [`wgpu::TextureFormat::Rgba8UnormSrgb`] in order to… +/// * have the same shader code path for high precision formats (e.g. an f16 texture that _still_ encodes sRGB data) +/// * handle alpha pre-multiply on the fly (needs to happen before sRGB decode to linear) +/// +/// Implementation note: +/// Since we're targeting WebGL, all data always has to be uploaded into textures (we can't use raw buffers!). +/// Buffer->Texture copies have restrictions on row padding, so any approach where we first +/// allocate gpu readable memory and hand it to the user would make the API a lot more complicated. 
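+///
+/// Illustrative call sketch (an example under assumptions, not part of this patch:
+/// `nv12_bytes` is a hypothetical 640x480 NV12 buffer, `ctx` an in-scope [`RenderContext`]):
+/// ```ignore
+/// let texture = transfer_image_data_to_texture(
+///     ctx,
+///     ImageDataDesc {
+///         label: "nv12 frame".into(),
+///         // Y plane followed by interleaved UV: 640 * 480 * 3 / 2 = 460800 bytes.
+///         data: nv12_bytes.into(),
+///         format: SourceImageDataFormat::Yuv {
+///             format: YuvPixelLayout::Y_UV12,
+///             primaries: ColorPrimaries::Bt601,
+///         },
+///         width_height: [640, 480],
+///     },
+/// )?;
+/// ```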
+pub fn transfer_image_data_to_texture( + ctx: &RenderContext, + image_data: ImageDataDesc<'_>, +) -> Result<GpuTexture, ImageDataToTextureError> { + re_tracing::profile_function!(); + + image_data.validate(&ctx.device.limits())?; + + let ImageDataDesc { + label, + data, + format: source_format, + width_height: output_width_height, + } = image_data; + + // Determine size of the texture the image data is uploaded into. + // Reminder: We can't use raw buffers because of WebGL compatibility. + let [data_texture_width, data_texture_height] = match source_format { + SourceImageDataFormat::WgpuCompatible(_) => output_width_height, + SourceImageDataFormat::Yuv { format, .. } => { + format.data_texture_width_height(output_width_height) + } + }; + let data_texture_format = match source_format { + SourceImageDataFormat::WgpuCompatible(format) => format, + SourceImageDataFormat::Yuv { format, .. } => format.data_texture_format(), + }; + + // Allocate gpu belt data and upload it. + let data_texture_label = match source_format { + SourceImageDataFormat::WgpuCompatible(_) => label.clone(), + SourceImageDataFormat::Yuv { .. } => format!("{label}_source_data").into(), + }; + let data_texture = ctx.gpu_resources.textures.alloc( + &ctx.device, + &TextureDesc { + label: data_texture_label, + size: wgpu::Extent3d { + width: data_texture_width, + height: data_texture_height, + depth_or_array_layers: 1, + }, + mip_level_count: 1, // We don't have mipmap level generation yet! + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format: data_texture_format, + usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST, + }, + ); + copy_data_to_texture(ctx, &data_texture, data.as_ref())?; + + // Build a converter task, feeding in the raw data. + let converter_task = match source_format { + SourceImageDataFormat::WgpuCompatible(_) => { + // No further conversion needed, we're done here! + return Ok(data_texture); + } + SourceImageDataFormat::Yuv { format, primaries } => YuvFormatConversionTask::new( + ctx, + format, + primaries, + &data_texture, + &label, + output_width_height, + ), + }; + + // Once there are different gpu-based conversions, we should probably trait-ify this so we can keep the basic steps. + // Note that we execute the task right away, but the way things are set up (by means of using the `Renderer` framework) + // it would be fairly easy to schedule this differently! + let output_texture = converter_task + .convert_input_data_to_texture(ctx) + .map_err(|err| ImageDataToTextureError::GpuBasedConversionError { label, err })?; + + Ok(output_texture) +} + +fn copy_data_to_texture( + render_ctx: &RenderContext, + data_texture: &GpuTexture, + data: &[u8], +) -> Result<(), ImageDataToTextureError> { + re_tracing::profile_function!(); + + let buffer_info = + Texture2DBufferInfo::new(data_texture.texture.format(), data_texture.texture.size()); + + let mut cpu_write_gpu_read_belt = render_ctx.cpu_write_gpu_read_belt.lock(); + let mut gpu_read_buffer = cpu_write_gpu_read_belt.allocate::<u8>( + &render_ctx.device, + &render_ctx.gpu_resources.buffers, + buffer_info.buffer_size_padded as usize, + )?; + + if buffer_info.buffer_size_padded as usize == data.len() { + re_tracing::profile_scope!("bulk_copy"); + + // Fast path: Just copy the data over as-is. + gpu_read_buffer.extend_from_slice(data)?; + } else { + re_tracing::profile_scope!("row_by_row_copy"); + + // Copy row by row in order to jump over padding bytes. 
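+        // wgpu requires `bytes_per_row` of a buffer->texture copy to be a multiple of
+        // `wgpu::COPY_BYTES_PER_ROW_ALIGNMENT` (256). Worked example (assumed numbers,
+        // not from this patch): a 300 texel wide `R8Uint` texture has 300 unpadded bytes
+        // per row, padded up to 512, so 212 zero bytes get appended after each row below.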
+ let bytes_per_row_unpadded = buffer_info.bytes_per_row_unpadded as usize; + let num_padding_bytes_per_row = + buffer_info.bytes_per_row_padded as usize - bytes_per_row_unpadded; + debug_assert!( + num_padding_bytes_per_row > 0, + "No padding bytes, but the padded buffer size is not equal to the unpadded one." + ); + + for row in 0..data_texture.texture.size().height as usize { + gpu_read_buffer.extend_from_slice( + &data[(row * bytes_per_row_unpadded) + ..(row * bytes_per_row_unpadded + bytes_per_row_unpadded)], + )?; + gpu_read_buffer.add_n(0, num_padding_bytes_per_row)?; + } + } + + let mut before_view_builder_encoder = + render_ctx.active_frame.before_view_builder_encoder.lock(); + gpu_read_buffer + .copy_to_texture2d_entire_first_layer(before_view_builder_encoder.get(), data_texture)?; + + Ok(()) +} diff --git a/crates/viewer/re_renderer/src/resource_managers/mod.rs b/crates/viewer/re_renderer/src/resource_managers/mod.rs index 8bf5aa66ca78..6bbab468f8b6 100644 --- a/crates/viewer/re_renderer/src/resource_managers/mod.rs +++ b/crates/viewer/re_renderer/src/resource_managers/mod.rs @@ -6,8 +6,12 @@ //! This is in contrast to the pools in `crate::wgpu_resources` which are exclusively concerned with //! low level gpu resources and their efficient allocation. +mod image_data_to_texture; mod texture_manager; -pub use texture_manager::{ - GpuTexture2D, Texture2DCreationDesc, TextureCreationError, TextureManager2D, - TextureManager2DError, +mod yuv_converter; + +pub use image_data_to_texture::{ + ColorPrimaries, ImageDataDesc, ImageDataToTextureError, SourceImageDataFormat, }; +pub use texture_manager::{GpuTexture2D, TextureManager2D, TextureManager2DError}; +pub use yuv_converter::YuvPixelLayout; diff --git a/crates/viewer/re_renderer/src/resource_managers/texture_manager.rs b/crates/viewer/re_renderer/src/resource_managers/texture_manager.rs index a807c8caf834..0ed8b37eebaf 100644 --- a/crates/viewer/re_renderer/src/resource_managers/texture_manager.rs +++ b/crates/viewer/re_renderer/src/resource_managers/texture_manager.rs @@ -1,13 +1,14 @@ -use std::sync::Arc; - use ahash::{HashMap, HashSet}; use parking_lot::Mutex; use crate::{ + resource_managers::ImageDataDesc, wgpu_resources::{GpuTexture, GpuTexturePool, TextureDesc}, - DebugLabel, + RenderContext, }; + +use super::{image_data_to_texture::transfer_image_data_to_texture, ImageDataToTextureError}; + /// Handle to a 2D resource. /// /// Currently, this is solely a more strongly typed regular gpu texture handle. @@ -82,73 +83,21 @@ impl std::borrow::Borrow for GpuTexture2D { } } -/// Data required to create a texture 2D resource. -/// -/// It is *not* stored along side the resulting texture resource! -pub struct Texture2DCreationDesc<'a> { - pub label: DebugLabel, - - /// Data for the highest mipmap level. - /// - /// Data is expected to be tightly packed. - /// I.e. it is *not* padded according to wgpu buffer->texture transfer rules, padding will happen on the fly if necessary. - /// TODO(andreas): This should be a kind of factory function/builder instead which gets target memory passed in. - pub data: std::borrow::Cow<'a, [u8]>, - pub format: wgpu::TextureFormat, - pub width: u32, - pub height: u32, - //generate_mip_maps: bool, // TODO(andreas): generate mipmaps! -} - -// TODO(andreas): Move this to texture pool. 
-#[derive(thiserror::Error, Debug)] -pub enum TextureCreationError { - #[error("Texture with debug label {0:?} has zero width or height!")] - ZeroSize(DebugLabel), - - #[error("Texture was {width}x{height}, larger than the max of {max_texture_dimension_2d}")] - TooLarge { - width: u32, - height: u32, - max_texture_dimension_2d: u32, - }, - - #[error( - "Wrong number of bytes in the texture data buffer - expected {width}x{height}x{bytes_per_texel}={expected_bytes}, got {actual_bytes}" - )] - WrongBufferSize { - width: u32, - height: u32, - bytes_per_texel: u32, - expected_bytes: usize, // product of the avbove - - actual_bytes: usize, - }, - - #[error( - "Texture with debug label {label:?} has a format {format:?} that data can't be transferred to!" - )] - UnsupportedFormatForTransfer { - label: DebugLabel, - format: wgpu::TextureFormat, - }, -} - #[derive(thiserror::Error, Debug)] pub enum TextureManager2DError<DataCreationError> { - /// Something went wrong when creating the GPU texture. + /// Something went wrong when creating the GPU texture & uploading/converting the image data. #[error(transparent)] - TextureCreation(#[from] TextureCreationError), + ImageDataToTextureError(#[from] ImageDataToTextureError), /// Something went wrong in a user-callback. #[error(transparent)] DataCreation(DataCreationError), } -impl From<TextureManager2DError<never::Never>> for TextureCreationError { +impl From<TextureManager2DError<never::Never>> for ImageDataToTextureError { fn from(err: TextureManager2DError<never::Never>) -> Self { match err { - TextureManager2DError::TextureCreation(texture_creation) => texture_creation, + TextureManager2DError::ImageDataToTextureError(texture_creation) => texture_creation, TextureManager2DError::DataCreation(never) => match never {}, } } } @@ -166,14 +115,9 @@ impl From<TextureManager2DError<never::Never>> for TextureCreationError { pub struct TextureManager2D { white_texture_unorm: GpuTexture2D, zeroed_texture_float: GpuTexture2D, - zeroed_texture_depth: GpuTexture2D, zeroed_texture_sint: GpuTexture2D, zeroed_texture_uint: GpuTexture2D, - // For convenience to reduce amount of times we need to pass them around - device: Arc<wgpu::Device>, - queue: Arc<wgpu::Queue>, - /// The mutable part of the manager. inner: Mutex<Inner>, } @@ -201,54 +145,74 @@ impl Inner { impl TextureManager2D { pub(crate) fn new( - device: Arc<wgpu::Device>, - queue: Arc<wgpu::Queue>, + device: &wgpu::Device, + queue: &wgpu::Queue, texture_pool: &GpuTexturePool, ) -> Self { re_tracing::profile_function!(); - let white_texture_unorm = Self::create_and_upload_texture( - &device, - &queue, - texture_pool, - &Texture2DCreationDesc { + // Create the single pixel white texture ad hoc - at this point during initialization we don't have + // the render context yet and thus can't use the higher level `transfer_image_data_to_texture` function. 
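+        // (A plain `queue.write_texture` is fine for this one: with a single 1x1 row there
+        // is no second row, so wgpu's 256-byte `bytes_per_row` alignment requirement for
+        // multi-row copies doesn't come into play.)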
+ let white_texture_unorm = GpuTexture2D(texture_pool.alloc( + device, + &TextureDesc { label: "white pixel - unorm".into(), - data: vec![255, 255, 255, 255].into(), format: wgpu::TextureFormat::Rgba8Unorm, + size: wgpu::Extent3d { + width: 1, + height: 1, + depth_or_array_layers: 1, + }, + mip_level_count: 1, + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST, + }, + )); + queue.write_texture( + wgpu::ImageCopyTexture { + texture: &white_texture_unorm.texture, + mip_level: 0, + origin: wgpu::Origin3d::ZERO, + aspect: wgpu::TextureAspect::All, + }, + &[255, 255, 255, 255], + wgpu::ImageDataLayout { + offset: 0, + bytes_per_row: Some(4), + rows_per_image: None, + }, + wgpu::Extent3d { width: 1, height: 1, + depth_or_array_layers: 1, }, - ) - .expect("Failed to create white pixel texture!"); + ); let zeroed_texture_float = - create_zero_texture(texture_pool, &device, wgpu::TextureFormat::Rgba8Unorm); - let zeroed_texture_depth = - create_zero_texture(texture_pool, &device, wgpu::TextureFormat::Depth16Unorm); + create_zero_texture(texture_pool, device, wgpu::TextureFormat::Rgba8Unorm); let zeroed_texture_sint = - create_zero_texture(texture_pool, &device, wgpu::TextureFormat::Rgba8Sint); + create_zero_texture(texture_pool, device, wgpu::TextureFormat::Rgba8Sint); let zeroed_texture_uint = - create_zero_texture(texture_pool, &device, wgpu::TextureFormat::Rgba8Uint); + create_zero_texture(texture_pool, device, wgpu::TextureFormat::Rgba8Uint); Self { white_texture_unorm, zeroed_texture_float, - zeroed_texture_depth, zeroed_texture_sint, zeroed_texture_uint, - device, - queue, inner: Default::default(), } } /// Creates a new 2D texture resource and schedules data upload to the GPU. /// TODO(jleibs): All usages of this should be replaced with `get_or_create`, which is strictly preferable + #[allow(clippy::unused_self)] pub fn create( &self, - texture_pool: &GpuTexturePool, - creation_desc: &Texture2DCreationDesc<'_>, - ) -> Result<GpuTexture2D, TextureCreationError> { + render_ctx: &RenderContext, + creation_desc: ImageDataDesc<'_>, + ) -> Result<GpuTexture2D, ImageDataToTextureError> { // TODO(andreas): Disabled the warning as we're moving towards using this texture manager for user-logged images. // However, it's still very much a concern especially once we add mipmapping. Something we need to keep in mind. // @@ -265,7 +229,10 @@ impl TextureManager2D { // Currently we don't store any data in the texture manager. // In the future we might handle (lazy?) mipmap generation in here or keep track of lazy upload processing. 
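        // Unlike `get_or_create`/`get_or_create_with` below, this path performs no caching:
        // every call uploads (and, if needed, converts) the data again. Callers that can
        // derive a stable u64 key should prefer the cached variants.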
- Self::create_and_upload_texture(&self.device, &self.queue, texture_pool, creation_desc) + Ok(GpuTexture2D(transfer_image_data_to_texture( + render_ctx, + creation_desc, + )?)) } /// Creates a new 2D texture resource and schedules data upload to the GPU if a texture /// @@ -273,10 +240,10 @@ pub fn get_or_create( &self, key: u64, - texture_pool: &GpuTexturePool, - texture_desc: Texture2DCreationDesc<'_>, - ) -> Result<GpuTexture2D, TextureCreationError> { - self.get_or_create_with(key, texture_pool, || texture_desc) + render_ctx: &RenderContext, + texture_desc: ImageDataDesc<'_>, + ) -> Result<GpuTexture2D, ImageDataToTextureError> { + self.get_or_create_with(key, render_ctx, || texture_desc) } /// Creates a new 2D texture resource and schedules data upload to the GPU if a texture /// @@ -284,10 +251,10 @@ pub fn get_or_create_with<'a>( &self, key: u64, - texture_pool: &GpuTexturePool, - create_texture_desc: impl FnOnce() -> Texture2DCreationDesc<'a>, - ) -> Result<GpuTexture2D, TextureCreationError> { - self.get_or_try_create_with(key, texture_pool, || -> Result<_, never::Never> { + render_ctx: &RenderContext, + create_texture_desc: impl FnOnce() -> ImageDataDesc<'a>, + ) -> Result<GpuTexture2D, ImageDataToTextureError> { + self.get_or_try_create_with(key, render_ctx, || -> Result<_, never::Never> { Ok(create_texture_desc()) }) .map_err(|err| err.into()) } /// @@ -298,8 +265,8 @@ pub fn get_or_try_create_with<'a, Err: std::fmt::Display>( &self, key: u64, - texture_pool: &GpuTexturePool, - try_create_texture_desc: impl FnOnce() -> Result<Texture2DCreationDesc<'a>, Err>, + render_ctx: &RenderContext, + try_create_texture_desc: impl FnOnce() -> Result<ImageDataDesc<'a>, Err>, ) -> Result<GpuTexture2D, TextureManager2DError<Err>> { let mut inner = self.inner.lock(); let texture_handle = match inner.texture_cache.entry(key) { @@ -310,12 +277,10 @@ // Run potentially expensive texture creation code: let tex_creation_desc = try_create_texture_desc() .map_err(|err| TextureManager2DError::DataCreation(err))?; - let texture = Self::create_and_upload_texture( - &self.device, - &self.queue, - texture_pool, - &tex_creation_desc, - )?; + let texture = GpuTexture2D(transfer_image_data_to_texture( + render_ctx, + tex_creation_desc, + )?); entry.insert(texture).clone() } }; @@ -339,11 +304,6 @@ &self.zeroed_texture_float.0 } - /// Returns a single zero pixel with format [`wgpu::TextureFormat::Depth16Unorm`]. - pub fn zeroed_texture_depth(&self) -> &GpuTexture { - &self.zeroed_texture_depth.0 - } - /// Returns a single zero pixel with format [`wgpu::TextureFormat::Rgba8Sint`]. 
pub fn zeroed_texture_sint(&self) -> &GpuTexture { &self.zeroed_texture_sint.0 @@ -354,111 +314,6 @@ impl TextureManager2D { &self.zeroed_texture_uint.0 } - fn create_and_upload_texture( - device: &wgpu::Device, - queue: &wgpu::Queue, - texture_pool: &GpuTexturePool, - creation_desc: &Texture2DCreationDesc<'_>, - ) -> Result { - re_tracing::profile_function!(); - - let Texture2DCreationDesc { - label, - data, - format, - width, - height, - } = creation_desc; - let (width, height, format) = (*width, *height, *format); - - if width == 0 || height == 0 { - return Err(TextureCreationError::ZeroSize(label.clone())); - } - - let max_texture_dimension_2d = device.limits().max_texture_dimension_2d; - if width > max_texture_dimension_2d || height > max_texture_dimension_2d { - return Err(TextureCreationError::TooLarge { - width, - height, - max_texture_dimension_2d, - }); - } - - if !format.is_compressed() { - if let Some(bytes_per_texel) = creation_desc - .format - .block_copy_size(Some(wgpu::TextureAspect::All)) - { - let expected_bytes = width as usize * height as usize * bytes_per_texel as usize; - - if data.len() != expected_bytes { - return Err(TextureCreationError::WrongBufferSize { - width, - height, - bytes_per_texel, - expected_bytes, - - actual_bytes: data.len(), - }); - } - } - } - - let size = wgpu::Extent3d { - width, - height, - depth_or_array_layers: 1, - }; - let texture = texture_pool.alloc( - device, - &TextureDesc { - label: label.clone(), - size, - mip_level_count: 1, // TODO(andreas) - sample_count: 1, - dimension: wgpu::TextureDimension::D2, - format, - usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST, - }, - ); - - let width_blocks = width / format.block_dimensions().0; - let block_size = creation_desc - .format - .block_copy_size(Some(wgpu::TextureAspect::All)) - .ok_or_else(|| TextureCreationError::UnsupportedFormatForTransfer { - label: label.clone(), - format, - })?; - let bytes_per_row_unaligned = width_blocks * block_size; - - // TODO(andreas): Once we have our own temp buffer for uploading, we can do the padding inplace - // I.e. the only difference will be if we do one memcopy or one memcopy per row, making row padding a nuisance! - let data: &[u8] = data.as_ref(); - - // TODO(andreas): temp allocator for staging data? 
- re_tracing::profile_scope!("write_texture"); - queue.write_texture( - wgpu::ImageCopyTexture { - texture: &texture.texture, - mip_level: 0, - origin: wgpu::Origin3d::ZERO, - aspect: wgpu::TextureAspect::All, - }, - data, - wgpu::ImageDataLayout { - offset: 0, - bytes_per_row: Some(bytes_per_row_unaligned), - rows_per_image: None, - }, - size, - ); - - // TODO(andreas): mipmap generation - - Ok(GpuTexture2D(texture)) - } - pub(crate) fn begin_frame(&self, _frame_index: u64) { self.inner.lock().begin_frame(_frame_index); } @@ -466,7 +321,7 @@ impl TextureManager2D { fn create_zero_texture( texture_pool: &GpuTexturePool, - device: &Arc, + device: &wgpu::Device, format: wgpu::TextureFormat, ) -> GpuTexture2D { // Wgpu zeros out new textures automatically diff --git a/crates/viewer/re_renderer/src/resource_managers/yuv_converter.rs b/crates/viewer/re_renderer/src/resource_managers/yuv_converter.rs new file mode 100644 index 000000000000..d42854e8801f --- /dev/null +++ b/crates/viewer/re_renderer/src/resource_managers/yuv_converter.rs @@ -0,0 +1,333 @@ +use smallvec::smallvec; + +use crate::{ + allocator::create_and_fill_uniform_buffer, + include_shader_module, + renderer::{screen_triangle_vertex_shader, DrawData, DrawError, Renderer}, + wgpu_resources::{ + BindGroupDesc, BindGroupEntry, BindGroupLayoutDesc, GpuBindGroup, GpuBindGroupLayoutHandle, + GpuRenderPipelineHandle, GpuTexture, PipelineLayoutDesc, RenderPipelineDesc, TextureDesc, + }, + DebugLabel, RenderContext, +}; + +use super::ColorPrimaries; + +/// Supported chroma subsampling input formats. +/// +/// Keep indices in sync with `yuv_converter.wgsl` +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, Debug)] +pub enum YuvPixelLayout { + /// 4:2:0 subsampling with a separate Y plane, followed by a UV plane. + /// + /// Expects single channel texture format. + /// + /// First comes entire image in Y in one plane, + /// followed by a plane with interleaved lines ordered as U0, V0, U1, V1, etc. + /// + /// width + /// __________ + /// | | + /// height | Y | + /// | | + /// |_________| + /// height/2 | U,V,U,… | + /// |_________| + Y_UV12 = 0, + + /// YUV 4:2:2 subsampling, single plane. + /// + /// Expects single channel texture format. + /// + /// The order of the channels is Y0, U0, Y1, V0, all in the same plane. + /// + /// width * 2 + /// __________________ + /// | | + /// height | Y0, U0, Y1, V0… | + /// |_________________| + /// + YUYV16 = 1, +} + +impl YuvPixelLayout { + /// Given the dimensions of the output picture, what are the expected dimensions of the input data texture. + pub fn data_texture_width_height(&self, [decoded_width, decoded_height]: [u32; 2]) -> [u32; 2] { + match self { + Self::Y_UV12 => [decoded_width, decoded_height + decoded_height / 2], + Self::YUYV16 => [decoded_width * 2, decoded_height], + } + } + + /// What format the input data texture is expected to be in. + pub fn data_texture_format(&self) -> wgpu::TextureFormat { + // TODO(andreas): How to deal with higher precision formats here? + // + // Our shader currently works with 8 bit integer formats here since while + // _technically_ YUV formats have nothing to do with concrete bit depth, + // practically there's underlying expectation for 8 bits per channel + // as long as the data is Bt.709 or Bt.601. + // In other words: The conversions implementations we have today expect 0-255 as the value range. 
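+        // For example, both layouts below are read through a single channel 8-bit uint
+        // format; a (hypothetical here) 10-bit layout like P010 would instead need a
+        // 16-bit data texture and a rescaled value range in the shader.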
+ + #[allow(clippy::match_same_arms)] + match self { + Self::Y_UV12 => wgpu::TextureFormat::R8Uint, + // TODO(andreas): Why not use [`wgpu::TextureFormat::Rg8Uint`] here? + Self::YUYV16 => wgpu::TextureFormat::R8Uint, + } + } + + /// Size of the buffer needed to create the data texture, i.e. the raw input data. + pub fn num_data_buffer_bytes(&self, decoded_width_height: [u32; 2]) -> usize { + let num_pixels = decoded_width_height[0] as usize * decoded_width_height[1] as usize; + match self { + Self::Y_UV12 => 12 * num_pixels / 8, + Self::YUYV16 => 16 * num_pixels / 8, + } + } +} + +mod gpu_data { + use crate::wgpu_buffer_types; + + #[repr(C)] + #[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)] + pub struct UniformBuffer { + /// Uses [`super::YuvPixelLayout`]. + pub pixel_layout: u32, + + /// Uses [`super::ColorPrimaries`]. + pub primaries: u32, + + pub target_texture_size: [u32; 2], + + pub _end_padding: [wgpu_buffer_types::PaddingRow; 16 - 1], + } +} + +/// A work item for the subsampling converter. +pub struct YuvFormatConversionTask { + bind_group: GpuBindGroup, + target_texture: GpuTexture, +} + +impl DrawData for YuvFormatConversionTask { + type Renderer = YuvFormatConverter; +} + +impl YuvFormatConversionTask { + /// sRGB encoded 8 bit texture. + /// + /// Not using [`wgpu::TextureFormat::Rgba8UnormSrgb`] since consumers typically consume this + /// texture with software EOTF ("to linear") for more flexibility. + pub const OUTPUT_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8Unorm; + + /// Creates a new conversion task that can be used with [`YuvFormatConverter`]. + /// + /// Does *not* validate that the input data has the expected format, + /// see methods of [`YuvPixelLayout`] for details. + pub fn new( + ctx: &RenderContext, + format: YuvPixelLayout, + primaries: ColorPrimaries, + input_data: &GpuTexture, + output_label: &DebugLabel, + output_width_height: [u32; 2], + ) -> Self { + let target_texture = ctx.gpu_resources.textures.alloc( + &ctx.device, + &TextureDesc { + label: output_label.clone(), + size: wgpu::Extent3d { + width: output_width_height[0], + height: output_width_height[1], + depth_or_array_layers: 1, + }, + mip_level_count: 1, // We don't have mipmap level generation yet! + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format: Self::OUTPUT_FORMAT, + usage: wgpu::TextureUsages::TEXTURE_BINDING + | wgpu::TextureUsages::COPY_DST + | wgpu::TextureUsages::RENDER_ATTACHMENT, + }, + ); + + let renderer = ctx.renderer::<YuvFormatConverter>(); + + let uniform_buffer = create_and_fill_uniform_buffer( + ctx, + format!("{output_label}_conversion").into(), + gpu_data::UniformBuffer { + pixel_layout: format as _, + primaries: primaries as _, + target_texture_size: output_width_height, + + _end_padding: Default::default(), + }, + ); + + let bind_group = ctx.gpu_resources.bind_groups.alloc( + &ctx.device, + &ctx.gpu_resources, + &BindGroupDesc { + label: "YuvFormatConverter::bind_group".into(), + entries: smallvec![ + uniform_buffer, + BindGroupEntry::DefaultTextureView(input_data.handle), + ], + layout: renderer.bind_group_layout, + }, + ); + + Self { + bind_group, + target_texture, + } + } + + /// Runs the conversion from the input texture data. + pub fn convert_input_data_to_texture( + self, + ctx: &RenderContext, + ) -> Result<GpuTexture, DrawError> { + // TODO(andreas): Does this have to be on the global view encoder? + // If this ever becomes a problem we could easily schedule this to another encoder as long as + // we guarantee that the conversion is enqueued before the resulting texture is used. 
+ // Given that we already have this neatly encapsulated work package, this would be quite easy to do! + let mut encoder = ctx.active_frame.before_view_builder_encoder.lock(); + let mut pass = encoder + .get() + .begin_render_pass(&wgpu::RenderPassDescriptor { + label: self.target_texture.creation_desc.label.get(), + color_attachments: &[Some(wgpu::RenderPassColorAttachment { + view: &self.target_texture.default_view, + resolve_target: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color::BLACK), + store: wgpu::StoreOp::Store, + }, + })], + ..Default::default() + }); + + ctx.renderer::<YuvFormatConverter>().draw( + &ctx.gpu_resources.render_pipelines.resources(), + crate::draw_phases::DrawPhase::Opaque, // Don't care about the phase. + &mut pass, + &self, + )?; + + Ok(self.target_texture) + } +} + +/// Converter for chroma subsampling formats. +/// +/// Takes chroma subsampled data and draws to a fullscreen sRGB output texture. +/// Implemented as a [`Renderer`] in order to make use of the existing mechanisms for storing renderer data. +/// (we need some place to lazily create the render pipeline, store a handle to it and encapsulate the draw logic!) +pub struct YuvFormatConverter { + render_pipeline: GpuRenderPipelineHandle, + bind_group_layout: GpuBindGroupLayoutHandle, +} + +impl Renderer for YuvFormatConverter { + type RendererDrawData = YuvFormatConversionTask; + + fn create_renderer(ctx: &RenderContext) -> Self { + let vertex_handle = screen_triangle_vertex_shader(ctx); + + let bind_group_layout = ctx.gpu_resources.bind_group_layouts.get_or_create( + &ctx.device, + &BindGroupLayoutDesc { + label: "YuvFormatConverter".into(), + entries: vec![ + // Uniform buffer with some information. + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: (std::mem::size_of::<gpu_data::UniformBuffer>() + as u64) + .try_into() + .ok(), + }, + count: None, + }, + // Input data texture. + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + view_dimension: wgpu::TextureViewDimension::D2, + sample_type: wgpu::TextureSampleType::Uint, + }, + count: None, + }, + ], + }, + ); + + let pipeline_layout = ctx.gpu_resources.pipeline_layouts.get_or_create( + ctx, + &PipelineLayoutDesc { + label: "YuvFormatConverter".into(), + // Note that this is a fairly unusual layout for us with the first entry + // not being the globally set bind group! 
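+                // (Matches `yuv_converter.wgsl`, which binds everything at `@group(0)`:
+                // the uniform buffer at `@binding(0)`, the input texture at `@binding(1)`.)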
+ entries: vec![bind_group_layout], + }, + ); + + let shader_modules = &ctx.gpu_resources.shader_modules; + let render_pipeline = ctx.gpu_resources.render_pipelines.get_or_create( + ctx, + &RenderPipelineDesc { + label: "YuvFormatConverter::render_pipeline".into(), + pipeline_layout, + vertex_entrypoint: "main".into(), + vertex_handle, + fragment_entrypoint: "fs_main".into(), + fragment_handle: shader_modules.get_or_create( + ctx, + &include_shader_module!("../../shader/conversions/yuv_converter.wgsl"), + ), + vertex_buffers: smallvec![], + render_targets: smallvec![Some(YuvFormatConversionTask::OUTPUT_FORMAT.into())], + primitive: wgpu::PrimitiveState::default(), + depth_stencil: None, + multisample: wgpu::MultisampleState::default(), + }, + ); + + Self { + render_pipeline, + bind_group_layout, + } + } + + fn draw( + &self, + render_pipelines: &crate::wgpu_resources::GpuRenderPipelinePoolAccessor<'_>, + _phase: crate::draw_phases::DrawPhase, + pass: &mut wgpu::RenderPass<'_>, + draw_data: &Self::RendererDrawData, + ) -> Result<(), DrawError> { + let pipeline = render_pipelines.get(self.render_pipeline)?; + + pass.set_pipeline(pipeline); + pass.set_bind_group(0, &draw_data.bind_group, &[]); + pass.draw(0..3, 0..1); + + Ok(()) + } + + fn participated_phases() -> &'static [crate::draw_phases::DrawPhase] { + // Doesn't participate in regular rendering. + &[] + } +} diff --git a/crates/viewer/re_renderer/src/workspace_shaders.rs b/crates/viewer/re_renderer/src/workspace_shaders.rs index 845b90623493..e34f8c715875 100644 --- a/crates/viewer/re_renderer/src/workspace_shaders.rs +++ b/crates/viewer/re_renderer/src/workspace_shaders.rs @@ -26,20 +26,20 @@ pub fn init() { } { - let virtpath = Path::new("shader/copy_texture.wgsl"); - let content = include_str!("../shader/copy_texture.wgsl").into(); + let virtpath = Path::new("shader/conversions/yuv_converter.wgsl"); + let content = include_str!("../shader/conversions/yuv_converter.wgsl").into(); fs.create_file(virtpath, content).unwrap(); } { - let virtpath = Path::new("shader/debug_overlay.wgsl"); - let content = include_str!("../shader/debug_overlay.wgsl").into(); + let virtpath = Path::new("shader/copy_texture.wgsl"); + let content = include_str!("../shader/copy_texture.wgsl").into(); fs.create_file(virtpath, content).unwrap(); } { - let virtpath = Path::new("shader/decodings.wgsl"); - let content = include_str!("../shader/decodings.wgsl").into(); + let virtpath = Path::new("shader/debug_overlay.wgsl"); + let content = include_str!("../shader/debug_overlay.wgsl").into(); fs.create_file(virtpath, content).unwrap(); } diff --git a/crates/viewer/re_renderer_examples/2d.rs b/crates/viewer/re_renderer_examples/2d.rs index ecd62061f7d7..72f25abef82e 100644 --- a/crates/viewer/re_renderer_examples/2d.rs +++ b/crates/viewer/re_renderer_examples/2d.rs @@ -13,7 +13,7 @@ use re_renderer::{ ColormappedTexture, LineStripFlags, RectangleDrawData, RectangleOptions, TextureFilterMag, TextureFilterMin, TexturedRect, }, - resource_managers::{GpuTexture2D, Texture2DCreationDesc}, + resource_managers::{GpuTexture2D, ImageDataDesc}, view_builder::{self, Projection, TargetConfiguration, ViewBuilder}, Color32, LineDrawableBuilder, PointCloudBuilder, Size, }; @@ -40,13 +40,12 @@ impl framework::Example for Render2D { let rerun_logo_texture = re_ctx .texture_manager_2d .create( - &re_ctx.gpu_resources.textures, - &Texture2DCreationDesc { + re_ctx, + ImageDataDesc { label: "rerun logo".into(), data: image_data.into(), - format: wgpu::TextureFormat::Rgba8UnormSrgb, - width: 
rerun_logo.width(), - height: rerun_logo.height(), + format: wgpu::TextureFormat::Rgba8UnormSrgb.into(), + width_height: [rerun_logo.width(), rerun_logo.height()], }, ) .expect("Failed to create texture for rerun logo"); diff --git a/crates/viewer/re_renderer_examples/depth_cloud.rs b/crates/viewer/re_renderer_examples/depth_cloud.rs index a6bf6af6183e..4c0d2efcc405 100644 --- a/crates/viewer/re_renderer_examples/depth_cloud.rs +++ b/crates/viewer/re_renderer_examples/depth_cloud.rs @@ -26,7 +26,7 @@ use re_renderer::{ ColormappedTexture, DepthCloud, DepthCloudDrawData, DepthClouds, DrawData, GenericSkyboxDrawData, RectangleDrawData, RectangleOptions, TexturedRect, }, - resource_managers::{GpuTexture2D, Texture2DCreationDesc}, + resource_managers::{GpuTexture2D, ImageDataDesc}, view_builder::{self, Projection, ViewBuilder}, Color32, LineDrawableBuilder, PointCloudBuilder, Rgba, Size, }; @@ -417,13 +417,12 @@ impl DepthTexture { .texture_manager_2d .get_or_create( hash(&label), - &re_ctx.gpu_resources.textures, - Texture2DCreationDesc { + re_ctx, + ImageDataDesc { label: label.into(), data: bytemuck::cast_slice(&data).into(), - format: wgpu::TextureFormat::R32Float, - width: dimensions.x, - height: dimensions.y, + format: wgpu::TextureFormat::R32Float.into(), + width_height: dimensions.to_array(), }, ) .expect("Failed to create depth texture."); @@ -460,13 +459,12 @@ impl AlbedoTexture { .texture_manager_2d .get_or_create( hash(&label), - &re_ctx.gpu_resources.textures, - Texture2DCreationDesc { + re_ctx, + ImageDataDesc { label: label.into(), data: bytemuck::cast_slice(&rgba8).into(), - format: wgpu::TextureFormat::Rgba8UnormSrgb, - width: dimensions.x, - height: dimensions.y, + format: wgpu::TextureFormat::Rgba8UnormSrgb.into(), + width_height: dimensions.to_array(), }, ) .expect("Failed to create albedo texture."); diff --git a/crates/viewer/re_space_view_spatial/src/picking_ui_pixel.rs b/crates/viewer/re_space_view_spatial/src/picking_ui_pixel.rs index 09c0bca4b110..4dbcf14fc4d6 100644 --- a/crates/viewer/re_space_view_spatial/src/picking_ui_pixel.rs +++ b/crates/viewer/re_space_view_spatial/src/picking_ui_pixel.rs @@ -195,7 +195,7 @@ fn try_show_zoomed_image_region( interaction_id: &TextureInteractionId<'_>, center_texel: [isize; 2], ) -> anyhow::Result<()> { - let [width, height] = colormapped_texture.texture.width_height(); + let [width, height] = colormapped_texture.width_height(); const POINTS_PER_TEXEL: f32 = 5.0; let size = egui::Vec2::splat(((ZOOMED_IMAGE_TEXEL_RADIUS * 2 + 1) as f32) * POINTS_PER_TEXEL); diff --git a/crates/viewer/re_space_view_spatial/src/visualizers/videos.rs b/crates/viewer/re_space_view_spatial/src/visualizers/videos.rs index f508ab6c258d..b7ce6e25c9b8 100644 --- a/crates/viewer/re_space_view_spatial/src/visualizers/videos.rs +++ b/crates/viewer/re_space_view_spatial/src/visualizers/videos.rs @@ -6,7 +6,7 @@ use re_renderer::{ renderer::{ ColormappedTexture, RectangleOptions, TextureFilterMag, TextureFilterMin, TexturedRect, }, - resource_managers::Texture2DCreationDesc, + resource_managers::ImageDataDesc, video::{Video, VideoFrameTexture}, }; use re_types::{ @@ -293,7 +293,7 @@ impl VideoFrameReferenceVisualizer { .texture_manager_2d .get_or_try_create_with::( Hash64::hash("video_error").hash64(), - &render_ctx.gpu_resources.textures, + render_ctx, || { let mut reader = image::io::Reader::new(std::io::Cursor::new( re_ui::icons::VIDEO_ERROR.png_bytes, @@ -301,12 +301,11 @@ impl VideoFrameReferenceVisualizer { reader.set_format(image::ImageFormat::Png); let 
diff --git a/crates/viewer/re_space_view_spatial/src/picking_ui_pixel.rs b/crates/viewer/re_space_view_spatial/src/picking_ui_pixel.rs index 09c0bca4b110..4dbcf14fc4d6 100644 --- a/crates/viewer/re_space_view_spatial/src/picking_ui_pixel.rs +++ b/crates/viewer/re_space_view_spatial/src/picking_ui_pixel.rs @@ -195,7 +195,7 @@ fn try_show_zoomed_image_region( interaction_id: &TextureInteractionId<'_>, center_texel: [isize; 2], ) -> anyhow::Result<()> { - let [width, height] = colormapped_texture.texture.width_height(); + let [width, height] = colormapped_texture.width_height(); const POINTS_PER_TEXEL: f32 = 5.0; let size = egui::Vec2::splat(((ZOOMED_IMAGE_TEXEL_RADIUS * 2 + 1) as f32) * POINTS_PER_TEXEL); diff --git a/crates/viewer/re_space_view_spatial/src/visualizers/videos.rs b/crates/viewer/re_space_view_spatial/src/visualizers/videos.rs index f508ab6c258d..b7ce6e25c9b8 100644 --- a/crates/viewer/re_space_view_spatial/src/visualizers/videos.rs +++ b/crates/viewer/re_space_view_spatial/src/visualizers/videos.rs @@ -6,7 +6,7 @@ use re_renderer::{ renderer::{ ColormappedTexture, RectangleOptions, TextureFilterMag, TextureFilterMin, TexturedRect, }, - resource_managers::Texture2DCreationDesc, + resource_managers::ImageDataDesc, video::{Video, VideoFrameTexture}, }; use re_types::{ @@ -293,7 +293,7 @@ impl VideoFrameReferenceVisualizer { .texture_manager_2d .get_or_try_create_with::<image::ImageError>( Hash64::hash("video_error").hash64(), - &render_ctx.gpu_resources.textures, + render_ctx, || { let mut reader = image::io::Reader::new(std::io::Cursor::new( re_ui::icons::VIDEO_ERROR.png_bytes, )); reader.set_format(image::ImageFormat::Png); let dynamic_image = reader.decode()?; - Ok(Texture2DCreationDesc { + Ok(ImageDataDesc { label: "video_error".into(), data: std::borrow::Cow::Owned(dynamic_image.to_rgba8().to_vec()), - format: re_renderer::external::wgpu::TextureFormat::Rgba8UnormSrgb, - width: dynamic_image.width(), - height: dynamic_image.height(), + format: re_renderer::external::wgpu::TextureFormat::Rgba8UnormSrgb.into(), + width_height: [dynamic_image.width(), dynamic_image.height()], }) }, ); diff --git a/crates/viewer/re_space_view_tensor/src/tensor_slice_to_gpu.rs b/crates/viewer/re_space_view_tensor/src/tensor_slice_to_gpu.rs index 4855479c7ea1..861c2d022e1e 100644 --- a/crates/viewer/re_space_view_tensor/src/tensor_slice_to_gpu.rs +++ b/crates/viewer/re_space_view_tensor/src/tensor_slice_to_gpu.rs @@ -1,7 +1,7 @@ use re_chunk_store::RowId; use re_renderer::{ renderer::ColormappedTexture, - resource_managers::{GpuTexture2D, Texture2DCreationDesc, TextureManager2DError}, + resource_managers::{GpuTexture2D, ImageDataDesc, TextureManager2DError}, }; use re_types::{ blueprint::archetypes::TensorSliceSelection, @@ -67,7 +67,7 @@ fn upload_texture_slice_to_gpu( fn texture_desc_from_tensor( tensor: &TensorData, slice_selection: &TensorSliceSelection, -) -> Result<Texture2DCreationDesc<'static>, TensorUploadError> { +) -> Result<ImageDataDesc<'static>, TensorUploadError> { use wgpu::TextureFormat; re_tracing::profile_function!(); @@ -142,7 +142,7 @@ fn to_texture_desc( slice_selection: &TensorSliceSelection, format: wgpu::TextureFormat, caster: impl Fn(From) -> To, -) -> Result<Texture2DCreationDesc<'static>, TensorUploadError> { +) -> Result<ImageDataDesc<'static>, TensorUploadError> { re_tracing::profile_function!(); use ndarray::Dimension as _; @@ -167,11 +167,10 @@ fn to_texture_desc( } re_tracing::profile_scope!("pod_collect_to_vec"); - Ok(Texture2DCreationDesc { + Ok(ImageDataDesc { label: "tensor_slice".into(), data: bytemuck::pod_collect_to_vec(&pixels).into(), - format, - width: width as u32, - height: height as u32, + format: format.into(), + width_height: [width as u32, height as u32], }) } diff --git a/crates/viewer/re_viewer_context/src/gpu_bridge/colormap.rs b/crates/viewer/re_viewer_context/src/gpu_bridge/colormap.rs index 8aec0aa2c54b..fa7d05f64639 100644 --- a/crates/viewer/re_viewer_context/src/gpu_bridge/colormap.rs +++ b/crates/viewer/re_viewer_context/src/gpu_bridge/colormap.rs @@ -34,12 +34,11 @@ fn colormap_preview_ui( }) .collect(); - re_renderer::resource_managers::Texture2DCreationDesc { + re_renderer::resource_managers::ImageDataDesc { label: "horizontal_gradient".into(), data: data.into(), - format: wgpu::TextureFormat::R16Float, - width, - height, + format: wgpu::TextureFormat::R16Float.into(), + width_height: [width, height], } }) .map_err(|err| anyhow::anyhow!("Failed to create horizontal gradient texture: {err}"))?;
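
One detail worth calling out in these call sites: `ImageDataDesc::data` is a `Cow<'_, [u8]>` (the `video_error` site above constructs `Cow::Owned` explicitly), so the `.into()` calls forward either borrowed or owned bytes without an extra copy. A self-contained, std-only illustration of what those conversions do:

```rust
use std::borrow::Cow;

fn main() {
    let owned: Vec<u8> = vec![0, 1, 2, 3];
    let borrowed: &[u8] = &[4, 5, 6, 7];

    // `From<Vec<u8>>` yields `Cow::Owned`: the Vec's buffer is moved, not copied.
    let a: Cow<'_, [u8]> = owned.into();
    // `From<&[u8]>` yields `Cow::Borrowed`: zero-copy.
    let b: Cow<'_, [u8]> = borrowed.into();

    assert!(matches!(a, Cow::Owned(_)));
    assert!(matches!(b, Cow::Borrowed(_)));
}
```
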
diff --git a/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs b/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs index 1f3feb6e4d6c..e093a3edd121 100644 --- a/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs +++ b/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs @@ -10,7 +10,7 @@ use re_renderer::{ config::DeviceCaps, pad_rgb_to_rgba, renderer::{ColorMapper, ColormappedTexture, ShaderDecoding}, - resource_managers::Texture2DCreationDesc, + resource_managers::{ColorPrimaries, ImageDataDesc, SourceImageDataFormat, YuvPixelLayout}, RenderContext, }; use re_types::components::ClassId; @@ -112,7 +112,6 @@ fn color_image_to_gpu( emath::Rangef::new(-1.0, 1.0) } else if let Some(shader_decoding) = shader_decoding { match shader_decoding { - ShaderDecoding::Nv12 | ShaderDecoding::Yuy2 => emath::Rangef::new(0.0, 1.0), ShaderDecoding::Bgr => image_data_range_heuristic(image_stats, &image_format), } } else { @@ -121,10 +120,8 @@ fn color_image_to_gpu( let color_mapper = if let Some(shader_decoding) = shader_decoding { match shader_decoding { - // We only have 1D color maps, therefore chroma downsampled and BGR formats can't have color maps. - ShaderDecoding::Bgr | ShaderDecoding::Nv12 | ShaderDecoding::Yuy2 => { - ColorMapper::OffRGB - } + // We only have 1D color maps, therefore BGR formats can't have color maps. + ShaderDecoding::Bgr => ColorMapper::OffRGB, } } else if texture_format.components() == 1 { // TODO(andreas): support colormap property @@ -193,6 +190,7 @@ pub fn image_data_range_heuristic(image_stats: &ImageStats, image_format: &Image fn image_decode_srgb_gamma_heuristic(image_stats: &ImageStats, image_format: ImageFormat) -> bool { if let Some(pixel_format) = image_format.pixel_format { match pixel_format { + // Have to do the conversion because we don't use an `Srgb` texture format. PixelFormat::NV12 | PixelFormat::YUY2 => true, } } else { @@ -219,80 +217,71 @@ pub fn required_shader_decode( image_format: &ImageFormat, ) -> Option<ShaderDecoding> { let color_model = image_format.color_model(); - match image_format.pixel_format { - Some(PixelFormat::NV12) => Some(ShaderDecoding::Nv12), - Some(PixelFormat::YUY2) => Some(ShaderDecoding::Yuy2), - None => { - if color_model == ColorModel::BGR || color_model == ColorModel::BGRA { - // U8 can be converted to RGBA without the shader's help since there's a format for it. - if image_format.datatype() == ChannelDatatype::U8 - && device_caps.support_bgra_textures() - { - None - } else { - Some(ShaderDecoding::Bgr) - } - } else { - None - } + + if image_format.pixel_format.is_none() + && (color_model == ColorModel::BGR || color_model == ColorModel::BGRA) + { + // U8 can be converted to RGBA without the shader's help since there's a format for it. + if image_format.datatype() == ChannelDatatype::U8 && device_caps.support_bgra_textures() { + None + } else { + Some(ShaderDecoding::Bgr) } + } else { + None } }
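
A note on the parenthesization in the new early-out above: `&&` binds tighter than `||` in Rust, so without the parentheses an image that has a `pixel_format` set but reports a BGRA color model would, at least in principle, slip into the BGR shader path. A self-contained demo of the two readings:

```rust
fn main() {
    // Stand-ins for the three conditions in `required_shader_decode`:
    let no_pixel_format = false; // e.g. an NV12 image
    let is_bgr = false;
    let is_bgra = true;

    // Without parentheses, `&&` wins: parsed as `(a && b) || c`.
    let unparenthesized = no_pixel_format && is_bgr || is_bgra;
    // With parentheses, the intended grouping: `a && (b || c)`.
    let parenthesized = no_pixel_format && (is_bgr || is_bgra);

    assert!(unparenthesized); // would take the BGR shader path despite a set pixel format
    assert!(!parenthesized); // correctly leaves YUV data to the upload-time converter
}
```
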
-/// Creates a [`Texture2DCreationDesc`] for creating a texture from an [`ImageInfo`]. +/// Creates an [`ImageDataDesc`] for creating a texture from an [`ImageInfo`]. /// /// The resulting texture has requirements as described by [`required_shader_decode`]. /// -/// TODO(andreas): The consumer needs to be aware of bgr and chroma downsampling conversions. -/// It would be much better if we had a separate `re_renderer`/gpu driven conversion pipeline for this -/// which would allow us to virtually extend over wgpu's texture formats. -/// This would allow us to seamlessly support e.g. NV12 on meshes without the mesh shader having to be updated. +/// TODO(andreas): The consumer needs to be aware of BGR conversions. Other conversions are already taken care of upon upload. pub fn texture_creation_desc_from_color_image<'a>( device_caps: &DeviceCaps, image: &'a ImageInfo, debug_name: &'a str, -) -> Texture2DCreationDesc<'a> { +) -> ImageDataDesc<'a> { re_tracing::profile_function!(); - if let Some(pixel_format) = image.format.pixel_format { - match pixel_format { - PixelFormat::NV12 => { - // Decoded in the shader, see [`required_shader_decode`]. - return Texture2DCreationDesc { - label: debug_name.into(), - data: cast_slice_to_cow(image.buffer.as_slice()), - format: TextureFormat::R8Uint, - width: image.width(), - height: image.height() + image.height() / 2, // ! - }; - } + // TODO(#7608): All image data ingestion conversions should be handled by re_renderer! - PixelFormat::YUY2 => { - // Decoded in the shader, see [`required_shader_decode`]. - return Texture2DCreationDesc { - label: debug_name.into(), - data: cast_slice_to_cow(image.buffer.as_slice()), - format: TextureFormat::R8Uint, - width: 2 * image.width(), // ! - height: image.height(), - }; - } + let (data, format) = if let Some(pixel_format) = image.format.pixel_format { match pixel_format { + // Using Bt.601 here for historical reasons. + // TODO(andreas): Expose color primaries. Bt.601 is probably still the better default (it's what JPEG uses, for instance), + // but we should confirm & back that up! + PixelFormat::NV12 => ( + cast_slice_to_cow(image.buffer.as_slice()), + SourceImageDataFormat::Yuv { + format: YuvPixelLayout::Y_UV12, + primaries: ColorPrimaries::Bt601, + }, + ), + PixelFormat::YUY2 => ( + cast_slice_to_cow(image.buffer.as_slice()), + SourceImageDataFormat::Yuv { + format: YuvPixelLayout::YUYV16, + primaries: ColorPrimaries::Bt601, + }, + ), } } else { let color_model = image.format.color_model(); let datatype = image.format.datatype(); - let (data, format) = match (color_model, datatype) { + match (color_model, datatype) { // sRGB(A) handling is done by `ColormappedTexture`. // Why not use `Rgba8UnormSrgb`? Because premul must happen _before_ sRGB decode, so we can't // use a "Srgb-aware" texture like `Rgba8UnormSrgb` for RGBA. (ColorModel::RGB, ChannelDatatype::U8) => ( pad_rgb_to_rgba(&image.buffer, u8::MAX).into(), - TextureFormat::Rgba8Unorm, + SourceImageDataFormat::WgpuCompatible(TextureFormat::Rgba8Unorm), + ), + (ColorModel::RGBA, ChannelDatatype::U8) => ( + cast_slice_to_cow(&image.buffer), + SourceImageDataFormat::WgpuCompatible(TextureFormat::Rgba8Unorm), ), - (ColorModel::RGBA, ChannelDatatype::U8) => { - (cast_slice_to_cow(&image.buffer), TextureFormat::Rgba8Unorm) - } // Make use of wgpu's BGR(A)8 formats if possible.
// @@ -315,7 +304,10 @@ pub fn texture_creation_desc_from_color_image<'a>( } else { TextureFormat::Bgra8Unorm }; - (padded_data, texture_format) + ( + padded_data, + SourceImageDataFormat::WgpuCompatible(texture_format), + ) } (ColorModel::BGRA, ChannelDatatype::U8) => { let texture_format = if required_shader_decode(device_caps, &image.format).is_some() @@ -324,7 +316,10 @@ pub fn texture_creation_desc_from_color_image<'a>( } else { TextureFormat::Bgra8Unorm }; - (cast_slice_to_cow(&image.buffer), texture_format) + ( + cast_slice_to_cow(&image.buffer), + SourceImageDataFormat::WgpuCompatible(texture_format), + ) } _ => { @@ -336,15 +331,14 @@ pub fn texture_creation_desc_from_color_image<'a>( datatype, ); } - }; - - Texture2DCreationDesc { - label: debug_name.into(), - data, - format, - width: image.width(), - height: image.height(), } + }; + + ImageDataDesc { + label: debug_name.into(), + data, + format, + width_height: image.width_height(), } } @@ -445,12 +439,11 @@ fn segmentation_image_to_gpu( }) .collect(); - Texture2DCreationDesc { + ImageDataDesc { label: "class_id_colormap".into(), data: data.into(), - format: TextureFormat::Rgba8UnormSrgb, - width: colormap_width as u32, - height: colormap_height as u32, + format: SourceImageDataFormat::WgpuCompatible(TextureFormat::Rgba8UnormSrgb), + width_height: [colormap_width as u32, colormap_height as u32], } }) .context("Failed to create class_id_colormap.")?; @@ -478,12 +471,9 @@ fn general_texture_creation_desc_from_image<'a>( image: &'a ImageInfo, color_model: ColorModel, datatype: ChannelDatatype, -) -> Texture2DCreationDesc<'a> { +) -> ImageDataDesc<'a> { re_tracing::profile_function!(); - let width = image.width(); - let height = image.height(); - let buf: &[u8] = image.buffer.as_ref(); let (data, format) = match color_model { @@ -590,12 +580,11 @@ fn general_texture_creation_desc_from_image<'a>( } }; - Texture2DCreationDesc { + ImageDataDesc { label: debug_name.into(), data, - format, - width, - height, + format: SourceImageDataFormat::WgpuCompatible(format), + width_height: image.width_height(), } }
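
For reference, the buffer sizes implied by the two YUV layouts mapped above match the staging-texture shapes the removed shader path used (`height + height / 2` rows of R8 for NV12, `2 * width` columns for YUY2). A std-only sketch of the arithmetic; the function names are ours, not the crate's:

```rust
/// Y_UV12 (NV12): a full-resolution Y plane followed by one interleaved UV plane at
/// half resolution in both axes => 1.5 bytes per pixel.
fn nv12_num_bytes(w: usize, h: usize) -> usize {
    w * h + (w * h) / 2
}

/// YUYV16 (YUY2): 4:2:2 packed, two pixels share one U and one V sample => 2 bytes per pixel.
fn yuyv16_num_bytes(w: usize, h: usize) -> usize {
    2 * w * h
}

fn main() {
    assert_eq!(nv12_num_bytes(640, 480), 640 * 480 * 3 / 2);
    assert_eq!(yuyv16_num_bytes(640, 480), 640 * 480 * 2);
}
```
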
diff --git a/crates/viewer/re_viewer_context/src/gpu_bridge/mod.rs b/crates/viewer/re_viewer_context/src/gpu_bridge/mod.rs index 29daaf789d97..20a875c3a840 100644 --- a/crates/viewer/re_viewer_context/src/gpu_bridge/mod.rs +++ b/crates/viewer/re_viewer_context/src/gpu_bridge/mod.rs @@ -18,7 +18,7 @@ use crate::TensorStats; use re_renderer::{ renderer::{ColormappedTexture, RectangleOptions}, resource_managers::{ - GpuTexture2D, Texture2DCreationDesc, TextureCreationError, TextureManager2DError, + GpuTexture2D, ImageDataDesc, ImageDataToTextureError, TextureManager2DError, }, RenderContext, ViewBuilder, }; @@ -60,11 +60,11 @@ pub fn viewport_resolution_in_pixels(clip_rect: egui::Rect, pixels_per_point: f3 pub fn try_get_or_create_texture<'a, Err: std::fmt::Display>( render_ctx: &RenderContext, texture_key: u64, - try_create_texture_desc: impl FnOnce() -> Result<Texture2DCreationDesc<'a>, Err>, + try_create_texture_desc: impl FnOnce() -> Result<ImageDataDesc<'a>, Err>, ) -> Result<GpuTexture2D, TextureManager2DError<Err>> { render_ctx.texture_manager_2d.get_or_try_create_with( texture_key, - &render_ctx.gpu_resources.textures, + render_ctx, try_create_texture_desc, ) } @@ -72,13 +72,11 @@ pub fn get_or_create_texture<'a>( render_ctx: &RenderContext, texture_key: u64, - create_texture_desc: impl FnOnce() -> Texture2DCreationDesc<'a>, -) -> Result<GpuTexture2D, TextureCreationError> { - render_ctx.texture_manager_2d.get_or_create_with( - texture_key, - &render_ctx.gpu_resources.textures, - create_texture_desc, - ) + create_texture_desc: impl FnOnce() -> ImageDataDesc<'a>, +) -> Result<GpuTexture2D, ImageDataToTextureError> { render_ctx + .texture_manager_2d + .get_or_create_with(texture_key, render_ctx, create_texture_desc) } /// Render the given image, respecting the clip rectangle of the given painter. diff --git a/crates/viewer/re_viewer_context/src/image_info.rs b/crates/viewer/re_viewer_context/src/image_info.rs index 0a514018da59..56f5032d2271 100644 --- a/crates/viewer/re_viewer_context/src/image_info.rs +++ b/crates/viewer/re_viewer_context/src/image_info.rs @@ -64,6 +64,10 @@ impl ImageInfo { self.format.height } + pub fn width_height(&self) -> [u32; 2] { + [self.format.width, self.format.height] + } + /// Returns [`ColorModel::L`] for depth and segmentation images. /// /// Currently returns [`ColorModel::RGB`] for chroma-subsampled images,