diff --git a/crates/viewer/re_data_ui/src/image.rs b/crates/viewer/re_data_ui/src/image.rs index caacd42acd80..f418a0e7bb8c 100644 --- a/crates/viewer/re_data_ui/src/image.rs +++ b/crates/viewer/re_data_ui/src/image.rs @@ -120,7 +120,7 @@ fn show_image_preview( desired_size: egui::Vec2, ) -> Result { fn texture_size(colormapped_texture: &ColormappedTexture) -> Vec2 { - let [w, h] = colormapped_texture.width_height(); + let [w, h] = colormapped_texture.texture.width_height(); egui::vec2(w as f32, h as f32) } diff --git a/crates/viewer/re_renderer/shader/decodings.wgsl b/crates/viewer/re_renderer/shader/decodings.wgsl deleted file mode 100644 index 6ea6ff6c5415..000000000000 --- a/crates/viewer/re_renderer/shader/decodings.wgsl +++ /dev/null @@ -1,73 +0,0 @@ -#import <./types.wgsl> - - -/// Loads an RGBA texel from a texture holding an NV12 or YUY2 encoded image at the given screen space coordinates. -fn decode_nv12_or_yuy2(sample_type: u32, texture: texture_2d, coords: vec2i) -> vec4f { - let texture_dim = vec2f(textureDimensions(texture).xy); - var y: f32; - var u: f32; - var v: f32; - - // WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! - // NO MORE SAMPLE TYPES CAN BE ADDED TO THIS SHADER! - // The shader is already too large and adding more sample types will push us over the size limit. 
- // See: https://github.com/rerun-io/rerun/issues/3931, https://github.com/rerun-io/rerun/issues/5073 - if sample_type == SAMPLE_TYPE_NV12 { - let uv_offset = u32(floor(texture_dim.y / 1.5)); - let uv_row = u32(coords.y / 2); - var uv_col = u32(coords.x / 2) * 2u; - - y = f32(textureLoad(texture, vec2u(coords), 0).r); - u = f32(textureLoad(texture, vec2u(u32(uv_col), uv_offset + uv_row), 0).r); - v = f32(textureLoad(texture, vec2u((u32(uv_col) + 1u), uv_offset + uv_row), 0).r); - } else if sample_type == SAMPLE_TYPE_YUY2 { - // texture is 2 * width * height - // every 4 bytes is 2 pixels - let uv_row = u32(coords.y); - // multiply by 2 because the width is multiplied by 2 - let y_col = u32(coords.x) * 2u; - y = f32(textureLoad(texture, vec2u(y_col, uv_row), 0).r); - - // at odd pixels we're in the second half of the yuyu block, offset back by 2 - let uv_col = y_col - u32(coords.x % 2) * 2u; - u = f32(textureLoad(texture, vec2u(uv_col + 1u, uv_row), 0).r); - v = f32(textureLoad(texture, vec2u(uv_col + 3u, uv_row), 0).r); - } - // WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! - - let rgb = rgb_from_yuv(vec3f(y, u, v)); - - return vec4f(rgb, 1.0); -} - - -/// Returns sRGB from YUV color. -/// -/// This conversion mirrors the function in `crates/store/re_types/src/datatypes/tensor_data_ext.rs` -/// -/// Specifying the color standard should be exposed in the future [#3541](https://github.com/rerun-io/rerun/pull/3541) -fn rgb_from_yuv(yuv: vec3f) -> vec3f { - // rescale YUV values - let y = (yuv.x - 16.0) / 219.0; - let u = (yuv.y - 128.0) / 224.0; - let v = (yuv.z - 128.0) / 224.0; - - // BT.601 (aka. SDTV, aka. Rec.601). 
wiki: https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion - // Also note according to https://en.wikipedia.org/wiki/SRGB#sYCC_extended-gamut_transformation - // > Although the RGB color primaries are based on BT.709, - // > the equations for transformation from sRGB to sYCC and vice versa are based on BT.601. - let r = y + 1.402 * v; - let g = y - 0.344 * u - 0.714 * v; - let b = y + 1.772 * u; - - // BT.709 (aka. HDTV, aka. Rec.709). wiki: https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.709_conversion - // let r = y + 1.575 * v; - // let g = y - 0.187 * u - 0.468 * v; - // let b = y + 1.856 * u; - - return vec3f( - clamp(r, 0.0, 1.0), - clamp(g, 0.0, 1.0), - clamp(b, 0.0, 1.0) - ); -} diff --git a/crates/viewer/re_renderer/shader/rectangle_fs.wgsl b/crates/viewer/re_renderer/shader/rectangle_fs.wgsl index a670524199a3..e3dcc97b7e95 100644 --- a/crates/viewer/re_renderer/shader/rectangle_fs.wgsl +++ b/crates/viewer/re_renderer/shader/rectangle_fs.wgsl @@ -1,11 +1,6 @@ #import <./colormap.wgsl> #import <./rectangle.wgsl> #import <./utils/srgb.wgsl> -#import <./decodings.wgsl> - -// WARNING! Adding anything else to this shader is very likely to push us over a size threshold that -// causes the failure reported in: https://github.com/rerun-io/rerun/issues/3931 -// Make sure any changes are tested in Chrome on Linux using the Intel Mesa driver. 
fn is_magnifying(pixel_coord: vec2f) -> bool { return fwidth(pixel_coord.x) < 1.0; @@ -86,10 +81,6 @@ fn fs_main(in: VertexOut) -> @location(0) vec4f { texture_dimensions = vec2f(textureDimensions(texture_sint).xy); } else if rect_info.sample_type == SAMPLE_TYPE_UINT { texture_dimensions = vec2f(textureDimensions(texture_uint).xy); - } else if rect_info.sample_type == SAMPLE_TYPE_NV12 { - texture_dimensions = vec2f(textureDimensions(texture_uint).xy); - } else if rect_info.sample_type == SAMPLE_TYPE_YUY2 { - texture_dimensions = vec2f(textureDimensions(texture_uint).xy); } let coord = in.texcoord * texture_dimensions; @@ -141,14 +132,6 @@ fn fs_main(in: VertexOut) -> @location(0) vec4f { vec4f(textureLoad(texture_uint, v01_coord, 0)), vec4f(textureLoad(texture_uint, v10_coord, 0)), vec4f(textureLoad(texture_uint, v11_coord, 0))); - } else if rect_info.sample_type == SAMPLE_TYPE_NV12 || rect_info.sample_type == SAMPLE_TYPE_YUY2{ - normalized_value = decode_color_and_filter_nearest_or_bilinear( - filter_nearest, - coord, - decode_nv12_or_yuy2(rect_info.sample_type, texture_uint, v00_coord), - decode_nv12_or_yuy2(rect_info.sample_type, texture_uint, v01_coord), - decode_nv12_or_yuy2(rect_info.sample_type, texture_uint, v10_coord), - decode_nv12_or_yuy2(rect_info.sample_type, texture_uint, v11_coord)); } else { return ERROR_RGBA; // unknown sample type } diff --git a/crates/viewer/re_renderer/src/renderer/rectangles.rs b/crates/viewer/re_renderer/src/renderer/rectangles.rs index 2b3c60dd2f51..fe7c4ed9298c 100644 --- a/crates/viewer/re_renderer/src/renderer/rectangles.rs +++ b/crates/viewer/re_renderer/src/renderer/rectangles.rs @@ -49,9 +49,6 @@ pub enum TextureFilterMin { /// Describes how the color information is encoded in the texture. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum ShaderDecoding { - Nv12, - Yuy2, - /// Do BGR(A)->RGB(A) conversion is in the shader. 
Bgr, } @@ -145,17 +142,7 @@ impl ColormappedTexture { } pub fn width_height(&self) -> [u32; 2] { - match self.shader_decoding { - Some(ShaderDecoding::Nv12) => { - let [width, height] = self.texture.width_height(); - [width, height * 2 / 3] - } - Some(ShaderDecoding::Yuy2) => { - let [width, height] = self.texture.width_height(); - [width / 2, height] - } - Some(ShaderDecoding::Bgr) | None => self.texture.width_height(), - } + self.texture.width_height() } } @@ -237,8 +224,6 @@ mod gpu_data { const SAMPLE_TYPE_FLOAT: u32 = 1; const SAMPLE_TYPE_SINT: u32 = 2; const SAMPLE_TYPE_UINT: u32 = 3; - const SAMPLE_TYPE_NV12: u32 = 4; - const SAMPLE_TYPE_YUY2: u32 = 5; // How do we do colormapping? const COLOR_MAPPER_OFF_GRAYSCALE: u32 = 1; @@ -318,15 +303,7 @@ mod gpu_data { let sample_type = match texture_format.sample_type(None, None) { Some(wgpu::TextureSampleType::Float { .. }) => SAMPLE_TYPE_FLOAT, Some(wgpu::TextureSampleType::Sint) => SAMPLE_TYPE_SINT, - Some(wgpu::TextureSampleType::Uint) => { - if shader_decoding == &Some(super::ShaderDecoding::Nv12) { - SAMPLE_TYPE_NV12 - } else if shader_decoding == &Some(super::ShaderDecoding::Yuy2) { - SAMPLE_TYPE_YUY2 - } else { - SAMPLE_TYPE_UINT - } - } + Some(wgpu::TextureSampleType::Uint) => SAMPLE_TYPE_UINT, _ => { return Err(RectangleError::TextureFormatNotSupported(texture_format)); } diff --git a/crates/viewer/re_renderer/src/workspace_shaders.rs b/crates/viewer/re_renderer/src/workspace_shaders.rs index 474c5ed3f3a2..a4b4f0ef98a7 100644 --- a/crates/viewer/re_renderer/src/workspace_shaders.rs +++ b/crates/viewer/re_renderer/src/workspace_shaders.rs @@ -43,12 +43,6 @@ pub fn init() { fs.create_file(virtpath, content).unwrap(); } - { - let virtpath = Path::new("shader/decodings.wgsl"); - let content = include_str!("../shader/decodings.wgsl").into(); - fs.create_file(virtpath, content).unwrap(); - } - { let virtpath = Path::new("shader/depth_cloud.wgsl"); let content = include_str!("../shader/depth_cloud.wgsl").into(); 
diff --git a/crates/viewer/re_space_view_tensor/src/space_view_class.rs b/crates/viewer/re_space_view_tensor/src/space_view_class.rs index 1b3c770a135e..d5f44b7a7741 100644 --- a/crates/viewer/re_space_view_tensor/src/space_view_class.rs +++ b/crates/viewer/re_space_view_tensor/src/space_view_class.rs @@ -353,7 +353,7 @@ impl TensorSpaceView { &colormap, gamma, )?; - let [width, height] = colormapped_texture.width_height(); + let [width, height] = colormapped_texture.texture.width_height(); let view_fit: ViewFit = ViewProperty::from_archetype::( ctx.blueprint_db(), diff --git a/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs b/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs index 12686f3d9dca..7114ef39345e 100644 --- a/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs +++ b/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs @@ -112,7 +112,6 @@ fn color_image_to_gpu( emath::Rangef::new(-1.0, 1.0) } else if let Some(shader_decoding) = shader_decoding { match shader_decoding { - ShaderDecoding::Nv12 | ShaderDecoding::Yuy2 => emath::Rangef::new(0.0, 1.0), ShaderDecoding::Bgr => image_data_range_heuristic(image_stats, &image_format), } } else { @@ -121,10 +120,8 @@ fn color_image_to_gpu( let color_mapper = if let Some(shader_decoding) = shader_decoding { match shader_decoding { - // We only have 1D color maps, therefore chroma downsampled and BGR formats can't have color maps. - ShaderDecoding::Bgr | ShaderDecoding::Nv12 | ShaderDecoding::Yuy2 => { - ColorMapper::OffRGB - } + // We only have 1D color maps, therefore BGR formats can't have color maps. 
+ ShaderDecoding::Bgr => ColorMapper::OffRGB, } } else if texture_format.components() == 1 { // TODO(andreas): support colormap property @@ -193,6 +190,7 @@ pub fn image_data_range_heuristic(image_stats: &ImageStats, image_format: &Image fn image_decode_srgb_gamma_heuristic(image_stats: &ImageStats, image_format: ImageFormat) -> bool { if let Some(pixel_format) = image_format.pixel_format { match pixel_format { + // Have to do the conversion because we don't use an `Srgb` texture format. PixelFormat::NV12 | PixelFormat::YUY2 => true, } } else { @@ -219,23 +217,18 @@ pub fn required_shader_decode( image_format: &ImageFormat, ) -> Option<ShaderDecoding> { let color_model = image_format.color_model(); - match image_format.pixel_format { - Some(PixelFormat::NV12) => Some(ShaderDecoding::Nv12), - Some(PixelFormat::YUY2) => Some(ShaderDecoding::Yuy2), - None => { - if color_model == ColorModel::BGR || color_model == ColorModel::BGRA { - // U8 can be converted to RGBA without the shader's help since there's a format for it. - if image_format.datatype() == ChannelDatatype::U8 - && device_caps.support_bgra_textures() - { - None - } else { - Some(ShaderDecoding::Bgr) - } - } else { - None - } + + if image_format.pixel_format.is_none() + && (color_model == ColorModel::BGR || color_model == ColorModel::BGRA) + { + // U8 can be converted to RGBA without the shader's help since there's a format for it. + if image_format.datatype() == ChannelDatatype::U8 && device_caps.support_bgra_textures() { + None + } else { + Some(ShaderDecoding::Bgr) + } + } else { + None + } } } @@ -243,10 +236,7 @@ pub fn required_shader_decode( /// /// The resulting texture has requirements as describe by [`required_shader_decode`]. /// -/// TODO(andreas): The consumer needs to be aware of bgr and chroma downsampling conversions. -/// It would be much better if we had a separate `re_renderer`/gpu driven conversion pipeline for this -/// which would allow us to virtually extend over wgpu's texture formats. 
-/// This would allow us to seamlessly support e.g. NV12 on meshes without the mesh shader having to be updated. +/// TODO(andreas): The consumer needs to be aware of bgr conversions. Other conversions are already taken care of upon upload. pub fn texture_creation_desc_from_color_image<'a>( device_caps: &DeviceCaps, image: &'a ImageInfo,