diff --git a/.gitignore b/.gitignore
index 0a4c662a860e..44cbc46e9872 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,6 +31,7 @@ _deps
# Python build artifacts:
__pycache__
*.pyc
+*.pyd
*.so
**/.pytest_cache
diff --git a/crates/build/re_types_builder/src/codegen/rust/api.rs b/crates/build/re_types_builder/src/codegen/rust/api.rs
index 5545f8855fcd..4f31be5ed752 100644
--- a/crates/build/re_types_builder/src/codegen/rust/api.rs
+++ b/crates/build/re_types_builder/src/codegen/rust/api.rs
@@ -161,6 +161,11 @@ fn generate_object_file(
code.push_str("#![allow(deprecated)]\n");
}
+ if obj.is_enum() {
+ // Needed for PixelFormat. Should we limit this via attribute to just that?
+ code.push_str("#![allow(non_camel_case_types)]\n");
+ }
+
code.push_str("\n\n");
code.push_str("use ::re_types_core::external::arrow2;\n");
diff --git a/crates/store/re_types/definitions/rerun/datatypes/pixel_format.fbs b/crates/store/re_types/definitions/rerun/datatypes/pixel_format.fbs
index 298ba8736268..d0073e1ad2a3 100644
--- a/crates/store/re_types/definitions/rerun/datatypes/pixel_format.fbs
+++ b/crates/store/re_types/definitions/rerun/datatypes/pixel_format.fbs
@@ -1,5 +1,9 @@
namespace rerun.datatypes;
+// TODO(andreas): Clarify relationship to color primaries. Right now there's some hardcoded differences between formats.
+// See `image_to_gpu.rs`
+// Suggestion: guides heuristic but doesn't specify it unless noted.
+
/// Specifieds a particular format of an [archetypes.Image].
///
/// Most images can be described by a [datatypes.ColorModel] and a [datatypes.ChannelDatatype],
@@ -24,13 +28,82 @@ enum PixelFormat: ubyte {
// this organization and subsequently reduce the chance we may find ourselves wanting to
// change the values in the future.
- /// `NV12` (aka `Y_UV12`) is a YUV 4:2:0 chroma downsampled format with 12 bits per pixel and 8 bits per channel.
+ /// `Y_U_V24` is a YUV 4:4:4 fully planar YUV format without chroma downsampling, also known as `I444`.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes.
+ Y_U_V24_LimitedRange = 39,
+
+ /// `Y_U_V24` is a YUV 4:4:4 fully planar YUV format without chroma downsampling, also known as `I444`.
+ ///
+ /// This uses full range YUV with all components ranging from 0 to 255
+ /// (as opposed to "limited range" YUV as used e.g. in NV12).
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes.
+ Y_U_V24_FullRange = 40,
+
+ /// `Y_U_V16` is a YUV 4:2:2 fully planar YUV format without chroma downsampling, also known as `I422`.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ /// the horizontal resolution of the Y plane.
+ Y_U_V16_LimitedRange = 49, // Ocean doesn't have a short code for this
+
+ /// `Y_U_V16` is a YUV 4:2:2 fully planar YUV format without chroma downsampling, also known as `I422`.
+ ///
+ /// This uses full range YUV with all components ranging from 0 to 255
+ /// (as opposed to "limited range" YUV as used e.g. in NV12).
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ /// the horizontal resolution of the Y plane.
+ Y_U_V16_FullRange = 50, // Ocean doesn't have a short code for this
+
+ /// `Y_U_V12` is a YUV 4:2:0 fully planar YUV format without chroma downsampling, also known as `I420`.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ /// the resolution of the Y plane.
+ Y_U_V12_LimitedRange = 20,
+
+ /// `Y_U_V12` is a YUV 4:2:0 fully planar YUV format without chroma downsampling, also known as `I420`.
+ ///
+ /// This uses full range YUV with all components ranging from 0 to 255
+ /// (as opposed to "limited range" YUV as used e.g. in NV12).
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ /// the resolution of the Y plane.
+ Y_U_V12_FullRange = 44,
+
+ /// Monochrome Y plane only, essentially a YUV 4:0:0 planar format.
+ ///
+ /// Also known as just "gray".
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235].
+ /// If not for this range limitation/remapping, this is almost identical to 8bit luminance/grayscale (see [datatypes.ColorModel]).
+ Y8_LimitedRange = 41,
+
+ /// Monochrome Y plane only, essentially a YUV 4:0:0 planar format.
+ ///
+ /// Also known as just "gray". This is virtually identical to a 8bit luminance/grayscale (see [datatypes.ColorModel]).
+ ///
+ /// This uses entire range YUV, i.e. Y is expected to be within [0, 255].
+ /// (as opposed to "limited range" YUV as used e.g. in NV12).
+ Y8_FullRange = 30,
+
+ /// `NV12` (aka `Y_UV12`) is a YUV 4:2:0 chroma downsampled format with 12 bits per pixel and 8 bits per channel.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
///
/// First comes entire image in Y in one plane,
/// followed by a plane with interleaved lines ordered as U0, V0, U1, V1, etc.
NV12 = 26 (default), // _something_ has to be the default 🤷♀️
- /// `YUY2` (aka `YUYV` or `YUYV16`), is a YUV 4:2:2 chroma downsampled format with 16 bits per pixel and 8 bits per channel.
+ /// `YUY2` (aka `YUYV` or `YUYV16`), is a YUV 4:2:2 chroma downsampled format with 16 bits per pixel and 8 bits per channel.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
///
/// The order of the channels is Y0, U0, Y1, V0, all in the same plane.
YUY2 = 27,
diff --git a/crates/store/re_types/src/blueprint/components/background_kind.rs b/crates/store/re_types/src/blueprint/components/background_kind.rs
index 90f72396843d..d924523cc4f2 100644
--- a/crates/store/re_types/src/blueprint/components/background_kind.rs
+++ b/crates/store/re_types/src/blueprint/components/background_kind.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/store/re_types/src/blueprint/components/corner2d.rs b/crates/store/re_types/src/blueprint/components/corner2d.rs
index 794dbec2b288..f5d00addf4a2 100644
--- a/crates/store/re_types/src/blueprint/components/corner2d.rs
+++ b/crates/store/re_types/src/blueprint/components/corner2d.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/store/re_types/src/blueprint/components/panel_state.rs b/crates/store/re_types/src/blueprint/components/panel_state.rs
index b8eaee2edd87..a03e05b80050 100644
--- a/crates/store/re_types/src/blueprint/components/panel_state.rs
+++ b/crates/store/re_types/src/blueprint/components/panel_state.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/store/re_types/src/blueprint/components/view_fit.rs b/crates/store/re_types/src/blueprint/components/view_fit.rs
index eef956af788b..e99228f743d7 100644
--- a/crates/store/re_types/src/blueprint/components/view_fit.rs
+++ b/crates/store/re_types/src/blueprint/components/view_fit.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/store/re_types/src/components/aggregation_policy.rs b/crates/store/re_types/src/components/aggregation_policy.rs
index 524bee85abb6..c8b696f5a2d2 100644
--- a/crates/store/re_types/src/components/aggregation_policy.rs
+++ b/crates/store/re_types/src/components/aggregation_policy.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/store/re_types/src/components/colormap.rs b/crates/store/re_types/src/components/colormap.rs
index 313ae8a074d1..3110a4077440 100644
--- a/crates/store/re_types/src/components/colormap.rs
+++ b/crates/store/re_types/src/components/colormap.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/store/re_types/src/components/fill_mode.rs b/crates/store/re_types/src/components/fill_mode.rs
index ad79a15162ca..d434fd8d1a4a 100644
--- a/crates/store/re_types/src/components/fill_mode.rs
+++ b/crates/store/re_types/src/components/fill_mode.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/store/re_types/src/components/magnification_filter.rs b/crates/store/re_types/src/components/magnification_filter.rs
index d704bd593946..be0b6392be3d 100644
--- a/crates/store/re_types/src/components/magnification_filter.rs
+++ b/crates/store/re_types/src/components/magnification_filter.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/store/re_types/src/components/marker_shape.rs b/crates/store/re_types/src/components/marker_shape.rs
index b2da33d1dcd7..78da7ffeb1f9 100644
--- a/crates/store/re_types/src/components/marker_shape.rs
+++ b/crates/store/re_types/src/components/marker_shape.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/store/re_types/src/components/transform_relation.rs b/crates/store/re_types/src/components/transform_relation.rs
index d3543234659d..06137a33b181 100644
--- a/crates/store/re_types/src/components/transform_relation.rs
+++ b/crates/store/re_types/src/components/transform_relation.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/store/re_types/src/datatypes/channel_datatype.rs b/crates/store/re_types/src/datatypes/channel_datatype.rs
index d5c0d6222740..e58e4d970e7d 100644
--- a/crates/store/re_types/src/datatypes/channel_datatype.rs
+++ b/crates/store/re_types/src/datatypes/channel_datatype.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/store/re_types/src/datatypes/color_model.rs b/crates/store/re_types/src/datatypes/color_model.rs
index 0dfe0ae4224c..48a677a4bbe7 100644
--- a/crates/store/re_types/src/datatypes/color_model.rs
+++ b/crates/store/re_types/src/datatypes/color_model.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/store/re_types/src/datatypes/pixel_format.rs b/crates/store/re_types/src/datatypes/pixel_format.rs
index 45d808a40f0b..348b292cd930 100644
--- a/crates/store/re_types/src/datatypes/pixel_format.rs
+++ b/crates/store/re_types/src/datatypes/pixel_format.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
@@ -32,7 +33,18 @@ use ::re_types_core::{DeserializationError, DeserializationResult};
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Default)]
#[repr(u8)]
pub enum PixelFormat {
- /// `NV12` (aka `Y_UV12`) is a YUV 4:2:0 chroma downsampled format with 12 bits per pixel and 8 bits per channel.
+ /// `Y_U_V12` is a YUV 4:2:0 fully planar YUV format without chroma downsampling, also known as `I420`.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ /// the resolution of the Y plane.
+ #[allow(clippy::upper_case_acronyms)]
+ Y_U_V12_LimitedRange = 20,
+
+ /// `NV12` (aka `Y_UV12`) is a YUV 4:2:0 chroma downsampled format with 12 bits per pixel and 8 bits per channel.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
///
/// First comes entire image in Y in one plane,
/// followed by a plane with interleaved lines ordered as U0, V0, U1, V1, etc.
@@ -40,27 +52,128 @@ pub enum PixelFormat {
#[allow(clippy::upper_case_acronyms)]
NV12 = 26,
- /// `YUY2` (aka `YUYV` or `YUYV16`), is a YUV 4:2:2 chroma downsampled format with 16 bits per pixel and 8 bits per channel.
+ /// `YUY2` (aka `YUYV` or `YUYV16`), is a YUV 4:2:2 chroma downsampled format with 16 bits per pixel and 8 bits per channel.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
///
/// The order of the channels is Y0, U0, Y1, V0, all in the same plane.
#[allow(clippy::upper_case_acronyms)]
YUY2 = 27,
+
+ /// Monochrome Y plane only, essentially a YUV 4:0:0 planar format.
+ ///
+ /// Also known as just "gray". This is virtually identical to a 8bit luminance/grayscale (see [`datatypes::ColorModel`][crate::datatypes::ColorModel]).
+ ///
+ /// This uses entire range YUV, i.e. Y is expected to be within [0, 255].
+ /// (as opposed to "limited range" YUV as used e.g. in NV12).
+ #[allow(clippy::upper_case_acronyms)]
+ Y8_FullRange = 30,
+
+ /// `Y_U_V24` is a YUV 4:4:4 fully planar YUV format without chroma downsampling, also known as `I444`.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes.
+ #[allow(clippy::upper_case_acronyms)]
+ Y_U_V24_LimitedRange = 39,
+
+ /// `Y_U_V24` is a YUV 4:4:4 fully planar YUV format without chroma downsampling, also known as `I444`.
+ ///
+ /// This uses full range YUV with all components ranging from 0 to 255
+ /// (as opposed to "limited range" YUV as used e.g. in NV12).
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes.
+ #[allow(clippy::upper_case_acronyms)]
+ Y_U_V24_FullRange = 40,
+
+ /// Monochrome Y plane only, essentially a YUV 4:0:0 planar format.
+ ///
+ /// Also known as just "gray".
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235].
+ /// If not for this range limitation/remapping, this is almost identical to 8bit luminance/grayscale (see [`datatypes::ColorModel`][crate::datatypes::ColorModel]).
+ #[allow(clippy::upper_case_acronyms)]
+ Y8_LimitedRange = 41,
+
+ /// `Y_U_V12` is a YUV 4:2:0 fully planar YUV format without chroma downsampling, also known as `I420`.
+ ///
+ /// This uses full range YUV with all components ranging from 0 to 255
+ /// (as opposed to "limited range" YUV as used e.g. in NV12).
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ /// the resolution of the Y plane.
+ #[allow(clippy::upper_case_acronyms)]
+ Y_U_V12_FullRange = 44,
+
+ /// `Y_U_V16` is a YUV 4:2:2 fully planar YUV format without chroma downsampling, also known as `I422`.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ /// the horizontal resolution of the Y plane.
+ #[allow(clippy::upper_case_acronyms)]
+ Y_U_V16_LimitedRange = 49,
+
+ /// `Y_U_V16` is a YUV 4:2:2 fully planar YUV format without chroma downsampling, also known as `I422`.
+ ///
+ /// This uses full range YUV with all components ranging from 0 to 255
+ /// (as opposed to "limited range" YUV as used e.g. in NV12).
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ /// the horizontal resolution of the Y plane.
+ #[allow(clippy::upper_case_acronyms)]
+ Y_U_V16_FullRange = 50,
}
impl ::re_types_core::reflection::Enum for PixelFormat {
#[inline]
fn variants() -> &'static [Self] {
- &[Self::NV12, Self::YUY2]
+ &[
+ Self::Y_U_V12_LimitedRange,
+ Self::NV12,
+ Self::YUY2,
+ Self::Y8_FullRange,
+ Self::Y_U_V24_LimitedRange,
+ Self::Y_U_V24_FullRange,
+ Self::Y8_LimitedRange,
+ Self::Y_U_V12_FullRange,
+ Self::Y_U_V16_LimitedRange,
+ Self::Y_U_V16_FullRange,
+ ]
}
#[inline]
fn docstring_md(self) -> &'static str {
match self {
+ Self::Y_U_V12_LimitedRange => {
+ "`Y_U_V12` is a YUV 4:2:0 fully planar YUV format without chroma downsampling, also known as `I420`.\n\nThis uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].\n\nFirst comes entire image in Y in one plane, followed by the U and V planes, which each only have half\nthe resolution of the Y plane."
+ }
Self::NV12 => {
- "`NV12` (aka `Y_UV12`) is a YUV 4:2:0 chroma downsampled format with 12 bits per pixel and 8 bits per channel.\n\nFirst comes entire image in Y in one plane,\nfollowed by a plane with interleaved lines ordered as U0, V0, U1, V1, etc."
+ "`NV12` (aka `Y_UV12`) is a YUV 4:2:0 chroma downsampled format with 12 bits per pixel and 8 bits per channel.\n\nThis uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].\n\nFirst comes entire image in Y in one plane,\nfollowed by a plane with interleaved lines ordered as U0, V0, U1, V1, etc."
}
Self::YUY2 => {
- "`YUY2` (aka `YUYV` or `YUYV16`), is a YUV 4:2:2 chroma downsampled format with 16 bits per pixel and 8 bits per channel.\n\nThe order of the channels is Y0, U0, Y1, V0, all in the same plane."
+ "`YUY2` (aka `YUYV` or `YUYV16`), is a YUV 4:2:2 chroma downsampled format with 16 bits per pixel and 8 bits per channel.\n\nThis uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].\n\nThe order of the channels is Y0, U0, Y1, V0, all in the same plane."
+ }
+ Self::Y8_FullRange => {
+ "Monochrome Y plane only, essentially a YUV 4:0:0 planar format.\n\nAlso known as just \"gray\". This is virtually identical to a 8bit luminance/grayscale (see [`datatypes::ColorModel`][crate::datatypes::ColorModel]).\n\nThis uses entire range YUV, i.e. Y is expected to be within [0, 255].\n(as opposed to \"limited range\" YUV as used e.g. in NV12)."
+ }
+ Self::Y_U_V24_LimitedRange => {
+ "`Y_U_V24` is a YUV 4:4:4 fully planar YUV format without chroma downsampling, also known as `I444`.\n\nThis uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].\n\nFirst comes entire image in Y in one plane, followed by the U and V planes."
+ }
+ Self::Y_U_V24_FullRange => {
+ "`Y_U_V24` is a YUV 4:4:4 fully planar YUV format without chroma downsampling, also known as `I444`.\n\nThis uses full range YUV with all components ranging from 0 to 255\n(as opposed to \"limited range\" YUV as used e.g. in NV12).\n\nFirst comes entire image in Y in one plane, followed by the U and V planes."
+ }
+ Self::Y8_LimitedRange => {
+ "Monochrome Y plane only, essentially a YUV 4:0:0 planar format.\n\nAlso known as just \"gray\".\n\nThis uses limited range YUV, i.e. Y is expected to be within [16, 235].\nIf not for this range limitation/remapping, this is almost identical to 8bit luminance/grayscale (see [`datatypes::ColorModel`][crate::datatypes::ColorModel])."
+ }
+ Self::Y_U_V12_FullRange => {
+ "`Y_U_V12` is a YUV 4:2:0 fully planar YUV format without chroma downsampling, also known as `I420`.\n\nThis uses full range YUV with all components ranging from 0 to 255\n(as opposed to \"limited range\" YUV as used e.g. in NV12).\n\nFirst comes entire image in Y in one plane, followed by the U and V planes, which each only have half\nthe resolution of the Y plane."
+ }
+ Self::Y_U_V16_LimitedRange => {
+ "`Y_U_V16` is a YUV 4:2:2 fully planar YUV format without chroma downsampling, also known as `I422`.\n\nThis uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].\n\nFirst comes entire image in Y in one plane, followed by the U and V planes, which each only have half\nthe horizontal resolution of the Y plane."
+ }
+ Self::Y_U_V16_FullRange => {
+ "`Y_U_V16` is a YUV 4:2:2 fully planar YUV format without chroma downsampling, also known as `I422`.\n\nThis uses full range YUV with all components ranging from 0 to 255\n(as opposed to \"limited range\" YUV as used e.g. in NV12).\n\nFirst comes entire image in Y in one plane, followed by the U and V planes, which each only have half\nthe horizontal resolution of the Y plane."
}
}
}
@@ -81,8 +194,16 @@ impl ::re_types_core::SizeBytes for PixelFormat {
impl std::fmt::Display for PixelFormat {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
+ Self::Y_U_V12_LimitedRange => write!(f, "Y_U_V12_LimitedRange"),
Self::NV12 => write!(f, "NV12"),
Self::YUY2 => write!(f, "YUY2"),
+ Self::Y8_FullRange => write!(f, "Y8_FullRange"),
+ Self::Y_U_V24_LimitedRange => write!(f, "Y_U_V24_LimitedRange"),
+ Self::Y_U_V24_FullRange => write!(f, "Y_U_V24_FullRange"),
+ Self::Y8_LimitedRange => write!(f, "Y8_LimitedRange"),
+ Self::Y_U_V12_FullRange => write!(f, "Y_U_V12_FullRange"),
+ Self::Y_U_V16_LimitedRange => write!(f, "Y_U_V16_LimitedRange"),
+ Self::Y_U_V16_FullRange => write!(f, "Y_U_V16_FullRange"),
}
}
}
@@ -157,8 +278,16 @@ impl ::re_types_core::Loggable for PixelFormat {
.into_iter()
.map(|opt| opt.copied())
.map(|typ| match typ {
+ Some(20) => Ok(Some(Self::Y_U_V12_LimitedRange)),
Some(26) => Ok(Some(Self::NV12)),
Some(27) => Ok(Some(Self::YUY2)),
+ Some(30) => Ok(Some(Self::Y8_FullRange)),
+ Some(39) => Ok(Some(Self::Y_U_V24_LimitedRange)),
+ Some(40) => Ok(Some(Self::Y_U_V24_FullRange)),
+ Some(41) => Ok(Some(Self::Y8_LimitedRange)),
+ Some(44) => Ok(Some(Self::Y_U_V12_FullRange)),
+ Some(49) => Ok(Some(Self::Y_U_V16_LimitedRange)),
+ Some(50) => Ok(Some(Self::Y_U_V16_FullRange)),
None => Ok(None),
Some(invalid) => Err(DeserializationError::missing_union_arm(
Self::arrow_datatype(),
diff --git a/crates/store/re_types/src/datatypes/pixel_format_ext.rs b/crates/store/re_types/src/datatypes/pixel_format_ext.rs
index 23191ec886f1..55b12e01888b 100644
--- a/crates/store/re_types/src/datatypes/pixel_format_ext.rs
+++ b/crates/store/re_types/src/datatypes/pixel_format_ext.rs
@@ -1,4 +1,4 @@
-use crate::image::rgb_from_yuv;
+use crate::image::{rgb_from_yuv, ColorPrimaries};
use super::{ChannelDatatype, ColorModel, PixelFormat};
@@ -7,7 +7,16 @@ impl PixelFormat {
#[inline]
pub fn has_alpha(&self) -> bool {
match self {
- Self::NV12 | Self::YUY2 => false,
+ Self::Y_U_V12_FullRange
+ | Self::Y_U_V16_FullRange
+ | Self::Y_U_V24_FullRange
+ | Self::Y8_FullRange
+ | Self::Y_U_V12_LimitedRange
+ | Self::Y_U_V16_LimitedRange
+ | Self::Y_U_V24_LimitedRange
+ | Self::Y8_LimitedRange
+ | Self::NV12
+ | Self::YUY2 => false,
}
}
@@ -15,7 +24,16 @@ impl PixelFormat {
/// Is this pixel format floating point?
pub fn is_float(&self) -> bool {
match self {
- Self::NV12 | Self::YUY2 => false,
+ Self::Y_U_V12_FullRange
+ | Self::Y_U_V16_FullRange
+ | Self::Y_U_V24_FullRange
+ | Self::Y8_FullRange
+ | Self::Y_U_V12_LimitedRange
+ | Self::Y_U_V16_LimitedRange
+ | Self::Y_U_V24_LimitedRange
+ | Self::Y8_LimitedRange
+ | Self::NV12
+ | Self::YUY2 => false,
}
}
@@ -24,16 +42,47 @@ impl PixelFormat {
pub fn num_bytes(&self, [w, h]: [u32; 2]) -> usize {
let num_pixels = w as usize * h as usize;
match self {
- Self::NV12 => 12 * num_pixels / 8,
- Self::YUY2 => 16 * num_pixels / 8,
+ // 444 formats: three full-resolution 8-bit planes -> 3 bytes per pixel.
+ Self::Y_U_V24_FullRange | Self::Y_U_V24_LimitedRange => num_pixels * 3,
+
+ // 422 formats: 16 bits per pixel (YUY2 is packed 4:2:2).
+ Self::Y_U_V16_FullRange | Self::Y_U_V16_LimitedRange | Self::YUY2 => {
+ 16 * num_pixels / 8
+ }
+
+ // 420 formats: 12 bits per pixel (NV12 is semi-planar 4:2:0).
+ Self::Y_U_V12_FullRange | Self::Y_U_V12_LimitedRange | Self::NV12 => {
+ 12 * num_pixels / 8
+ }
+
+ // Monochrome formats.
+ Self::Y8_LimitedRange | Self::Y8_FullRange => num_pixels,
}
}
/// The color model derived from this pixel format.
#[inline]
pub fn color_model(&self) -> ColorModel {
+ #[allow(clippy::match_same_arms)]
match self {
- Self::NV12 | Self::YUY2 => ColorModel::RGB,
+ Self::Y_U_V12_FullRange
+ | Self::Y_U_V16_FullRange
+ | Self::Y_U_V24_FullRange
+ | Self::Y_U_V12_LimitedRange
+ | Self::Y_U_V16_LimitedRange
+ | Self::Y_U_V24_LimitedRange
+ | Self::NV12
+ | Self::YUY2 => ColorModel::RGB,
+
+ // TODO(andreas): This shouldn't be ColorModel::RGB, but our YUV converter can't do anything else right now:
+ // The converter doesn't *have* to always output RGB, but having it sometimes output R(8) specifically for the
+ // YUV converter requires me to do more bookkeeping (needs a new renderpipeline and I expect other ripples).
+ //
+ // As of writing, having this color_model "incorrectly" be RGB mostly affects hovering logic which will continue to show RGB rather than L.
+ //
+ // Note that this does not affect the memory Y8 needs. It just implies that we use more GPU memory than we should.
+ // However, we typically (see image cache) hold the converted GPU textures only as long as we actually draw with them.
+ Self::Y8_LimitedRange | Self::Y8_FullRange => ColorModel::RGB,
}
}
@@ -41,7 +90,16 @@ impl PixelFormat {
/// The datatype that this decodes into.
pub fn datatype(&self) -> ChannelDatatype {
match self {
- Self::NV12 | Self::YUY2 => ChannelDatatype::U8,
+ Self::Y_U_V12_FullRange
+ | Self::Y_U_V16_FullRange
+ | Self::Y_U_V24_FullRange
+ | Self::Y8_FullRange
+ | Self::Y_U_V12_LimitedRange
+ | Self::Y_U_V16_LimitedRange
+ | Self::Y_U_V24_LimitedRange
+ | Self::Y8_LimitedRange
+ | Self::NV12
+ | Self::YUY2 => ChannelDatatype::U8,
}
}
@@ -51,6 +109,45 @@ impl PixelFormat {
#[inline]
pub fn decode_yuv_at(&self, buf: &[u8], [w, h]: [u32; 2], [x, y]: [u32; 2]) -> Option<[u8; 3]> {
match self {
+ Self::Y_U_V24_FullRange | Self::Y_U_V24_LimitedRange => {
+ let plane_size = (w * h) as usize;
+ let plane_coord = (y * w + x) as usize;
+
+ let luma = *buf.get(plane_coord)?;
+ let u = *buf.get(plane_coord + plane_size)?;
+ let v = *buf.get(plane_coord + plane_size * 2)?;
+ Some([luma, u, v])
+ }
+
+ Self::Y_U_V16_FullRange | Self::Y_U_V16_LimitedRange => {
+ let y_plane_size = (w * h) as usize;
+ let uv_plane_size = y_plane_size / 2; // Half horizontal resolution.
+ let y_plane_coord = (y * w + x) as usize;
+ let uv_plane_coord = y_plane_coord / 2; // == (y * (w / 2) + x / 2)
+
+ let luma = *buf.get(y_plane_coord)?;
+ let u = *buf.get(uv_plane_coord + y_plane_size)?;
+ let v = *buf.get(uv_plane_coord + y_plane_size + uv_plane_size)?;
+ Some([luma, u, v])
+ }
+
+ Self::Y_U_V12_FullRange | Self::Y_U_V12_LimitedRange => {
+ let y_plane_size = (w * h) as usize;
+ let uv_plane_size = y_plane_size / 4; // Half horizontal & vertical resolution.
+ let y_plane_coord = (y * w + x) as usize;
+ let uv_plane_coord = ((y / 2) * (w / 2) + x / 2) as usize; // Half resolution in both dimensions.
+
+ let luma = *buf.get(y_plane_coord)?;
+ let u = *buf.get(uv_plane_coord + y_plane_size)?;
+ let v = *buf.get(uv_plane_coord + y_plane_size + uv_plane_size)?;
+ Some([luma, u, v])
+ }
+
+ Self::Y8_FullRange | Self::Y8_LimitedRange => {
+ let luma = *buf.get((y * w + x) as usize)?;
+ Some([luma, 128, 128])
+ }
+
Self::NV12 => {
let uv_offset = w * h;
let luma = *buf.get((y * w + x) as usize)?;
@@ -70,12 +167,56 @@ impl PixelFormat {
}
}
+ /// Returns true if the format is a YUV format using
+ /// limited range YUV.
+ ///
+ /// I.e. for 8bit data, Y is valid in [16, 235] and U/V [16, 240], rather than 0-255.
+ pub fn is_limited_yuv_range(&self) -> bool {
+ match self {
+ Self::Y_U_V24_LimitedRange
+ | Self::Y_U_V16_LimitedRange
+ | Self::Y_U_V12_LimitedRange
+ | Self::Y8_LimitedRange
+ | Self::NV12
+ | Self::YUY2 => true,
+
+ Self::Y_U_V24_FullRange
+ | Self::Y_U_V12_FullRange
+ | Self::Y_U_V16_FullRange
+ | Self::Y8_FullRange => false,
+ }
+ }
+
+ /// Color primaries used by this format.
+ // TODO(andreas): Expose this in the API separately and document it better.
+ pub fn color_primaries(&self) -> ColorPrimaries {
+ match self {
+ Self::Y_U_V24_LimitedRange
+ | Self::Y_U_V24_FullRange
+ | Self::Y_U_V12_LimitedRange
+ | Self::Y_U_V12_FullRange
+ | Self::Y_U_V16_LimitedRange
+ | Self::Y_U_V16_FullRange
+ // TODO(andreas): Y8 isn't really color, does this even make sense?
+ | Self::Y8_FullRange
+ | Self::Y8_LimitedRange => ColorPrimaries::Bt709,
+
+ Self::NV12 | Self::YUY2 => ColorPrimaries::Bt601,
+ }
+ }
+
/// Random-access decoding of a specific pixel of an image.
///
/// Return `None` if out-of-range.
#[inline]
pub fn decode_rgb_at(&self, buf: &[u8], [w, h]: [u32; 2], [x, y]: [u32; 2]) -> Option<[u8; 3]> {
let [y, u, v] = self.decode_yuv_at(buf, [w, h], [x, y])?;
- Some(rgb_from_yuv(y, u, v))
+ Some(rgb_from_yuv(
+ y,
+ u,
+ v,
+ self.is_limited_yuv_range(),
+ self.color_primaries(),
+ ))
}
}
diff --git a/crates/store/re_types/src/image.rs b/crates/store/re_types/src/image.rs
index 7931a9bf6fe6..a27b541492d2 100644
--- a/crates/store/re_types/src/image.rs
+++ b/crates/store/re_types/src/image.rs
@@ -266,29 +266,87 @@ fn test_find_non_empty_dim_indices() {
// ----------------------------------------------------------------------------
-// TODO(jan): there is a duplicate of this function in `crates/store/re_video/src/decode/av1.rs`
+// TODO(andreas): Expose this in the API.
+/// Type of color primaries a given image is in.
+///
+/// This applies both to YUV and RGB formats, but if not specified otherwise
+/// we assume BT.709 primaries for all RGB(A) 8bits per channel content.
+#[derive(Clone, Copy, Debug)]
+pub enum ColorPrimaries {
+ /// BT.601 (aka. SDTV, aka. Rec.601)
+ ///
+ /// Wiki:
+ Bt601,
+
+ /// BT.709 (aka. HDTV, aka. Rec.709)
+ ///
+ /// Wiki:
+ ///
+ /// These are the same primaries we usually assume and use for all of Rerun's rendering
+ /// since they are the same primaries used by sRGB.
+ ///
+ /// The OETF/EOTF function () is different,
+ /// but for all other purposes they are the same.
+ /// (The only reason for us to convert to optical units ("linear" instead of "gamma") is for
+ /// lighting computation & tonemapping where we typically start out with sRGB anyways!)
+ Bt709,
+ //
+ // Not yet supported. These vary a lot more from the other two!
+ //
+ // /// BT.2020 (aka. PQ, aka. Rec.2020)
+ // ///
+ // /// Wiki:
+ // BT2020_ConstantLuminance,
+ // BT2020_NonConstantLuminance,
+}
+
/// Returns sRGB from YUV color.
///
-/// This conversion mirrors the function of the same name in `crates/viewer/re_renderer/shader/decodings.wgsl`
+/// This conversion mirrors the function of the same name in `yuv_converter.wgsl`
///
/// Specifying the color standard should be exposed in the future [#3541](https://github.com/rerun-io/rerun/pull/3541)
-pub fn rgb_from_yuv(y: u8, u: u8, v: u8) -> [u8; 3] {
- let (y, u, v) = (y as f32, u as f32, v as f32);
+pub fn rgb_from_yuv(
+ y: u8,
+ u: u8,
+ v: u8,
+ limited_range: bool,
+ primaries: ColorPrimaries,
+) -> [u8; 3] {
+ let (mut y, mut u, mut v) = (y as f32, u as f32, v as f32);
// rescale YUV values
- let y = (y - 16.0) / 219.0;
- let u = (u - 128.0) / 224.0;
- let v = (v - 128.0) / 224.0;
-
- // BT.601 (aka. SDTV, aka. Rec.601). wiki: https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
- let r = y + 1.402 * v;
- let g = y - 0.344 * u - 0.714 * v;
- let b = y + 1.772 * u;
-
- // BT.709 (aka. HDTV, aka. Rec.709). wiki: https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.709_conversion
- // let r = y + 1.575 * v;
- // let g = y - 0.187 * u - 0.468 * v;
- // let b = y + 1.856 * u;
+ if limited_range {
+ // Via https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion:
+ // "The resultant signals range from 16 to 235 for Y′ (Cb and Cr range from 16 to 240);
+ // the values from 0 to 15 are called footroom, while the values from 236 to 255 are called headroom."
+ y = (y - 16.0) / 219.0;
+ u = (u - 128.0) / 224.0;
+ v = (v - 128.0) / 224.0;
+ } else {
+ y /= 255.0;
+ u = (u - 128.0) / 255.0;
+ v = (v - 128.0) / 255.0;
+ }
+
+ let r;
+ let g;
+ let b;
+
+ match primaries {
+ ColorPrimaries::Bt601 => {
+ // BT.601 (aka. SDTV, aka. Rec.601). wiki: https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
+ r = y + 1.402 * v;
+ g = y - 0.344 * u - 0.714 * v;
+ b = y + 1.772 * u;
+ }
+
+ ColorPrimaries::Bt709 => {
+ // BT.709 (aka. HDTV, aka. Rec.709). wiki: https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.709_conversion
+ r = y + 1.575 * v;
+ g = y - 0.187 * u - 0.468 * v;
+ b = y + 1.856 * u;
+ }
+ }
[(255.0 * r) as u8, (255.0 * g) as u8, (255.0 * b) as u8]
}
diff --git a/crates/store/re_types/src/testing/datatypes/enum_test.rs b/crates/store/re_types/src/testing/datatypes/enum_test.rs
index 930cb17f7185..a8417db68617 100644
--- a/crates/store/re_types/src/testing/datatypes/enum_test.rs
+++ b/crates/store/re_types/src/testing/datatypes/enum_test.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/store/re_types/src/testing/datatypes/valued_enum.rs b/crates/store/re_types/src/testing/datatypes/valued_enum.rs
index 0b11ca9c7473..66063d1121b2 100644
--- a/crates/store/re_types/src/testing/datatypes/valued_enum.rs
+++ b/crates/store/re_types/src/testing/datatypes/valued_enum.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/store/re_types_blueprint/src/blueprint/components/container_kind.rs b/crates/store/re_types_blueprint/src/blueprint/components/container_kind.rs
index 46c3bc33fddb..4aecdd82322f 100644
--- a/crates/store/re_types_blueprint/src/blueprint/components/container_kind.rs
+++ b/crates/store/re_types_blueprint/src/blueprint/components/container_kind.rs
@@ -11,6 +11,7 @@
#![allow(clippy::redundant_closure)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::too_many_lines)]
+#![allow(non_camel_case_types)]
use ::re_types_core::external::arrow2;
use ::re_types_core::ComponentName;
diff --git a/crates/viewer/re_renderer/shader/conversions/yuv_converter.wgsl b/crates/viewer/re_renderer/shader/conversions/yuv_converter.wgsl
index 1f6d0701be20..29e81a5030f2 100644
--- a/crates/viewer/re_renderer/shader/conversions/yuv_converter.wgsl
+++ b/crates/viewer/re_renderer/shader/conversions/yuv_converter.wgsl
@@ -2,9 +2,12 @@
#import <../screen_triangle_vertex.wgsl>
struct UniformBuffer {
- format: u32,
+ yuv_layout: u32,
primaries: u32,
target_texture_size: vec2u,
+ yuv_range: u32,
+
+ _padding: vec3f, // Satisfy `DownlevelFlags::BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED`
};
@group(0) @binding(0)
@@ -13,24 +16,59 @@ var uniform_buffer: UniformBuffer;
@group(0) @binding(1)
var input_texture: texture_2d;
+// see `enum YuvPixelLayout`.
+const YUV_LAYOUT_Y_U_V444 = 0u;
+const YUV_LAYOUT_Y_U_V422 = 1u;
+const YUV_LAYOUT_Y_U_V420 = 2u;
+const YUV_LAYOUT_Y_UV420 = 100u;
+const YUV_LAYOUT_YUYV422 = 200u;
+const YUV_LAYOUT_Y400 = 300u;
-const YUV_LAYOUT_Y_UV = 0u;
-const YUV_LAYOUT_YUYV16 = 1u;
-
+// see `enum ColorPrimaries`.
const PRIMARIES_BT601 = 0u;
const PRIMARIES_BT709 = 1u;
+// see `enum YuvRange`.
+const YUV_RANGE_LIMITED = 0u;
+const YUV_RANGE_FULL = 1u;
+
/// Returns sRGB from YUV color.
///
/// This conversion mirrors the function in `crates/store/re_types/src/datatypes/tensor_data_ext.rs`
///
/// Specifying the color standard should be exposed in the future [#3541](https://github.com/rerun-io/rerun/pull/3541)
-fn srgb_from_yuv(yuv: vec3f, primaries: u32) -> vec3f {
+fn srgb_from_yuv(yuv: vec3f, primaries: u32, range: u32) -> vec3f {
// rescale YUV values
- let y = (yuv[0] - 16.0) / 219.0;
- let u = (yuv[1] - 128.0) / 224.0;
- let v = (yuv[2] - 128.0) / 224.0;
+ //
+ // This is what is called "limited range" and is the most common case.
+ // TODO(andreas): Support "full range" as well.
+
+ var y: f32;
+ var u: f32;
+ var v: f32;
+
+ switch (range) {
+ case YUV_RANGE_LIMITED: {
+ // Via https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion:
+ // "The resultant signals range from 16 to 235 for Y′ (Cb and Cr range from 16 to 240);
+ // the values from 0 to 15 are called footroom, while the values from 236 to 255 are called headroom."
+ y = (yuv[0] - 16.0) / 219.0;
+ u = (yuv[1] - 128.0) / 224.0;
+ v = (yuv[2] - 128.0) / 224.0;
+ }
+
+ case YUV_RANGE_FULL: {
+ y = yuv[0] / 255.0;
+ u = (yuv[1] - 128.0) / 255.0;
+ v = (yuv[2] - 128.0) / 255.0;
+ }
+
+ default: {
+ // Should never happen.
+ return ERROR_RGBA.rgb;
+ }
+ }
var rgb: vec3f;
@@ -53,7 +91,7 @@ fn srgb_from_yuv(yuv: vec3f, primaries: u32) -> vec3f {
}
default: {
- rgb = ERROR_RGBA.rgb;
+ return ERROR_RGBA.rgb;
}
}
@@ -64,27 +102,70 @@ fn srgb_from_yuv(yuv: vec3f, primaries: u32) -> vec3f {
///
/// See also `enum YuvPixelLayout` in `yuv_converter.rs for a specification of
/// the expected data layout.
-fn sample_yuv(yuv_layout: u32, texture: texture_2d, coords: vec2f) -> vec3f {
+fn sample_yuv(yuv_layout: u32, texture: texture_2d, coords: vec2u, target_texture_size: vec2u) -> vec3f {
let texture_dim = vec2f(textureDimensions(texture).xy);
var yuv: vec3f;
switch (yuv_layout) {
- case YUV_LAYOUT_Y_UV: {
+ case YUV_LAYOUT_Y_U_V444: {
+ // Just 3 planes under each other.
+ yuv[0] = f32(textureLoad(texture, coords, 0).r);
+ yuv[1] = f32(textureLoad(texture, vec2u(coords.x, coords.y + target_texture_size.y), 0).r);
+ yuv[2] = f32(textureLoad(texture, vec2u(coords.x, coords.y + target_texture_size.y * 2u), 0).r);
+ }
+
+ case YUV_LAYOUT_Y_U_V422: {
+ // A large Y plane, followed by a UV plane with half the horizontal resolution,
+ // every row contains two u/v rows.
+ yuv[0] = f32(textureLoad(texture, coords, 0).r);
+ // UV coordinate on its own plane:
+ let uv_coord = vec2u(coords.x / 2u, coords.y);
+ // UV coordinate on the data texture, ignoring offset from previous planes.
+ // Each texture row contains two UV rows
+ let uv_col = uv_coord.x + (uv_coord.y % 2) * target_texture_size.x / 2u;
+ let uv_row = uv_coord.y / 2u;
+
+ yuv[1] = f32(textureLoad(texture, vec2u(uv_col, uv_row + target_texture_size.y), 0).r);
+ yuv[2] = f32(textureLoad(texture, vec2u(uv_col, uv_row + target_texture_size.y + target_texture_size.y / 2u), 0).r);
+ }
+
+ case YUV_LAYOUT_Y_U_V420: {
+ // A large Y plane, followed by a UV plane with half the horizontal & vertical resolution,
+ // every row contains two u/v rows and there's only half as many.
+ yuv[0] = f32(textureLoad(texture, coords, 0).r);
+ // UV coordinate on its own plane:
+ let uv_coord = vec2u(coords.x / 2u, coords.y / 2u);
+ // UV coordinate on the data texture, ignoring offset from previous planes.
+ // Each texture row contains two UV rows
+ let uv_col = uv_coord.x + (uv_coord.y % 2) * (target_texture_size.x / 2u);
+ let uv_row = uv_coord.y / 2u;
+
+ yuv[1] = f32(textureLoad(texture, vec2u(uv_col, uv_row + target_texture_size.y), 0).r);
+ yuv[2] = f32(textureLoad(texture, vec2u(uv_col, uv_row + target_texture_size.y + target_texture_size.y / 4u), 0).r);
+ }
+
+ case YUV_LAYOUT_Y400: {
+ yuv[0] = f32(textureLoad(texture, coords, 0).r);
+ yuv[1] = 128.0;
+ yuv[2] = 128.0;
+ }
+
+ case YUV_LAYOUT_Y_UV420: {
let uv_offset = u32(floor(texture_dim.y / 1.5));
- let uv_row = u32(coords.y / 2);
- var uv_col = u32(coords.x / 2) * 2u;
+ let uv_row = (coords.y / 2u);
+ var uv_col = (coords.x / 2u) * 2u;
- yuv[0] = f32(textureLoad(texture, vec2u(coords), 0).r);
- yuv[1] = f32(textureLoad(texture, vec2u(u32(uv_col), uv_offset + uv_row), 0).r);
- yuv[2] = f32(textureLoad(texture, vec2u((u32(uv_col) + 1u), uv_offset + uv_row), 0).r);
+ yuv[0] = f32(textureLoad(texture, coords, 0).r);
+ yuv[1] = f32(textureLoad(texture, vec2u(uv_col, uv_offset + uv_row), 0).r);
+ yuv[2] = f32(textureLoad(texture, vec2u((uv_col + 1u), uv_offset + uv_row), 0).r);
}
- case YUV_LAYOUT_YUYV16: {
+ case YUV_LAYOUT_YUYV422: {
// texture is 2 * width * height
// every 4 bytes is 2 pixels
- let uv_row = u32(coords.y);
+ let uv_row = coords.y;
// multiply by 2 because the width is multiplied by 2
- let y_col = u32(coords.x) * 2u;
+ let y_col = coords.x * 2u;
yuv[0] = f32(textureLoad(texture, vec2u(y_col, uv_row), 0).r);
// at odd pixels we're in the second half of the yuyu block, offset back by 2
@@ -103,10 +184,10 @@ fn sample_yuv(yuv_layout: u32, texture: texture_2d, coords: vec2f) -> vec3f
@fragment
fn fs_main(in: FragmentInput) -> @location(0) vec4f {
- let coords = vec2f(uniform_buffer.target_texture_size) * in.texcoord;
+ let coords = vec2u(vec2f(uniform_buffer.target_texture_size) * in.texcoord);
- let yuv = sample_yuv(uniform_buffer.format, input_texture, coords);
- let rgb = srgb_from_yuv(yuv, uniform_buffer.primaries);
+ let yuv = sample_yuv(uniform_buffer.yuv_layout, input_texture, coords, uniform_buffer.target_texture_size);
+ let rgb = srgb_from_yuv(yuv, uniform_buffer.primaries, uniform_buffer.yuv_range);
return vec4f(rgb, 1.0);
}
diff --git a/crates/viewer/re_renderer/src/resource_managers/image_data_to_texture.rs b/crates/viewer/re_renderer/src/resource_managers/image_data_to_texture.rs
index 42d79c78ecf7..1ba94392a9ac 100644
--- a/crates/viewer/re_renderer/src/resource_managers/image_data_to_texture.rs
+++ b/crates/viewer/re_renderer/src/resource_managers/image_data_to_texture.rs
@@ -1,4 +1,4 @@
-use super::yuv_converter::{YuvFormatConversionTask, YuvPixelLayout};
+use super::yuv_converter::{YuvFormatConversionTask, YuvPixelLayout, YuvRange};
use crate::{
renderer::DrawError,
wgpu_resources::{GpuTexture, TextureDesc},
@@ -12,7 +12,7 @@ use crate::{
/// Since with YUV content the color space is often less clear, we always explicitly
/// specify it.
///
-/// Ffmpeg's documentation has a short & good overview of these relationships:
+/// Ffmpeg's documentation has a short & good overview of the relationship of YUV & color primaries:
///
///
/// Values need to be kept in sync with `yuv_converter.wgsl`
@@ -34,7 +34,7 @@ pub enum ColorPrimaries {
/// but for all other purposes they are the same.
/// (The only reason for us to convert to optical units ("linear" instead of "gamma") is for
/// lighting & tonemapping where we typically start out with an sRGB image!)
- Bt709 = 2,
+ Bt709 = 1,
//
// Not yet supported. These vary a lot more from the other two!
//
@@ -46,9 +46,6 @@ pub enum ColorPrimaries {
}
/// Image data format that can be converted to a wgpu texture.
-///
-/// Names follow a similar convention as Facebook's Ocean library
-/// See
// TODO(andreas): Right now this combines both color space and pixel format. Consider separating them similar to how we do on user facing APIs.
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug)]
@@ -65,6 +62,7 @@ pub enum SourceImageDataFormat {
Yuv {
format: YuvPixelLayout,
primaries: ColorPrimaries,
+ range: YuvRange,
},
//
// TODO(#7608): Add rgb (3 channels!) formats.
@@ -262,9 +260,14 @@ pub fn transfer_image_data_to_texture(
// No further conversion needed, we're done here!
return Ok(data_texture);
}
- SourceImageDataFormat::Yuv { format, primaries } => YuvFormatConversionTask::new(
+ SourceImageDataFormat::Yuv {
+ format,
+ primaries,
+ range,
+ } => YuvFormatConversionTask::new(
ctx,
format,
+ range,
primaries,
&data_texture,
&label,
diff --git a/crates/viewer/re_renderer/src/resource_managers/mod.rs b/crates/viewer/re_renderer/src/resource_managers/mod.rs
index 6bbab468f8b6..c3210fc9e034 100644
--- a/crates/viewer/re_renderer/src/resource_managers/mod.rs
+++ b/crates/viewer/re_renderer/src/resource_managers/mod.rs
@@ -14,4 +14,4 @@ pub use image_data_to_texture::{
ColorPrimaries, ImageDataDesc, ImageDataToTextureError, SourceImageDataFormat,
};
pub use texture_manager::{GpuTexture2D, TextureManager2D, TextureManager2DError};
-pub use yuv_converter::YuvPixelLayout;
+pub use yuv_converter::{YuvPixelLayout, YuvRange};
diff --git a/crates/viewer/re_renderer/src/resource_managers/yuv_converter.rs b/crates/viewer/re_renderer/src/resource_managers/yuv_converter.rs
index a72a632e51ce..075c24f13ccb 100644
--- a/crates/viewer/re_renderer/src/resource_managers/yuv_converter.rs
+++ b/crates/viewer/re_renderer/src/resource_managers/yuv_converter.rs
@@ -15,13 +15,108 @@ use super::ColorPrimaries;
/// Supported chroma subsampling input formats.
///
+/// We use `YUV`/`YCbCr`/`YPbPr` interchangeably and usually just call it `YUV`.
+///
+/// According to this [source](https://www.retrosix.wiki/yuv-vs-ycbcr-vs-rgb-color-space/):
+/// * `YUV` is an analog signal
+/// * `YCbCr` is a scaled and offset version of YUV, used in digital signals (we denote this as "limited range YUV")
+/// * `YPbPr` is the physical component cable used to transmit `YCbCr`
+/// Actual use in the wild seems to be all over the place.
+/// For instance `OpenCV` uses `YCbCr` when talking about the full range and YUV when talking about
+/// limited range. [Source](https://docs.opencv.org/4.x/de/d25/imgproc_color_conversions.html):
+/// > RGB <-> YCrCb JPEG [...] Y, Cr, and Cb cover the whole value range.
+/// > RGB <-> YUV with subsampling [...] with resulting values Y [16, 235], U and V [16, 240] centered at 128.
+///
+/// For more on YUV ranges see [`YuvRange`].
+///
+/// Naming schema:
+/// * every time a plane starts add a `_`
+/// * end with `4xy` for 4:x:y subsampling.
+///
+/// This picture gives a great overview of how to interpret the 4:x:y naming scheme for subsampling:
+///
+///
/// Keep indices in sync with `yuv_converter.wgsl`
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug)]
pub enum YuvPixelLayout {
+ // ---------------------------
+ // Planar formats
+ // ---------------------------
+ //
+ /// 4:4:4 no chroma downsampling with 3 separate planes.
+ /// Also known as `I444`
+ ///
+ /// Expects single channel data texture format.
+ ///
+ /// ```text
+ /// width
+ /// __________
+ /// | |
+ /// height | Y |
+ /// | |
+ /// |_________|
+ /// | |
+ /// height | U |
+ /// | |
+ /// |_________|
+ /// | |
+ /// height | V |
+ /// | |
+ /// |_________|
+ /// ```
+ Y_U_V444 = 0,
+
+ /// 4:2:2 subsampling with 3 separate planes.
+ /// Also known as `I422`
+ ///
+ /// Expects single channel data texture format.
+ ///
+ /// Each data texture row in U & V section contains two rows
+ /// of U/V respectively, since there's a total of (width/2) * height U & V samples
+ ///
+ /// ```text
+ /// width
+ /// __________
+ /// | |
+ /// height | Y |
+ /// | |
+ /// |_________|
+ /// height/2 | U |
+ /// |_________|
+ /// height/2 | V |
+ /// |_________|
+ /// ```
+ Y_U_V422 = 1,
+
+ /// 4:2:0 subsampling with 3 separate planes.
+ /// Also known as `I420`
+ ///
+ /// Expects single channel data texture format.
+ ///
+ /// Each data texture row in U & V section contains two rows
+ /// of U/V respectively, since there's a total of (width/2) * (height/2) U & V samples
+ ///
+ /// ```text
+ /// width
+ /// __________
+ /// | |
+ /// height | Y |
+ /// | |
+ /// |_________|
+ /// height/4 |___◌̲U____|
+ /// height/4 |___◌̲V____|
+ /// ```
+ Y_U_V420 = 2,
+
+ // ---------------------------
+ // Semi-planar formats
+ // ---------------------------
+ //
/// 4:2:0 subsampling with a separate Y plane, followed by a UV plane.
+ /// Also known as `NV12` (although `NV12` usually also implies the limited range).
///
- /// Expects single channel texture format.
+ /// Expects single channel data texture format.
///
/// First comes entire image in Y in one plane,
/// followed by a plane with interleaved lines ordered as U0, V0, U1, V1, etc.
@@ -36,11 +131,15 @@ pub enum YuvPixelLayout {
/// height/2 | U,V,U,… |
/// |_________|
/// ```
- Y_UV12 = 0,
+ Y_UV420 = 100,
+ // ---------------------------
+ // Interleaved formats
+ // ---------------------------
+ //
/// YUV 4:2:2 subsampling, single plane.
///
- /// Expects single channel texture format.
+ /// Expects single channel data texture format.
///
/// The order of the channels is Y0, U0, Y1, V0, all in the same plane.
///
@@ -51,15 +150,57 @@ pub enum YuvPixelLayout {
/// height | Y0, U0, Y1, V0… |
/// |_________________|
/// ```
- YUYV16 = 1,
+ YUYV422 = 200,
+
+ // ---------------------------
+ // Monochrome formats
+ // ---------------------------
+ //
+ /// 4:0:0, single plane of luma only.
+ /// Also known as I400
+ ///
+ /// Expects single channel data texture format.
+ ///
+ /// Note that we still convert this to RGBA, for convenience.
+ ///
+ /// ```text
+ /// width
+ /// __________
+ /// | |
+ /// height | Y |
+ /// | |
+ /// |_________|
+ /// ```
+ Y400 = 300,
+}
+
+/// Expected range of YUV values.
+///
+/// Keep indices in sync with `yuv_converter.wgsl`
+#[derive(Clone, Copy, Debug, Default)]
+pub enum YuvRange {
+ /// Use limited range YUV, i.e. for 8bit data, Y is valid in [16, 235] and U/V [16, 240].
+ ///
+ /// This is by far the more common YUV range.
+ // TODO(andreas): What about higher bit ranges?
+ // This range says https://www.reddit.com/r/ffmpeg/comments/uiugfc/comment/i7f4wyp/
+ // 64-940 for Y and 64-960 for chroma.
+ #[default]
+ Limited = 0,
+
+ /// Use full range YUV with all components ranging from 0 to 255 for 8bit data, or the equivalent full value range for higher bit depths.
+ Full = 1,
}
impl YuvPixelLayout {
/// Given the dimensions of the output picture, what are the expected dimensions of the input data texture.
pub fn data_texture_width_height(&self, [decoded_width, decoded_height]: [u32; 2]) -> [u32; 2] {
match self {
- Self::Y_UV12 => [decoded_width, decoded_height + decoded_height / 2],
- Self::YUYV16 => [decoded_width * 2, decoded_height],
+ Self::Y_U_V444 => [decoded_width, decoded_height * 3],
+ Self::Y_U_V422 => [decoded_width, decoded_height * 2],
+ Self::Y_U_V420 | Self::Y_UV420 => [decoded_width, decoded_height + decoded_height / 2],
+ Self::YUYV422 => [decoded_width * 2, decoded_height],
+ Self::Y400 => [decoded_width, decoded_height],
}
}
@@ -70,24 +211,36 @@ impl YuvPixelLayout {
// Our shader currently works with 8 bit integer formats here since while
// _technically_ YUV formats have nothing to do with concrete bit depth,
// practically there's underlying expectation for 8 bits per channel
- // as long as the data is Bt.709 or Bt.601.
+ // at least as long as the data is Bt.709 or Bt.601.
// In other words: The conversions implementations we have today expect 0-255 as the value range.
#[allow(clippy::match_same_arms)]
match self {
- Self::Y_UV12 => wgpu::TextureFormat::R8Uint,
+ // Only thing that makes sense for 8 bit planar data is the R8Uint format.
+ Self::Y_U_V444 | Self::Y_U_V422 | Self::Y_U_V420 => wgpu::TextureFormat::R8Uint,
+
+ // Same for semi-planar
+ Self::Y_UV420 => wgpu::TextureFormat::R8Uint,
+
+ // Interleaved have opportunities here!
// TODO(andreas): Why not use [`wgpu::TextureFormat::Rg8Uint`] here?
- Self::YUYV16 => wgpu::TextureFormat::R8Uint,
+ Self::YUYV422 => wgpu::TextureFormat::R8Uint,
+
+ // Monochrome have only one channel anyways.
+ Self::Y400 => wgpu::TextureFormat::R8Uint,
}
}
/// Size of the buffer needed to create the data texture, i.e. the raw input data.
pub fn num_data_buffer_bytes(&self, decoded_width: [u32; 2]) -> usize {
- let num_pixels = decoded_width[0] as usize * decoded_width[1] as usize;
- match self {
- Self::Y_UV12 => 12 * num_pixels / 8,
- Self::YUYV16 => 16 * num_pixels / 8,
- }
+ let data_texture_width_height = self.data_texture_width_height(decoded_width);
+ let data_texture_format = self.data_texture_format();
+
+ (data_texture_format
+ .block_copy_size(None)
+ .expect("data texture formats are expected to be trivial")
+ * data_texture_width_height[0]
+ * data_texture_width_height[1]) as usize
}
}
@@ -98,14 +251,17 @@ mod gpu_data {
#[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
pub struct UniformBuffer {
/// Uses [`super::YuvPixelLayout`].
- pub pixel_layout: u32,
+ pub yuv_layout: u32,
/// Uses [`super::ColorPrimaries`].
pub primaries: u32,
pub target_texture_size: [u32; 2],
- pub _end_padding: [wgpu_buffer_types::PaddingRow; 16 - 1],
+ /// Uses [`super::YuvRange`].
+ pub yuv_range: wgpu_buffer_types::U32RowPadded,
+
+ pub _end_padding: [wgpu_buffer_types::PaddingRow; 16 - 2],
}
}
@@ -132,7 +288,8 @@ impl YuvFormatConversionTask {
/// see methods of [`YuvPixelLayout`] for details.
pub fn new(
ctx: &RenderContext,
- format: YuvPixelLayout,
+ yuv_layout: YuvPixelLayout,
+ yuv_range: YuvRange,
primaries: ColorPrimaries,
input_data: &GpuTexture,
output_label: &DebugLabel,
@@ -163,9 +320,10 @@ impl YuvFormatConversionTask {
ctx,
format!("{output_label}_conversion").into(),
gpu_data::UniformBuffer {
- pixel_layout: format as _,
+ yuv_layout: yuv_layout as _,
primaries: primaries as _,
target_texture_size: output_width_height,
+ yuv_range: (yuv_range as u32).into(),
_end_padding: Default::default(),
},
diff --git a/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs b/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs
index e093a3edd121..ba6f0907fb2e 100644
--- a/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs
+++ b/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs
@@ -10,7 +10,9 @@ use re_renderer::{
config::DeviceCaps,
pad_rgb_to_rgba,
renderer::{ColorMapper, ColormappedTexture, ShaderDecoding},
- resource_managers::{ColorPrimaries, ImageDataDesc, SourceImageDataFormat, YuvPixelLayout},
+ resource_managers::{
+ ColorPrimaries, ImageDataDesc, SourceImageDataFormat, YuvPixelLayout, YuvRange,
+ },
RenderContext,
};
use re_types::components::ClassId;
@@ -188,11 +190,9 @@ pub fn image_data_range_heuristic(image_stats: &ImageStats, image_format: &Image
/// Return whether an image should be assumed to be encoded in sRGB color space ("gamma space", no EOTF applied).
fn image_decode_srgb_gamma_heuristic(image_stats: &ImageStats, image_format: ImageFormat) -> bool {
- if let Some(pixel_format) = image_format.pixel_format {
- match pixel_format {
- // Have to do the conversion because we don't use an `Srgb` texture format.
- PixelFormat::NV12 | PixelFormat::YUY2 => true,
- }
+ if image_format.pixel_format.is_some() {
+ // Have to do the conversion because we don't use an `Srgb` texture format.
+ true
} else {
let (min, max) = image_stats.finite_range;
@@ -247,25 +247,70 @@ pub fn texture_creation_desc_from_color_image<'a>(
// TODO(#7608): All image data ingestion conversions should all be handled by re_renderer!
let (data, format) = if let Some(pixel_format) = image.format.pixel_format {
- match pixel_format {
- // Using Bt.601 here for historical reasons.
+ let data = cast_slice_to_cow(image.buffer.as_slice());
+ let primaries = match pixel_format.color_primaries() {
+ re_types::image::ColorPrimaries::Bt601 => ColorPrimaries::Bt601,
+ re_types::image::ColorPrimaries::Bt709 => ColorPrimaries::Bt709,
+ };
+
+ let range = match pixel_format.is_limited_yuv_range() {
+ true => YuvRange::Limited,
+ false => YuvRange::Full,
+ };
+
+ let format = match pixel_format {
+ // For historical reasons, using Bt.709 for fully planar formats and Bt.601 for others.
+ //
+ // TODO(andreas): Investigate if there's underlying expectation for some of these (for instance I suspect that NV12 is "usually" BT601).
// TODO(andreas): Expose color primaries. It's probably still the better default (for instance that's what jpeg still uses),
// but should confirm & back that up!
- PixelFormat::NV12 => (
- cast_slice_to_cow(image.buffer.as_slice()),
+ //
+ PixelFormat::Y_U_V24_FullRange | PixelFormat::Y_U_V24_LimitedRange => {
SourceImageDataFormat::Yuv {
- format: YuvPixelLayout::Y_UV12,
- primaries: ColorPrimaries::Bt601,
- },
- ),
- PixelFormat::YUY2 => (
- cast_slice_to_cow(image.buffer.as_slice()),
+ format: YuvPixelLayout::Y_U_V444,
+ range,
+ primaries,
+ }
+ }
+
+ PixelFormat::Y_U_V16_FullRange | PixelFormat::Y_U_V16_LimitedRange => {
SourceImageDataFormat::Yuv {
- format: YuvPixelLayout::YUYV16,
- primaries: ColorPrimaries::Bt601,
- },
- ),
- }
+ format: YuvPixelLayout::Y_U_V422,
+ range,
+ primaries,
+ }
+ }
+
+ PixelFormat::Y_U_V12_FullRange | PixelFormat::Y_U_V12_LimitedRange => {
+ SourceImageDataFormat::Yuv {
+ format: YuvPixelLayout::Y_U_V420,
+ range,
+ primaries,
+ }
+ }
+
+ PixelFormat::Y8_FullRange | PixelFormat::Y8_LimitedRange => {
+ SourceImageDataFormat::Yuv {
+ format: YuvPixelLayout::Y400,
+ range,
+ primaries,
+ }
+ }
+
+ PixelFormat::NV12 => SourceImageDataFormat::Yuv {
+ format: YuvPixelLayout::Y_UV420,
+ range,
+ primaries,
+ },
+
+ PixelFormat::YUY2 => SourceImageDataFormat::Yuv {
+ format: YuvPixelLayout::YUYV422,
+ range,
+ primaries,
+ },
+ };
+
+ (data, format)
} else {
let color_model = image.format.color_model();
let datatype = image.format.datatype();
diff --git a/crates/viewer/re_viewer_context/src/image_info.rs b/crates/viewer/re_viewer_context/src/image_info.rs
index 56f5032d2271..6a19d2f0876f 100644
--- a/crates/viewer/re_viewer_context/src/image_info.rs
+++ b/crates/viewer/re_viewer_context/src/image_info.rs
@@ -99,7 +99,13 @@ impl ImageInfo {
// Shouldn't hit BGR and BGRA, but we'll handle it like RGB and RGBA here for completeness.
ColorModel::RGB | ColorModel::RGBA | ColorModel::BGR | ColorModel::BGRA => {
if channel < 3 {
- let rgb = rgb_from_yuv(luma, u, v);
+ let rgb = rgb_from_yuv(
+ luma,
+ u,
+ v,
+ pixel_format.is_limited_yuv_range(),
+ pixel_format.color_primaries(),
+ );
Some(TensorElement::U8(rgb[channel as usize]))
} else if channel == 4 {
Some(TensorElement::U8(255))
diff --git a/crates/viewer/re_viewer_context/src/tensor/image_stats.rs b/crates/viewer/re_viewer_context/src/tensor/image_stats.rs
index c4254feb78fe..7b453babf0a1 100644
--- a/crates/viewer/re_viewer_context/src/tensor/image_stats.rs
+++ b/crates/viewer/re_viewer_context/src/tensor/image_stats.rs
@@ -1,6 +1,6 @@
use half::f16;
-use re_types::datatypes::{ChannelDatatype, PixelFormat};
+use re_types::datatypes::ChannelDatatype;
use crate::ImageInfo;
@@ -121,8 +121,11 @@ impl ImageStats {
// ---------------------------
let datatype = match image.format.pixel_format {
- Some(PixelFormat::NV12 | PixelFormat::YUY2) => {
- // We do the lazy thing here:
+ Some(_) => {
+ // We do the lazy thing here since we convert everything to RGB8 right now anyways.
+ // Note that this range is all about the format we're converting _to_.
+ // It would be nice if we could distinguish this better in the future:
+ // E.g. limited range YUV should have the correct limited range.
return Self {
range: Some((0.0, 255.0)),
finite_range: (0.0, 255.0),
diff --git a/docs/content/reference/types/datatypes/pixel_format.md b/docs/content/reference/types/datatypes/pixel_format.md
index 6464475e6be0..636d22c441c6 100644
--- a/docs/content/reference/types/datatypes/pixel_format.md
+++ b/docs/content/reference/types/datatypes/pixel_format.md
@@ -17,8 +17,16 @@ For more compressed image formats, see [`archetypes.EncodedImage`](https://rerun
## Variants
+* Y_U_V12_LimitedRange
* NV12
* YUY2
+* Y8_FullRange
+* Y_U_V24_LimitedRange
+* Y_U_V24_FullRange
+* Y8_LimitedRange
+* Y_U_V12_FullRange
+* Y_U_V16_LimitedRange
+* Y_U_V16_FullRange
## API reference links
* 🌊 [C++ API docs for `PixelFormat`](https://ref.rerun.io/docs/cpp/stable/namespacererun_1_1datatypes.html)
diff --git a/rerun_cpp/src/rerun/datatypes/pixel_format.hpp b/rerun_cpp/src/rerun/datatypes/pixel_format.hpp
index 1cd791004b7b..37844ae73751 100644
--- a/rerun_cpp/src/rerun/datatypes/pixel_format.hpp
+++ b/rerun_cpp/src/rerun/datatypes/pixel_format.hpp
@@ -33,16 +33,85 @@ namespace rerun::datatypes {
/// For more compressed image formats, see `archetypes::EncodedImage`.
enum class PixelFormat : uint8_t {
- /// `NV12` (aka `Y_UV12`) is a YUV 4:2:0 chroma downsampled format with 12 bits per pixel and 8 bits per channel.
+ /// `Y_U_V12` is a YUV 4:2:0 fully planar YUV format with chroma downsampling in both dimensions, also known as `I420`.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ /// the resolution of the Y plane.
+ Y_U_V12_LimitedRange = 20,
+
+ /// `NV12` (aka `Y_UV12`) is a YUV 4:2:0 chroma downsampled format with 12 bits per pixel and 8 bits per channel.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
///
/// First comes entire image in Y in one plane,
/// followed by a plane with interleaved lines ordered as U0, V0, U1, V1, etc.
NV12 = 26,
- /// `YUY2` (aka `YUYV` or `YUYV16`), is a YUV 4:2:2 chroma downsampled format with 16 bits per pixel and 8 bits per channel.
+ /// `YUY2` (aka `YUYV` or `YUYV16`), is a YUV 4:2:2 chroma downsampled format with 16 bits per pixel and 8 bits per channel.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
///
/// The order of the channels is Y0, U0, Y1, V0, all in the same plane.
YUY2 = 27,
+
+ /// Monochrome Y plane only, essentially a YUV 4:0:0 planar format.
+ ///
+ /// Also known as just "gray". This is virtually identical to a 8bit luminance/grayscale (see `datatypes::ColorModel`).
+ ///
+ /// This uses entire range YUV, i.e. Y is expected to be within [0, 255].
+ /// (as opposed to "limited range" YUV as used e.g. in NV12).
+ Y8_FullRange = 30,
+
+ /// `Y_U_V24` is a YUV 4:4:4 fully planar YUV format without chroma downsampling, also known as `I444`.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes.
+ Y_U_V24_LimitedRange = 39,
+
+ /// `Y_U_V24` is a YUV 4:4:4 fully planar YUV format without chroma downsampling, also known as `I444`.
+ ///
+ /// This uses full range YUV with all components ranging from 0 to 255
+ /// (as opposed to "limited range" YUV as used e.g. in NV12).
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes.
+ Y_U_V24_FullRange = 40,
+
+ /// Monochrome Y plane only, essentially a YUV 4:0:0 planar format.
+ ///
+ /// Also known as just "gray".
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235].
+ /// If not for this range limitation/remapping, this is almost identical to 8bit luminance/grayscale (see `datatypes::ColorModel`).
+ Y8_LimitedRange = 41,
+
+ /// `Y_U_V12` is a YUV 4:2:0 fully planar YUV format with chroma downsampling in both dimensions, also known as `I420`.
+ ///
+ /// This uses full range YUV with all components ranging from 0 to 255
+ /// (as opposed to "limited range" YUV as used e.g. in NV12).
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ /// the resolution of the Y plane.
+ Y_U_V12_FullRange = 44,
+
+ /// `Y_U_V16` is a YUV 4:2:2 fully planar YUV format with horizontal chroma downsampling, also known as `I422`.
+ ///
+ /// This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ /// the horizontal resolution of the Y plane.
+ Y_U_V16_LimitedRange = 49,
+
+ /// `Y_U_V16` is a YUV 4:2:2 fully planar YUV format with horizontal chroma downsampling, also known as `I422`.
+ ///
+ /// This uses full range YUV with all components ranging from 0 to 255
+ /// (as opposed to "limited range" YUV as used e.g. in NV12).
+ ///
+ /// First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ /// the horizontal resolution of the Y plane.
+ Y_U_V16_FullRange = 50,
};
} // namespace rerun::datatypes
diff --git a/rerun_cpp/src/rerun/image_utils.hpp b/rerun_cpp/src/rerun/image_utils.hpp
index 12be1ece918c..de78d531ebf0 100644
--- a/rerun_cpp/src/rerun/image_utils.hpp
+++ b/rerun_cpp/src/rerun/image_utils.hpp
@@ -152,10 +152,27 @@ namespace rerun {
) {
auto num_pixels = resolution.width * resolution.height;
switch (pixel_format) {
+ // 444 formats: three full-resolution 8-bit planes -> 3 bytes per pixel.
+ case datatypes::PixelFormat::Y_U_V24_FullRange:
+ case datatypes::PixelFormat::Y_U_V24_LimitedRange:
+ return num_pixels * 3;
+
+ // 422 formats: 16 bits per pixel.
+ case datatypes::PixelFormat::Y_U_V16_FullRange:
+ case datatypes::PixelFormat::Y_U_V16_LimitedRange:
+ case datatypes::PixelFormat::YUY2:
+ return 16 * num_pixels / 8;
+
+ // 420 formats: 12 bits per pixel.
+ case datatypes::PixelFormat::Y_U_V12_FullRange:
+ case datatypes::PixelFormat::Y_U_V12_LimitedRange:
 case datatypes::PixelFormat::NV12:
 return 12 * num_pixels / 8;
- case datatypes::PixelFormat::YUY2:
- return 16 * num_pixels / 8;
+
+ // Monochrome formats: single 8-bit plane.
+ case datatypes::PixelFormat::Y8_LimitedRange:
+ case datatypes::PixelFormat::Y8_FullRange:
+ return num_pixels;
}
return 0;
}
diff --git a/rerun_py/rerun_sdk/rerun/datatypes/pixel_format.py b/rerun_py/rerun_sdk/rerun/datatypes/pixel_format.py
index 872f076901b5..5ea053f8bdae 100644
--- a/rerun_py/rerun_sdk/rerun/datatypes/pixel_format.py
+++ b/rerun_py/rerun_sdk/rerun/datatypes/pixel_format.py
@@ -35,9 +35,21 @@ class PixelFormat(Enum):
For more compressed image formats, see [`archetypes.EncodedImage`][rerun.archetypes.EncodedImage].
"""
+ Y_U_V12_LimitedRange = 20
+ """
+ `Y_U_V12` is a YUV 4:2:0 fully planar YUV format with 2x2 chroma downsampling, also known as `I420`.
+
+ This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
+
+ First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ the resolution of the Y plane.
+ """
+
NV12 = 26
"""
- `NV12` (aka `Y_UV12`) is a YUV 4:2:0 chroma downsampled format with 12 bits per pixel and 8 bits per channel.
+ `NV12` (aka `Y_UV12`) is a YUV 4:2:0 chroma downsampled format with 12 bits per pixel and 8 bits per channel.
+
+ This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
First comes entire image in Y in one plane,
followed by a plane with interleaved lines ordered as U0, V0, U1, V1, etc.
@@ -45,11 +57,84 @@ class PixelFormat(Enum):
YUY2 = 27
"""
- `YUY2` (aka `YUYV` or `YUYV16`), is a YUV 4:2:2 chroma downsampled format with 16 bits per pixel and 8 bits per channel.
+ `YUY2` (aka `YUYV` or `YUYV16`), is a YUV 4:2:2 chroma downsampled format with 16 bits per pixel and 8 bits per channel.
+
+ This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
The order of the channels is Y0, U0, Y1, V0, all in the same plane.
"""
+ Y8_FullRange = 30
+ """
+ Monochrome Y plane only, essentially a YUV 4:0:0 planar format.
+
+ Also known as just "gray". This is virtually identical to a 8bit luminance/grayscale (see [`datatypes.ColorModel`][rerun.datatypes.ColorModel]).
+
+ This uses full range YUV, i.e. Y is expected to be within [0, 255]
+ (as opposed to "limited range" YUV as used e.g. in NV12).
+ """
+
+ Y_U_V24_LimitedRange = 39
+ """
+ `Y_U_V24` is a YUV 4:4:4 fully planar YUV format without chroma downsampling, also known as `I444`.
+
+ This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
+
+ First comes entire image in Y in one plane, followed by the U and V planes.
+ """
+
+ Y_U_V24_FullRange = 40
+ """
+ `Y_U_V24` is a YUV 4:4:4 fully planar YUV format without chroma downsampling, also known as `I444`.
+
+ This uses full range YUV with all components ranging from 0 to 255
+ (as opposed to "limited range" YUV as used e.g. in NV12).
+
+ First comes entire image in Y in one plane, followed by the U and V planes.
+ """
+
+ Y8_LimitedRange = 41
+ """
+ Monochrome Y plane only, essentially a YUV 4:0:0 planar format.
+
+ Also known as just "gray".
+
+ This uses limited range YUV, i.e. Y is expected to be within [16, 235].
+ If not for this range limitation/remapping, this is almost identical to 8bit luminance/grayscale (see [`datatypes.ColorModel`][rerun.datatypes.ColorModel]).
+ """
+
+ Y_U_V12_FullRange = 44
+ """
+ `Y_U_V12` is a YUV 4:2:0 fully planar YUV format with 2x2 chroma downsampling, also known as `I420`.
+
+ This uses full range YUV with all components ranging from 0 to 255
+ (as opposed to "limited range" YUV as used e.g. in NV12).
+
+ First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ the resolution of the Y plane.
+ """
+
+ Y_U_V16_LimitedRange = 49
+ """
+ `Y_U_V16` is a YUV 4:2:2 fully planar YUV format with horizontal chroma downsampling, also known as `I422`.
+
+ This uses limited range YUV, i.e. Y is expected to be within [16, 235] and U/V within [16, 240].
+
+ First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ the horizontal resolution of the Y plane.
+ """
+
+ Y_U_V16_FullRange = 50
+ """
+ `Y_U_V16` is a YUV 4:2:2 fully planar YUV format with horizontal chroma downsampling, also known as `I422`.
+
+ This uses full range YUV with all components ranging from 0 to 255
+ (as opposed to "limited range" YUV as used e.g. in NV12).
+
+ First comes entire image in Y in one plane, followed by the U and V planes, which each only have half
+ the horizontal resolution of the Y plane.
+ """
+
@classmethod
def auto(cls, val: str | int | PixelFormat) -> PixelFormat:
"""Best-effort converter, including a case-insensitive string matcher."""
@@ -71,7 +156,32 @@ def __str__(self) -> str:
return self.name
-PixelFormatLike = Union[PixelFormat, Literal["NV12", "YUY2", "nv12", "yuy2"], int]
+PixelFormatLike = Union[
+ PixelFormat,
+ Literal[
+ "NV12",
+ "Y8_FullRange",
+ "Y8_LimitedRange",
+ "YUY2",
+ "Y_U_V12_FullRange",
+ "Y_U_V12_LimitedRange",
+ "Y_U_V16_FullRange",
+ "Y_U_V16_LimitedRange",
+ "Y_U_V24_FullRange",
+ "Y_U_V24_LimitedRange",
+ "nv12",
+ "y8_fullrange",
+ "y8_limitedrange",
+ "y_u_v12_fullrange",
+ "y_u_v12_limitedrange",
+ "y_u_v16_fullrange",
+ "y_u_v16_limitedrange",
+ "y_u_v24_fullrange",
+ "y_u_v24_limitedrange",
+ "yuy2",
+ ],
+ int,
+]
PixelFormatArrayLike = Union[PixelFormatLike, Sequence[PixelFormatLike]]
diff --git a/tests/python/chroma_downsample_image/main.py b/tests/python/chroma_downsample_image/main.py
deleted file mode 100755
index 427cf1bff119..000000000000
--- a/tests/python/chroma_downsample_image/main.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python3
-"""Testing NV12 image encoding."""
-
-from __future__ import annotations
-
-import argparse
-import os
-from typing import Any
-
-import cv2
-import numpy as np
-import rerun as rr
-import rerun.blueprint as rrb
-
-
-def bgra2nv12(bgra: Any) -> np.ndarray:
- yuv = cv2.cvtColor(bgra, cv2.COLOR_BGRA2YUV_I420)
- uv_row_cnt = yuv.shape[0] // 3
- uv_plane = np.transpose(yuv[uv_row_cnt * 2 :].reshape(2, -1), [1, 0])
- yuv[uv_row_cnt * 2 :] = uv_plane.reshape(uv_row_cnt, -1)
- return yuv
-
-
-def bgra2yuy2(bgra: Any) -> np.ndarray:
- yuv = cv2.cvtColor(bgra, cv2.COLOR_BGRA2YUV_YUY2)
- (y, uv) = cv2.split(yuv)
-
- yuy2 = np.empty((y.shape[0], y.shape[1] * 2), dtype=y.dtype)
- yuy2[:, 0::2] = y
- yuy2[:, 1::4] = uv[:, ::2]
- yuy2[:, 3::4] = uv[:, 1::2]
-
- return yuy2
-
-
-def main() -> None:
- parser = argparse.ArgumentParser(description="Displaying chroma downsampled images.")
- rr.script_add_args(parser)
- args = parser.parse_args()
-
- rr.script_setup(
- args,
- "rerun_example_chroma_downsampled",
- default_blueprint=rrb.Grid(
- rrb.Spatial2DView(origin="img_reference", name="Reference RGB"),
- rrb.Spatial2DView(origin="img_nv12", name="NV12"),
- rrb.Spatial2DView(origin="img_yuy2", name="YUY2"),
- rrb.TextDocumentView(origin="expectation", name="expectation"),
- ),
- )
-
- # Make sure you use a colorful image!
- dir_path = os.path.dirname(os.path.realpath(__file__))
- img_path = f"{dir_path}/../../../crates/viewer/re_ui/data/logo_dark_mode.png"
- img_bgra = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
-
- img_rgb = cv2.cvtColor(img_bgra, cv2.COLOR_BGRA2RGB)
- rr.log("img_reference", rr.Image(img_rgb, "rgb"))
-
- rr.log(
- "img_nv12",
- rr.Image(
- width=img_bgra.shape[1],
- height=img_bgra.shape[0],
- pixel_format=rr.PixelFormat.NV12,
- bytes=bgra2nv12(img_bgra).tobytes(),
- ),
- )
- rr.log(
- "img_yuy2",
- rr.Image(
- width=img_bgra.shape[1],
- height=img_bgra.shape[0],
- pixel_format=rr.PixelFormat.YUY2,
- bytes=bgra2yuy2(img_bgra).tobytes(),
- ),
- )
-
- rr.log("expectation", rr.TextDocument("The images should look the same, except for some chroma artifacts."))
-
- rr.script_teardown(args)
-
-
-if __name__ == "__main__":
- main()
diff --git a/tests/python/chroma_downsample_image/requirements.txt b/tests/python/chroma_downsample_image/requirements.txt
deleted file mode 100644
index 4364766d7369..000000000000
--- a/tests/python/chroma_downsample_image/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-numpy
-opencv-python
-rerun-sdk
diff --git a/tests/python/release_checklist/check_chroma_subsampling.py b/tests/python/release_checklist/check_chroma_subsampling.py
new file mode 100644
index 000000000000..4cfda9ad5d6c
--- /dev/null
+++ b/tests/python/release_checklist/check_chroma_subsampling.py
@@ -0,0 +1,235 @@
+#!/usr/bin/env python3
+"""Testing NV12 image encoding."""
+
+from __future__ import annotations
+
+import os
+from argparse import Namespace
+from typing import Any
+from uuid import uuid4
+
+import cv2
+import numpy as np
+import rerun as rr
+import rerun.blueprint as rrb
+
+README = """\
+# Chroma subsampling
+
+All images should look roughly the same except for some chroma artifacts
+and slight color differences due to different color primaries.
+
+Naturally, Y8 formats are greyscale.
+"""
+
+
+def bgra2y_u_v24(bgra: Any, full_range: bool) -> np.ndarray:
+ if full_range:
+ yvu = cv2.cvtColor(bgra, cv2.COLOR_BGR2YCrCb)
+ y, v, u = cv2.split(yvu)
+ else:
+ yuv = cv2.cvtColor(bgra, cv2.COLOR_BGR2YUV)
+ y, u, v = cv2.split(yuv)
+ y = np.array(y).flatten()
+ u = np.array(u).flatten()
+ v = np.array(v).flatten()
+ yuv24 = np.concatenate((y, u, v))
+ return yuv24.astype(np.uint8)
+
+
+def bgra2y_u_v16(bgra: Any, full_range: bool) -> np.ndarray:
+ if full_range:
+ yvu = cv2.cvtColor(bgra, cv2.COLOR_BGR2YCrCb)
+ y, v, u = cv2.split(yvu)
+ else:
+ yuv = cv2.cvtColor(bgra, cv2.COLOR_BGR2YUV)
+ y, u, v = cv2.split(yuv)
+ y = np.array(y).flatten()
+ u = np.array(cv2.resize(u, (u.shape[1] // 2, u.shape[0]))).flatten()
+ v = np.array(cv2.resize(v, (v.shape[1] // 2, v.shape[0]))).flatten()
+ yuv16 = np.concatenate((y, u, v))
+ return yuv16.astype(np.uint8)
+
+
+def bgra2y_u_v12(bgra: Any, full_range: bool) -> np.ndarray:
+ if full_range:
+ yvu = cv2.cvtColor(bgra, cv2.COLOR_BGR2YCrCb)
+ y, v, u = cv2.split(yvu)
+ else:
+ yuv = cv2.cvtColor(bgra, cv2.COLOR_BGR2YUV)
+ y, u, v = cv2.split(yuv)
+ y = np.array(y).flatten()
+ u = np.array(cv2.resize(u, (u.shape[1] // 2, u.shape[0] // 2))).flatten()
+ v = np.array(cv2.resize(v, (v.shape[1] // 2, v.shape[0] // 2))).flatten()
+ yuv12 = np.concatenate((y, u, v))
+ return yuv12.astype(np.uint8)
+
+
+def bgra2y8(bgra: Any, full_range: bool) -> np.ndarray:
+ if full_range:
+ yvu = cv2.cvtColor(bgra, cv2.COLOR_BGR2YCrCb)
+ y, _v, _u = cv2.split(yvu)
+ else:
+ yuv = cv2.cvtColor(bgra, cv2.COLOR_BGR2YUV)
+ y, _u, _v = cv2.split(yuv)
+ return y.astype(np.uint8)
+
+
+def bgra2nv12(bgra: Any) -> np.ndarray:
+ yuv = cv2.cvtColor(bgra, cv2.COLOR_BGRA2YUV_I420)
+ uv_row_cnt = yuv.shape[0] // 3
+ uv_plane = np.transpose(yuv[uv_row_cnt * 2 :].reshape(2, -1), [1, 0])
+ yuv[uv_row_cnt * 2 :] = uv_plane.reshape(uv_row_cnt, -1)
+ return yuv
+
+
+def bgra2yuy2(bgra: Any) -> np.ndarray:
+ yuv = cv2.cvtColor(bgra, cv2.COLOR_BGRA2YUV_YUY2)
+ (y, uv) = cv2.split(yuv)
+
+ yuy2 = np.empty((y.shape[0], y.shape[1] * 2), dtype=y.dtype)
+ yuy2[:, 0::2] = y
+ yuy2[:, 1::4] = uv[:, ::2]
+ yuy2[:, 3::4] = uv[:, 1::2]
+
+ return yuy2
+
+
+def log_readme() -> None:
+ rr.log("readme", rr.TextDocument(README, media_type=rr.MediaType.MARKDOWN), timeless=True)
+
+
+def blueprint() -> rrb.BlueprintLike:
+ return rrb.Grid(
+ rrb.TextDocumentView(origin="readme"),
+ rrb.Spatial2DView(origin="img_reference", name="Reference RGB"),
+ rrb.Spatial2DView(origin="img_V_U_V24_limited_range", name="Y_U_V24_limited_range"),
+ rrb.Spatial2DView(origin="img_V_U_V24_full_range", name="Y_U_V24_full_range"),
+ rrb.Spatial2DView(origin="img_V_U_V16_limited_range", name="Y_U_V16_limited_range"),
+ rrb.Spatial2DView(origin="img_V_U_V16_full_range", name="Y_U_V16_full_range"),
+ rrb.Spatial2DView(origin="img_V_U_V12_limited_range", name="Y_U_V12_limited_range"),
+ rrb.Spatial2DView(origin="img_V_U_V12_full_range", name="Y_U_V12_full_range"),
+ rrb.Spatial2DView(origin="img_y8_limited_range", name="Y8_limited_range"),
+ rrb.Spatial2DView(origin="img_y8_full_range", name="Y8_full_range"),
+ rrb.Spatial2DView(origin="img_nv12", name="NV12"),
+ rrb.Spatial2DView(origin="img_yuy2", name="YUY2"),
+ )
+
+
+def log_data() -> None:
+ # Make sure you use a colorful image!
+ dir_path = os.path.dirname(os.path.realpath(__file__))
+ img_path = f"{dir_path}/../../../crates/viewer/re_ui/data/logo_dark_mode.png"
+ img_bgra = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
+
+ img_rgb = cv2.cvtColor(img_bgra, cv2.COLOR_BGRA2RGB)
+ rr.log("img_reference", rr.Image(img_rgb, "rgb"))
+
+ rr.log(
+ "img_V_U_V24_limited_range",
+ rr.Image(
+ width=img_bgra.shape[1],
+ height=img_bgra.shape[0],
+ pixel_format=rr.PixelFormat.Y_U_V24_LimitedRange,
+ bytes=bgra2y_u_v24(img_bgra, False).tobytes(),
+ ),
+ )
+ rr.log(
+ "img_V_U_V16_limited_range",
+ rr.Image(
+ width=img_bgra.shape[1],
+ height=img_bgra.shape[0],
+ pixel_format=rr.PixelFormat.Y_U_V16_LimitedRange,
+ bytes=bgra2y_u_v16(img_bgra, False).tobytes(),
+ ),
+ )
+ rr.log(
+ "img_V_U_V12_limited_range",
+ rr.Image(
+ width=img_bgra.shape[1],
+ height=img_bgra.shape[0],
+ pixel_format=rr.PixelFormat.Y_U_V12_LimitedRange,
+ bytes=bgra2y_u_v12(img_bgra, False).tobytes(),
+ ),
+ )
+ rr.log(
+ "img_y8_limited_range",
+ rr.Image(
+ width=img_bgra.shape[1],
+ height=img_bgra.shape[0],
+ pixel_format=rr.PixelFormat.Y8_LimitedRange,
+ bytes=bgra2y8(img_bgra, False).tobytes(),
+ ),
+ )
+
+ rr.log(
+ "img_V_U_V24_full_range",
+ rr.Image(
+ width=img_bgra.shape[1],
+ height=img_bgra.shape[0],
+ pixel_format=rr.PixelFormat.Y_U_V24_FullRange,
+ bytes=bgra2y_u_v24(img_bgra, True).tobytes(),
+ ),
+ )
+ rr.log(
+ "img_V_U_V16_full_range",
+ rr.Image(
+ width=img_bgra.shape[1],
+ height=img_bgra.shape[0],
+ pixel_format=rr.PixelFormat.Y_U_V16_FullRange,
+ bytes=bgra2y_u_v16(img_bgra, True).tobytes(),
+ ),
+ )
+ rr.log(
+ "img_V_U_V12_full_range",
+ rr.Image(
+ width=img_bgra.shape[1],
+ height=img_bgra.shape[0],
+ pixel_format=rr.PixelFormat.Y_U_V12_FullRange,
+ bytes=bgra2y_u_v12(img_bgra, True).tobytes(),
+ ),
+ )
+ rr.log(
+ "img_y8_full_range",
+ rr.Image(
+ width=img_bgra.shape[1],
+ height=img_bgra.shape[0],
+ pixel_format=rr.PixelFormat.Y8_FullRange,
+ bytes=bgra2y8(img_bgra, True).tobytes(),
+ ),
+ )
+
+ rr.log(
+ "img_nv12",
+ rr.Image(
+ width=img_bgra.shape[1],
+ height=img_bgra.shape[0],
+ pixel_format=rr.PixelFormat.NV12,
+ bytes=bgra2nv12(img_bgra).tobytes(),
+ ),
+ )
+ rr.log(
+ "img_yuy2",
+ rr.Image(
+ width=img_bgra.shape[1],
+ height=img_bgra.shape[0],
+ pixel_format=rr.PixelFormat.YUY2,
+ bytes=bgra2yuy2(img_bgra).tobytes(),
+ ),
+ )
+
+
+def run(args: Namespace) -> None:
+ rr.script_setup(args, f"{os.path.basename(__file__)}", recording_id=uuid4(), default_blueprint=blueprint())
+
+ log_readme()
+ log_data()
+
+
+if __name__ == "__main__":
+ import argparse
+
+ parser = argparse.ArgumentParser(description="Interactive release checklist")
+ rr.script_add_args(parser)
+ args = parser.parse_args()
+ run(args)