diff --git a/Cargo.lock b/Cargo.lock index f9703f346a2d..2ad62e163514 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4310,7 +4310,6 @@ dependencies = [ "re_query", "re_query2", "re_query_cache", - "re_query_cache2", "re_smart_channel", "re_tracing", "re_types", @@ -4515,32 +4514,6 @@ dependencies = [ [[package]] name = "re_query_cache" version = "0.16.0-alpha.2" -dependencies = [ - "ahash", - "criterion", - "indent", - "itertools 0.12.0", - "mimalloc", - "parking_lot", - "paste", - "rand", - "re_data_store", - "re_format", - "re_log", - "re_log_types", - "re_query", - "re_tracing", - "re_types", - "re_types_core", - "seq-macro", - "similar-asserts", - "static_assertions", - "web-time", -] - -[[package]] -name = "re_query_cache2" -version = "0.16.0-alpha.2" dependencies = [ "ahash", "anyhow", @@ -4780,7 +4753,6 @@ dependencies = [ "re_log_types", "re_query", "re_query_cache", - "re_query_cache2", "re_renderer", "re_space_view", "re_tracing", @@ -4849,7 +4821,6 @@ dependencies = [ "re_log", "re_log_types", "re_query_cache", - "re_query_cache2", "re_renderer", "re_tracing", "re_types", @@ -4871,7 +4842,6 @@ dependencies = [ "re_log_types", "re_query", "re_query_cache", - "re_query_cache2", "re_renderer", "re_space_view", "re_tracing", @@ -5088,7 +5058,6 @@ dependencies = [ "re_log_types", "re_memory", "re_query_cache", - "re_query_cache2", "re_renderer", "re_smart_channel", "re_space_view", @@ -5150,7 +5119,6 @@ dependencies = [ "re_query", "re_query2", "re_query_cache", - "re_query_cache2", "re_renderer", "re_smart_channel", "re_string_interner", diff --git a/Cargo.toml b/Cargo.toml index 5df25ff9ff7d..d391fc4bcab3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,7 +47,6 @@ re_log_types = { path = "crates/re_log_types", version = "=0.16.0-alpha.2", defa re_memory = { path = "crates/re_memory", version = "=0.16.0-alpha.2", default-features = false } re_query = { path = "crates/re_query", version = "=0.16.0-alpha.2", default-features = false } re_query_cache = { path = "crates/re_query_cache", version = "=0.16.0-alpha.2", default-features = false } -re_query_cache2 = { path = "crates/re_query_cache2", version = "=0.16.0-alpha.2", default-features = false } re_query2 = { path = "crates/re_query2", version = "=0.16.0-alpha.2", default-features = false } re_renderer = { path = "crates/re_renderer", version = "=0.16.0-alpha.2", default-features = false } re_sdk = { path = "crates/re_sdk", version = "=0.16.0-alpha.2", default-features = false } diff --git a/crates/re_data_ui/src/component.rs b/crates/re_data_ui/src/component.rs index e3961b0a1789..6c6e9a2fa4c6 100644 --- a/crates/re_data_ui/src/component.rs +++ b/crates/re_data_ui/src/component.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use egui::NumExt; use re_entity_db::{ - external::re_query_cache2::CachedLatestAtComponentResults, EntityPath, InstancePath, + external::re_query_cache::CachedLatestAtComponentResults, EntityPath, InstancePath, }; use re_types::ComponentName; use re_ui::SyntaxHighlighting as _; diff --git a/crates/re_data_ui/src/component_path.rs b/crates/re_data_ui/src/component_path.rs index 35af64c88430..79311a6182f2 100644 --- a/crates/re_data_ui/src/component_path.rs +++ b/crates/re_data_ui/src/component_path.rs @@ -25,7 +25,7 @@ impl DataUi for ComponentPath { )); } else { let results = - db.query_caches2() + db.query_caches() .latest_at(db.store(), query, entity_path, [*component_name]); if let Some(results) = results.components.get(component_name) { crate::EntityLatestAtResults { diff --git 
a/crates/re_data_ui/src/component_ui_registry.rs b/crates/re_data_ui/src/component_ui_registry.rs index 08b03de31dcf..98e4afbf148d 100644 --- a/crates/re_data_ui/src/component_ui_registry.rs +++ b/crates/re_data_ui/src/component_ui_registry.rs @@ -1,5 +1,5 @@ use re_data_store::LatestAtQuery; -use re_entity_db::{external::re_query_cache2::CachedLatestAtComponentResults, EntityDb}; +use re_entity_db::{external::re_query_cache::CachedLatestAtComponentResults, EntityDb}; use re_log_types::{external::arrow2, EntityPath}; use re_types::external::arrow2::array::Utf8Array; use re_viewer_context::{ComponentUiRegistry, UiVerbosity, ViewerContext}; diff --git a/crates/re_data_ui/src/editors.rs b/crates/re_data_ui/src/editors.rs index 425cbc28eeca..61b96d4eade1 100644 --- a/crates/re_data_ui/src/editors.rs +++ b/crates/re_data_ui/src/editors.rs @@ -2,7 +2,7 @@ use egui::NumExt as _; use re_data_store::LatestAtQuery; -use re_entity_db::{external::re_query_cache2::CachedLatestAtComponentResults, EntityDb}; +use re_entity_db::{external::re_query_cache::CachedLatestAtComponentResults, EntityDb}; use re_log_types::EntityPath; use re_types::{ components::{ diff --git a/crates/re_data_ui/src/instance_path.rs b/crates/re_data_ui/src/instance_path.rs index a3369bc5a2b8..87155fa7ae46 100644 --- a/crates/re_data_ui/src/instance_path.rs +++ b/crates/re_data_ui/src/instance_path.rs @@ -76,7 +76,7 @@ impl DataUi for InstancePath { .num_columns(2) .show(ui, |ui| { for component_name in normal_components { - let results = db.query_caches2().latest_at( + let results = db.query_caches().latest_at( db.store(), query, entity_path, diff --git a/crates/re_entity_db/Cargo.toml b/crates/re_entity_db/Cargo.toml index 0b4bb51b6ad5..38e646d205a1 100644 --- a/crates/re_entity_db/Cargo.toml +++ b/crates/re_entity_db/Cargo.toml @@ -35,8 +35,7 @@ re_log_encoding = { workspace = true, features = ["decoder"] } re_log_types.workspace = true re_query.workspace = true re_query2.workspace = true -re_query_cache.workspace = true -re_query_cache2 = { workspace = true, features = ["to_archetype"] } +re_query_cache = { workspace = true, features = ["to_archetype"] } re_smart_channel.workspace = true re_tracing.workspace = true re_types_core.workspace = true diff --git a/crates/re_entity_db/src/entity_db.rs b/crates/re_entity_db/src/entity_db.rs index 68de41faeb7a..3027be02373a 100644 --- a/crates/re_entity_db/src/entity_db.rs +++ b/crates/re_entity_db/src/entity_db.rs @@ -116,14 +116,11 @@ pub struct EntityDb { /// Stores all components for all entities for all timelines. data_store: DataStore, - /// Query caches for the data in [`Self::data_store`]. - query_caches: re_query_cache::Caches, - /// The active promise resolver for this DB. resolver: re_query2::PromiseResolver, /// Query caches for the data in [`Self::data_store`]. 
-    query_caches2: re_query_cache2::Caches,
+    query_caches: re_query_cache::Caches,
 
     stats: IngestionStatistics,
 }
 
@@ -136,7 +133,6 @@ impl EntityDb {
             DataStoreConfig::default(),
         );
         let query_caches = re_query_cache::Caches::new(&data_store);
-        let query_caches2 = re_query_cache2::Caches::new(&data_store);
         Self {
             data_source: None,
             set_store_info: None,
@@ -146,9 +142,8 @@ impl EntityDb {
             times_per_timeline: Default::default(),
             tree: crate::EntityTree::root(),
             data_store,
-            query_caches,
             resolver: re_query2::PromiseResolver::default(),
-            query_caches2,
+            query_caches,
             stats: IngestionStatistics::new(store_id),
         }
     }
@@ -201,11 +196,6 @@ impl EntityDb {
         &self.query_caches
     }
 
-    #[inline]
-    pub fn query_caches2(&self) -> &re_query_cache2::Caches {
-        &self.query_caches2
-    }
-
     #[inline]
     pub fn resolver(&self) -> &re_query2::PromiseResolver {
         &self.resolver
@@ -219,21 +209,21 @@ impl EntityDb {
         query: &re_data_store::LatestAtQuery,
     ) -> PromiseResult<Option<A>>
     where
-        re_query_cache2::CachedLatestAtResults: re_query_cache2::ToArchetype<A>,
+        re_query_cache::CachedLatestAtResults: re_query_cache::ToArchetype<A>,
     {
-        let results = self.query_caches2().latest_at(
+        let results = self.query_caches().latest_at(
             self.store(),
             query,
             entity_path,
             A::all_components().iter().copied(), // no generics!
         );
 
-        use re_query_cache2::ToArchetype as _;
+        use re_query_cache::ToArchetype as _;
         match results.to_archetype(self.resolver()).flatten() {
             PromiseResult::Pending => PromiseResult::Pending,
             PromiseResult::Error(err) => {
-                if let Some(err) = err.downcast_ref::<re_query_cache2::QueryError>() {
-                    if matches!(err, re_query_cache2::QueryError::PrimaryNotFound(_)) {
+                if let Some(err) = err.downcast_ref::<re_query_cache::QueryError>() {
+                    if matches!(err, re_query_cache::QueryError::PrimaryNotFound(_)) {
                         return PromiseResult::Ready(None);
                     }
                 }
@@ -248,8 +238,8 @@ impl EntityDb {
         &self,
         entity_path: &EntityPath,
         query: &re_data_store::LatestAtQuery,
-    ) -> Option<re_query_cache2::CachedLatestAtMonoResult<C>> {
-        self.query_caches2().latest_at_component::<C>(
+    ) -> Option<re_query_cache::CachedLatestAtMonoResult<C>> {
+        self.query_caches().latest_at_component::<C>(
             self.store(),
             self.resolver(),
             entity_path,
@@ -262,8 +252,8 @@ impl EntityDb {
         &self,
         entity_path: &EntityPath,
         query: &re_data_store::LatestAtQuery,
-    ) -> Option<re_query_cache2::CachedLatestAtMonoResult<C>> {
-        self.query_caches2().latest_at_component_quiet::<C>(
+    ) -> Option<re_query_cache::CachedLatestAtMonoResult<C>> {
+        self.query_caches().latest_at_component_quiet::<C>(
             self.store(),
             self.resolver(),
             entity_path,
@@ -276,8 +266,8 @@ impl EntityDb {
         &self,
         entity_path: &EntityPath,
         query: &re_data_store::LatestAtQuery,
-    ) -> Option<(EntityPath, re_query_cache2::CachedLatestAtMonoResult<C>)> {
-        self.query_caches2()
+    ) -> Option<(EntityPath, re_query_cache::CachedLatestAtMonoResult<C>)> {
+        self.query_caches()
             .latest_at_component_at_closest_ancestor::<C>(
                 self.store(),
                 self.resolver(),
@@ -477,7 +467,6 @@ impl EntityDb {
         let original_store_events = &[store_event];
         self.times_per_timeline.on_events(original_store_events);
         self.query_caches.on_events(original_store_events);
-        self.query_caches2.on_events(original_store_events);
         let clear_cascade = self.tree.on_store_additions(original_store_events);
 
         // Second-pass: update the [`DataStore`] by applying the [`ClearCascade`].
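For orientation, here is roughly what a call site looks like once everything funnels through the one remaining cache. This is a minimal sketch, not code from this patch: it assumes the archetype helper shown in the hunk above is named `latest_at_archetype` (its name sits above the visible hunk) and that the caller already holds a valid `LatestAtQuery`; the entity path is made up.

    use re_entity_db::EntityDb;
    use re_log_types::EntityPath;
    use re_query2::PromiseResult;
    use re_types::archetypes::Points2D;

    fn points_or_none(db: &EntityDb, query: &re_data_store::LatestAtQuery) -> Option<Points2D> {
        let entity_path = EntityPath::from("my/points"); // hypothetical entity
        match db.latest_at_archetype::<Points2D>(&entity_path, query) {
            // Promise still resolving: callers typically retry on the next frame.
            PromiseResult::Pending => None,
            // `Ready(None)` is the `PrimaryNotFound` case short-circuited above.
            PromiseResult::Ready(arch) => arch,
            PromiseResult::Error(_) => None,
        }
    }

Note that the `where` bound (`CachedLatestAtResults: ToArchetype<A>`) is what gates this helper to the archetypes covered by the new `to_archetype` feature.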
@@ -487,7 +476,6 @@ impl EntityDb { let new_store_events = self.on_clear_cascade(clear_cascade); self.times_per_timeline.on_events(&new_store_events); self.query_caches.on_events(&new_store_events); - self.query_caches2.on_events(&new_store_events); let clear_cascade = self.tree.on_store_additions(&new_store_events); // Clears don't affect `Clear` components themselves, therefore we cannot have recursive @@ -643,15 +631,13 @@ impl EntityDb { times_per_timeline, tree, data_store: _, - query_caches, resolver: _, - query_caches2, + query_caches, stats: _, } = self; times_per_timeline.on_events(store_events); query_caches.on_events(store_events); - query_caches2.on_events(store_events); let store_events = store_events.iter().collect_vec(); let compacted = CompactedStoreEvents::new(&store_events); diff --git a/crates/re_entity_db/src/lib.rs b/crates/re_entity_db/src/lib.rs index 78a87c87a7d2..92389cc07aeb 100644 --- a/crates/re_entity_db/src/lib.rs +++ b/crates/re_entity_db/src/lib.rs @@ -44,7 +44,7 @@ pub use editable_auto_value::EditableAutoValue; pub mod external { pub use re_data_store; pub use re_query2; - pub use re_query_cache2; + pub use re_query_cache; } // ---------------------------------------------------------------------------- diff --git a/crates/re_query_cache/Cargo.toml b/crates/re_query_cache/Cargo.toml index be8c2adf62af..71a9dfbe35ef 100644 --- a/crates/re_query_cache/Cargo.toml +++ b/crates/re_query_cache/Cargo.toml @@ -20,26 +20,37 @@ all-features = true [features] -default = [] +default = ["to_archetype"] + +## Implements `ToArchetype` for all builtin archetypes on `CachedLatestAtResults`. +to_archetype = ["dep:re_types", "dep:re_types_blueprint"] [dependencies] # Rerun dependencies: re_data_store.workspace = true +re_error.workspace = true re_format.workspace = true re_log.workspace = true re_log_types.workspace = true -re_query.workspace = true +re_query2.workspace = true re_tracing.workspace = true re_types_core.workspace = true +# Rerun dependencies (optional): +re_types = { workspace = true, optional = true } +re_types_blueprint = { workspace = true, optional = true } + # External dependencies: ahash.workspace = true +anyhow.workspace = true +backtrace.workspace = true indent.workspace = true +indexmap.workspace = true itertools.workspace = true +nohash-hasher.workspace = true parking_lot.workspace = true paste.workspace = true seq-macro.workspace = true -static_assertions.workspace = true web-time.workspace = true @@ -56,11 +67,15 @@ similar-asserts.workspace = true bench = false +[[example]] +name = "latest_at_archetype" +required-features = ["to_archetype"] + + [[bench]] name = "flat_vec_deque" harness = false - [[bench]] name = "latest_at" harness = false diff --git a/crates/re_query_cache/benches/latest_at.rs b/crates/re_query_cache/benches/latest_at.rs index e74a7e0a20df..599c73726ef9 100644 --- a/crates/re_query_cache/benches/latest_at.rs +++ b/crates/re_query_cache/benches/latest_at.rs @@ -6,10 +6,12 @@ use criterion::{criterion_group, criterion_main, Criterion}; use itertools::Itertools; use re_data_store::{DataStore, LatestAtQuery, StoreSubscriber}; use re_log_types::{entity_path, DataRow, EntityPath, RowId, TimeInt, TimeType, Timeline}; -use re_query_cache::Caches; +use re_query2::{clamped_zip_1x1, PromiseResolver}; +use re_query_cache::{CachedLatestAtResults, Caches}; use re_types::{ archetypes::Points2D, components::{Color, InstanceKey, Position2D, Text}, + Archetype as _, }; use re_types_core::Loggable as _; @@ -278,33 +280,47 @@ fn 
query_and_visit_points(
     store: &DataStore,
     paths: &[EntityPath],
 ) -> Vec<SavePoint> {
+    let resolver = PromiseResolver::default();
+
     let timeline_frame_nr = Timeline::new("frame_nr", TimeType::Sequence);
     let query = LatestAtQuery::new(timeline_frame_nr, NUM_FRAMES_POINTS as i64 / 2);
-    let mut points = Vec::with_capacity(NUM_POINTS as _);
+    let mut ret = Vec::with_capacity(NUM_POINTS as _);
 
     // TODO(jleibs): Add Radius once we have support for it in field_types
-    for path in paths {
-        caches
-            .query_archetype_pov1_comp1::<Points2D, Position2D, Color, _>(
-                store,
-                &query.clone().into(),
-                path,
-                |(_, _, positions, colors)| {
-                    itertools::izip!(positions.iter(), colors.unwrap().iter()).for_each(
-                        |(pos, color)| {
-                            points.push(SavePoint {
-                                _pos: *pos,
-                                _color: *color,
-                            });
-                        },
-                    );
-                },
-            )
-            .unwrap();
+    for entity_path in paths {
+        let results: CachedLatestAtResults = caches.latest_at(
+            store,
+            &query,
+            entity_path,
+            Points2D::all_components().iter().copied(), // no generics!
+        );
+
+        let points = results.get_required(Position2D::name()).unwrap();
+        let colors = results.get_or_empty(Color::name());
+
+        let points = points
+            .iter_dense::<Position2D>(&resolver)
+            .flatten()
+            .unwrap()
+            .copied();
+
+        let colors = colors
+            .iter_dense::<Color>(&resolver)
+            .flatten()
+            .unwrap()
+            .copied();
+        let color_default_fn = || Color::from(0xFF00FFFF);
+
+        for (point, color) in clamped_zip_1x1(points, colors, color_default_fn) {
+            ret.push(SavePoint {
+                _pos: point,
+                _color: Some(color),
+            });
+        }
     }
-    assert_eq!(NUM_POINTS as usize, points.len());
-    points
+    assert_eq!(NUM_POINTS as usize, ret.len());
+    ret
 }
 
 struct SaveString {
@@ -316,28 +332,43 @@ fn query_and_visit_strings(
     store: &DataStore,
     paths: &[EntityPath],
 ) -> Vec<SaveString> {
+    let resolver = PromiseResolver::default();
+
     let timeline_frame_nr = Timeline::new("frame_nr", TimeType::Sequence);
     let query = LatestAtQuery::new(timeline_frame_nr, NUM_FRAMES_STRINGS as i64 / 2);
     let mut strings = Vec::with_capacity(NUM_STRINGS as _);
 
-    for path in paths {
-        caches
-            .query_archetype_pov1_comp1::<Points2D, Position2D, Text, _>(
-                store,
-                &query.clone().into(),
-                path,
-                |(_, _, _, labels)| {
-                    for label in labels.unwrap() {
-                        strings.push(SaveString {
-                            _label: label.clone(),
-                        });
-                    }
-                },
-            )
-            .unwrap();
+    for entity_path in paths {
+        let results: CachedLatestAtResults = caches.latest_at(
+            store,
+            &query,
+            entity_path,
+            Points2D::all_components().iter().copied(), // no generics!
+        );
+
+        let points = results.get_required(Position2D::name()).unwrap();
+        let labels = results.get_or_empty(Text::name());
+
+        let points = points
+            .iter_dense::<Position2D>(&resolver)
+            .flatten()
+            .unwrap()
+            .copied();
+
+        let labels = labels
+            .iter_dense::<Text>(&resolver)
+            .flatten()
+            .unwrap()
+            .cloned();
+        let label_default_fn = || Text(String::new().into());
+
+        for (_point, label) in clamped_zip_1x1(points, labels, label_default_fn) {
+            strings.push(SaveString {
+                _label: Some(label),
+            });
+        }
     }
 
     assert_eq!(NUM_STRINGS as usize, strings.len());
-    criterion::black_box(strings)
+    strings
 }
diff --git a/crates/re_query_cache2/examples/latest_at.rs b/crates/re_query_cache/examples/latest_at.rs
similarity index 98%
rename from crates/re_query_cache2/examples/latest_at.rs
rename to crates/re_query_cache/examples/latest_at.rs
index c283aac83f0f..e894e10b63a1 100644
--- a/crates/re_query_cache2/examples/latest_at.rs
+++ b/crates/re_query_cache/examples/latest_at.rs
@@ -4,7 +4,7 @@
 use re_log_types::example_components::{MyColor, MyLabel, MyPoint, MyPoints};
 use re_log_types::{build_frame_nr, DataRow, RowId, TimeType, Timeline};
 use re_types_core::{Archetype as _, Loggable as _};
 
-use re_query_cache2::{
+use re_query_cache::{
     clamped_zip_1x2, CachedLatestAtComponentResults, CachedLatestAtResults, PromiseResolver,
     PromiseResult,
 };
@@ -22,7 +22,7 @@ fn main() -> anyhow::Result<()> {
     let query = LatestAtQuery::latest(timeline);
     eprintln!("query:{query:?}");
 
-    let caches = re_query_cache2::Caches::new(&store);
+    let caches = re_query_cache::Caches::new(&store);
 
     // First, get the results for this query.
     //
diff --git a/crates/re_query_cache2/examples/latest_at_archetype.rs b/crates/re_query_cache/examples/latest_at_archetype.rs
similarity index 94%
rename from crates/re_query_cache2/examples/latest_at_archetype.rs
rename to crates/re_query_cache/examples/latest_at_archetype.rs
index 03931bdeb081..94caab585ce2 100644
--- a/crates/re_query_cache2/examples/latest_at_archetype.rs
+++ b/crates/re_query_cache/examples/latest_at_archetype.rs
@@ -7,7 +7,7 @@ use re_types::{
 };
 use re_types_core::{Archetype as _, Loggable as _};
 
-use re_query_cache2::{clamped_zip_1x2, CachedLatestAtResults, PromiseResolver, PromiseResult};
+use re_query_cache::{clamped_zip_1x2, CachedLatestAtResults, PromiseResolver, PromiseResult};
 
 // ---
 
@@ -22,7 +22,7 @@ fn main() -> anyhow::Result<()> {
     let query = LatestAtQuery::latest(timeline);
     eprintln!("query:{query:?}");
 
-    let caches = re_query_cache2::Caches::new(&store);
+    let caches = re_query_cache::Caches::new(&store);
 
     // First, get the results for this query.
     //
@@ -37,7 +37,7 @@ fn main() -> anyhow::Result<()> {
     // Then make use of the `ToArchetype` helper trait in order to query, resolve, deserialize and
     // cache an entire archetype all at once.
- use re_query_cache2::ToArchetype as _; + use re_query_cache::ToArchetype as _; let arch: Points2D = match results.to_archetype(&resolver).flatten() { PromiseResult::Pending => { diff --git a/crates/re_query_cache2/examples/range.rs b/crates/re_query_cache/examples/range.rs similarity index 98% rename from crates/re_query_cache2/examples/range.rs rename to crates/re_query_cache/examples/range.rs index 45d46093beb0..e9bef34822eb 100644 --- a/crates/re_query_cache2/examples/range.rs +++ b/crates/re_query_cache/examples/range.rs @@ -4,7 +4,7 @@ use re_log_types::example_components::{MyColor, MyLabel, MyPoint, MyPoints}; use re_log_types::{build_frame_nr, DataRow, RowId, TimeRange, TimeType, Timeline}; use re_types_core::{Archetype as _, Loggable as _}; -use re_query_cache2::{ +use re_query_cache::{ clamped_zip_1x2, range_zip_1x2, CachedRangeComponentResults, CachedRangeResults, PromiseResolver, PromiseResult, }; @@ -22,7 +22,7 @@ fn main() -> anyhow::Result<()> { let query = RangeQuery::new(timeline, TimeRange::EVERYTHING); eprintln!("query:{query:?}"); - let caches = re_query_cache2::Caches::new(&store); + let caches = re_query_cache::Caches::new(&store); // First, get the raw results for this query. // diff --git a/crates/re_query_cache/src/cache.rs b/crates/re_query_cache/src/cache.rs index 6b03a1b2fe5e..6ac7ee9ee433 100644 --- a/crates/re_query_cache/src/cache.rs +++ b/crates/re_query_cache/src/cache.rs @@ -1,194 +1,123 @@ use std::{ - collections::{BTreeMap, VecDeque}, - ops::Range, + collections::{BTreeMap, BTreeSet}, sync::Arc, }; use ahash::{HashMap, HashSet}; -use itertools::Itertools; use parking_lot::RwLock; -use paste::paste; -use seq_macro::seq; -use re_data_store::{DataStore, LatestAtQuery, RangeQuery, StoreDiff, StoreEvent, StoreSubscriber}; -use re_log_types::{EntityPath, RowId, StoreId, TimeInt, TimeRange, Timeline}; -use re_query::ArchetypeView; -use re_types_core::{ - components::InstanceKey, Archetype, ArchetypeName, Component, ComponentName, SizeBytes as _, -}; +use re_data_store::{DataStore, StoreDiff, StoreEvent, StoreSubscriber, TimeInt}; +use re_log_types::{EntityPath, StoreId, TimeRange, Timeline}; +use re_types_core::ComponentName; -use crate::{ErasedFlatVecDeque, FlatVecDeque, LatestAtCache, RangeCache}; +use crate::{LatestAtCache, RangeCache}; // --- -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub enum AnyQuery { - LatestAt(LatestAtQuery), - Range(RangeQuery), +/// Uniquely identifies cached query results in the [`Caches`]. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct CacheKey { + pub entity_path: EntityPath, + pub timeline: Timeline, + pub component_name: ComponentName, } -impl From for AnyQuery { +impl re_types_core::SizeBytes for CacheKey { #[inline] - fn from(query: LatestAtQuery) -> Self { - Self::LatestAt(query) + fn heap_size_bytes(&self) -> u64 { + let Self { + entity_path, + timeline, + component_name, + } = self; + entity_path.heap_size_bytes() + + timeline.heap_size_bytes() + + component_name.heap_size_bytes() } } -impl From for AnyQuery { +impl std::fmt::Debug for CacheKey { #[inline] - fn from(query: RangeQuery) -> Self { - Self::Range(query) - } -} - -// --- - -/// Maintains the top-level cache mappings. -pub struct Caches { - /// The [`StoreId`] of the associated [`DataStore`]. - store_id: StoreId, - - // NOTE: `Arc` so we can cheaply free the top-level lock early when needed. 
- per_cache_key: RwLock>>>, -} - -impl std::fmt::Debug for Caches { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let Self { - store_id, - per_cache_key, + entity_path, + timeline, + component_name, } = self; - - let mut strings = Vec::new(); - - strings.push(format!("[Caches({store_id})]")); - - let per_cache_key = per_cache_key.read(); - let per_cache_key: BTreeMap<_, _> = per_cache_key.iter().collect(); - - for (cache_key, caches_per_archetype) in &per_cache_key { - let caches_per_archetype = caches_per_archetype.read(); - strings.push(format!( - " [{cache_key:?} (pending_temporal={:?} pending_timeless={:?})]", - caches_per_archetype - .pending_temporal_invalidation - .map(|t| cache_key - .timeline - .format_time_range_utc(&TimeRange::new(t, TimeInt::MAX))), - caches_per_archetype.pending_static_invalidation, - )); - strings.push(indent::indent_all_by( - 4, - format!("{caches_per_archetype:?}"), - )); - } - - f.write_str(&strings.join("\n").replace("\n\n", "\n")) + f.write_fmt(format_args!( + "{entity_path}:{component_name} on {}", + timeline.name() + )) } } -impl std::ops::Deref for Caches { - type Target = RwLock>>>; - - #[inline] - fn deref(&self) -> &Self::Target { - &self.per_cache_key - } -} - -impl Caches { +impl CacheKey { #[inline] - pub fn new(store: &DataStore) -> Self { + pub fn new( + entity_path: impl Into, + timeline: impl Into, + component_name: impl Into, + ) -> Self { Self { - store_id: store.id().clone(), - per_cache_key: Default::default(), + entity_path: entity_path.into(), + timeline: timeline.into(), + component_name: component_name.into(), } } } -#[derive(Default)] -pub struct CachesPerArchetype { - /// Which [`Archetype`] are we querying for? - /// - /// This is very important because of our data model: we not only query for components, but we - /// query for components from a specific point-of-view (the so-called primary component). - /// Different archetypes have different point-of-views, and therefore can end up with different - /// results, even from the same raw data. - // - // NOTE: `Arc` so we can cheaply free the archetype-level lock early when needed. - // - // TODO(cmc): At some point we should probably just store the PoV and optional components rather - // than an `ArchetypeName`: the query system doesn't care about archetypes. - pub(crate) latest_at_per_archetype: RwLock>>>, - - /// Which [`Archetype`] are we querying for? - /// - /// This is very important because of our data model: we not only query for components, but we - /// query for components from a specific point-of-view (the so-called primary component). - /// Different archetypes have different point-of-views, and therefore can end up with different - /// results, even from the same raw data. - // - // NOTE: `Arc` so we can cheaply free the archetype-level lock early when needed. - // - // TODO(cmc): At some point we should probably just store the PoV and optional components rather - // than an `ArchetypeName`: the query system doesn't care about archetypes. - pub(crate) range_per_archetype: RwLock>>>, +pub struct Caches { + /// The [`StoreId`] of the associated [`DataStore`]. + pub(crate) store_id: StoreId, - /// Everything greater than or equal to this timestamp has been asynchronously invalidated. - /// - /// The next time this cache gets queried, it must remove any entry matching this criteria. - /// `None` indicates that there's no pending invalidation. 
- /// - /// Invalidation is deferred to query time because it is far more efficient that way: the frame - /// time effectively behaves as a natural micro-batching mechanism. - pending_temporal_invalidation: Option, + // NOTE: `Arc` so we can cheaply free the top-level lock early when needed. + pub(crate) latest_at_per_cache_key: RwLock>>>, - /// If `true`, the timeless data associated with this cache has been asynchronously invalidated. - /// - /// If `true`, this cache must remove all of its timeless entries the next time it gets queried. - /// `false` indicates that there's no pending invalidation. - /// - /// Invalidation is deferred to query time because it is far more efficient that way: the frame - /// time effectively behaves as a natural micro-batching mechanism. - pending_static_invalidation: bool, + // NOTE: `Arc` so we can cheaply free the top-level lock early when needed. + pub(crate) range_per_cache_key: RwLock>>>, } -impl std::fmt::Debug for CachesPerArchetype { +impl std::fmt::Debug for Caches { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let CachesPerArchetype { - latest_at_per_archetype, - range_per_archetype, - pending_temporal_invalidation: _, - pending_static_invalidation: _, + let Self { + store_id, + latest_at_per_cache_key, + range_per_cache_key, } = self; let mut strings = Vec::new(); + strings.push(format!("[LatestAt @ {store_id}]")); { - let latest_at_per_archetype = latest_at_per_archetype.read(); - let latest_at_per_archetype: BTreeMap<_, _> = latest_at_per_archetype.iter().collect(); + let latest_at_per_cache_key = latest_at_per_cache_key.read(); + let latest_at_per_cache_key: BTreeMap<_, _> = latest_at_per_cache_key.iter().collect(); - for (archetype_name, latest_at_cache) in &latest_at_per_archetype { - let latest_at_cache = latest_at_cache.read(); + for (cache_key, cache) in &latest_at_per_cache_key { + let cache = cache.read(); strings.push(format!( - "[latest_at for {archetype_name} ({})]", - re_format::format_bytes(latest_at_cache.total_size_bytes() as _) + " [{cache_key:?} (pending_invalidation_min={:?})]", + cache.pending_invalidations.first().map(|&t| cache_key + .timeline + .format_time_range_utc(&TimeRange::new(t, TimeInt::MAX))), )); - strings.push(indent::indent_all_by(2, format!("{latest_at_cache:?}"))); + strings.push(indent::indent_all_by(4, format!("{cache:?}"))); } } + strings.push(format!("[Range @ {store_id}]")); { - let range_per_archetype = range_per_archetype.read(); - let range_per_archetype: BTreeMap<_, _> = range_per_archetype.iter().collect(); + let range_per_cache_key = range_per_cache_key.read(); + let range_per_cache_key: BTreeMap<_, _> = range_per_cache_key.iter().collect(); - for (archetype_name, range_cache) in &range_per_archetype { - let range_cache = range_cache.read(); + for (cache_key, cache) in &range_per_cache_key { + let cache = cache.read(); strings.push(format!( - "[range for {archetype_name} ({})]", - re_format::format_bytes(range_cache.total_size_bytes() as _) + " [{cache_key:?} (pending_invalidation_min={:?})]", + cache.pending_invalidation.map(|t| cache_key + .timeline + .format_time_range_utc(&TimeRange::new(t, TimeInt::MAX))), )); - strings.push(indent::indent_all_by(2, format!("{range_cache:?}"))); + strings.push(indent::indent_all_by(4, format!("{cache:?}"))); } } @@ -197,236 +126,28 @@ impl std::fmt::Debug for CachesPerArchetype { } impl Caches { - /// Clears all caches. 
- #[inline] - pub fn clear(&self) { - self.write().clear(); - } - - /// Gives access to the appropriate `LatestAtCache` according to the specified - /// query parameters. - /// - /// `upsert` is a user-defined callback that will be run first, with full mutable access to the cache. - /// `iter` is a user-defined callback that will be run last, with shared access. - /// - /// These callback semantics allow for reentrancy: you can use the same cache from multiple - /// query contexts (i.e. space views), even in a work-stealing environment. - #[inline] - pub fn with_latest_at( - &self, - store: &DataStore, - entity_path: EntityPath, - query: &LatestAtQuery, - mut upsert: F1, - mut iter: F2, - ) -> (Option, R2) - where - A: Archetype, - F1: FnMut(&mut LatestAtCache) -> R1, - F2: FnMut(&LatestAtCache) -> R2, - { - assert!( - self.store_id == *store.id(), - "attempted to use a query cache {} with the wrong datastore ({})", - self.store_id, - store.id(), - ); - - let key = CacheKey::new(entity_path, query.timeline()); - - let cache = { - let caches_per_archetype = Arc::clone(self.write().entry(key.clone()).or_default()); - // Implicitly releasing top-level cache mappings -- concurrent queries can run once again. - - let removed_bytes = caches_per_archetype.write().handle_pending_invalidation(); - // Implicitly releasing archetype-level cache mappings -- concurrent queries using the - // same `CacheKey` but a different `ArchetypeName` can run once again. - if removed_bytes > 0 { - re_log::trace!( - store_id=%self.store_id, - entity_path = %key.entity_path, - removed = removed_bytes, - "invalidated latest-at caches" - ); - } - - let caches_per_archetype = caches_per_archetype.read(); - let mut latest_at_per_archetype = caches_per_archetype.latest_at_per_archetype.write(); - Arc::clone(latest_at_per_archetype.entry(A::name()).or_default()) - // Implicitly releasing bottom-level cache mappings -- identical concurrent queries - // can run once again. - }; - - // # Multithreading semantics - // - // There is only one situation where this `try_write()` might fail: there is another task that - // is already in the process of upserting that specific cache (e.g. a cloned space view). - // - // That task might be on the same thread (due to work-stealing), or a different one. - // Either way, we need to give up trying to upsert the cache in order to prevent a - // deadlock in case the other task is in fact running on the same thread. - // - // It's fine, though: - // - Best case scenario, the data we need is already cached. - // - Worst case scenario, the data is missing and we'll be missing some data for the current - // frame. - // It'll get cached at some point in an upcoming frame (statistically, we're bound to win - // the race at some point). - // - // Data invalidation happens at the per-archetype cache layer, so this won't return - // out-of-date data in either scenario. - // - // There is a lot of complexity we could add to make this whole process more efficient: - // keep track of failed queries in a queue so we don't rely on probabilities, keep track - // of the thread-local reentrancy state to skip this logic when it's not needed, return raw - // data when the lock is busy and the data isn't already cached, etc. - // - // In the end, this is a edge-case inherent to our current "immediate query" model that we - // already know we want -- and have to -- move away from; the extra complexity isn't worth it. 
- let r1 = cache.try_write().map(|mut cache| upsert(&mut cache)); - // Implicitly releasing the write lock -- if any. - - // # Multithreading semantics - // - // We need the reentrant lock because query contexts (i.e. space views) generally run on a - // work-stealing thread-pool and might swap a task on one thread with another task on the - // same thread, where both tasks happen to query the same exact data (e.g. cloned space views). - // - // See comment above for more details. - let r2 = iter(&cache.read_recursive()); - - (r1, r2) - } - - /// Gives access to the appropriate `RangeCache` according to the specified query parameters. - /// - /// `upsert` is a user-defined callback that will be run first, with full mutable access to the cache. - /// `iter` is a user-defined callback that will be run last, with shared access. - /// - /// These callback semantics allow for reentrancy: you can use the same cache from multiple - /// query contexts (i.e. space views), even in a work-stealing environment. #[inline] - pub fn with_range( - &self, - store: &DataStore, - entity_path: EntityPath, - query: &RangeQuery, - mut upsert: F1, - mut iter: F2, - ) -> (Option, R2) - where - A: Archetype, - F1: FnMut(&mut RangeCache) -> R1, - F2: FnMut(&RangeCache) -> R2, - { - assert!( - self.store_id == *store.id(), - "attempted to use a query cache {} with the wrong datastore ({})", - self.store_id, - store.id(), - ); - - let key = CacheKey::new(entity_path, query.timeline); - - let cache = { - let caches_per_archetype = Arc::clone(self.write().entry(key.clone()).or_default()); - // Implicitly releasing top-level cache mappings -- concurrent queries can run once again. - - let removed_bytes = caches_per_archetype.write().handle_pending_invalidation(); - // Implicitly releasing archetype-level cache mappings -- concurrent queries using the - // same `CacheKey` but a different `ArchetypeName` can run once again. - if removed_bytes > 0 { - re_log::trace!( - store_id=%self.store_id, - entity_path = %key.entity_path, - removed = removed_bytes, - "invalidated range caches" - ); - } - - let caches_per_archetype = caches_per_archetype.read(); - let mut range_per_archetype = caches_per_archetype.range_per_archetype.write(); - Arc::clone(range_per_archetype.entry(A::name()).or_default()) - // Implicitly releasing bottom-level cache mappings -- identical concurrent queries - // can run once again. - }; - - // # Multithreading semantics - // - // There is only one situation where this `try_write()` might fail: there is another task that - // is already in the process of upserting that specific cache (e.g. a cloned space view). - // - // That task might be on the same thread (due to work-stealing), or a different one. - // Either way, we need to give up trying to upsert the cache in order to prevent a - // deadlock in case the other task is in fact running on the same thread. - // - // It's fine, though: - // - Best case scenario, the data we need is already cached. - // - Worst case scenario, the data is missing and we'll be missing some data for the current - // frame. - // It'll get cached at some point in an upcoming frame (statistically, we're bound to win - // the race at some point). - // - // Data invalidation happens at the per-archetype cache layer, so this won't return - // out-of-date data in either scenario. 
- // - // There is a lot of complexity we could add to make this whole process more efficient: - // keep track of failed queries in a queue so we don't rely on probabilities, keep track - // of the thread-local reentrancy state to skip this logic when it's not needed, keep track - // of min-max timestamp values per entity so we can clamp range queries and thus know - // whether the data is already cached or not, etc. - // - // In the end, this is a edge-case inherent to our current "immediate query" model that we - // already know we want -- and have to -- move away from; the extra complexity isn't worth it. - let r1 = cache.try_write().map(|mut cache| upsert(&mut cache)); - // Implicitly releasing the write lock -- if any. - - // # Multithreading semantics - // - // We need the reentrant lock because query contexts (i.e. space views) generally run on a - // work-stealing thread-pool and might swap a task on one thread with another task on the - // same thread, where both tasks happen to query the same exact data (e.g. cloned space views). - // - // See comment above for more details. - let r2 = iter(&cache.read_recursive()); - - (r1, r2) + pub fn new(store: &DataStore) -> Self { + Self { + store_id: store.id().clone(), + latest_at_per_cache_key: Default::default(), + range_per_cache_key: Default::default(), + } } -} -/// Uniquely identifies cached query results in the [`Caches`]. -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct CacheKey { - /// Which [`EntityPath`] is the query targeting? - pub entity_path: EntityPath, - - /// Which [`Timeline`] is the query targeting? - pub timeline: Timeline, -} - -impl std::fmt::Debug for CacheKey { #[inline] - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + pub fn clear(&self) { let Self { - entity_path, - timeline, + store_id: _, + latest_at_per_cache_key, + range_per_cache_key, } = self; - f.write_fmt(format_args!("{entity_path} on {}", timeline.name())) - } -} -impl CacheKey { - #[inline] - pub fn new(entity_path: impl Into, timeline: impl Into) -> Self { - Self { - entity_path: entity_path.into(), - timeline: timeline.into(), - } + latest_at_per_cache_key.write().clear(); + range_per_cache_key.write().clear(); } } -// --- Invalidation --- - impl StoreSubscriber for Caches { #[inline] fn name(&self) -> String { @@ -446,6 +167,14 @@ impl StoreSubscriber for Caches { fn on_events(&mut self, events: &[StoreEvent]) { re_tracing::profile_function!(format!("num_events={}", events.len())); + #[derive(Default, Debug)] + struct CompactedEvents { + static_: HashSet<(EntityPath, ComponentName)>, + temporal: HashMap>, + } + + let mut compacted = CompactedEvents::default(); + for event in events { let StoreEvent { store_id, @@ -466,620 +195,76 @@ impl StoreSubscriber for Caches { row_id: _, times, entity_path, - cells: _, // Don't care: we invalidate at the entity level, not component level. 
+ cells, } = diff; - #[derive(Default, Debug)] - struct CompactedEvents { - timeless: HashSet, - temporal: HashMap, - } - - let mut compacted = CompactedEvents::default(); { re_tracing::profile_scope!("compact events"); if times.is_empty() { - compacted.timeless.insert(entity_path.clone()); + for component_name in cells.keys() { + compacted + .static_ + .insert((entity_path.clone(), *component_name)); + } } - for &(timeline, time) in times { - let key = CacheKey::new(entity_path.clone(), timeline); - let min_time = compacted.temporal.entry(key).or_insert(TimeInt::MAX); - *min_time = TimeInt::min(*min_time, time); + for &(timeline, data_time) in times { + for component_name in cells.keys() { + let key = CacheKey::new(entity_path.clone(), timeline, *component_name); + let data_times = compacted.temporal.entry(key).or_default(); + data_times.insert(data_time); + } } } + } - let caches = self.write(); - // NOTE: Don't release the top-level lock -- even though this cannot happen yet with - // our current macro-architecture, we want to prevent queries from concurrently - // running while we're updating the invalidation flags. + let caches_latest_at = self.latest_at_per_cache_key.write(); + let caches_range = self.range_per_cache_key.write(); + // NOTE: Don't release the top-level locks -- even though this cannot happen yet with + // our current macro-architecture, we want to prevent queries from concurrently + // running while we're updating the invalidation flags. + + { + re_tracing::profile_scope!("static"); // TODO(cmc): This is horribly stupid and slow and can easily be made faster by adding // yet another layer of caching indirection. // But since this pretty much never happens in practice, let's not go there until we // have metrics showing that show we need to. - { - re_tracing::profile_scope!("timeless"); - - for entity_path in compacted.timeless { - for (key, caches_per_archetype) in caches.iter() { - if key.entity_path == entity_path { - caches_per_archetype.write().pending_static_invalidation = true; - } + for (entity_path, component_name) in compacted.static_ { + for (key, cache) in caches_latest_at.iter() { + if key.entity_path == entity_path && key.component_name == component_name { + cache.write().pending_invalidations.insert(TimeInt::STATIC); } } - } - { - re_tracing::profile_scope!("temporal"); - - for (key, time) in compacted.temporal { - if let Some(caches_per_archetype) = caches.get(&key) { - // NOTE: Do _NOT_ lock from within the if clause itself or the guard will live - // for the remainder of the if statement and hell will ensue. - // is - // supposed to catch that but it doesn't, I don't know why. - let mut caches_per_archetype = caches_per_archetype.write(); - if let Some(min_time) = - caches_per_archetype.pending_temporal_invalidation.as_mut() - { - *min_time = TimeInt::min(*min_time, time); - } else { - caches_per_archetype.pending_temporal_invalidation = Some(time); - } + for (key, cache) in caches_range.iter() { + if key.entity_path == entity_path && key.component_name == component_name { + cache.write().pending_invalidation = Some(TimeInt::STATIC); } } } } - } -} - -impl CachesPerArchetype { - /// Removes all entries from the cache that have been asynchronously invalidated. - /// - /// Invalidation is deferred to query time because it is far more efficient that way: the frame - /// time effectively behaves as a natural micro-batching mechanism. - /// - /// Returns the number of bytes removed. 
- fn handle_pending_invalidation(&mut self) -> u64 { - let pending_static_invalidation = self.pending_static_invalidation; - let pending_temporal_invalidation = self.pending_temporal_invalidation.is_some(); - - if !pending_static_invalidation && !pending_temporal_invalidation { - return 0; - } - - re_tracing::profile_function!(); - - let time_threshold = self.pending_temporal_invalidation.unwrap_or(TimeInt::MAX); - - self.pending_temporal_invalidation = None; - self.pending_static_invalidation = false; - - // Timeless being infinitely into the past, this effectively invalidates _everything_ with - // the current coarse-grained / archetype-level caching strategy. - if pending_static_invalidation { - re_tracing::profile_scope!("static"); - - let latest_at_removed_bytes = self - .latest_at_per_archetype - .read() - .values() - .map(|latest_at_cache| latest_at_cache.read().total_size_bytes()) - .sum::(); - let range_removed_bytes = self - .range_per_archetype - .read() - .values() - .map(|range_cache| range_cache.read().total_size_bytes()) - .sum::(); - - *self = CachesPerArchetype::default(); - - return latest_at_removed_bytes + range_removed_bytes; - } - - re_tracing::profile_scope!("temporal"); - - let mut removed_bytes = 0u64; - - for latest_at_cache in self.latest_at_per_archetype.read().values() { - let mut latest_at_cache = latest_at_cache.write(); - removed_bytes = - removed_bytes.saturating_add(latest_at_cache.truncate_at_time(time_threshold)); - } - - for range_cache in self.range_per_archetype.read().values() { - let mut range_cache = range_cache.write(); - removed_bytes = - removed_bytes.saturating_add(range_cache.truncate_at_time(time_threshold)); - } - - removed_bytes - } -} - -// --- - -/// Caches the results of any query for an arbitrary range of time. -/// -/// This caches all the steps involved in getting data ready for space views: -/// - index search, -/// - instance key joining, -/// - deserialization. -/// -/// We share the `CacheBucket` implementation between all types of queries to avoid duplication of -/// logic, especially for things that require metaprogramming, to keep the macro madness to a -/// minimum. -/// In the case of `LatestAt` queries, a `CacheBucket` will always contain a single timestamp worth -/// of data. -#[derive(Default)] -pub struct CacheBucket { - /// The _data_ timestamps and [`RowId`]s of all cached rows. - /// - /// This corresponds to the data time and `RowId` returned by `re_query::query_archetype`. - /// - /// This is guaranteed to always be sorted and dense (i.e. there cannot be a hole in the cached - /// data, unless the raw data itself in the store has a hole at that particular point in time). - /// - /// Reminder: within a single timestamp, rows are sorted according to their [`RowId`]s. - /// - /// Invariant: `data_times.len() == pov_instance_keys.num_entries()` - pub(crate) data_times: VecDeque<(TimeInt, RowId)>, - - /// The [`InstanceKey`]s of the point-of-view components. - /// - /// Invariant: `data_times.len() == pov_instance_keys.num_entries()` - pub(crate) pov_instance_keys: FlatVecDeque, - - /// The resulting component data, pre-deserialized, pre-joined. - /// - /// All the contained FlatVecDeques have the same length as `data_times`. - // - // TODO(#4733): Don't denormalize auto-generated instance keys. - // TODO(#4734): Don't denormalize splatted values. - pub(crate) components: BTreeMap>, - - /// The total size in bytes stored in this bucket. 
- /// - /// Only used so we can decrement the global cache size when the last reference to a bucket - /// gets dropped. - pub(crate) total_size_bytes: u64, - // - // TODO(cmc): secondary cache -} - -impl std::fmt::Debug for CacheBucket { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let Self { - data_times: _, - pov_instance_keys: _, - components, - total_size_bytes: _, - } = self; - - let strings = components - .iter() - .filter(|(_, data)| data.dyn_num_values() > 0) - .map(|(name, data)| { - format!( - "{} {name} values spread across {} entries ({})", - data.dyn_num_values(), - data.dyn_num_entries(), - re_format::format_bytes(data.dyn_total_size_bytes() as _), - ) - }) - .collect_vec(); - - f.write_str(&strings.join("\n").replace("\n\n", "\n")) - } -} - -impl CacheBucket { - // Check invariants in debug builds - fn sanity_check(&self) { - if cfg!(debug_assertions) { - assert_eq!(self.data_times.len(), self.pov_instance_keys.num_entries()); - let n = self.data_times.len(); - for (name, data) in &self.components { - assert_eq!(data.dyn_num_entries(), n, "{name}"); - } - } - } - - #[inline] - pub fn time_range(&self) -> Option { - let first_time = self.data_times.front().map(|(t, _)| *t)?; - let last_time = self.data_times.back().map(|(t, _)| *t)?; - Some(TimeRange::new(first_time, last_time)) - } - - #[inline] - pub fn contains_data_time(&self, data_time: TimeInt) -> bool { - let first_time = self.data_times.front().map_or(&TimeInt::MAX, |(t, _)| t); - let last_time = self.data_times.back().map_or(&TimeInt::MIN, |(t, _)| t); - *first_time <= data_time && data_time <= *last_time - } - - #[inline] - pub fn contains_data_row(&self, data_time: TimeInt, row_id: RowId) -> bool { - self.data_times.binary_search(&(data_time, row_id)).is_ok() - } - - /// How many timestamps' worth of data is stored in this bucket? - #[inline] - pub fn num_entries(&self) -> usize { - self.data_times.len() - } - - #[inline] - pub fn is_empty(&self) -> bool { - self.num_entries() == 0 - } - - // --- - - /// Iterate over the timestamps of the point-of-view components. - #[inline] - pub fn iter_data_times(&self) -> impl Iterator { - self.data_times.iter() - } - - /// Iterate over the [`InstanceKey`] batches of the point-of-view components. - #[inline] - pub fn iter_pov_instance_keys(&self) -> impl Iterator { - self.pov_instance_keys.iter() - } - - /// Iterate over the batches of the specified non-optional component. - #[inline] - pub fn iter_component(&self) -> Option> { - let data = self - .components - .get(&C::name()) - .and_then(|data| data.as_any().downcast_ref::>())?; - Some(data.iter()) - } - - /// Iterate over the batches of the specified optional component. - #[inline] - pub fn iter_component_opt(&self) -> Option]>> { - let data = self - .components - .get(&C::name()) - .and_then(|data| data.as_any().downcast_ref::>>())?; - Some(data.iter()) - } - - // --- - - /// Returns the index range that corresponds to the static data (if any). - /// - /// Use the returned range with one of the range iteration methods: - /// - [`Self::range_data_times`] - /// - [`Self::range_pov_instance_keys`] - /// - [`Self::range_component`] - /// - [`Self::range_component_opt`] - /// - /// Make sure that the bucket hasn't been modified in-between! - /// - /// This is `O(2*log(n))`, so make sure to clone the returned range rather than calling this - /// multiple times. 
- #[inline] - pub fn static_range(&self) -> Range { - static_assertions::const_assert_eq!(TimeInt::STATIC.as_i64(), i64::MIN); - let start_index = 0; - let end_index = self - .data_times - .partition_point(|(data_time, _)| data_time <= &TimeInt::STATIC); - start_index..end_index - } - - /// Returns the index range that corresponds to the specified `time_range`. - /// - /// Use the returned range with one of the range iteration methods: - /// - [`Self::range_data_times`] - /// - [`Self::range_pov_instance_keys`] - /// - [`Self::range_component`] - /// - [`Self::range_component_opt`] - /// - /// Make sure that the bucket hasn't been modified in-between! - /// - /// This is `O(2*log(n))`, so make sure to clone the returned range rather than calling this - /// multiple times. - #[inline] - pub fn entry_range(&self, time_range: TimeRange) -> Range { - let start_index = self - .data_times - .partition_point(|(data_time, _)| data_time < &time_range.min()); - let end_index = self - .data_times - .partition_point(|(data_time, _)| data_time <= &time_range.max()); - start_index..end_index - } - - /// Range over the timestamps of the point-of-view components. - #[inline] - pub fn range_data_times( - &self, - entry_range: Range, - ) -> impl Iterator { - self.data_times.range(entry_range) - } - - /// Range over the [`InstanceKey`] batches of the point-of-view components. - #[inline] - pub fn range_pov_instance_keys( - &self, - entry_range: Range, - ) -> impl Iterator { - self.pov_instance_keys.range(entry_range) - } - - /// Get the raw batches for the specified non-optional component. - #[inline] - pub fn component(&self) -> Option<&FlatVecDeque> { - self.components - .get(&C::name()) - .and_then(|data| data.as_any().downcast_ref::>()) - } - - /// Range over the batches of the specified non-optional component. - #[inline] - pub fn range_component( - &self, - entry_range: Range, - ) -> Option> { - let data = self - .components - .get(&C::name()) - .and_then(|data| data.as_any().downcast_ref::>())?; - Some(data.range(entry_range)) - } - - /// Get the raw batches for the specified optional component. - #[inline] - pub fn component_opt(&self) -> Option<&FlatVecDeque>> { - self.components - .get(&C::name()) - .and_then(|data| data.as_any().downcast_ref::>>()) - } - - /// Range over the batches of the specified optional component. - #[inline] - pub fn range_component_opt( - &self, - entry_range: Range, - ) -> Option]>> { - let data = self - .components - .get(&C::name()) - .and_then(|data| data.as_any().downcast_ref::>>())?; - Some(data.range(entry_range)) - } - - /// Removes everything from the bucket that corresponds to a time equal or greater than the - /// specified `threshold`. - /// - /// Returns the number of bytes removed. 
- #[inline] - pub fn truncate_at_time(&mut self, threshold: TimeInt) -> u64 { - self.sanity_check(); - - let Self { - data_times, - pov_instance_keys, - components, - total_size_bytes, - } = self; - - let mut removed_bytes = 0u64; - - let threshold_idx = data_times.partition_point(|(data_time, _)| data_time < &threshold); - - { - let total_size_bytes_before = data_times.total_size_bytes(); - data_times.truncate(threshold_idx); - removed_bytes += total_size_bytes_before - data_times.total_size_bytes(); - } { - let total_size_bytes_before = pov_instance_keys.total_size_bytes(); - pov_instance_keys.truncate(threshold_idx); - removed_bytes += total_size_bytes_before - pov_instance_keys.total_size_bytes(); - } - - for data in components.values_mut() { - let total_size_bytes_before = data.dyn_total_size_bytes(); - data.dyn_truncate(threshold_idx); - removed_bytes += total_size_bytes_before - data.dyn_total_size_bytes(); - } - - *total_size_bytes = total_size_bytes - .checked_sub(removed_bytes) - .unwrap_or_else(|| { - re_log::debug!( - current = *total_size_bytes, - removed = removed_bytes, - "book keeping underflowed" - ); - u64::MIN - }); - - self.sanity_check(); - - removed_bytes - } -} - -macro_rules! impl_insert { - (for N=$N:expr, M=$M:expr => povs=[$($pov:ident)+] comps=[$($comp:ident)*]) => { paste! { - #[doc = "Inserts the contents of the given [`ArchetypeView`], which are made of the specified"] - #[doc = "`" $N "` point-of-view components and `" $M "` optional components, to the cache."] - #[doc = ""] - #[doc = "Returns the size in bytes of the data that was cached."] - #[doc = ""] - #[doc = "`query_time` must be the time of query, _not_ of the resulting data."] - pub fn []( - &mut self, - query_time: TimeInt, - arch_view: &ArchetypeView, - ) -> ::re_query::Result - where - A: Archetype, - $($pov: Component,)+ - $($comp: Component,)* - { - // NOTE: not `profile_function!` because we want them merged together. - re_tracing::profile_scope!("CacheBucket::insert", format!("arch={} pov={} comp={}", A::name(), $N, $M)); - - self.sanity_check(); - - let pov_row_id = arch_view.primary_row_id(); - let index = self.data_times.partition_point(|t| t < &(query_time, pov_row_id)); - - let mut added_size_bytes = 0u64; - - self.data_times.insert(index, (query_time, pov_row_id)); - added_size_bytes += (query_time, pov_row_id).total_size_bytes(); + re_tracing::profile_scope!("temporal"); + + for (key, times) in compacted.temporal { + if let Some(cache) = caches_latest_at.get(&key) { + cache + .write() + .pending_invalidations + .extend(times.iter().copied()); + } - { - // The `FlatVecDeque` will have to collect the data one way or another: do it ourselves - // instead, that way we can efficiently compute its size while we're at it. 
- let added: FlatVecDeque = arch_view - .iter_instance_keys() - .collect::>() - .into(); - added_size_bytes += added.total_size_bytes(); - self.pov_instance_keys.insert_deque(index, added); + if let Some(cache) = caches_range.get(&key) { + let pending_invalidation = &mut cache.write().pending_invalidation; + let min_time = times.first().copied(); + *pending_invalidation = + Option::min(*pending_invalidation, min_time).or(min_time); + } } - - $(added_size_bytes += self.insert_component::(index, arch_view)?;)+ - $(added_size_bytes += self.insert_component_opt::(index, arch_view)?;)* - - self.sanity_check(); - - self.total_size_bytes += added_size_bytes; - - Ok(added_size_bytes) - } } - }; - - // TODO(cmc): Supporting N>1 generically is quite painful due to limitations in declarative macros, - // not that we care at the moment. - (for N=1, M=$M:expr) => { - seq!(COMP in 1..=$M { - impl_insert!(for N=1, M=$M => povs=[R1] comps=[#(C~COMP)*]); - }); - }; -} - -impl CacheBucket { - /// Alias for [`Self::insert_pov1_comp0`]. - #[inline] - #[allow(dead_code)] - fn insert_pov1( - &mut self, - query_time: TimeInt, - arch_view: &ArchetypeView, - ) -> ::re_query::Result - where - A: Archetype, - R1: Component, - { - self.insert_pov1_comp0::(query_time, arch_view) - } - - seq!(NUM_COMP in 0..10 { - impl_insert!(for N=1, M=NUM_COMP); - }); - - #[inline] - fn insert_component( - &mut self, - at: usize, - arch_view: &ArchetypeView, - ) -> re_query::Result { - re_tracing::profile_function!(C::name()); - // no sanity checks here - we are called while in an invariant-breaking state! - - let num_entries = self.data_times.len(); - - let data = self.components.entry(C::name()).or_insert_with(|| { - Box::new(FlatVecDeque::::from_vecs( - std::iter::repeat(vec![]).take( - num_entries - .checked_sub(1) - .expect("We should have been called AFTER inserting to data_times"), - ), - )) - }); - - debug_assert!(at <= data.dyn_num_entries()); - - // The `FlatVecDeque` will have to collect the data one way or another: do it ourselves - // instead, that way we can efficiently compute its size while we're at it. - let added: FlatVecDeque = arch_view - .iter_required_component::()? - .collect::>() - .into(); - let added_size_bytes = added.total_size_bytes(); - - // NOTE: downcast cannot fail, we create it just above. - let data = data.as_any_mut().downcast_mut::>().unwrap(); - data.insert_deque(at, added); - - Ok(added_size_bytes) - } - - /// This will insert an empty slice for a missing component (instead of N `None` values). - #[inline] - fn insert_component_opt( - &mut self, - at: usize, - arch_view: &ArchetypeView, - ) -> re_query::Result { - re_tracing::profile_function!(C::name()); - // no sanity checks here - we are called while in an invariant-breaking state! - - let num_entries = self.num_entries(); - - let data = self.components.entry(C::name()).or_insert_with(|| { - Box::new(FlatVecDeque::>::from_vecs( - std::iter::repeat(vec![]).take( - num_entries - .checked_sub(1) - .expect("We should have been called AFTER inserting to data_times"), - ), - )) - }); - - debug_assert!(at <= data.dyn_num_entries()); - - let added: FlatVecDeque> = if arch_view.has_component::() { - // The `FlatVecDeque` will have to collect the data one way or another: do it ourselves - // instead, that way we can efficiently computes its size while we're at it. - arch_view - .iter_optional_component::()? 
- .collect::>>() - .into() - } else { - // If an optional component is missing entirely, we just store an empty slice in its - // stead, rather than a bunch of `None` values. - let mut added = FlatVecDeque::>::new(); - added.push_back(std::iter::empty()); - added - }; - let added_size_bytes = added.total_size_bytes(); - - // NOTE: downcast cannot fail, we create it just above. - let data = data - .as_any_mut() - .downcast_mut::>>() - .unwrap(); - data.insert_deque(at, added); - - Ok(added_size_bytes) + } } } diff --git a/crates/re_query_cache/src/cache_stats.rs b/crates/re_query_cache/src/cache_stats.rs index 7c360454f000..7c205f5a8321 100644 --- a/crates/re_query_cache/src/cache_stats.rs +++ b/crates/re_query_cache/src/cache_stats.rs @@ -1,9 +1,9 @@ use std::collections::BTreeMap; -use re_log_types::{EntityPath, TimeRange, Timeline}; -use re_types_core::{components::InstanceKey, ComponentName, Loggable as _, SizeBytes as _}; +use re_log_types::TimeRange; +use re_types_core::SizeBytes as _; -use crate::{cache::CacheBucket, Caches, LatestAtCache, RangeCache}; +use crate::{CacheKey, Caches}; // --- @@ -12,8 +12,8 @@ use crate::{cache::CacheBucket, Caches, LatestAtCache, RangeCache}; /// Fetch them via [`Caches::stats`]. #[derive(Default, Debug, Clone)] pub struct CachesStats { - pub latest_at: BTreeMap, - pub range: BTreeMap>, + pub latest_at: BTreeMap, + pub range: BTreeMap, CachedComponentStats)>, } impl CachesStats { @@ -27,171 +27,73 @@ impl CachesStats { latest_at.values().map(|stats| stats.total_size_bytes).sum(); let range_size_bytes: u64 = range .values() - .flat_map(|all_ranges| { - all_ranges - .iter() - .map(|(_, _, stats)| stats.total_size_bytes) - }) + .map(|(_, stats)| stats.total_size_bytes) .sum(); latest_at_size_bytes + range_size_bytes } } -/// Stats for a cached entity. -#[derive(Debug, Clone)] -pub struct CachedEntityStats { - pub total_rows: u64, - pub total_size_bytes: u64, - - /// Only if `detailed_stats` is `true` (see [`Caches::stats`]). - pub per_component: Option>, -} - -impl CachedEntityStats { - #[inline] - pub fn is_empty(&self) -> bool { - // NOTE: That looks non-sensical, but it can happen if the cache is bugged, which we'd like - // to know. - self.total_rows == 0 && self.total_size_bytes == 0 - } -} - /// Stats for a cached component. #[derive(Default, Debug, Clone)] pub struct CachedComponentStats { - pub total_rows: u64, + pub total_indices: u64, pub total_instances: u64, pub total_size_bytes: u64, } impl Caches { /// Computes the stats for all primary caches. - /// - /// `per_component` toggles per-component stats. 
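The angle-bracketed generics in the `CachesStats` hunk above were lost in transit; spelled out in full, the new stats containers most plausibly read as follows. This is a best-effort reconstruction from the hunk's own imports (`TimeRange`, `CacheKey`) and from how the fields are consumed in `total_size_bytes()`, not verbatim patch text:

    use std::collections::BTreeMap;

    use re_log_types::TimeRange;

    use crate::{CacheKey, CachedComponentStats};

    // Stats are now keyed per `CacheKey` instead of per entity path, and each
    // range entry carries the cached time range alongside its stats.
    pub struct CachesStats {
        pub latest_at: BTreeMap<CacheKey, CachedComponentStats>,
        pub range: BTreeMap<CacheKey, (Option<TimeRange>, CachedComponentStats)>,
    }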
- pub fn stats(&self, detailed_stats: bool) -> CachesStats { + pub fn stats(&self) -> CachesStats { re_tracing::profile_function!(); - fn upsert_bucket_stats( - per_component: &mut BTreeMap, - bucket: &CacheBucket, - ) { - let CacheBucket { - data_times, - pov_instance_keys, - components, - total_size_bytes: _, - } = bucket; - - { - let stats: &mut CachedComponentStats = - per_component.entry("".into()).or_default(); - stats.total_rows += data_times.len() as u64; - stats.total_instances += data_times.len() as u64; - stats.total_size_bytes += data_times.total_size_bytes(); - } - - { - let stats: &mut CachedComponentStats = - per_component.entry(InstanceKey::name()).or_default(); - stats.total_rows += pov_instance_keys.num_entries() as u64; - stats.total_instances += pov_instance_keys.num_values() as u64; - stats.total_size_bytes += pov_instance_keys.total_size_bytes(); - } - - for (component_name, data) in components { - let stats: &mut CachedComponentStats = - per_component.entry(*component_name).or_default(); - stats.total_rows += data.dyn_num_entries() as u64; - stats.total_instances += data.dyn_num_values() as u64; - stats.total_size_bytes += data.dyn_total_size_bytes(); - } - } - - let caches = self.read().clone(); - // Implicitly releasing top-level cache mappings -- concurrent queries can run once again. - - let latest_at = caches - .iter() - .map(|(key, caches_per_arch)| { - (key.entity_path.clone(), { - let mut total_size_bytes = 0u64; - let mut total_rows = 0u64; - let mut per_component = detailed_stats.then(BTreeMap::default); - - for latest_at_cache in caches_per_arch - .read() - .latest_at_per_archetype - .read() - .values() - { - let latest_at_cache @ LatestAtCache { - per_query_time: _, - per_data_time, - .. - } = &*latest_at_cache.read(); - - total_size_bytes += latest_at_cache.total_size_bytes(); - total_rows = per_data_time.len() as u64; - - if let Some(per_component) = per_component.as_mut() { - re_tracing::profile_scope!("detailed"); - - for bucket in per_data_time.values() { - upsert_bucket_stats(per_component, bucket); - } - } - } - - CachedEntityStats { - total_size_bytes, - total_rows, - - per_component, - } + let latest_at = { + let latest_at = self.latest_at_per_cache_key.read_recursive().clone(); + // Implicitly releasing top-level cache mappings -- concurrent queries can run once again. 
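That one comment is the whole concurrency story of the new `stats()`: the map is cloned while a parking_lot read guard is alive, the guard drops at the end of the statement, and all the slow aggregation then runs on the snapshot without blocking concurrent queries. The same pattern in isolation, assuming only `parking_lot` and a toy map:

    use std::collections::BTreeMap;

    use parking_lot::RwLock;

    fn snapshot_and_sum(caches: &RwLock<BTreeMap<String, u64>>) -> u64 {
        // The guard lives only for this statement: the clone happens under the
        // read lock, and the lock is released before any heavy work starts.
        let snapshot = caches.read_recursive().clone();

        // Writers can make progress again while we crunch the snapshot.
        snapshot.values().sum()
    }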
+
+            latest_at
+                .iter()
+                .map(|(key, cache)| {
+                    let cache = cache.read_recursive();
+                    (
+                        key.clone(),
+                        CachedComponentStats {
+                            total_indices: cache.per_data_time.len() as _,
+                            total_instances: cache
+                                .per_data_time
+                                .values()
+                                .map(|results| results.num_instances())
+                                .sum(),
+                            total_size_bytes: cache.total_size_bytes(),
+                        },
+                    )
                 })
-            })
-            .collect();
-
-        let range = caches
-            .iter()
-            .map(|(key, caches_per_arch)| {
-                (key.entity_path.clone(), {
-                    caches_per_arch
-                        .read()
-                        .range_per_archetype
-                        .read()
-                        .values()
-                        .map(|range_cache| {
-                            let range_cache @ RangeCache {
-                                per_data_time,
-                                timeline: _,
-                            } = &*range_cache.read();
-
-                            let total_rows = per_data_time.data_times.len() as u64;
-
-                            let mut per_component = detailed_stats.then(BTreeMap::default);
-                            if let Some(per_component) = per_component.as_mut() {
-                                re_tracing::profile_scope!("detailed");
-
-                                upsert_bucket_stats(per_component, per_data_time);
-                            }
-
-                            (
-                                key.timeline,
-                                per_data_time.time_range().unwrap_or(TimeRange::EMPTY),
-                                CachedEntityStats {
-                                    total_size_bytes: range_cache.total_size_bytes(),
-                                    total_rows,
-
-                                    per_component,
-                                },
-                            )
-                        })
-                        .collect()
+                .collect()
+        };
+
+        let range = {
+            let range = self.range_per_cache_key.read_recursive().clone();
+            // Implicitly releasing top-level cache mappings -- concurrent queries can run once again.
+
+            range
+                .iter()
+                .map(|(key, cache)| {
+                    let cache = cache.read_recursive();
+                    let cache = cache.per_data_time.read_recursive();
+                    (
+                        key.clone(),
+                        (
+                            cache.time_range(),
+                            CachedComponentStats {
+                                total_indices: cache.indices.len() as _,
+                                total_instances: cache.num_instances(),
+                                total_size_bytes: cache.total_size_bytes(),
+                            },
+                        ),
+                    )
                 })
-            })
-            .collect();
+                .collect()
+        };
 
     CachesStats { latest_at, range }
 }
diff --git a/crates/re_query_cache/src/flat_vec_deque.rs b/crates/re_query_cache/src/flat_vec_deque.rs
index 8cc9abc9b1a8..1a89f41628e9 100644
--- a/crates/re_query_cache/src/flat_vec_deque.rs
+++ b/crates/re_query_cache/src/flat_vec_deque.rs
@@ -165,6 +165,17 @@ impl<T> From<VecDeque<T>> for FlatVecDeque<T> {
     }
 }
 
+impl<T> From<Vec<T>> for FlatVecDeque<T> {
+    #[inline]
+    fn from(values: Vec<T>) -> Self {
+        let num_values = values.len();
+        Self {
+            values: values.into(),
+            offsets: std::iter::once(num_values).collect(),
+        }
+    }
+}
+
 impl<T> Default for FlatVecDeque<T> {
     #[inline]
     fn default() -> Self {
diff --git a/crates/re_query_cache/src/latest_at.rs b/crates/re_query_cache/src/latest_at.rs
deleted file mode 100644
index 4804b513801d..000000000000
--- a/crates/re_query_cache/src/latest_at.rs
+++ /dev/null
@@ -1,332 +0,0 @@
-use std::{collections::BTreeMap, sync::Arc};
-
-use ahash::HashMap;
-use paste::paste;
-use seq_macro::seq;
-
-use re_data_store::{DataStore, LatestAtQuery, TimeInt};
-use re_log_types::{EntityPath, RowId, Timeline};
-use re_query::query_archetype;
-use re_types_core::{components::InstanceKey, Archetype, Component, SizeBytes};
-
-use crate::{CacheBucket, Caches};
-
-// --- Data structures ---
-
-/// Caches the results of `LatestAt` queries.
-#[derive(Default)]
-pub struct LatestAtCache {
-    /// Organized by _query_ time.
-    ///
-    /// If the data you're looking for isn't in here, try partially running the query and check
-    /// if there is any data available for the resulting _data_ time in [`Self::per_data_time`].
-    //
-    // NOTE: `Arc` so we can deduplicate buckets across query time & data time.
-    pub per_query_time: BTreeMap<TimeInt, Arc<CacheBucket>>,
-
-    /// Organized by _data_ time.
- /// - /// Due to how our latest-at semantics work, any number of queries at time `T+n` where `n >= 0` - /// can result in a data time of `T`. - // - // NOTE: `Arc` so we can deduplicate buckets across query time & data time. - pub per_data_time: BTreeMap>, - - /// For debugging purposes. - pub(crate) timeline: Timeline, - - /// Total size of the data stored in this cache in bytes. - total_size_bytes: u64, -} - -impl std::fmt::Debug for LatestAtCache { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let Self { - per_query_time, - per_data_time, - timeline, - total_size_bytes: _, - } = self; - - let mut strings = Vec::new(); - - let data_times_per_bucket: HashMap<_, _> = per_data_time - .iter() - .map(|(time, bucket)| (Arc::as_ptr(bucket), *time)) - .collect(); - - for (query_time, bucket) in per_query_time { - let query_time = timeline.typ().format_utc(*query_time); - let data_time = data_times_per_bucket - .get(&Arc::as_ptr(bucket)) - .map_or_else(|| "MISSING?!".to_owned(), |t| timeline.typ().format_utc(*t)); - strings.push(format!( - "query_time={query_time} -> data_time={data_time} ({})", - re_format::format_bytes(bucket.total_size_bytes as _), - )); - strings.push(indent::indent_all_by(2, format!("{bucket:?}"))); - } - - f.write_str(&strings.join("\n").replace("\n\n", "\n")) - } -} - -impl SizeBytes for LatestAtCache { - #[inline] - fn heap_size_bytes(&self) -> u64 { - self.total_size_bytes - } -} - -impl LatestAtCache { - /// Removes everything from the cache that corresponds to a time equal or greater than the - /// specified `threshold`. - /// - /// Reminder: invalidating static data is the same as invalidating everything, so just reset - /// the `LatestAtCache` entirely in that case. - /// - /// Returns the number of bytes removed. - #[inline] - pub fn truncate_at_time(&mut self, threshold: TimeInt) -> u64 { - let Self { - per_query_time, - per_data_time, - timeline: _, - total_size_bytes, - } = self; - - let mut removed_bytes = 0u64; - - per_query_time.retain(|&query_time, _| query_time < threshold); - - // Buckets for latest-at queries are guaranteed to only ever contain a single entry, so - // just remove the buckets entirely directly. - per_data_time.retain(|&data_time, bucket| { - if data_time < threshold { - return true; - } - - // Only if that bucket is about to be dropped. - if Arc::strong_count(bucket) == 1 { - removed_bytes += bucket.total_size_bytes; - } - - false - }); - - *total_size_bytes = total_size_bytes - .checked_sub(removed_bytes) - .unwrap_or_else(|| { - re_log::debug!( - current = *total_size_bytes, - removed = removed_bytes, - "book keeping underflowed" - ); - u64::MIN - }); - - removed_bytes - } -} - -// --- Queries --- - -macro_rules! impl_query_archetype_latest_at { - (for N=$N:expr, M=$M:expr => povs=[$($pov:ident)+] comps=[$($comp:ident)*]) => { paste! 
{ - #[doc = "Cached implementation of [`re_query::query_archetype`] and [`re_query::range_archetype`]"] - #[doc = "(combined) for `" $N "` point-of-view components and `" $M "` optional components."] - #[allow(non_snake_case)] - pub fn []<'a, A, $($pov,)+ $($comp,)* F>( - &self, - store: &'a DataStore, - query: &LatestAtQuery, - entity_path: &'a EntityPath, - mut f: F, - ) -> ::re_query::Result<()> - where - A: Archetype + 'a, - $($pov: Component,)+ - $($comp: Component,)* - F: FnMut( - ( - (TimeInt, RowId), - &[InstanceKey], - $(&[$pov],)+ - $(Option<&[Option<$comp>]>,)* - ), - ), - { - let iter_results = |bucket: &crate::CacheBucket, f: &mut F| -> crate::Result<()> { - // Profiling this in isolation can be useful, but adds a lot of noise for small queries. - // re_tracing::profile_scope!("iter"); - - let it = itertools::izip!( - bucket.iter_data_times(), - bucket.iter_pov_instance_keys(), - $(bucket.iter_component::<$pov>() - .ok_or_else(|| re_query::ComponentNotFoundError(<$pov>::name()))?,)+ - $(bucket.iter_component_opt::<$comp>() - .map_or_else( - || itertools::Either::Left(std::iter::repeat(&[] as &[Option<$comp>])), - |it| itertools::Either::Right(it)), - )* - ).map(|((time, row_id), instance_keys, $($pov,)+ $($comp,)*)| { - ( - (*time, *row_id), - instance_keys, - $($pov,)+ - $((!$comp.is_empty()).then_some($comp),)* - ) - }); - - for data in it { - f(data); - } - - Ok(()) - }; - - let create_and_fill_bucket = | - data_time: TimeInt, - arch_view: &::re_query::ArchetypeView, - | -> crate::Result { - re_log::trace!(data_time=?data_time, ?data_time, "fill"); - - // Grabbing the current time is quite costly on web. - #[cfg(not(target_arch = "wasm32"))] - let now = web_time::Instant::now(); - - let mut bucket = crate::CacheBucket::default(); - bucket.[]::(data_time, &arch_view)?; - - #[cfg(not(target_arch = "wasm32"))] - { - let elapsed = now.elapsed(); - ::re_log::trace!( - store_id=%store.id(), - %entity_path, - archetype=%A::name(), - added_size_bytes=bucket.total_size_bytes, - "cached new entry in {elapsed:?} ({:0.3} entries/s)", - 1f64 / elapsed.as_secs_f64() - ); - } - - Ok(bucket) - }; - - let upsert_callback = |query: &LatestAtQuery, latest_at_cache: &mut crate::LatestAtCache| -> crate::Result<()> { - re_tracing::profile_scope!("latest_at", format!("{query:?}")); - - let crate::LatestAtCache { - per_query_time, - per_data_time, - timeline: _, - total_size_bytes, - } = latest_at_cache; - - let query_time_bucket_at_query_time = match per_query_time.entry(query.at()) { - std::collections::btree_map::Entry::Occupied(_) => { - // Fastest path: we have an entry for this exact query time, no need to look any - // further. - re_log::trace!(query_time=?query.at(), "cache hit (query time)"); - return Ok(()); - } - std::collections::btree_map::Entry::Vacant(entry) => entry, - }; - - let arch_view = query_archetype::(store, &query, entity_path)?; - let data_time = arch_view.data_time(); - - // Fast path: we've run the query and realized that we already have the data for the resulting - // _data_ time, so let's use that to avoid join & deserialization costs. 
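Elaborating on that comment before the code that implements it: `per_query_time` and `per_data_time` are two views onto the same `Arc`'d buckets, so a data-time hit costs only an `Arc::clone` plus a map insert that aliases the bucket under the new query time. In miniature, with the bucket stubbed as a `String` and data-time resolution faked by rounding down (both assumptions for brevity), the three paths of the removed code look like:

    use std::{collections::BTreeMap, sync::Arc};

    #[derive(Default)]
    struct ToyLatestAtCache {
        per_query_time: BTreeMap<i64, Arc<String>>,
        per_data_time: BTreeMap<i64, Arc<String>>,
    }

    impl ToyLatestAtCache {
        fn query(&mut self, query_time: i64) -> Arc<String> {
            // Fastest path: exact query-time hit.
            if let Some(bucket) = self.per_query_time.get(&query_time) {
                return Arc::clone(bucket);
            }
            // Fake the store lookup: latest data at or before the query time.
            let data_time = query_time - (query_time % 10);
            // Fast path: data-time hit -- alias the bucket under this query time too.
            if let Some(bucket) = self.per_data_time.get(&data_time).map(Arc::clone) {
                self.per_query_time.insert(query_time, Arc::clone(&bucket));
                return bucket;
            }
            // Complete miss: build the bucket once, register it under both times.
            let bucket = Arc::new(format!("joined+deserialized data @ {data_time}"));
            self.per_data_time.insert(data_time, Arc::clone(&bucket));
            self.per_query_time.insert(query_time, Arc::clone(&bucket));
            bucket
        }
    }

    fn main() {
        let mut cache = ToyLatestAtCache::default();
        let a = cache.query(12); // miss: builds the data-time-10 bucket
        let b = cache.query(15); // data-time hit: same bucket, new alias
        assert!(Arc::ptr_eq(&a, &b));
    }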
- if let Some(data_time_bucket_at_data_time) = per_data_time.get(&data_time) { - re_log::trace!(query_time=?query.at(), ?data_time, "cache hit (data time)"); - - query_time_bucket_at_query_time.insert(Arc::clone(&data_time_bucket_at_data_time)); - - // We now know for a fact that a query at that data time would yield the same - // results: copy the bucket accordingly so that the next cache hit for that query - // time ends up taking the fastest path. - let query_time_bucket_at_data_time = per_query_time.entry(data_time); - query_time_bucket_at_data_time - .and_modify(|v| *v = Arc::clone(&data_time_bucket_at_data_time)) - .or_insert(Arc::clone(&data_time_bucket_at_data_time)); - - return Ok(()); - } - - // Slowest path: this is a complete cache miss. - { - re_log::trace!(query_time=?query.at(), ?data_time, "cache miss"); - - let bucket = Arc::new(create_and_fill_bucket(data_time, &arch_view)?); - *total_size_bytes += bucket.total_size_bytes; - let query_time_bucket_at_query_time = query_time_bucket_at_query_time.insert(bucket); - - let data_time_bucket_at_data_time = per_data_time.entry(data_time); - data_time_bucket_at_data_time - .and_modify(|v| *v = Arc::clone(&query_time_bucket_at_query_time)) - .or_insert(Arc::clone(&query_time_bucket_at_query_time)); - - Ok(()) - } - }; - - let iter_callback = |query: &LatestAtQuery, latest_at_cache: &crate::LatestAtCache, f: &mut F| { - re_tracing::profile_scope!("latest_at", format!("{query:?}")); - - let crate::LatestAtCache { - per_query_time, - per_data_time: _, - timeline: _, - total_size_bytes: _, - } = latest_at_cache; - - // Expected path: cache was properly upserted. - if let Some(query_time_bucket_at_query_time) = per_query_time.get(&query.at()) { - return iter_results(query_time_bucket_at_query_time, f); - } - - re_log::trace!( - store_id = %store.id(), - %entity_path, - ?query, - "either no data exist at this time or we couldn't upsert the cache (write lock was busy)" - ); - - Ok(()) - }; - - - let (res1, res2) = self.with_latest_at::( - store, - entity_path.clone(), - query, - |latest_at_cache| upsert_callback(query, latest_at_cache), - |latest_at_cache| iter_callback(query, latest_at_cache, &mut f), - ); - - if let Some(res1) = res1 { - res1?; - } - res2?; - - Ok(()) - } } - }; - - // TODO(cmc): Supporting N>1 generically is quite painful due to limitations in declarative macros, - // not that we care at the moment. 
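The `seq!`/`paste!` pair referenced in that TODO is how every one of these deleted modules copes with the limitation: rather than being generic over arity, each module stamps out ten concrete methods (`..._comp0` through `..._comp9`) at compile time. A standalone illustration of the expansion mechanism, deliberately unrelated to the cache types:

    use seq_macro::seq;

    // The whole body repeats once per M -- the same shape as the
    // `seq!(NUM_COMP in 0..10 { ... })` calls in these files.
    // `~` splices the number into the identifier, like `C~COMP` above.
    seq!(M in 0..3 {
        fn handler_~M() -> usize {
            M
        }
    });

    fn main() {
        assert_eq!(handler_0() + handler_1() + handler_2(), 0 + 1 + 2);
    }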
- (for N=1, M=$M:expr) => { - seq!(COMP in 1..=$M { - impl_query_archetype_latest_at!(for N=1, M=$M => povs=[R1] comps=[#(C~COMP)*]); - }); - }; -} - -impl Caches { - seq!(NUM_COMP in 0..10 { - impl_query_archetype_latest_at!(for N=1, M=NUM_COMP); - }); -} diff --git a/crates/re_query_cache2/src/latest_at/helpers.rs b/crates/re_query_cache/src/latest_at/helpers.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/helpers.rs rename to crates/re_query_cache/src/latest_at/helpers.rs diff --git a/crates/re_query_cache2/src/latest_at/mod.rs b/crates/re_query_cache/src/latest_at/mod.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/mod.rs rename to crates/re_query_cache/src/latest_at/mod.rs diff --git a/crates/re_query_cache2/src/latest_at/query.rs b/crates/re_query_cache/src/latest_at/query.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/query.rs rename to crates/re_query_cache/src/latest_at/query.rs diff --git a/crates/re_query_cache2/src/latest_at/results.rs b/crates/re_query_cache/src/latest_at/results.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/results.rs rename to crates/re_query_cache/src/latest_at/results.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/.gitattributes b/crates/re_query_cache/src/latest_at/to_archetype/.gitattributes similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/.gitattributes rename to crates/re_query_cache/src/latest_at/to_archetype/.gitattributes diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/annotation_context.rs b/crates/re_query_cache/src/latest_at/to_archetype/annotation_context.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/annotation_context.rs rename to crates/re_query_cache/src/latest_at/to_archetype/annotation_context.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/arrows2d.rs b/crates/re_query_cache/src/latest_at/to_archetype/arrows2d.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/arrows2d.rs rename to crates/re_query_cache/src/latest_at/to_archetype/arrows2d.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/arrows3d.rs b/crates/re_query_cache/src/latest_at/to_archetype/arrows3d.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/arrows3d.rs rename to crates/re_query_cache/src/latest_at/to_archetype/arrows3d.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/asset3d.rs b/crates/re_query_cache/src/latest_at/to_archetype/asset3d.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/asset3d.rs rename to crates/re_query_cache/src/latest_at/to_archetype/asset3d.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/background.rs b/crates/re_query_cache/src/latest_at/to_archetype/background.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/background.rs rename to crates/re_query_cache/src/latest_at/to_archetype/background.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/bar_chart.rs b/crates/re_query_cache/src/latest_at/to_archetype/bar_chart.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/bar_chart.rs rename to crates/re_query_cache/src/latest_at/to_archetype/bar_chart.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/boxes2d.rs 
b/crates/re_query_cache/src/latest_at/to_archetype/boxes2d.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/boxes2d.rs rename to crates/re_query_cache/src/latest_at/to_archetype/boxes2d.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/boxes3d.rs b/crates/re_query_cache/src/latest_at/to_archetype/boxes3d.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/boxes3d.rs rename to crates/re_query_cache/src/latest_at/to_archetype/boxes3d.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/clear.rs b/crates/re_query_cache/src/latest_at/to_archetype/clear.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/clear.rs rename to crates/re_query_cache/src/latest_at/to_archetype/clear.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/container_blueprint.rs b/crates/re_query_cache/src/latest_at/to_archetype/container_blueprint.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/container_blueprint.rs rename to crates/re_query_cache/src/latest_at/to_archetype/container_blueprint.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/depth_image.rs b/crates/re_query_cache/src/latest_at/to_archetype/depth_image.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/depth_image.rs rename to crates/re_query_cache/src/latest_at/to_archetype/depth_image.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/disconnected_space.rs b/crates/re_query_cache/src/latest_at/to_archetype/disconnected_space.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/disconnected_space.rs rename to crates/re_query_cache/src/latest_at/to_archetype/disconnected_space.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/image.rs b/crates/re_query_cache/src/latest_at/to_archetype/image.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/image.rs rename to crates/re_query_cache/src/latest_at/to_archetype/image.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/line_strips2d.rs b/crates/re_query_cache/src/latest_at/to_archetype/line_strips2d.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/line_strips2d.rs rename to crates/re_query_cache/src/latest_at/to_archetype/line_strips2d.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/line_strips3d.rs b/crates/re_query_cache/src/latest_at/to_archetype/line_strips3d.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/line_strips3d.rs rename to crates/re_query_cache/src/latest_at/to_archetype/line_strips3d.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/mesh3d.rs b/crates/re_query_cache/src/latest_at/to_archetype/mesh3d.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/mesh3d.rs rename to crates/re_query_cache/src/latest_at/to_archetype/mesh3d.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/mod.rs b/crates/re_query_cache/src/latest_at/to_archetype/mod.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/mod.rs rename to crates/re_query_cache/src/latest_at/to_archetype/mod.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/panel_blueprint.rs b/crates/re_query_cache/src/latest_at/to_archetype/panel_blueprint.rs similarity index 100% rename from 
crates/re_query_cache2/src/latest_at/to_archetype/panel_blueprint.rs rename to crates/re_query_cache/src/latest_at/to_archetype/panel_blueprint.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/pinhole.rs b/crates/re_query_cache/src/latest_at/to_archetype/pinhole.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/pinhole.rs rename to crates/re_query_cache/src/latest_at/to_archetype/pinhole.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/plot_legend.rs b/crates/re_query_cache/src/latest_at/to_archetype/plot_legend.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/plot_legend.rs rename to crates/re_query_cache/src/latest_at/to_archetype/plot_legend.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/points2d.rs b/crates/re_query_cache/src/latest_at/to_archetype/points2d.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/points2d.rs rename to crates/re_query_cache/src/latest_at/to_archetype/points2d.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/points3d.rs b/crates/re_query_cache/src/latest_at/to_archetype/points3d.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/points3d.rs rename to crates/re_query_cache/src/latest_at/to_archetype/points3d.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/scalar.rs b/crates/re_query_cache/src/latest_at/to_archetype/scalar.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/scalar.rs rename to crates/re_query_cache/src/latest_at/to_archetype/scalar.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/scalar_axis.rs b/crates/re_query_cache/src/latest_at/to_archetype/scalar_axis.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/scalar_axis.rs rename to crates/re_query_cache/src/latest_at/to_archetype/scalar_axis.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/segmentation_image.rs b/crates/re_query_cache/src/latest_at/to_archetype/segmentation_image.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/segmentation_image.rs rename to crates/re_query_cache/src/latest_at/to_archetype/segmentation_image.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/series_line.rs b/crates/re_query_cache/src/latest_at/to_archetype/series_line.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/series_line.rs rename to crates/re_query_cache/src/latest_at/to_archetype/series_line.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/series_point.rs b/crates/re_query_cache/src/latest_at/to_archetype/series_point.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/series_point.rs rename to crates/re_query_cache/src/latest_at/to_archetype/series_point.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/space_view_blueprint.rs b/crates/re_query_cache/src/latest_at/to_archetype/space_view_blueprint.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/space_view_blueprint.rs rename to crates/re_query_cache/src/latest_at/to_archetype/space_view_blueprint.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/space_view_contents.rs b/crates/re_query_cache/src/latest_at/to_archetype/space_view_contents.rs similarity index 100% rename from 
crates/re_query_cache2/src/latest_at/to_archetype/space_view_contents.rs rename to crates/re_query_cache/src/latest_at/to_archetype/space_view_contents.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/tensor.rs b/crates/re_query_cache/src/latest_at/to_archetype/tensor.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/tensor.rs rename to crates/re_query_cache/src/latest_at/to_archetype/tensor.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/text_document.rs b/crates/re_query_cache/src/latest_at/to_archetype/text_document.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/text_document.rs rename to crates/re_query_cache/src/latest_at/to_archetype/text_document.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/text_log.rs b/crates/re_query_cache/src/latest_at/to_archetype/text_log.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/text_log.rs rename to crates/re_query_cache/src/latest_at/to_archetype/text_log.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/transform3d.rs b/crates/re_query_cache/src/latest_at/to_archetype/transform3d.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/transform3d.rs rename to crates/re_query_cache/src/latest_at/to_archetype/transform3d.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/view_coordinates.rs b/crates/re_query_cache/src/latest_at/to_archetype/view_coordinates.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/view_coordinates.rs rename to crates/re_query_cache/src/latest_at/to_archetype/view_coordinates.rs diff --git a/crates/re_query_cache2/src/latest_at/to_archetype/viewport_blueprint.rs b/crates/re_query_cache/src/latest_at/to_archetype/viewport_blueprint.rs similarity index 100% rename from crates/re_query_cache2/src/latest_at/to_archetype/viewport_blueprint.rs rename to crates/re_query_cache/src/latest_at/to_archetype/viewport_blueprint.rs diff --git a/crates/re_query_cache/src/lib.rs b/crates/re_query_cache/src/lib.rs index df9044b10f42..cb995ab447aa 100644 --- a/crates/re_query_cache/src/lib.rs +++ b/crates/re_query_cache/src/lib.rs @@ -4,23 +4,51 @@ mod cache; mod cache_stats; mod flat_vec_deque; mod latest_at; -mod query; mod range; -pub use self::cache::{AnyQuery, Caches}; -pub use self::cache_stats::{CachedComponentStats, CachedEntityStats, CachesStats}; +pub use self::cache::{CacheKey, Caches}; +pub use self::cache_stats::{CachedComponentStats, CachesStats}; pub use self::flat_vec_deque::{ErasedFlatVecDeque, FlatVecDeque}; -pub use self::query::iter_or_repeat_opt; +pub use self::latest_at::{ + CachedLatestAtComponentResults, CachedLatestAtMonoResult, CachedLatestAtResults, +}; +pub use self::range::{CachedRangeComponentResults, CachedRangeData, CachedRangeResults}; -pub(crate) use self::cache::CacheBucket; pub(crate) use self::latest_at::LatestAtCache; -pub(crate) use self::range::RangeCache; +pub(crate) use self::range::{CachedRangeComponentResultsInner, RangeCache}; -pub use re_query::{QueryError, Result}; // convenience +pub use re_query2::{ + clamped_zip::*, range_zip::*, ExtraQueryHistory, Promise, PromiseId, PromiseResolver, + PromiseResult, QueryError, Result, ToArchetype, VisibleHistory, VisibleHistoryBoundary, +}; pub mod external { - pub use re_query; + pub use re_query2; pub use paste; pub use seq_macro; } + +// --- + +use re_data_store::{LatestAtQuery, RangeQuery}; 
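The `lib.rs` hunk continues below with a new `CachedResults` type: a single enum so one code path can hand back either query flavor, with tuple `From` impls doing the wrapping. A stub-typed sketch of why those impls pay off at the call site (all four stub types are placeholders, not the crate's real definitions):

    // Placeholder types standing in for the real ones in this crate.
    struct LatestAtQuery;
    struct CachedLatestAtResults;
    struct RangeQuery;
    struct CachedRangeResults;

    enum CachedResults {
        LatestAt(LatestAtQuery, CachedLatestAtResults),
        Range(RangeQuery, CachedRangeResults),
    }

    impl From<(LatestAtQuery, CachedLatestAtResults)> for CachedResults {
        fn from((query, results): (LatestAtQuery, CachedLatestAtResults)) -> Self {
            Self::LatestAt(query, results)
        }
    }

    impl From<(RangeQuery, CachedRangeResults)> for CachedResults {
        fn from((query, results): (RangeQuery, CachedRangeResults)) -> Self {
            Self::Range(query, results)
        }
    }

    fn main() {
        // Either (query, results) pair converts with a plain `.into()`.
        let _unified: CachedResults = (LatestAtQuery, CachedLatestAtResults).into();
        let _unified: CachedResults = (RangeQuery, CachedRangeResults).into();
    }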
+ +#[derive(Debug)] +pub enum CachedResults { + LatestAt(LatestAtQuery, CachedLatestAtResults), + Range(RangeQuery, CachedRangeResults), +} + +impl From<(LatestAtQuery, CachedLatestAtResults)> for CachedResults { + #[inline] + fn from((query, results): (LatestAtQuery, CachedLatestAtResults)) -> Self { + Self::LatestAt(query, results) + } +} + +impl From<(RangeQuery, CachedRangeResults)> for CachedResults { + #[inline] + fn from((query, results): (RangeQuery, CachedRangeResults)) -> Self { + Self::Range(query, results) + } +} diff --git a/crates/re_query_cache/src/query.rs b/crates/re_query_cache/src/query.rs deleted file mode 100644 index 50e4036bff32..000000000000 --- a/crates/re_query_cache/src/query.rs +++ /dev/null @@ -1,251 +0,0 @@ -use paste::paste; -use seq_macro::seq; - -use re_data_store::{DataStore, LatestAtQuery, RangeQuery, TimeInt, Timeline}; -use re_log_types::{EntityPath, RowId}; -use re_query::{ExtraQueryHistory, VisibleHistory}; -use re_types_core::{components::InstanceKey, Archetype, Component}; - -use crate::{AnyQuery, Caches}; - -// --- - -/// Iterates over the data of an optional component, or repeat `None` values if it's missing. -#[inline] -pub fn iter_or_repeat_opt( - this: Option<&[Option]>, - len: usize, -) -> impl Iterator> + '_ { - this.as_ref().map_or( - itertools::Either::Left(std::iter::repeat(&None).take(len)), - |data| itertools::Either::Right(data.iter()), - ) -} - -// --- - -/// Cached implementation of [`re_query::query_archetype`] and [`re_query::range_archetype`] -/// (combined) for 1 point-of-view component and no optional components. -/// -/// Alias for [`Self::query_archetype_pov1_comp0`]. -impl Caches { - #[inline] - pub fn query_archetype_pov1<'a, A, R1, F>( - &self, - store: &'a DataStore, - query: &AnyQuery, - entity_path: &'a EntityPath, - f: F, - ) -> ::re_query::Result<()> - where - A: Archetype + 'a, - R1: Component, - F: FnMut(((TimeInt, RowId), &[InstanceKey], &[R1])), - { - self.query_archetype_pov1_comp0::(store, query, entity_path, f) - } -} - -macro_rules! impl_query_archetype { - (for N=$N:expr, M=$M:expr => povs=[$($pov:ident)+] comps=[$($comp:ident)*]) => { paste! { - #[doc = "Cached implementation of [`re_query::query_archetype`] and [`re_query::range_archetype`]"] - #[doc = "(combined) for `" $N "` point-of-view components and `" $M "` optional components."] - #[allow(non_snake_case)] - pub fn []<'a, A, $($pov,)+ $($comp,)* F>( - &self, - store: &'a DataStore, - query: &AnyQuery, - entity_path: &'a EntityPath, - mut f: F, - ) -> ::re_query::Result<()> - where - A: Archetype + 'a, - $($pov: Component,)+ - $($comp: Component,)* - F: FnMut( - ( - (TimeInt, RowId), - &[InstanceKey], - $(&[$pov],)+ - $(Option<&[Option<$comp>]>,)* - ), - ), - { - // NOTE: not `profile_function!` because we want them merged together. 
- re_tracing::profile_scope!( - "query_archetype", - format!("cached=true arch={} pov={} comp={}", A::name(), $N, $M) - ); - - match &query { - AnyQuery::LatestAt(query) => { - re_tracing::profile_scope!("latest_at", format!("{query:?}")); - - self.[]::( - store, - query, - entity_path, - f, - ) - } - - AnyQuery::Range(query) => { - re_tracing::profile_scope!("range", format!("{query:?}")); - - self.[]::( - store, - query, - entity_path, - |entry_range, (data_times, pov_instance_keys, $($pov,)+ $($comp,)*)| { - let it = itertools::izip!( - data_times.range(entry_range.clone()), - pov_instance_keys.range(entry_range.clone()), - $($pov.range(entry_range.clone()),)+ - $($comp.map_or_else( - || itertools::Either::Left(std::iter::repeat(&[] as &[Option<$comp>])), - |data| itertools::Either::Right(data.range(entry_range.clone()))) - ,)* - ).map(|((time, row_id), instance_keys, $($pov,)+ $($comp,)*)| { - ( - (*time, *row_id), - instance_keys, - $($pov,)+ - $((!$comp.is_empty()).then_some($comp),)* - ) - }); - - for data in it { - f(data); - } - }, - ) - } - } - } } - }; - - // TODO(cmc): Supporting N>1 generically is quite painful due to limitations in declarative macros, - // not that we care at the moment. - (for N=1, M=$M:expr) => { - seq!(COMP in 1..=$M { - impl_query_archetype!(for N=1, M=$M => povs=[R1] comps=[#(C~COMP)*]); - }); - }; -} - -impl Caches { - seq!(NUM_COMP in 0..10 { - impl_query_archetype!(for N=1, M=NUM_COMP); - }); -} - -// --- - -/// Cached implementation of [`re_query::query_archetype_with_history`] for 1 point-of-view component -/// and no optional components. -/// -/// Alias for [`Self::query_archetype_with_history_pov1_comp0`]. -impl Caches { - #[allow(clippy::too_many_arguments)] - #[inline] - pub fn query_archetype_with_history_pov1<'a, A, R1, F>( - &self, - store: &'a DataStore, - timeline: &'a Timeline, - time: &'a TimeInt, - history: &ExtraQueryHistory, - ent_path: &'a EntityPath, - f: F, - ) -> ::re_query::Result<()> - where - A: Archetype + 'a, - R1: Component, - F: FnMut(((TimeInt, RowId), &[InstanceKey], &[R1])), - { - self.query_archetype_with_history_pov1_comp0::( - store, timeline, time, history, ent_path, f, - ) - } -} - -/// Generates a function to cache a (potentially historical) query with N point-of-view components and M -/// other components. -macro_rules! impl_query_archetype_with_history { - (for N=$N:expr, M=$M:expr => povs=[$($pov:ident)+] comps=[$($comp:ident)*]) => { paste! { - #[doc = "Cached implementation of [`re_query::query_archetype_with_history`] for `" $N "` point-of-view"] - #[doc = "components and `" $M "` optional components."] - #[allow(clippy::too_many_arguments)] - pub fn []<'a, A, $($pov,)+ $($comp,)* F>( - &self, - store: &'a DataStore, - timeline: &'a Timeline, - time: &'a TimeInt, - history: &ExtraQueryHistory, - ent_path: &'a EntityPath, - f: F, - ) -> ::re_query::Result<()> - where - A: Archetype + 'a, - $($pov: Component,)+ - $($comp: Component,)* - F: FnMut( - ( - (TimeInt, RowId), - &[InstanceKey], - $(&[$pov],)+ - $(Option<&[Option<$comp>]>,)* - ), - ), - { - - let visible_history = match timeline.typ() { - re_log_types::TimeType::Time => history.nanos, - re_log_types::TimeType::Sequence => history.sequences, - }; - - if !history.enabled || visible_history == VisibleHistory::OFF { - // NOTE: not `profile_function!` because we want them merged together. 
- re_tracing::profile_scope!( - "query_archetype_with_history", - format!("cached=true arch={} pov={} comp={}", A::name(), $N, $M) - ); - - let query = LatestAtQuery::new(*timeline, *time); - self.[]::( - store, - &query.clone().into(), - ent_path, - f, - ) - } else { - // NOTE: not `profile_function!` because we want them merged together. - re_tracing::profile_scope!( - "query_archetype_with_history", - format!("cached=true arch={} pov={} comp={}", A::name(), $N, $M) - ); - - let query = RangeQuery::new(*timeline, visible_history.time_range(*time)); - self.[]::( - store, - &query.clone().into(), - ent_path, - f, - ) - } - } } - }; - - // TODO(cmc): Supporting N>1 generically is quite painful due to limitations in declarative macros, - // not that we care at the moment. - (for N=1, M=$M:expr) => { - seq!(COMP in 1..=$M { - impl_query_archetype_with_history!(for N=1, M=$M => povs=[R1] comps=[#(C~COMP)*]); - }); - }; -} - -impl Caches { - seq!(NUM_COMP in 0..10 { - impl_query_archetype_with_history!(for N=1, M=NUM_COMP); - }); -} diff --git a/crates/re_query_cache/src/range.rs b/crates/re_query_cache/src/range.rs deleted file mode 100644 index 39ffcc07fd91..000000000000 --- a/crates/re_query_cache/src/range.rs +++ /dev/null @@ -1,334 +0,0 @@ -use paste::paste; -use seq_macro::seq; - -use re_data_store::{DataStore, RangeQuery, TimeInt}; -use re_log_types::{EntityPath, TimeRange, Timeline}; -use re_types_core::{components::InstanceKey, Archetype, Component, SizeBytes}; - -use crate::{CacheBucket, Caches}; - -// --- Data structures --- - -/// Caches the results of `Range` queries. -#[derive(Default)] -pub struct RangeCache { - /// All temporal data, organized by _data_ time. - /// - /// Query time is irrelevant for range queries. - // - // TODO(#4810): bucketize - pub per_data_time: CacheBucket, - - /// For debugging purposes. - pub(crate) timeline: Timeline, -} - -impl std::fmt::Debug for RangeCache { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let Self { - per_data_time, - timeline, - } = self; - - let mut strings = Vec::new(); - - let mut data_time_min = TimeInt::MAX; - let mut data_time_max = TimeInt::MIN; - - if !per_data_time.is_empty() { - data_time_min = TimeInt::min( - data_time_min, - per_data_time.data_times.front().map(|(t, _)| *t).unwrap(), - ); - data_time_max = TimeInt::max( - data_time_max, - per_data_time.data_times.back().map(|(t, _)| *t).unwrap(), - ); - } - - strings.push(format!( - "{} ({})", - timeline - .typ() - .format_range_utc(TimeRange::new(data_time_min, data_time_max)), - re_format::format_bytes((per_data_time.total_size_bytes) as _), - )); - strings.push(indent::indent_all_by(2, format!("{per_data_time:?}"))); - - f.write_str(&strings.join("\n").replace("\n\n", "\n")) - } -} - -impl SizeBytes for RangeCache { - #[inline] - fn heap_size_bytes(&self) -> u64 { - let Self { - per_data_time, - timeline: _, - } = self; - - per_data_time.total_size_bytes - } -} - -impl RangeCache { - /// Removes everything from the cache that corresponds to a time equal or greater than the - /// specified `threshold`. - /// - /// Reminder: invalidating static data is the same as invalidating everything, so just reset - /// the `RangeCache` entirely in that case. - /// - /// Returns the number of bytes removed. 
- #[inline] - pub fn truncate_at_time(&mut self, threshold: TimeInt) -> u64 { - let Self { - per_data_time, - timeline: _, - } = self; - - per_data_time.truncate_at_time(threshold) - } -} - -impl RangeCache { - /// Given a `query`, returns N reduced queries that are sufficient to fill the missing data - /// on both the front & back sides of the cache. - #[inline] - pub fn compute_queries(&self, query: &RangeQuery) -> impl Iterator { - let front = self.compute_front_query(query); - let back = self.compute_back_query(query); - front.into_iter().chain(back) - } - - /// Given a `query`, returns a reduced query that is sufficient to fill the missing data - /// on the front side of the cache, or `None` if all the necessary data is already - /// cached. - pub fn compute_front_query(&self, query: &RangeQuery) -> Option { - let mut reduced_query = query.clone(); - - if self.per_data_time.is_empty() { - return Some(reduced_query); - } - - if let Some(bucket_time_range) = self.per_data_time.time_range() { - reduced_query.range.set_max(i64::min( - reduced_query.range.max().as_i64(), - bucket_time_range.min().as_i64().saturating_sub(1), - )); - } else { - return Some(reduced_query); - } - - if reduced_query.range.max() < reduced_query.range.min() { - return None; - } - - Some(reduced_query) - } - - /// Given a `query`, returns a reduced query that is sufficient to fill the missing data - /// on the back side of the cache, or `None` if all the necessary data is already - /// cached. - pub fn compute_back_query(&self, query: &RangeQuery) -> Option { - let mut reduced_query = query.clone(); - - if let Some(bucket_time_range) = self.per_data_time.time_range() { - reduced_query.range.set_min(i64::max( - reduced_query.range.min().as_i64(), - bucket_time_range.max().as_i64().saturating_add(1), - )); - } else { - return Some(reduced_query); - } - - if reduced_query.range.max() < reduced_query.range.min() { - return None; - } - - Some(reduced_query) - } -} - -// --- Queries --- - -macro_rules! impl_query_archetype_range { - (for N=$N:expr, M=$M:expr => povs=[$($pov:ident)+] comps=[$($comp:ident)*]) => { paste! { - #[doc = "Cached implementation of [`re_query::query_archetype`] and [`re_query::range_archetype`]"] - #[doc = "(combined) for `" $N "` point-of-view components and `" $M "` optional components."] - #[allow(non_snake_case)] - pub fn []<'a, A, $($pov,)+ $($comp,)* F>( - &self, - store: &'a DataStore, - query: &RangeQuery, - entity_path: &'a EntityPath, - mut f: F, - ) -> ::re_query::Result<()> - where - A: Archetype + 'a, - $($pov: Component,)+ - $($comp: Component,)* - F: FnMut( - std::ops::Range, - ( - &'_ std::collections::VecDeque<(re_data_store::TimeInt, re_log_types::RowId)>, - &'_ crate::FlatVecDeque, - $(&'_ crate::FlatVecDeque<$pov>,)+ - $(Option<&'_ crate::FlatVecDeque>>,)* - ) - ), - { - let range_results = | - bucket: &crate::CacheBucket, - time_range: TimeRange, - f: &mut F, - | -> crate::Result<()> { - re_tracing::profile_scope!("iter"); - - // Yield the static data that's available first. 
- let static_range = bucket.static_range(); - f( - static_range, - ( - &bucket.data_times, - &bucket.pov_instance_keys, - $(bucket.component::<$pov>() - .ok_or_else(|| re_query::ComponentNotFoundError(<$pov>::name()))?,)+ - $(bucket.component_opt::<$comp>(),)* - ) - ); - - let entry_range = bucket.entry_range(time_range); - f( - entry_range, - ( - &bucket.data_times, - &bucket.pov_instance_keys, - $(bucket.component::<$pov>() - .ok_or_else(|| re_query::ComponentNotFoundError(<$pov>::name()))?,)+ - $(bucket.component_opt::<$comp>(),)* - ) - ); - - Ok(()) - }; - - fn upsert_results<'a, A, $($pov,)+ $($comp,)*>( - arch_views: impl Iterator>, - bucket: &mut crate::CacheBucket, - ) -> crate::Result - where - A: Archetype + 'a, - $($pov: Component,)+ - $($comp: Component,)* - { - re_tracing::profile_scope!("fill"); - - // Grabbing the current time is quite costly on web. - #[cfg(not(target_arch = "wasm32"))] - let now = web_time::Instant::now(); - - #[cfg(not(target_arch = "wasm32"))] - let mut added_entries = 0u64; - - let mut added_size_bytes = 0u64; - - for arch_view in arch_views { - let data_time = arch_view.data_time(); - - if bucket.contains_data_row(data_time, arch_view.primary_row_id()) { - continue; - } - - added_size_bytes += bucket.[]::(data_time, &arch_view)?; - - #[cfg(not(target_arch = "wasm32"))] - { - added_entries += 1; - } - } - - #[cfg(not(target_arch = "wasm32"))] - if added_entries > 0 { - let elapsed = now.elapsed(); - ::re_log::trace!( - archetype=%A::name(), - added_size_bytes, - "cached {added_entries} entries in {elapsed:?} ({:0.3} entries/s)", - added_entries as f64 / elapsed.as_secs_f64() - ); - } - - Ok(added_size_bytes) - } - - let upsert_callback = |query: &RangeQuery, range_cache: &mut crate::RangeCache| -> crate::Result<()> { - re_tracing::profile_scope!("range", format!("{query:?}")); - - let mut query = query.clone(); - query.range.set_min(TimeInt::max(TimeInt::MIN, query.range.min())); - - for reduced_query in range_cache.compute_queries(&query) { - // NOTE: `+ 1` because we always grab the instance keys. - let arch_views = ::re_query::range_component_set::( - store, &reduced_query, entity_path, - &[$(<$pov>::name(),)+], - [::name(), $(<$pov>::name(),)+ $(<$comp>::name(),)*], - ); - upsert_results::(arch_views, &mut range_cache.per_data_time)?; - } - - Ok(()) - }; - - let iter_callback = |query: &RangeQuery, range_cache: &crate::RangeCache, f: &mut F| -> crate::Result<()> { - re_tracing::profile_scope!("range", format!("{query:?}")); - - // We don't bother implementing the slow path here (busy write lock), as that would - // require adding a bunch more complexity in order to know whether a range query is - // already cached (how can you know whether `TimeInt::MAX` is cached? you need to - // clamp queries based on store metadata first, etc). - // - // We can add the extra complexity if this proves to be glitchy in real-world - // scenarios -- otherwise all of this is giant hack meant to go away anyhow. 
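The removed `compute_front_query`/`compute_back_query` pair above is plain interval arithmetic: everything inside the cached `[min, max]` span is already paid for, so only the gaps on either side go back to the store. Isolated onto bare `i64` bounds (an assumption; the real code works on `TimeInt` and mutates a cloned `RangeQuery`):

    /// Returns the (front, back) sub-ranges of `query` not covered by `cached`;
    /// `None` means that side is fully covered.
    fn reduced_queries(
        cached: Option<(i64, i64)>,
        query: (i64, i64),
    ) -> (Option<(i64, i64)>, Option<(i64, i64)>) {
        let Some((cached_min, cached_max)) = cached else {
            return (Some(query), None); // empty cache: fetch the whole query up front
        };
        let (query_min, query_max) = query;

        let front = (query_min < cached_min)
            .then(|| (query_min, query_max.min(cached_min.saturating_sub(1))));
        let back = (query_max > cached_max)
            .then(|| (query_min.max(cached_max.saturating_add(1)), query_max));
        (front, back)
    }

    fn main() {
        // Cache covers [10, 20]; querying [5, 25] leaves [5, 9] and [21, 25] to fetch.
        assert_eq!(
            reduced_queries(Some((10, 20)), (5, 25)),
            (Some((5, 9)), Some((21, 25))),
        );
    }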
- - let mut query = query.clone(); - query.range.set_min(TimeInt::max(TimeInt::MIN, query.range.min())); - - if !range_cache.per_data_time.is_empty() { - range_results(&range_cache.per_data_time, query.range, f)?; - } - - Ok(()) - }; - - let (res1, res2) = self.with_range::( - store, - entity_path.clone(), - query, - |range_cache| upsert_callback(query, range_cache), - |range_cache| iter_callback(query, range_cache, &mut f), - ); - - if let Some(res1) = res1 { - res1?; - } - res2?; - - Ok(()) - } } - }; - - // TODO(cmc): Supporting N>1 generically is quite painful due to limitations in declarative macros, - // not that we care at the moment. - (for N=1, M=$M:expr) => { - seq!(COMP in 1..=$M { - impl_query_archetype_range!(for N=1, M=$M => povs=[R1] comps=[#(C~COMP)*]); - }); - }; -} - -impl Caches { - seq!(NUM_COMP in 0..10 { - impl_query_archetype_range!(for N=1, M=NUM_COMP); - }); -} diff --git a/crates/re_query_cache2/src/range/mod.rs b/crates/re_query_cache/src/range/mod.rs similarity index 100% rename from crates/re_query_cache2/src/range/mod.rs rename to crates/re_query_cache/src/range/mod.rs diff --git a/crates/re_query_cache2/src/range/query.rs b/crates/re_query_cache/src/range/query.rs similarity index 100% rename from crates/re_query_cache2/src/range/query.rs rename to crates/re_query_cache/src/range/query.rs diff --git a/crates/re_query_cache2/src/range/results.rs b/crates/re_query_cache/src/range/results.rs similarity index 100% rename from crates/re_query_cache2/src/range/results.rs rename to crates/re_query_cache/src/range/results.rs diff --git a/crates/re_query_cache/tests/latest_at.rs b/crates/re_query_cache/tests/latest_at.rs index e05d07c05adc..4073c5ec7e53 100644 --- a/crates/re_query_cache/tests/latest_at.rs +++ b/crates/re_query_cache/tests/latest_at.rs @@ -2,15 +2,15 @@ //! - A 1:1 port of the tests in `crates/re_query/tests/archetype_query_tests.rs`, with caching enabled. //! - Invalidation tests. -use itertools::Itertools as _; - use re_data_store::{DataStore, LatestAtQuery, StoreSubscriber}; use re_log_types::{ build_frame_nr, example_components::{MyColor, MyPoint, MyPoints}, DataRow, EntityPath, RowId, TimePoint, }; +use re_query2::PromiseResolver; use re_query_cache::Caches; +use re_types::Archetype as _; use re_types_core::{components::InstanceKey, Loggable as _}; // --- @@ -68,7 +68,7 @@ fn static_query() { DataRow::from_cells1_sized(RowId::new(), entity_path, timepoint, 2, positions).unwrap(); insert_and_react(&mut store, &mut caches, &row); - // Assign one of them a color with an explicit instance.. static_! + // Assign one of them a color with an explicit instance.. statically! 
let color_instances = vec![InstanceKey(1)]; let colors = vec![MyColor::from_rgb(255, 0, 0)]; let row = DataRow::from_cells2_sized( @@ -409,7 +409,7 @@ fn invalidation_of_future_optionals() { } #[test] -fn invalidation_timeless() { +fn static_invalidation() { let mut store = DataStore::new( re_log_types::StoreId::random(re_log_types::StoreKind::Recording), InstanceKey::name(), @@ -476,49 +476,51 @@ fn query_and_compare( ) { re_log::setup_logging(); + let resolver = PromiseResolver::default(); + for _ in 0..3 { - let mut cached_data_time = None; - let mut cached_row_id = None; - let mut cached_instance_keys = Vec::new(); - let mut cached_positions = Vec::new(); - let mut cached_colors = Vec::new(); - caches - .query_archetype_pov1_comp1::( - store, - &query.clone().into(), - entity_path, - |((data_time, row_id), instance_keys, positions, colors)| { - cached_data_time = Some(data_time); - cached_row_id = Some(row_id); - cached_instance_keys.extend(instance_keys.iter().copied()); - cached_positions.extend(positions.iter().copied()); - cached_colors - .extend(re_query_cache::iter_or_repeat_opt(colors, positions.len())); - }, - ) + let cached = caches.latest_at( + store, + query, + entity_path, + MyPoints::all_components().iter().copied(), + ); + + let cached_points = cached.get_required(MyPoint::name()).unwrap(); + let cached_point_data = cached_points + .to_dense::(&resolver) + .flatten() + .unwrap(); + + let cached_colors = cached.get_or_empty(MyColor::name()); + let cached_color_data = cached_colors + .to_dense::(&resolver) + .flatten() .unwrap(); - let expected = re_query::query_archetype::(store, query, entity_path).unwrap(); - let expected_data_time = expected.data_time(); - let expected_row_id = expected.primary_row_id(); - - let expected_instance_keys = expected.iter_instance_keys().collect_vec(); - let expected_positions = expected - .iter_required_component::() - .unwrap() - .collect_vec(); - let expected_colors = expected - .iter_optional_component::() - .unwrap() - .collect_vec(); - - // Keep this around for the next unlucky chap. - // eprintln!("i={i} (expected={expected_data_time:?}, cached={cached_data_time:?})"); - - similar_asserts::assert_eq!(Some(expected_data_time), cached_data_time); - similar_asserts::assert_eq!(Some(expected_row_id), cached_row_id); - similar_asserts::assert_eq!(expected_instance_keys, cached_instance_keys); - similar_asserts::assert_eq!(expected_positions, cached_positions); - similar_asserts::assert_eq!(expected_colors, cached_colors); + let expected = re_query2::latest_at( + store, + query, + entity_path, + MyPoints::all_components().iter().copied(), + ); + + let expected_points = expected.get_required(MyPoint::name()).unwrap(); + let expected_point_data = expected_points + .to_dense::(&resolver) + .flatten() + .unwrap(); + + let expected_colors = expected.get_or_empty(MyColor::name()); + let expected_color_data = expected_colors + .to_dense::(&resolver) + .flatten() + .unwrap(); + + // eprintln!("{}", store.to_data_table().unwrap()); + + similar_asserts::assert_eq!(expected.compound_index, cached.compound_index); + similar_asserts::assert_eq!(expected_point_data, cached_point_data); + similar_asserts::assert_eq!(expected_color_data, cached_color_data); } } diff --git a/crates/re_query_cache/tests/range.rs b/crates/re_query_cache/tests/range.rs index 8dd3a76cbbb8..eb7380c8a1d0 100644 --- a/crates/re_query_cache/tests/range.rs +++ b/crates/re_query_cache/tests/range.rs @@ -1,23 +1,19 @@ -//! Contains: -//! 
- A 1:1 port of the tests in `crates/re_query/tests/archetype_range_tests.rs`, with caching enabled. -//! - Invalidation tests. +use itertools::{izip, Itertools as _}; -use itertools::Itertools as _; - -use re_data_store::{DataStore, RangeQuery, StoreSubscriber}; +use re_data_store::{DataStore, RangeQuery, StoreSubscriber as _, TimeInt, TimeRange}; use re_log_types::{ build_frame_nr, - example_components::{MyColor, MyLabel, MyPoint, MyPoints}, - DataRow, EntityPath, RowId, TimeInt, TimePoint, TimeRange, + example_components::{MyColor, MyPoint, MyPoints}, + DataRow, EntityPath, RowId, TimePoint, }; -use re_query_cache::Caches; -use re_types::components::InstanceKey; +use re_query_cache::{Caches, PromiseResolver, PromiseResult}; +use re_types::{components::InstanceKey, Archetype}; use re_types_core::Loggable as _; // --- #[test] -fn simple_range() { +fn simple_range() -> anyhow::Result<()> { let mut store = DataStore::new( re_log_types::StoreId::random(re_log_types::StoreKind::Recording), InstanceKey::name(), @@ -29,50 +25,30 @@ fn simple_range() { let timepoint1 = [build_frame_nr(123)]; { - // Create some Positions with implicit instances - let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; + let points = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; let row = - DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint1, 2, positions) - .unwrap(); + DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint1, 2, points)?; insert_and_react(&mut store, &mut caches, &row); - // Assign one of them a color with an explicit instance - let color_instances = vec![InstanceKey(1)]; let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = DataRow::from_cells2_sized( - RowId::new(), - entity_path.clone(), - timepoint1, - 1, - (color_instances, colors), - ) - .unwrap(); + let row = + DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint1, 1, colors)?; insert_and_react(&mut store, &mut caches, &row); } let timepoint2 = [build_frame_nr(223)]; { - // Assign one of them a color with an explicit instance - let color_instances = vec![InstanceKey(0)]; let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = DataRow::from_cells2_sized( - RowId::new(), - entity_path.clone(), - timepoint2, - 1, - (color_instances, colors), - ) - .unwrap(); + let row = + DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint2, 1, colors)?; insert_and_react(&mut store, &mut caches, &row); } let timepoint3 = [build_frame_nr(323)]; { - // Create some Positions with implicit instances - let positions = vec![MyPoint::new(10.0, 20.0), MyPoint::new(30.0, 40.0)]; + let points = vec![MyPoint::new(10.0, 20.0), MyPoint::new(30.0, 40.0)]; let row = - DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint3, 2, positions) - .unwrap(); + DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint3, 2, points)?; insert_and_react(&mut store, &mut caches, &row); } @@ -87,14 +63,14 @@ fn simple_range() { // --- Second test: `[timepoint1, timepoint3]` --- - // The inclusion of `timepoint1` means latest-at semantics will _not_ kick in! 
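One pattern recurs across both rewritten test files (see `query_and_compare` in the `latest_at.rs` hunk above): ask the cache for a component set, then materialize each result through a `PromiseResolver`. Condensed to its skeleton, with the test's `unwrap()`s kept and the PR's types (`caches`, `store`, `query`, `entity_path`) assumed in scope:

    let resolver = PromiseResolver::default();

    // One call per query flavor; `latest_at` shown here.
    let cached = caches.latest_at(
        store,
        query,
        entity_path,
        MyPoints::all_components().iter().copied(),
    );

    // Required components fail loudly if absent; optional ones fall back to empty.
    let cached_point_data = cached
        .get_required(MyPoint::name())
        .unwrap()
        .to_dense::<MyPoint>(&resolver)
        .flatten()
        .unwrap();
    let cached_color_data = cached
        .get_or_empty(MyColor::name())
        .to_dense::<MyColor>(&resolver)
        .flatten()
        .unwrap();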
- let query = re_data_store::RangeQuery::new( timepoint1[0].0, TimeRange::new(timepoint1[0].1, timepoint3[0].1), ); query_and_compare(&caches, &store, &query, &entity_path); + + Ok(()) } #[test] @@ -110,34 +86,30 @@ fn static_range() { let timepoint1 = [build_frame_nr(123)]; { - // Create some Positions with implicit instances let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let mut row = - DataRow::from_cells1(RowId::new(), entity_path.clone(), timepoint1, 2, positions) + let row = + DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint1, 2, positions) .unwrap(); - row.compute_all_size_bytes(); insert_and_react(&mut store, &mut caches, &row); - // Assign one of them a color with an explicit instance - let color_instances = vec![InstanceKey(1)]; let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = DataRow::from_cells2_sized( + let row = DataRow::from_cells1_sized( RowId::new(), entity_path.clone(), timepoint1, 1, - (color_instances.clone(), colors.clone()), + colors.clone(), ) .unwrap(); insert_and_react(&mut store, &mut caches, &row); - // Insert timelessly too! - let row = DataRow::from_cells2_sized( + // Insert statically too! + let row = DataRow::from_cells1_sized( RowId::new(), entity_path.clone(), TimePoint::default(), 1, - (color_instances, colors), + colors, ) .unwrap(); insert_and_react(&mut store, &mut caches, &row); @@ -145,26 +117,24 @@ fn static_range() { let timepoint2 = [build_frame_nr(223)]; { - // Assign one of them a color with an explicit instance - let color_instances = vec![InstanceKey(0)]; let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = DataRow::from_cells2_sized( + let row = DataRow::from_cells1_sized( RowId::new(), entity_path.clone(), timepoint2, 1, - (color_instances.clone(), colors.clone()), + colors.clone(), ) .unwrap(); insert_and_react(&mut store, &mut caches, &row); - // Insert static_ too! - let row = DataRow::from_cells2_sized( + // Insert statically too! 
+ let row = DataRow::from_cells1_sized( RowId::new(), entity_path.clone(), TimePoint::default(), 1, - (color_instances, colors), + colors, ) .unwrap(); insert_and_react(&mut store, &mut caches, &row); @@ -221,7 +191,6 @@ fn simple_splatted_range() { let timepoint1 = [build_frame_nr(123)]; { - // Create some Positions with implicit instances let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; let row = DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint1, 2, positions) @@ -229,38 +198,24 @@ fn simple_splatted_range() { insert_and_react(&mut store, &mut caches, &row); // Assign one of them a color with an explicit instance - let color_instances = vec![InstanceKey(1)]; let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = DataRow::from_cells2_sized( - RowId::new(), - entity_path.clone(), - timepoint1, - 1, - (color_instances, colors), - ) - .unwrap(); + let row = + DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint1, 1, colors) + .unwrap(); insert_and_react(&mut store, &mut caches, &row); } let timepoint2 = [build_frame_nr(223)]; { - // Assign one of them a color with a splatted instance - let color_instances = vec![InstanceKey::SPLAT]; let colors = vec![MyColor::from_rgb(0, 255, 0)]; - let row = DataRow::from_cells2_sized( - RowId::new(), - entity_path.clone(), - timepoint2, - 1, - (color_instances, colors), - ) - .unwrap(); + let row = + DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint2, 1, colors) + .unwrap(); insert_and_react(&mut store, &mut caches, &row); } let timepoint3 = [build_frame_nr(323)]; { - // Create some Positions with implicit instances let positions = vec![MyPoint::new(10.0, 20.0), MyPoint::new(30.0, 40.0)]; let row = DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint3, 2, positions) @@ -279,8 +234,6 @@ fn simple_splatted_range() { // --- Second test: `[timepoint1, timepoint3]` --- - // The inclusion of `timepoint1` means latest-at semantics will _not_ kick in! - let query = re_data_store::RangeQuery::new( timepoint1[0].0, TimeRange::new(timepoint1[0].1, timepoint3[0].1), @@ -304,7 +257,6 @@ fn invalidation() { ); let mut caches = Caches::new(&store); - // Create some positions with implicit instances let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; let row = DataRow::from_cells1_sized( RowId::new(), @@ -316,15 +268,13 @@ fn invalidation() { .unwrap(); insert_and_react(&mut store, &mut caches, &row); - // Assign one of them a color with an explicit instance - let color_instances = vec![InstanceKey(1)]; let colors = vec![MyColor::from_rgb(1, 2, 3)]; - let row = DataRow::from_cells2_sized( + let row = DataRow::from_cells1_sized( RowId::new(), entity_path, present_data_timepoint.clone(), 1, - (color_instances, colors), + colors, ) .unwrap(); insert_and_react(&mut store, &mut caches, &row); @@ -464,11 +414,6 @@ fn invalidation() { // ``` #[test] fn invalidation_of_future_optionals() { - // TODO(cmc): this test is coming back in the next PR. 
-    if true {
-        return;
-    }
-
     let mut store = DataStore::new(
         re_log_types::StoreId::random(re_log_types::StoreKind::Recording),
         InstanceKey::name(),
@@ -491,44 +436,20 @@ fn invalidation_of_future_optionals() {
     query_and_compare(&caches, &store, &query, &entity_path.into());
-    let color_instances = vec![InstanceKey::SPLAT];
     let colors = vec![MyColor::from_rgb(255, 0, 0)];
-    let row = DataRow::from_cells2_sized(
-        RowId::new(),
-        entity_path,
-        frame2,
-        1,
-        (color_instances, colors),
-    )
-    .unwrap();
+    let row = DataRow::from_cells1_sized(RowId::new(), entity_path, frame2, 1, colors).unwrap();
     insert_and_react(&mut store, &mut caches, &row);
     query_and_compare(&caches, &store, &query, &entity_path.into());
-    let color_instances = vec![InstanceKey::SPLAT];
     let colors = vec![MyColor::from_rgb(0, 0, 255)];
-    let row = DataRow::from_cells2_sized(
-        RowId::new(),
-        entity_path,
-        frame3,
-        1,
-        (color_instances, colors),
-    )
-    .unwrap();
+    let row = DataRow::from_cells1_sized(RowId::new(), entity_path, frame3, 1, colors).unwrap();
     insert_and_react(&mut store, &mut caches, &row);
     query_and_compare(&caches, &store, &query, &entity_path.into());
-    let color_instances = vec![InstanceKey::SPLAT];
     let colors = vec![MyColor::from_rgb(0, 255, 0)];
-    let row = DataRow::from_cells2_sized(
-        RowId::new(),
-        entity_path,
-        frame3,
-        1,
-        (color_instances, colors),
-    )
-    .unwrap();
+    let row = DataRow::from_cells1_sized(RowId::new(), entity_path, frame3, 1, colors).unwrap();
     insert_and_react(&mut store, &mut caches, &row);
     query_and_compare(&caches, &store, &query, &entity_path.into());
@@ -557,30 +478,15 @@ fn invalidation_static() {
     query_and_compare(&caches, &store, &query, &entity_path.into());
-    let color_instances = vec![InstanceKey::SPLAT];
     let colors = vec![MyColor::from_rgb(255, 0, 0)];
-    let row = DataRow::from_cells2_sized(
-        RowId::new(),
-        entity_path,
-        timeless.clone(),
-        1,
-        (color_instances, colors),
-    )
-    .unwrap();
+    let row =
+        DataRow::from_cells1_sized(RowId::new(), entity_path, timeless.clone(), 1, colors).unwrap();
     insert_and_react(&mut store, &mut caches, &row);
     query_and_compare(&caches, &store, &query, &entity_path.into());
-    let color_instances = vec![InstanceKey::SPLAT];
     let colors = vec![MyColor::from_rgb(0, 0, 255)];
-    let row = DataRow::from_cells2_sized(
-        RowId::new(),
-        entity_path,
-        timeless,
-        1,
-        (color_instances, colors),
-    )
-    .unwrap();
+    let row = DataRow::from_cells1_sized(RowId::new(), entity_path, timeless, 1, colors).unwrap();
     insert_and_react(&mut store, &mut caches, &row);
     query_and_compare(&caches, &store, &query, &entity_path.into());
@@ -598,62 +504,83 @@ fn query_and_compare(
     query: &RangeQuery,
     entity_path: &EntityPath,
 ) {
+    re_log::setup_logging();
+
+    let resolver = PromiseResolver::default();
+
     for _ in 0..3 {
-        let mut cached_data_times = Vec::new();
-        let mut cached_instance_keys = Vec::new();
-        let mut cached_positions = Vec::new();
-        let mut cached_colors = Vec::new();
-        caches
-            .query_archetype_pov1_comp2::<MyPoints, MyPoint, MyColor, MyLabel, _>(
-                store,
-                &query.clone().into(),
-                entity_path,
-                |((data_time, _), instance_keys, positions, colors, _)| {
-                    cached_data_times.push(data_time);
-                    cached_instance_keys.push(instance_keys.to_vec());
-                    cached_positions.push(positions.to_vec());
-                    cached_colors.push(
-                        re_query_cache::iter_or_repeat_opt(colors, positions.len())
                            .copied()
-                            .collect_vec(),
-                    );
-                },
-            )
-            .unwrap();
-
-        let mut expected_data_times = Vec::new();
-        let mut expected_instance_keys = Vec::new();
-        let mut expected_positions = Vec::new();
-        let mut expected_colors = Vec::new();
-        let expected = re_query::range_archetype::<MyPoints, { MyPoints::NUM_COMPONENTS }>(
+        let cached = caches.range(
             store,
             query,
             entity_path,
+            MyPoints::all_components().iter().copied(),
         );
-        for arch_view in expected {
-            expected_data_times.push(arch_view.data_time());
-            expected_instance_keys.push(arch_view.iter_instance_keys().collect_vec());
-            expected_positions.push(
-                arch_view
-                    .iter_required_component::<MyPoint>()
-                    .unwrap()
-                    .collect_vec(),
-            );
-            expected_colors.push(
-                arch_view
-                    .iter_optional_component::<MyColor>()
-                    .unwrap()
-                    .collect_vec(),
-            );
-        }
-
-        // Keep this around for the next unlucky chap.
-        // eprintln!("(expected={expected_data_times:?}, cached={cached_data_times:?})");
+
+        let cached_all_points = cached
+            .get_required(MyPoint::name())
+            .unwrap()
+            .to_dense::<MyPoint>(&resolver);
+        assert!(matches!(
+            cached_all_points.status(),
+            (PromiseResult::Ready(()), PromiseResult::Ready(())),
+        ));
+        let cached_all_points_indexed = cached_all_points.range_indexed();
+
+        let cached_all_colors = cached
+            .get_or_empty(MyColor::name())
+            .to_dense::<MyColor>(&resolver);
+        assert!(matches!(
+            cached_all_colors.status(),
+            (PromiseResult::Ready(()), PromiseResult::Ready(())),
+        ));
+        let cached_all_colors_indexed = cached_all_colors.range_indexed();
+
+        let expected = re_query2::range(
+            store,
+            query,
+            entity_path,
+            MyPoints::all_components().iter().copied(),
+        );
+
+        let expected_all_points = expected.get_required(MyPoint::name()).unwrap();
+        let expected_all_points_indices = expected_all_points.indices();
+        let expected_all_points_data = expected_all_points
+            .to_dense::<MyPoint>(&resolver)
+            .into_iter()
+            .map(|batch| batch.flatten().unwrap())
+            .collect_vec();
+        let expected_all_points_indexed =
+            izip!(expected_all_points_indices, expected_all_points_data);
+
+        let expected_all_colors = expected.get_or_empty(MyColor::name());
+        let expected_all_colors_indices = expected_all_colors.indices();
+        let expected_all_colors_data = expected_all_colors
+            .to_dense::<MyColor>(&resolver)
+            .into_iter()
+            .map(|batch| batch.flatten().unwrap())
+            .collect_vec();
+        let expected_all_colors_indexed =
+            izip!(expected_all_colors_indices, expected_all_colors_data);
+
+        eprintln!("{query:?}");
         eprintln!("{}", store.to_data_table().unwrap());
-        similar_asserts::assert_eq!(expected_data_times, cached_data_times);
-        similar_asserts::assert_eq!(expected_instance_keys, cached_instance_keys);
-        similar_asserts::assert_eq!(expected_positions, cached_positions);
-        similar_asserts::assert_eq!(expected_colors, cached_colors);
+        similar_asserts::assert_eq!(
+            expected_all_points_indexed
+                .map(|(index, data)| (*index, data))
+                .collect_vec(),
+            cached_all_points_indexed
+                .map(|(index, data)| (*index, data.to_vec()))
+                .collect_vec(),
+        );
+
+        similar_asserts::assert_eq!(
+            expected_all_colors_indexed
+                .map(|(index, data)| (*index, data))
+                .collect_vec(),
+            cached_all_colors_indexed
+                .map(|(index, data)| (*index, data.to_vec()))
+                .collect_vec(),
+        );
     }
 }
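Reviewer note: the hunk above replaces the old closure-based `query_archetype_pov1_comp2` callback with the new result-object API. A minimal sketch of that read path, assuming the merged `re_query_cache` API at this alpha version (`read_points_over_time` and its setup are illustrative, not part of this PR):

    use re_log_types::example_components::{MyPoint, MyPoints};
    use re_query_cache::{PromiseResolver, PromiseResult};
    use re_types::Archetype as _;
    use re_types_core::Loggable as _;

    fn read_points_over_time(
        caches: &re_query_cache::Caches,
        store: &re_data_store::DataStore,
        query: &re_data_store::RangeQuery,
        entity_path: &re_log_types::EntityPath,
    ) {
        let resolver = PromiseResolver::default();

        // One cached range query covering every component of the archetype.
        let results = caches.range(
            store,
            query,
            entity_path,
            MyPoints::all_components().iter().copied(),
        );

        // Deserialize the point-of-view component into dense batches…
        let all_points = results
            .get_required(MyPoint::name())
            .unwrap()
            .to_dense::<MyPoint>(&resolver);
        assert!(matches!(
            all_points.status(),
            (PromiseResult::Ready(()), PromiseResult::Ready(())),
        ));

        // …then walk the batches together with their `(data_time, row_id)` indices.
        for (index, points) in all_points.range_indexed() {
            eprintln!("{index:?}: {points:?}");
        }
    }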
diff --git a/crates/re_query_cache2/Cargo.toml b/crates/re_query_cache2/Cargo.toml
deleted file mode 100644
index b8b67820c4ee..000000000000
--- a/crates/re_query_cache2/Cargo.toml
+++ /dev/null
@@ -1,82 +0,0 @@
-[package]
-name = "re_query_cache2"
-authors.workspace = true
-description = "Temporary crate meant to replace re_query_cache"
-edition.workspace = true
-homepage.workspace = true
-include.workspace = true
-license.workspace = true
-# TODO(cmc): Replace re_query with this crate. Never publish this one.
-publish = false
-readme = "README.md"
-repository.workspace = true
-rust-version.workspace = true
-version.workspace = true
-
-[lints]
-workspace = true
-
-[package.metadata.docs.rs]
-all-features = true
-
-
-[features]
-default = ["to_archetype"]
-
-## Implements `ToArchetype` for all builtin archetypes on `CachedLatestAtResults`.
-to_archetype = ["dep:re_types", "dep:re_types_blueprint"]
-
-[dependencies]
-# Rerun dependencies:
-re_data_store.workspace = true
-re_error.workspace = true
-re_format.workspace = true
-re_log.workspace = true
-re_log_types.workspace = true
-re_query2.workspace = true
-re_tracing.workspace = true
-re_types_core.workspace = true
-
-# Rerun dependencies (optional):
-re_types = { workspace = true, optional = true }
-re_types_blueprint = { workspace = true, optional = true }
-
-# External dependencies:
-ahash.workspace = true
-anyhow.workspace = true
-backtrace.workspace = true
-indent.workspace = true
-indexmap.workspace = true
-itertools.workspace = true
-nohash-hasher.workspace = true
-parking_lot.workspace = true
-paste.workspace = true
-seq-macro.workspace = true
-web-time.workspace = true
-
-
-[dev-dependencies]
-re_types = { workspace = true, features = ["datagen"] }
-
-criterion.workspace = true
-mimalloc.workspace = true
-rand = { workspace = true, features = ["std", "std_rng"] }
-similar-asserts.workspace = true
-
-
-[lib]
-bench = false
-
-
-[[example]]
-name = "latest_at_archetype"
-required-features = ["to_archetype"]
-
-
-[[bench]]
-name = "flat_vec_deque"
-harness = false
-
-[[bench]]
-name = "latest_at"
-harness = false
diff --git a/crates/re_query_cache2/README.md b/crates/re_query_cache2/README.md
deleted file mode 100644
index db850d204a04..000000000000
--- a/crates/re_query_cache2/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# re_query_cache2
-
-Temporary crate for implementing the new cached data APIs. Not published.
-
-Will replace `re_query_cache` when ready.
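Reviewer note: the two deleted benchmark files below exercise `FlatVecDeque`, the flattened deque that backs the range cache. A quick illustrative sketch of the API they drive (assuming `FlatVecDeque` is re-exported by the merged `re_query_cache` crate; see `src/flat_vec_deque.rs` further down for the real definition):

    use re_query_cache::FlatVecDeque; // assumed re-export after this PR

    fn flat_vec_deque_demo() {
        let mut v: FlatVecDeque<i64> = FlatVecDeque::new();

        // Each *entry* is itself an array of values, stored flattened.
        v.insert_many(0, [vec![1, 2, 3], vec![4, 5, 6, 7], vec![8, 9, 10]]);
        assert_eq!(3, v.num_entries());
        assert_eq!(10, v.num_values());

        // Entries come back out as contiguous slices.
        for entry in v.range(0..v.num_entries()) {
            eprintln!("{entry:?}");
        }

        // O(1) at either end, split-and-stitch in the middle.
        v.remove(1);
        assert_eq!(2, v.num_entries());
    }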
diff --git a/crates/re_query_cache2/benches/flat_vec_deque.rs b/crates/re_query_cache2/benches/flat_vec_deque.rs
deleted file mode 100644
index f1dec8514b7d..000000000000
--- a/crates/re_query_cache2/benches/flat_vec_deque.rs
+++ /dev/null
@@ -1,333 +0,0 @@
-use criterion::{criterion_group, criterion_main, Criterion};
-
-use itertools::Itertools as _;
-
-use re_query_cache2::FlatVecDeque;
-
-// ---
-
-#[global_allocator]
-static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
-
-criterion_group!(
-    benches,
-    range,
-    insert,
-    insert_many,
-    insert_deque,
-    remove,
-    remove_range
-);
-criterion_main!(benches);
-
-// ---
-
-// `cargo test` also runs the benchmark setup code, so make sure they run quickly:
-#[cfg(debug_assertions)]
-mod constants {
-    pub const INITIAL_VALUES_PER_ENTRY: usize = 1;
-    pub const INITIAL_NUM_ENTRIES: usize = 1;
-    pub const ADDED_VALUES_PER_ENTRY: usize = 1;
-    pub const ADDED_NUM_ENTRIES: usize = 1;
-}
-
-#[cfg(not(debug_assertions))]
-mod constants {
-    pub const INITIAL_VALUES_PER_ENTRY: usize = 1000;
-    pub const INITIAL_NUM_ENTRIES: usize = 100;
-    pub const ADDED_VALUES_PER_ENTRY: usize = 1000;
-    pub const ADDED_NUM_ENTRIES: usize = 5;
-}
-
-#[allow(clippy::wildcard_imports)]
-use self::constants::*;
-
-// ---
-
-fn range(c: &mut Criterion) {
-    if std::env::var("CI").is_ok() {
-        return;
-    }
-
-    let mut group = c.benchmark_group("flat_vec_deque");
-    group.throughput(criterion::Throughput::Elements(
-        (ADDED_NUM_ENTRIES * ADDED_VALUES_PER_ENTRY) as _,
-    ));
-
-    {
-        group.bench_function("range/prefilled/front", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let v: FlatVecDeque<i64> = base.clone();
-                v.range(0..ADDED_NUM_ENTRIES)
-                    .map(ToOwned::to_owned)
-                    .collect_vec()
-            });
-        });
-        group.bench_function("range/prefilled/middle", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let v: FlatVecDeque<i64> = base.clone();
-                v.range(
-                    INITIAL_NUM_ENTRIES / 2 - ADDED_NUM_ENTRIES / 2
-                        ..INITIAL_NUM_ENTRIES / 2 + ADDED_NUM_ENTRIES / 2,
-                )
-                .map(ToOwned::to_owned)
-                .collect_vec()
-            });
-        });
-        group.bench_function("range/prefilled/back", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let v: FlatVecDeque<i64> = base.clone();
-                v.range(INITIAL_NUM_ENTRIES - ADDED_NUM_ENTRIES..INITIAL_NUM_ENTRIES)
-                    .map(ToOwned::to_owned)
-                    .collect_vec()
-            });
-        });
-    }
-}
-
-fn insert(c: &mut Criterion) {
-    if std::env::var("CI").is_ok() {
-        return;
-    }
-
-    let added = (0..ADDED_VALUES_PER_ENTRY as i64).collect_vec();
-
-    let mut group = c.benchmark_group("flat_vec_deque");
-    group.throughput(criterion::Throughput::Elements(added.len() as _));
-
-    {
-        group.bench_function("insert/empty", |b| {
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = FlatVecDeque::new();
-                v.insert(0, added.clone());
-                v
-            });
-        });
-    }
-
-    {
-        group.bench_function("insert/prefilled/front", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.insert(0, added.clone());
-                v
-            });
-        });
-        group.bench_function("insert/prefilled/middle", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.insert(INITIAL_NUM_ENTRIES / 2, added.clone());
-                v
-            });
-        });
-        group.bench_function("insert/prefilled/back", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.insert(INITIAL_NUM_ENTRIES, added.clone());
-                v
-            });
-        });
-    }
-}
-
-fn insert_many(c: &mut Criterion) {
-    if std::env::var("CI").is_ok() {
-        return;
-    }
-
-    let added = (0..ADDED_NUM_ENTRIES as i64)
-        .map(|_| (0..ADDED_VALUES_PER_ENTRY as i64).collect_vec())
-        .collect_vec();
-
-    let mut group = c.benchmark_group("flat_vec_deque");
-    group.throughput(criterion::Throughput::Elements(
-        (ADDED_NUM_ENTRIES * ADDED_VALUES_PER_ENTRY) as _,
-    ));
-
-    {
-        group.bench_function("insert_many/empty", |b| {
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = FlatVecDeque::new();
-                v.insert_many(0, added.clone());
-                v
-            });
-        });
-    }
-
-    {
-        group.bench_function("insert_many/prefilled/front", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.insert_many(0, added.clone());
-                v
-            });
-        });
-        group.bench_function("insert_many/prefilled/middle", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.insert_many(INITIAL_NUM_ENTRIES / 2, added.clone());
-                v
-            });
-        });
-        group.bench_function("insert_many/prefilled/back", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.insert_many(INITIAL_NUM_ENTRIES, added.clone());
-                v
-            });
-        });
-    }
-}
-
-fn insert_deque(c: &mut Criterion) {
-    if std::env::var("CI").is_ok() {
-        return;
-    }
-
-    let mut added: FlatVecDeque<i64> = FlatVecDeque::new();
-    for i in 0..ADDED_NUM_ENTRIES {
-        added.insert(i, (0..ADDED_VALUES_PER_ENTRY as i64).collect_vec());
-    }
-
-    let added = FlatVecDeque::from_vecs(
-        std::iter::repeat_with(|| (0..ADDED_VALUES_PER_ENTRY as i64).collect_vec())
-            .take(ADDED_NUM_ENTRIES),
-    );
-
-    let mut group = c.benchmark_group("flat_vec_deque");
-    group.throughput(criterion::Throughput::Elements(
-        (ADDED_NUM_ENTRIES * ADDED_VALUES_PER_ENTRY) as _,
-    ));
-
-    {
-        group.bench_function("insert_deque/empty", |b| {
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = FlatVecDeque::new();
-                v.insert_deque(0, added.clone());
-                v
-            });
-        });
-    }
-
-    {
-        group.bench_function("insert_deque/prefilled/front", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.insert_deque(0, added.clone());
-                v
-            });
-        });
-        group.bench_function("insert_deque/prefilled/middle", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.insert_deque(INITIAL_NUM_ENTRIES / 2, added.clone());
-                v
-            });
-        });
-        group.bench_function("insert_deque/prefilled/back", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.insert_deque(INITIAL_NUM_ENTRIES, added.clone());
-                v
-            });
-        });
-    }
-}
-
-fn remove(c: &mut Criterion) {
-    if std::env::var("CI").is_ok() {
-        return;
-    }
-
-    let mut group = c.benchmark_group("flat_vec_deque");
-    group.throughput(criterion::Throughput::Elements(1));
-
-    {
-        group.bench_function("remove/prefilled/front", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.remove(0);
-                v
-            });
-        });
-        group.bench_function("remove/prefilled/middle", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.remove(INITIAL_NUM_ENTRIES / 2);
-                v
-            });
-        });
-        group.bench_function("remove/prefilled/back", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.remove(INITIAL_NUM_ENTRIES - 1);
-                v
-            });
-        });
-    }
-}
-
-fn remove_range(c: &mut Criterion) {
-    if std::env::var("CI").is_ok() {
-        return;
-    }
-
-    let mut group = c.benchmark_group("flat_vec_deque");
-    group.throughput(criterion::Throughput::Elements(
-        (ADDED_NUM_ENTRIES * ADDED_VALUES_PER_ENTRY) as _,
-    ));
-
-    {
-        group.bench_function("remove_range/prefilled/front", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.remove_range(0..ADDED_NUM_ENTRIES);
-                v
-            });
-        });
-        group.bench_function("remove_range/prefilled/middle", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.remove_range(
-                    INITIAL_NUM_ENTRIES / 2 - ADDED_NUM_ENTRIES / 2
-                        ..INITIAL_NUM_ENTRIES / 2 + ADDED_NUM_ENTRIES / 2,
-                );
-                v
-            });
-        });
-        group.bench_function("remove_range/prefilled/back", |b| {
-            let base = create_prefilled();
-            b.iter(|| {
-                let mut v: FlatVecDeque<i64> = base.clone();
-                v.remove_range(INITIAL_NUM_ENTRIES - ADDED_NUM_ENTRIES..INITIAL_NUM_ENTRIES);
-                v
-            });
-        });
-    }
-}
-
-// ---
-
-fn create_prefilled() -> FlatVecDeque<i64> {
-    FlatVecDeque::from_vecs(
-        std::iter::repeat_with(|| (0..INITIAL_VALUES_PER_ENTRY as i64).collect_vec())
-            .take(INITIAL_NUM_ENTRIES),
-    )
-}
diff --git a/crates/re_query_cache2/benches/latest_at.rs b/crates/re_query_cache2/benches/latest_at.rs
deleted file mode 100644
index cbda2b8826b3..000000000000
--- a/crates/re_query_cache2/benches/latest_at.rs
+++ /dev/null
@@ -1,374 +0,0 @@
-//! Contains:
-//! - A 1:1 port of the benchmarks in `crates/re_query/benches/query_benchmarks.rs`, with caching enabled.
-
-use criterion::{criterion_group, criterion_main, Criterion};
-
-use itertools::Itertools;
-use re_data_store::{DataStore, LatestAtQuery, StoreSubscriber};
-use re_log_types::{entity_path, DataRow, EntityPath, RowId, TimeInt, TimeType, Timeline};
-use re_query2::{clamped_zip_1x1, PromiseResolver};
-use re_query_cache2::{CachedLatestAtResults, Caches};
-use re_types::{
-    archetypes::Points2D,
-    components::{Color, InstanceKey, Position2D, Text},
-    Archetype as _,
-};
-use re_types_core::Loggable as _;
-
-// ---
-
-// `cargo test` also runs the benchmark setup code, so make sure they run quickly:
-#[cfg(debug_assertions)]
-mod constants {
-    pub const NUM_FRAMES_POINTS: u32 = 1;
-    pub const NUM_POINTS: u32 = 1;
-    pub const NUM_FRAMES_STRINGS: u32 = 1;
-    pub const NUM_STRINGS: u32 = 1;
-}
-
-#[cfg(not(debug_assertions))]
-mod constants {
-    pub const NUM_FRAMES_POINTS: u32 = 1_000;
-    pub const NUM_POINTS: u32 = 1_000;
-    pub const NUM_FRAMES_STRINGS: u32 = 1_000;
-    pub const NUM_STRINGS: u32 = 1_000;
-}
-
-#[allow(clippy::wildcard_imports)]
-use self::constants::*;
-
-// ---
-
-#[global_allocator]
-static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
-
-criterion_group!(
-    benches,
-    mono_points,
-    mono_strings,
-    batch_points,
-    batch_strings
-);
-criterion_main!(benches);
-
-// ---
-
-fn mono_points(c: &mut Criterion) {
-    // Each mono point gets logged at a different path
-    let paths = (0..NUM_POINTS)
-        .map(move |point_idx| entity_path!("points", point_idx))
-        .collect_vec();
-    let msgs = build_points_rows(&paths, 1);
-
-    {
-        let mut group = c.benchmark_group("arrow_mono_points2");
-        // Mono-insert is slow -- decrease the sample size
-        group.sample_size(10);
-        group.throughput(criterion::Throughput::Elements(
-            (NUM_POINTS * NUM_FRAMES_POINTS) as _,
-        ));
-        group.bench_function("insert", |b| {
-            b.iter(|| insert_rows(msgs.iter()));
-        });
-    }
-
-    {
-        let mut group = c.benchmark_group("arrow_mono_points2");
-        group.throughput(criterion::Throughput::Elements(NUM_POINTS as _));
-        let (caches, store) = insert_rows(msgs.iter());
-        group.bench_function("query", |b| {
-            b.iter(|| query_and_visit_points(&caches, &store, &paths));
-        });
-    }
-}
-
-fn mono_strings(c: &mut Criterion) {
-    // Each mono string gets logged at a different path
-    let paths = (0..NUM_STRINGS)
-        .map(move |string_idx| entity_path!("strings", string_idx))
-        .collect_vec();
-    let msgs = build_strings_rows(&paths, 1);
-
-    {
-        let mut group = c.benchmark_group("arrow_mono_strings2");
-        group.sample_size(10);
-        group.throughput(criterion::Throughput::Elements(
-            (NUM_STRINGS * NUM_FRAMES_STRINGS) as _,
-        ));
-        group.bench_function("insert", |b| {
-            b.iter(|| insert_rows(msgs.iter()));
-        });
-    }
-
-    {
-        let mut group = c.benchmark_group("arrow_mono_strings2");
-        group.throughput(criterion::Throughput::Elements(NUM_POINTS as _));
-        let (caches, store) = insert_rows(msgs.iter());
-        group.bench_function("query", |b| {
-            b.iter(|| query_and_visit_strings(&caches, &store, &paths));
-        });
-    }
-}
-
-fn batch_points(c: &mut Criterion) {
-    // Batch points are logged together at a single path
-    let paths = [EntityPath::from("points")];
-    let msgs = build_points_rows(&paths, NUM_POINTS as _);
-
-    {
-        let mut group = c.benchmark_group("arrow_batch_points2");
-        group.throughput(criterion::Throughput::Elements(
-            (NUM_POINTS * NUM_FRAMES_POINTS) as _,
-        ));
-        group.bench_function("insert", |b| {
-            b.iter(|| insert_rows(msgs.iter()));
-        });
-    }
-
-    {
-        let mut group = c.benchmark_group("arrow_batch_points2");
-        group.throughput(criterion::Throughput::Elements(NUM_POINTS as _));
-        let (caches, store) = insert_rows(msgs.iter());
-        group.bench_function("query", |b| {
-            b.iter(|| query_and_visit_points(&caches, &store, &paths));
-        });
-    }
-}
-
-fn batch_strings(c: &mut Criterion) {
-    // Batch strings are logged together at a single path
-    let paths = [EntityPath::from("points")];
-    let msgs = build_strings_rows(&paths, NUM_STRINGS as _);
-
-    {
-        let mut group = c.benchmark_group("arrow_batch_strings2");
-        group.throughput(criterion::Throughput::Elements(
-            (NUM_STRINGS * NUM_FRAMES_STRINGS) as _,
-        ));
-        group.bench_function("insert", |b| {
-            b.iter(|| insert_rows(msgs.iter()));
-        });
-    }
-
-    {
-        let mut group = c.benchmark_group("arrow_batch_strings2");
-        group.throughput(criterion::Throughput::Elements(NUM_POINTS as _));
-        let (caches, store) = insert_rows(msgs.iter());
-        group.bench_function("query", |b| {
-            b.iter(|| query_and_visit_strings(&caches, &store, &paths));
-        });
-    }
-}
-
-// --- Helpers ---
-
-pub fn build_some_point2d(len: usize) -> Vec<Position2D> {
-    use rand::Rng as _;
-    let mut rng = rand::thread_rng();
-
-    (0..len)
-        .map(|_| Position2D::new(rng.gen_range(0.0..10.0), rng.gen_range(0.0..10.0)))
-        .collect()
-}
-
-/// Create `len` dummy colors
-pub fn build_some_colors(len: usize) -> Vec<Color> {
-    (0..len).map(|i| Color::from(i as u32)).collect()
-}
-
-/// Build a ([`Timeline`], [`TimeInt`]) tuple from `frame_nr` suitable for inserting in a [`re_log_types::TimePoint`].
-pub fn build_frame_nr(frame_nr: TimeInt) -> (Timeline, TimeInt) {
-    (Timeline::new("frame_nr", TimeType::Sequence), frame_nr)
-}
-
-pub fn build_some_strings(len: usize) -> Vec<Text> {
-    use rand::Rng as _;
-    let mut rng = rand::thread_rng();
-
-    (0..len)
-        .map(|_| {
-            let ilen: usize = rng.gen_range(0..10000);
-            let s: String = rand::thread_rng()
-                .sample_iter(&rand::distributions::Alphanumeric)
-                .take(ilen)
-                .map(char::from)
-                .collect();
-            Text::from(s)
-        })
-        .collect()
-}
-
-fn build_points_rows(paths: &[EntityPath], num_points: usize) -> Vec<DataRow> {
-    (0..NUM_FRAMES_POINTS)
-        .flat_map(move |frame_idx| {
-            paths.iter().map(move |path| {
-                let mut row = DataRow::from_cells2(
-                    RowId::new(),
-                    path.clone(),
-                    [build_frame_nr((frame_idx as i64).try_into().unwrap())],
-                    num_points as _,
-                    (
-                        build_some_point2d(num_points),
-                        build_some_colors(num_points),
-                    ),
-                )
-                .unwrap();
-                // NOTE: Using unsized cells will crash in debug mode, and benchmarks are run for 1 iteration,
-                // in debug mode, by the standard test harness.
-                if cfg!(debug_assertions) {
-                    row.compute_all_size_bytes();
-                }
-                row
-            })
-        })
-        .collect()
-}
-
-fn build_strings_rows(paths: &[EntityPath], num_strings: usize) -> Vec<DataRow> {
-    (0..NUM_FRAMES_STRINGS)
-        .flat_map(move |frame_idx| {
-            paths.iter().map(move |path| {
-                let mut row = DataRow::from_cells2(
-                    RowId::new(),
-                    path.clone(),
-                    [build_frame_nr((frame_idx as i64).try_into().unwrap())],
-                    num_strings as _,
-                    // We still need to create points because they are the primary for the
-                    // archetype query we want to do. We won't actually deserialize the points
-                    // during the query -- we just need it for the primary keys.
-                    // TODO(jleibs): switch this to use `TextEntry` once the new type has
-                    // landed.
-                    (
-                        build_some_point2d(num_strings),
-                        build_some_strings(num_strings),
-                    ),
-                )
-                .unwrap();
-                // NOTE: Using unsized cells will crash in debug mode, and benchmarks are run for 1 iteration,
-                // in debug mode, by the standard test harness.
-                if cfg!(debug_assertions) {
-                    row.compute_all_size_bytes();
-                }
-                row
-            })
-        })
-        .collect()
-}
-
-fn insert_rows<'a>(msgs: impl Iterator<Item = &'a DataRow>) -> (Caches, DataStore) {
-    let mut store = DataStore::new(
-        re_log_types::StoreId::random(re_log_types::StoreKind::Recording),
-        InstanceKey::name(),
-        Default::default(),
-    );
-    let mut caches = Caches::new(&store);
-
-    msgs.for_each(|row| {
-        caches.on_events(&[store.insert_row(row).unwrap()]);
-    });
-
-    (caches, store)
-}
-
-struct SavePoint {
-    _pos: Position2D,
-    _color: Option<Color>,
-}
-
-fn query_and_visit_points(
-    caches: &Caches,
-    store: &DataStore,
-    paths: &[EntityPath],
-) -> Vec<SavePoint> {
-    let resolver = PromiseResolver::default();
-
-    let timeline_frame_nr = Timeline::new("frame_nr", TimeType::Sequence);
-    let query = LatestAtQuery::new(timeline_frame_nr, NUM_FRAMES_POINTS as i64 / 2);
-
-    let mut ret = Vec::with_capacity(NUM_POINTS as _);
-
-    // TODO(jleibs): Add Radius once we have support for it in field_types
-    for entity_path in paths {
-        let results: CachedLatestAtResults = caches.latest_at(
-            store,
-            &query,
-            entity_path,
-            Points2D::all_components().iter().copied(), // no generics!
-        );
-
-        let points = results.get_required(Position2D::name()).unwrap();
-        let colors = results.get_or_empty(Color::name());
-
-        let points = points
-            .iter_dense::<Position2D>(&resolver)
-            .flatten()
-            .unwrap()
-            .copied();
-
-        let colors = colors
-            .iter_dense::<Color>(&resolver)
-            .flatten()
-            .unwrap()
-            .copied();
-        let color_default_fn = || Color::from(0xFF00FFFF);
-
-        for (point, color) in clamped_zip_1x1(points, colors, color_default_fn) {
-            ret.push(SavePoint {
-                _pos: point,
-                _color: Some(color),
-            });
-        }
-    }
-    assert_eq!(NUM_POINTS as usize, ret.len());
-    ret
-}
-
-struct SaveString {
-    _label: Option<Text>,
-}
-
-fn query_and_visit_strings(
-    caches: &Caches,
-    store: &DataStore,
-    paths: &[EntityPath],
-) -> Vec<SaveString> {
-    let resolver = PromiseResolver::default();
-
-    let timeline_frame_nr = Timeline::new("frame_nr", TimeType::Sequence);
-    let query = LatestAtQuery::new(timeline_frame_nr, NUM_FRAMES_STRINGS as i64 / 2);
-
-    let mut strings = Vec::with_capacity(NUM_STRINGS as _);
-
-    for entity_path in paths {
-        let results: CachedLatestAtResults = caches.latest_at(
-            store,
-            &query,
-            entity_path,
-            Points2D::all_components().iter().copied(), // no generics!
-        );
-
-        let points = results.get_required(Position2D::name()).unwrap();
-        let colors = results.get_or_empty(Text::name());
-
-        let points = points
-            .iter_dense::<Position2D>(&resolver)
-            .flatten()
-            .unwrap()
-            .copied();
-
-        let labels = colors
-            .iter_dense::<Text>(&resolver)
-            .flatten()
-            .unwrap()
-            .cloned();
-        let label_default_fn = || Text(String::new().into());
-
-        for (_point, label) in clamped_zip_1x1(points, labels, label_default_fn) {
-            strings.push(SaveString {
-                _label: Some(label),
-            });
-        }
-    }
-    assert_eq!(NUM_STRINGS as usize, strings.len());
-    criterion::black_box(strings)
-}
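Reviewer note: the `clamped_zip_1x1` calls in the two visitors above are what turn a single logged color (or label) into one value per point. A sketch of the semantics this relies on, assuming `re_query2`'s clamped-zip behavior (the right-hand stream is clamped to the left-hand one: its last value repeats once it is exhausted, and `default_fn` fills in when it is empty):

    use re_query2::clamped_zip_1x1;

    fn clamped_zip_demo() {
        let points = [10, 20, 30];

        // One color for three points: the single value is repeated (splat-like).
        let zipped: Vec<_> =
            clamped_zip_1x1(points.iter(), ["red"].into_iter(), || "default").collect();
        assert_eq!(vec![(&10, "red"), (&20, "red"), (&30, "red")], zipped);

        // No color at all: the default kicks in for every point.
        let zipped: Vec<_> =
            clamped_zip_1x1(points.iter(), std::iter::empty::<&str>(), || "default").collect();
        assert_eq!(
            vec![(&10, "default"), (&20, "default"), (&30, "default")],
            zipped,
        );
    }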
diff --git a/crates/re_query_cache2/src/cache.rs b/crates/re_query_cache2/src/cache.rs
deleted file mode 100644
index 7f46b49dbfb0..000000000000
--- a/crates/re_query_cache2/src/cache.rs
+++ /dev/null
@@ -1,258 +0,0 @@
-use std::{
-    collections::{BTreeMap, BTreeSet},
-    sync::Arc,
-};
-
-use ahash::{HashMap, HashSet};
-use parking_lot::RwLock;
-
-use re_data_store::{DataStore, StoreDiff, StoreEvent, StoreSubscriber, TimeInt};
-use re_log_types::{EntityPath, StoreId, TimeRange, Timeline};
-use re_types_core::ComponentName;
-
-use crate::{LatestAtCache, RangeCache};
-
-// ---
-
-/// Uniquely identifies cached query results in the [`Caches`].
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct CacheKey {
-    pub entity_path: EntityPath,
-    pub timeline: Timeline,
-    pub component_name: ComponentName,
-}
-
-impl re_types_core::SizeBytes for CacheKey {
-    #[inline]
-    fn heap_size_bytes(&self) -> u64 {
-        let Self {
-            entity_path,
-            timeline,
-            component_name,
-        } = self;
-        entity_path.heap_size_bytes()
-            + timeline.heap_size_bytes()
-            + component_name.heap_size_bytes()
-    }
-}
-
-impl std::fmt::Debug for CacheKey {
-    #[inline]
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let Self {
-            entity_path,
-            timeline,
-            component_name,
-        } = self;
-        f.write_fmt(format_args!(
-            "{entity_path}:{component_name} on {}",
-            timeline.name()
-        ))
-    }
-}
-
-impl CacheKey {
-    #[inline]
-    pub fn new(
-        entity_path: impl Into<EntityPath>,
-        timeline: impl Into<Timeline>,
-        component_name: impl Into<ComponentName>,
-    ) -> Self {
-        Self {
-            entity_path: entity_path.into(),
-            timeline: timeline.into(),
-            component_name: component_name.into(),
-        }
-    }
-}
-
-pub struct Caches {
-    /// The [`StoreId`] of the associated [`DataStore`].
-    pub(crate) store_id: StoreId,
-
-    // NOTE: `Arc` so we can cheaply free the top-level lock early when needed.
-    pub(crate) latest_at_per_cache_key: RwLock<HashMap<CacheKey, Arc<RwLock<LatestAtCache>>>>,
-
-    // NOTE: `Arc` so we can cheaply free the top-level lock early when needed.
-    pub(crate) range_per_cache_key: RwLock<HashMap<CacheKey, Arc<RwLock<RangeCache>>>>,
-}
-
-impl std::fmt::Debug for Caches {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let Self {
-            store_id,
-            latest_at_per_cache_key,
-            range_per_cache_key,
-        } = self;
-
-        let mut strings = Vec::new();
-
-        strings.push(format!("[LatestAt @ {store_id}]"));
-        {
-            let latest_at_per_cache_key = latest_at_per_cache_key.read();
-            let latest_at_per_cache_key: BTreeMap<_, _> = latest_at_per_cache_key.iter().collect();
-
-            for (cache_key, cache) in &latest_at_per_cache_key {
-                let cache = cache.read();
-                strings.push(format!(
-                    "  [{cache_key:?} (pending_invalidation_min={:?})]",
-                    cache.pending_invalidations.first().map(|&t| cache_key
-                        .timeline
-                        .format_time_range_utc(&TimeRange::new(t, TimeInt::MAX))),
-                ));
-                strings.push(indent::indent_all_by(4, format!("{cache:?}")));
-            }
-        }
-
-        strings.push(format!("[Range @ {store_id}]"));
-        {
-            let range_per_cache_key = range_per_cache_key.read();
-            let range_per_cache_key: BTreeMap<_, _> = range_per_cache_key.iter().collect();
-
-            for (cache_key, cache) in &range_per_cache_key {
-                let cache = cache.read();
-                strings.push(format!(
-                    "  [{cache_key:?} (pending_invalidation_min={:?})]",
-                    cache.pending_invalidation.map(|t| cache_key
-                        .timeline
-                        .format_time_range_utc(&TimeRange::new(t, TimeInt::MAX))),
-                ));
-                strings.push(indent::indent_all_by(4, format!("{cache:?}")));
-            }
-        }
-
-        f.write_str(&strings.join("\n").replace("\n\n", "\n"))
-    }
-}
-
-impl Caches {
-    #[inline]
-    pub fn new(store: &DataStore) -> Self {
-        Self {
-            store_id: store.id().clone(),
-            latest_at_per_cache_key: Default::default(),
-            range_per_cache_key: Default::default(),
-        }
-    }
-}
-
-impl StoreSubscriber for Caches {
-    #[inline]
-    fn name(&self) -> String {
-        "rerun.store_subscribers.QueryCache".into()
-    }
-
-    #[inline]
-    fn as_any(&self) -> &dyn std::any::Any {
-        self
-    }
-
-    #[inline]
-    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
-        self
-    }
-
-    fn on_events(&mut self, events: &[StoreEvent]) {
-        re_tracing::profile_function!(format!("num_events={}", events.len()));
-
-        #[derive(Default, Debug)]
-        struct CompactedEvents {
-            static_: HashSet<(EntityPath, ComponentName)>,
-            temporal: HashMap<CacheKey, BTreeSet<TimeInt>>,
-        }
-
-        let mut compacted = CompactedEvents::default();
-
-        for event in events {
-            let StoreEvent {
-                store_id,
-                store_generation: _,
-                event_id: _,
-                diff,
-            } = event;
-
-            assert!(
-                self.store_id == *store_id,
-                "attempted to use a query cache {} with the wrong datastore ({})",
-                self.store_id,
-                store_id,
-            );
-
-            let StoreDiff {
-                kind: _, // Don't care: both additions and deletions invalidate query results.
-                row_id: _,
-                times,
-                entity_path,
-                cells,
-            } = diff;
-
-            {
-                re_tracing::profile_scope!("compact events");
-
-                if times.is_empty() {
-                    for component_name in cells.keys() {
-                        compacted
-                            .static_
-                            .insert((entity_path.clone(), *component_name));
-                    }
-                }
-
-                for &(timeline, data_time) in times {
-                    for component_name in cells.keys() {
-                        let key = CacheKey::new(entity_path.clone(), timeline, *component_name);
-                        let data_times = compacted.temporal.entry(key).or_default();
-                        data_times.insert(data_time);
-                    }
-                }
-            }
-        }
-
-        let caches_latest_at = self.latest_at_per_cache_key.write();
-        let caches_range = self.range_per_cache_key.write();
-        // NOTE: Don't release the top-level locks -- even though this cannot happen yet with
-        // our current macro-architecture, we want to prevent queries from concurrently
-        // running while we're updating the invalidation flags.
-
-        {
-            re_tracing::profile_scope!("static");
-
-            // TODO(cmc): This is horribly stupid and slow and can easily be made faster by adding
-            // yet another layer of caching indirection.
-            // But since this pretty much never happens in practice, let's not go there until we
-            // have metrics showing that we need to.
-            for (entity_path, component_name) in compacted.static_ {
-                for (key, cache) in caches_latest_at.iter() {
-                    if key.entity_path == entity_path && key.component_name == component_name {
-                        cache.write().pending_invalidations.insert(TimeInt::STATIC);
-                    }
-                }
-
-                for (key, cache) in caches_range.iter() {
-                    if key.entity_path == entity_path && key.component_name == component_name {
-                        cache.write().pending_invalidation = Some(TimeInt::STATIC);
-                    }
-                }
-            }
-        }
-
-        {
-            re_tracing::profile_scope!("temporal");
-
-            for (key, times) in compacted.temporal {
-                if let Some(cache) = caches_latest_at.get(&key) {
-                    cache
-                        .write()
-                        .pending_invalidations
-                        .extend(times.iter().copied());
-                }
-
-                if let Some(cache) = caches_range.get(&key) {
-                    let pending_invalidation = &mut cache.write().pending_invalidation;
-                    let min_time = times.first().copied();
-                    *pending_invalidation =
-                        Option::min(*pending_invalidation, min_time).or(min_time);
-                }
-            }
-        }
-    }
-}
diff --git a/crates/re_query_cache2/src/cache_stats.rs b/crates/re_query_cache2/src/cache_stats.rs
deleted file mode 100644
index 7c205f5a8321..000000000000
--- a/crates/re_query_cache2/src/cache_stats.rs
+++ /dev/null
@@ -1,100 +0,0 @@
-use std::collections::BTreeMap;
-
-use re_log_types::TimeRange;
-use re_types_core::SizeBytes as _;
-
-use crate::{CacheKey, Caches};
-
-// ---
-
-/// Stats for all primary caches.
-///
-/// Fetch them via [`Caches::stats`].
-#[derive(Default, Debug, Clone)]
-pub struct CachesStats {
-    pub latest_at: BTreeMap<CacheKey, CachedComponentStats>,
-    pub range: BTreeMap<CacheKey, (Option<TimeRange>, CachedComponentStats)>,
-}
-
-impl CachesStats {
-    #[inline]
-    pub fn total_size_bytes(&self) -> u64 {
-        re_tracing::profile_function!();
-
-        let Self { latest_at, range } = self;
-
-        let latest_at_size_bytes: u64 =
-            latest_at.values().map(|stats| stats.total_size_bytes).sum();
-        let range_size_bytes: u64 = range
-            .values()
-            .map(|(_, stats)| stats.total_size_bytes)
-            .sum();
-
-        latest_at_size_bytes + range_size_bytes
-    }
-}
-
-/// Stats for a cached component.
-#[derive(Default, Debug, Clone)]
-pub struct CachedComponentStats {
-    pub total_indices: u64,
-    pub total_instances: u64,
-    pub total_size_bytes: u64,
-}
-
-impl Caches {
-    /// Computes the stats for all primary caches.
-    pub fn stats(&self) -> CachesStats {
-        re_tracing::profile_function!();
-
-        let latest_at = {
-            let latest_at = self.latest_at_per_cache_key.read_recursive().clone();
-            // Implicitly releasing top-level cache mappings -- concurrent queries can run once again.
-
-            latest_at
-                .iter()
-                .map(|(key, cache)| {
-                    let cache = cache.read_recursive();
-                    (
-                        key.clone(),
-                        CachedComponentStats {
-                            total_indices: cache.per_data_time.len() as _,
-                            total_instances: cache
-                                .per_data_time
-                                .values()
-                                .map(|results| results.num_instances())
-                                .sum(),
-                            total_size_bytes: cache.total_size_bytes(),
-                        },
-                    )
-                })
-                .collect()
-        };
-
-        let range = {
-            let range = self.range_per_cache_key.read_recursive().clone();
-            // Implicitly releasing top-level cache mappings -- concurrent queries can run once again.
-
-            range
-                .iter()
-                .map(|(key, cache)| {
-                    let cache = cache.read_recursive();
-                    let cache = cache.per_data_time.read_recursive();
-                    (
-                        key.clone(),
-                        (
-                            cache.time_range(),
-                            CachedComponentStats {
-                                total_indices: cache.indices.len() as _,
-                                total_instances: cache.num_instances(),
-                                total_size_bytes: cache.total_size_bytes(),
-                            },
-                        ),
-                    )
-                })
-                .collect()
-        };
-
-        CachesStats { latest_at, range }
-    }
-}
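Reviewer note: the stats above can feed e.g. the viewer's memory UI. An illustrative sketch of consuming them (assumes the merged `re_query_cache` crate; `report_cache_memory` is not part of this PR):

    fn report_cache_memory(caches: &re_query_cache::Caches) {
        let stats = caches.stats();

        // Aggregate footprint across the latest-at and range caches…
        eprintln!("total cached bytes: {}", stats.total_size_bytes());

        // …or a per-key breakdown, e.g. for the latest-at side.
        for (key, component_stats) in &stats.latest_at {
            eprintln!(
                "{key:?}: {} indices, {} instances, {} bytes",
                component_stats.total_indices,
                component_stats.total_instances,
                component_stats.total_size_bytes,
            );
        }
    }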
- /// - /// This is prefixed with `dyn_` to avoid method dispatch ambiguities that are very hard to - /// avoid even with explicit syntax and that silently lead to infinite recursions. - fn dyn_total_size_bytes(&self) -> u64; -} - -impl ErasedFlatVecDeque for FlatVecDeque { - #[inline] - fn as_any(&self) -> &dyn std::any::Any { - self - } - - #[inline] - fn as_any_mut(&mut self) -> &mut dyn std::any::Any { - self - } - - #[inline] - fn into_any(self: Box) -> Box { - self - } - - #[inline] - fn dyn_num_entries(&self) -> usize { - self.num_entries() - } - - #[inline] - fn dyn_num_values(&self) -> usize { - self.num_values() - } - - #[inline] - fn dyn_remove(&mut self, at: usize) { - FlatVecDeque::::remove(self, at); - } - - #[inline] - fn dyn_remove_range(&mut self, range: Range) { - FlatVecDeque::::remove_range(self, range); - } - - #[inline] - fn dyn_truncate(&mut self, at: usize) { - FlatVecDeque::::truncate(self, at); - } - - #[inline] - fn dyn_total_size_bytes(&self) -> u64 { - as SizeBytes>::total_size_bytes(self) - } -} - -// --- - -/// A double-ended queue implemented with a pair of growable ring buffers, where every single -/// entry is a flattened array of values. -/// -/// Logically like a `VecDeque>`, but with a less fragmented memory layout (each `Box<[T]>` -/// gets copied/inlined into the `FlatVecDeque`). -/// `FlatVecDeque` therefore optimizes for reads (cache locality, specifically) while `VecDeque>` -/// optimizes for writes. -/// -/// You can think of this as the native/deserialized version of an Arrow `ListArray`. -/// This is particularly useful when working with many small arrays of data (e.g. Rerun's -/// `TimeSeriesScalar`s). -// -// TODO(cmc): We could even use a bitmap for T=Option, which would bring this that much -// closer to a deserialized version of an Arrow array. -#[derive(Debug, Clone)] -pub struct FlatVecDeque { - /// Stores every value in the `FlatVecDeque` in a flattened `VecDeque`. - /// - /// E.g.: - /// - `FlatVecDeque[]` -> values=`[]`. - /// - `FlatVecDeque[[], [], []]` -> values=`[]`. - /// - `FlatVecDeque[[], [0], [1, 2, 3], [4, 5]]` -> values=`[0, 1, 2, 3, 4, 5]`. - values: VecDeque, - - /// Keeps track of each entry, i.e. logical slices of data. - /// - /// E.g.: - /// - `FlatVecDeque[]` -> offsets=`[]`. - /// - `FlatVecDeque[[], [], []]` -> offsets=`[0, 0, 0]`. - /// - `FlatVecDeque[[], [0], [1, 2, 3], [4, 5]]` -> offsets=`[0, 1, 4, 6]`. - offsets: VecDeque, -} - -impl SizeBytes for FlatVecDeque { - #[inline] - fn heap_size_bytes(&self) -> u64 { - // NOTE: It's all on the heap at this point. 
- - let values_size_bytes = if T::is_pod() { - (self.num_values() * std::mem::size_of::()) as _ - } else { - self.values - .iter() - .map(SizeBytes::total_size_bytes) - .sum::() - }; - - let offsets_size_bytes = self.num_entries() * std::mem::size_of::(); - - values_size_bytes + offsets_size_bytes as u64 - } -} - -impl From> for FlatVecDeque { - #[inline] - fn from(values: VecDeque) -> Self { - let num_values = values.len(); - Self { - values, - offsets: std::iter::once(num_values).collect(), - } - } -} - -impl From> for FlatVecDeque { - #[inline] - fn from(values: Vec) -> Self { - let num_values = values.len(); - Self { - values: values.into(), - offsets: std::iter::once(num_values).collect(), - } - } -} - -impl Default for FlatVecDeque { - #[inline] - fn default() -> Self { - Self::new() - } -} - -impl FlatVecDeque { - #[inline] - pub const fn new() -> Self { - Self { - values: VecDeque::new(), - offsets: VecDeque::new(), - } - } - - #[inline] - pub fn from_vecs(entries: impl IntoIterator>) -> Self { - let mut this = Self::new(); - - // NOTE: Do not use any of the insertion methods, they rely on `from_vecs` in the first - // place! - let mut value_offset = 0; - for entry in entries { - value_offset += entry.len(); // increment first! - this.offsets.push_back(value_offset); - this.values.extend(entry); - } - - this - } - - /// How many entries are there in the deque? - /// - /// Keep in mind: each entry is itself an array of values. - /// Use [`Self::num_values`] to get the total number of values across all entries. - #[inline] - pub fn num_entries(&self) -> usize { - self.offsets.len() - } - - /// How many values are there in the deque? - /// - /// Keep in mind: each entry in the deque holds an array of values. - /// Use [`Self::num_entries`] to get the total number of entries, irrelevant of how many - /// values each entry holds. - #[inline] - pub fn num_values(&self) -> usize { - self.values.len() - } - - #[inline] - fn value_offset(&self, entry_index: usize) -> usize { - if entry_index == 0 { - 0 - } else { - self.offsets[entry_index - 1] - } - } - - #[inline] - fn iter_offset_ranges(&self) -> impl Iterator> + '_ { - std::iter::once(0) - .chain(self.offsets.iter().copied()) - .tuple_windows::<(_, _)>() - .map(|(start, end)| (start..end)) - } -} - -// --- - -impl FlatVecDeque { - /// Iterates over all the entries in the deque. - /// - /// This is the same as `self.range(0..self.num_entries())`. - /// - /// Keep in mind that each entry is an array of values! - #[inline] - pub fn iter(&self) -> impl Iterator { - self.range(0..self.num_entries()) - } - - /// Iterates over all the entries in the deque in the given `entry_range`. - /// - /// Keep in mind that each entry is an array of values! - #[inline] - pub fn range(&self, entry_range: Range) -> impl Iterator { - let (values_left, values_right) = self.values.as_slices(); - // NOTE: We can't slice into our offsets, we don't even know if they're contiguous in - // memory at this point -> skip() and take(). - self.iter_offset_ranges() - .skip(entry_range.start) - .take(entry_range.len()) - .map(|offsets| { - if offsets.is_empty() { - return &[] as &'_ [T]; - } - - // NOTE: We do not need `make_contiguous` here because we always guarantee - // that a single entry's worth of values is fully contained in either the left or - // right buffer, but never straddling across both. 
- if offsets.start < values_left.len() { - &values_left[offsets] - } else { - &values_right[offsets] - } - }) - } -} - -#[test] -fn range() { - let mut v: FlatVecDeque = FlatVecDeque::new(); - - assert_eq!(0, v.num_entries()); - assert_eq!(0, v.num_values()); - - v.insert_many(0, [vec![1, 2, 3], vec![4, 5, 6, 7], vec![8, 9, 10]]); - assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7], &[8, 9, 10]], &v); - - assert_iter_eq(&[&[1, 2, 3]], v.range(0..1)); - assert_iter_eq(&[&[4, 5, 6, 7]], v.range(1..2)); - assert_iter_eq(&[&[8, 9, 10]], v.range(2..3)); - - assert_iter_eq( - &[&[1, 2, 3], &[4, 5, 6, 7], &[8, 9, 10]], - v.range(0..v.num_entries()), - ); - - assert_iter_eq(&[&[1, 2, 3], &[4, 5, 6, 7], &[8, 9, 10]], v.iter()); -} - -// --- - -impl FlatVecDeque { - /// Prepends an entry comprised of `values` to the deque. - /// - /// This is the same as `self.insert(0, values)`. - /// - /// See [`Self::insert`] for more information. - #[inline] - pub fn push_front(&mut self, values: impl IntoIterator) { - self.insert(0, values); - } - - /// Appends an entry comprised of `values` to the deque. - /// - /// This is the same as `self.insert(self.num_entries(), values)`. - /// - /// See [`Self::insert`] for more information. - #[inline] - pub fn push_back(&mut self, values: impl IntoIterator) { - self.insert(self.num_entries(), values); - } - - /// Inserts a single entry at `entry_index`, comprised of the multiple elements given as `values`. - /// - /// This is O(1) if `entry_index` corresponds to either the start or the end of the deque. - /// Otherwise, this requires splitting the deque into two pieces then stitching them back together - /// at both ends of the added data. - /// - /// Panics if `entry_index` is out of bounds. - /// Panics if `values` is empty. - #[inline] - pub fn insert(&mut self, entry_index: usize, values: impl IntoIterator) { - let values: VecDeque = values.into_iter().collect(); - let deque = values.into(); - self.insert_deque(entry_index, deque); - } - - /// Prepends multiple entries, each comprised of the multiple elements given in `entries`, - /// to the deque. - /// - /// This is the same as `self.insert_many(0, entries)`. - /// - /// See [`Self::insert_many`] for more information. - #[inline] - pub fn push_many_front(&mut self, entries: impl IntoIterator>) { - self.insert_many(0, entries); - } - - /// Appends multiple entries, each comprised of the multiple elements given in `entries`, - /// to the deque. - /// - /// This is the same as `self.insert_many(self.num_entries(), entries)`. - /// - /// See [`Self::insert_many`] for more information. - #[inline] - pub fn push_many_back(&mut self, entries: impl IntoIterator>) { - self.insert_many(self.num_entries(), entries); - } - - /// Inserts multiple entries, starting at `entry_index` onwards, each comprised of the multiple elements - /// given in `entries`. - /// - /// This is O(1) if `entry_index` corresponds to either the start or the end of the deque. - /// Otherwise, this requires splitting the deque into two pieces then stitching them back together - /// at both ends of the added data. - /// - /// Panics if `entry_index` is out of bounds. - /// Panics if any of the value arrays in `entries` is empty. - #[inline] - pub fn insert_many(&mut self, entry_index: usize, entries: impl IntoIterator>) { - let deque = Self::from_vecs(entries); - self.insert_deque(entry_index, deque); - } - - /// Prepends another full deque to the deque. - /// - /// This is the same as `self.insert_deque(0, rhs)`. 
- /// - /// See [`Self::insert_deque`] for more information. - #[inline] - pub fn push_front_deque(&mut self, rhs: FlatVecDeque) { - self.insert_deque(0, rhs); - } - - /// Appends another full deque to the deque. - /// - /// This is the same as `self.insert_deque(0, rhs)`. - /// - /// See [`Self::insert_deque`] for more information. - #[inline] - pub fn push_back_deque(&mut self, rhs: FlatVecDeque) { - self.insert_deque(self.num_entries(), rhs); - } - - /// Inserts another full deque, starting at `entry_index` and onwards. - /// - /// This is O(1) if `entry_index` corresponds to either the start or the end of the deque. - /// Otherwise, this requires splitting the deque into two pieces then stitching them back together - /// at both ends of the added data. - /// - /// Panics if `entry_index` is out of bounds. - /// Panics if any of the value arrays in `entries` is empty. - pub fn insert_deque(&mut self, entry_index: usize, mut rhs: FlatVecDeque) { - // NOTE: We're inserting _beyond_ the last element. - if entry_index == self.num_entries() { - let max_value_offset = self.offsets.back().copied().unwrap_or_default(); - self.offsets - .extend(rhs.offsets.into_iter().map(|o| o + max_value_offset)); - self.values.extend(rhs.values); - return; - } else if entry_index == 0 { - rhs.push_back_deque(std::mem::take(self)); - *self = rhs; - return; - } - - let right = self.split_off(entry_index); - self.push_back_deque(rhs); - self.push_back_deque(right); - - debug_assert!(self.iter_offset_ranges().all(|r| r.start <= r.end)); - } -} - -#[test] -fn insert() { - let mut v: FlatVecDeque = FlatVecDeque::new(); - - assert_eq!(0, v.num_entries()); - assert_eq!(0, v.num_values()); - - v.insert(0, [1, 2, 3]); - assert_deque_eq(&[&[1, 2, 3]], &v); - - v.insert(0, [4, 5, 6, 7]); - assert_deque_eq(&[&[4, 5, 6, 7], &[1, 2, 3]], &v); - - v.insert(0, [8, 9]); - assert_deque_eq(&[&[8, 9], &[4, 5, 6, 7], &[1, 2, 3]], &v); - - v.insert(2, [10, 11, 12, 13]); - assert_deque_eq(&[&[8, 9], &[4, 5, 6, 7], &[10, 11, 12, 13], &[1, 2, 3]], &v); - - v.insert(v.num_entries(), [14, 15]); - assert_deque_eq( - &[ - &[8, 9], - &[4, 5, 6, 7], - &[10, 11, 12, 13], - &[1, 2, 3], - &[14, 15], - ], - &v, - ); - - v.insert(v.num_entries() - 1, [42]); - assert_deque_eq( - &[ - &[8, 9], - &[4, 5, 6, 7], - &[10, 11, 12, 13], - &[1, 2, 3], - &[42], - &[14, 15], - ], - &v, - ); -} - -#[test] -fn insert_empty() { - let mut v: FlatVecDeque = FlatVecDeque::new(); - - assert_eq!(0, v.num_entries()); - assert_eq!(0, v.num_values()); - - v.push_back([]); - v.push_back([]); - v.push_back([]); - - assert_deque_eq(&[&[], &[], &[]], &v); -} - -// Simulate the bug that was making everything crash on the face tracking example (ultimately -// caused by recursive clears). -#[test] -fn insert_some_and_empty() { - let mut v: FlatVecDeque = FlatVecDeque::new(); - - assert_eq!(0, v.num_entries()); - assert_eq!(0, v.num_values()); - - v.push_back([0]); - v.push_back([]); - - v.push_back([1]); - v.push_back([]); - - v.push_back([2]); - v.push_back([]); - - // That used to crash. 
- assert_deque_eq(&[&[0], &[], &[1], &[], &[2], &[]], &v); -} - -#[test] -fn insert_many() { - let mut v: FlatVecDeque = FlatVecDeque::new(); - - assert_eq!(0, v.num_entries()); - assert_eq!(0, v.num_values()); - - v.insert_many(0, [vec![1, 2, 3], vec![4, 5, 6, 7], vec![8, 9, 10]]); - assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7], &[8, 9, 10]], &v); - - v.insert_many(0, [vec![20], vec![21], vec![22]]); - assert_deque_eq( - &[&[20], &[21], &[22], &[1, 2, 3], &[4, 5, 6, 7], &[8, 9, 10]], - &v, - ); - - v.insert_many(4, [vec![41, 42], vec![43]]); - assert_deque_eq( - &[ - &[20], - &[21], - &[22], - &[1, 2, 3], - &[41, 42], - &[43], - &[4, 5, 6, 7], - &[8, 9, 10], - ], - &v, - ); - - v.insert_many(v.num_entries(), [vec![100], vec![200, 300, 400]]); - assert_deque_eq( - &[ - &[20], - &[21], - &[22], - &[1, 2, 3], - &[41, 42], - &[43], - &[4, 5, 6, 7], - &[8, 9, 10], - &[100], - &[200, 300, 400], - ], - &v, - ); -} - -#[test] -fn insert_deque() { - let mut v: FlatVecDeque = FlatVecDeque::new(); - - assert_eq!(0, v.num_entries()); - assert_eq!(0, v.num_values()); - - v.insert_deque( - 0, - FlatVecDeque::from_vecs([vec![1, 2, 3], vec![4, 5, 6, 7], vec![8, 9, 10]]), - ); - assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7], &[8, 9, 10]], &v); - - v.insert_deque(0, FlatVecDeque::from_vecs([vec![20], vec![21], vec![22]])); - assert_deque_eq( - &[&[20], &[21], &[22], &[1, 2, 3], &[4, 5, 6, 7], &[8, 9, 10]], - &v, - ); - - v.insert_deque(4, FlatVecDeque::from_vecs([vec![41, 42], vec![43]])); - assert_deque_eq( - &[ - &[20], - &[21], - &[22], - &[1, 2, 3], - &[41, 42], - &[43], - &[4, 5, 6, 7], - &[8, 9, 10], - ], - &v, - ); - - v.insert_deque( - v.num_entries(), - FlatVecDeque::from_vecs([vec![100], vec![200, 300, 400]]), - ); - assert_deque_eq( - &[ - &[20], - &[21], - &[22], - &[1, 2, 3], - &[41, 42], - &[43], - &[4, 5, 6, 7], - &[8, 9, 10], - &[100], - &[200, 300, 400], - ], - &v, - ); -} - -// --- - -impl FlatVecDeque { - /// Splits the deque into two at the given index. - /// - /// Returns a newly allocated `FlatVecDeque`. `self` contains entries `[0, entry_index)`, - /// and the returned deque contains entries `[entry_index, num_entries)`. - /// - /// Note that the capacity of `self` does not change. - /// - /// Panics if `entry_index` is out of bounds. - #[inline] - #[must_use = "use `.truncate()` if you don't need the other half"] - pub fn split_off(&mut self, entry_index: usize) -> Self { - let value_offset = self.value_offset(entry_index); - - let mut offsets = self.offsets.split_off(entry_index); - for offset in &mut offsets { - *offset -= value_offset; - } - - Self { - values: self.values.split_off(value_offset), - offsets, - } - } - - /// Shortens the deque, keeping all entries up to `entry_index` (excluded), and - /// dropping the rest. - /// - /// If `entry_index` is greater or equal to [`Self::num_entries`], this has no effect. - #[inline] - pub fn truncate(&mut self, entry_index: usize) { - if entry_index < self.num_entries() { - self.values.truncate(self.value_offset(entry_index)); - self.offsets.truncate(entry_index); - } - } - - /// Removes the entry at `entry_index` from the deque. - /// - /// This is O(1) if `entry_index` corresponds to either the start or the end of the deque. - /// Otherwise, this requires splitting the deque into three pieces, dropping the superfluous - /// one, then stitching the two remaining pices back together. - /// - /// Panics if `entry_index` is out of bounds. 
-
-#[test]
-fn truncate() {
-    let mut v: FlatVecDeque<i64> = FlatVecDeque::new();
-
-    assert_eq!(0, v.num_entries());
-    assert_eq!(0, v.num_values());
-
-    v.insert_many(0, [vec![1, 2, 3], vec![4, 5, 6, 7], vec![8, 9, 10]]);
-    assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7], &[8, 9, 10]], &v);
-
-    {
-        let mut v = v.clone();
-        v.truncate(0);
-        assert_deque_eq(&[], &v);
-    }
-
-    {
-        let mut v = v.clone();
-        v.truncate(1);
-        assert_deque_eq(&[&[1, 2, 3]], &v);
-    }
-
-    {
-        let mut v = v.clone();
-        v.truncate(2);
-        assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7]], &v);
-    }
-
-    {
-        let mut v = v.clone();
-        v.truncate(3);
-        assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7], &[8, 9, 10]], &v);
-    }
-}
-
-#[test]
-fn split_off() {
-    let mut v: FlatVecDeque<i64> = FlatVecDeque::new();
-
-    assert_eq!(0, v.num_entries());
-    assert_eq!(0, v.num_values());
-
-    v.insert_many(0, [vec![1, 2, 3], vec![4, 5, 6, 7], vec![8, 9, 10]]);
-    assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7], &[8, 9, 10]], &v);
-
-    {
-        let mut left = v.clone();
-        let right = left.split_off(0);
-
-        assert_deque_eq(&[], &left);
-        assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7], &[8, 9, 10]], &right);
-    }
-
-    {
-        let mut left = v.clone();
-        let right = left.split_off(1);
-
-        assert_deque_eq(&[&[1, 2, 3]], &left);
-        assert_deque_eq(&[&[4, 5, 6, 7], &[8, 9, 10]], &right);
-    }
-
-    {
-        let mut left = v.clone();
-        let right = left.split_off(2);
-
-        assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7]], &left);
-        assert_deque_eq(&[&[8, 9, 10]], &right);
-    }
-
-    {
-        let mut left = v.clone();
-        let right = left.split_off(3);
-
-        assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7], &[8, 9, 10]], &left);
-        assert_deque_eq(&[], &right);
-    }
-}
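The "three pieces" strategy in the doc comments above is easiest to see against the nested-`Vec` reference model these tests compare results with (the `remove` and `remove_range` tests follow below). A minimal sketch with a hypothetical helper, not the deleted code:

```rust
// Behavioral model of `remove_range` over a nested Vec: split off the part to
// the right of the range, drop the doomed middle, stitch the survivors back.
fn remove_range_model(entries: &mut Vec<Vec<i64>>, range: std::ops::Range<usize>) {
    let right = entries.split_off(range.end); // detach everything after the range
    entries.truncate(range.start);            // drop the range itself
    entries.extend(right);                    // stitch the two survivors together
}

fn main() {
    let mut entries = vec![vec![1, 2, 3], vec![4, 5, 6, 7], vec![8, 9, 10]];
    remove_range_model(&mut entries, 1..2);
    assert_eq!(entries, vec![vec![1, 2, 3], vec![8, 9, 10]]);
    println!("{entries:?}");
}
```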
-
-#[test]
-fn remove() {
-    let mut v: FlatVecDeque<i64> = FlatVecDeque::new();
-
-    assert_eq!(0, v.num_entries());
-    assert_eq!(0, v.num_values());
-
-    v.insert(0, [1, 2, 3]);
-    assert_deque_eq(&[&[1, 2, 3]], &v);
-
-    v.remove(0);
-    assert_deque_eq(&[], &v);
-
-    v.insert(0, [1, 2, 3]);
-    assert_deque_eq(&[&[1, 2, 3]], &v);
-
-    v.insert(1, [4, 5, 6, 7]);
-    assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7]], &v);
-
-    v.insert(2, [8, 9]);
-    assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7], &[8, 9]], &v);
-
-    v.remove(0);
-    assert_deque_eq(&[&[4, 5, 6, 7], &[8, 9]], &v);
-
-    v.insert(0, [1, 2, 3]);
-    assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7], &[8, 9]], &v);
-
-    v.remove(1);
-    assert_deque_eq(&[&[1, 2, 3], &[8, 9]], &v);
-
-    v.insert(1, [4, 5, 6, 7]);
-    assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7], &[8, 9]], &v);
-
-    v.remove(2);
-    assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7]], &v);
-
-    v.remove(0);
-    assert_deque_eq(&[&[4, 5, 6, 7]], &v);
-
-    v.remove(0);
-    assert_deque_eq(&[], &v);
-}
-
-#[test]
-#[should_panic(expected = "Out of bounds access")]
-fn remove_empty() {
-    let mut v: FlatVecDeque<i64> = FlatVecDeque::new();
-
-    assert_eq!(0, v.num_entries());
-    assert_eq!(0, v.num_values());
-
-    v.remove(0);
-}
-
-#[test]
-#[should_panic(expected = "Out of bounds access")]
-fn remove_oob() {
-    let mut v: FlatVecDeque<i64> = FlatVecDeque::new();
-
-    assert_eq!(0, v.num_entries());
-    assert_eq!(0, v.num_values());
-
-    v.insert(0, [1, 2, 3]);
-    assert_deque_eq(&[&[1, 2, 3]], &v);
-
-    assert_eq!(1, v.num_entries());
-    assert_eq!(3, v.num_values());
-
-    v.remove(1);
-}
-
-#[test]
-fn remove_range() {
-    let mut v: FlatVecDeque<i64> = FlatVecDeque::new();
-
-    assert_eq!(0, v.num_entries());
-    assert_eq!(0, v.num_values());
-
-    v.insert_many(0, [vec![1, 2, 3], vec![4, 5, 6, 7], vec![8, 9, 10]]);
-    assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7], &[8, 9, 10]], &v);
-
-    {
-        let mut v = v.clone();
-        v.remove_range(0..1);
-        assert_deque_eq(&[&[4, 5, 6, 7], &[8, 9, 10]], &v);
-    }
-
-    {
-        let mut v = v.clone();
-        v.remove_range(1..2);
-        assert_deque_eq(&[&[1, 2, 3], &[8, 9, 10]], &v);
-    }
-
-    {
-        let mut v = v.clone();
-        v.remove_range(2..3);
-        assert_deque_eq(&[&[1, 2, 3], &[4, 5, 6, 7]], &v);
-    }
-
-    {
-        let mut v = v.clone();
-        v.remove_range(0..2);
-        assert_deque_eq(&[&[8, 9, 10]], &v);
-    }
-
-    {
-        let mut v = v.clone();
-        v.remove_range(1..3);
-        assert_deque_eq(&[&[1, 2, 3]], &v);
-    }
-
-    {
-        let mut v = v.clone();
-        v.remove_range(0..3);
-        assert_deque_eq(&[], &v);
-    }
-}
-
-// ---
-
-#[cfg(test)]
-fn assert_deque_eq(expected: &[&'_ [i64]], got: &FlatVecDeque<i64>) {
-    similar_asserts::assert_eq!(expected, got.iter().collect_vec());
-}
-
-#[cfg(test)]
-fn assert_iter_eq<'a>(expected: &[&'_ [i64]], got: impl Iterator<Item = &'a [i64]>) {
-    similar_asserts::assert_eq!(expected, got.collect_vec());
-}
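To tie the tests above together, here is a round-trip sketch of the helpers they rely on: build a flattened deque from nested vecs, then iterate the entries back out as slices. The import path assumes the post-rename re-export (the deleted `lib.rs` below shows `FlatVecDeque` being re-exported from the crate root, which now lives in `re_query_cache`):

```rust
use itertools::Itertools as _;
use re_query_cache::FlatVecDeque; // post-rename home of the type (assumed)

fn main() {
    // Two entries, flattened into a single values buffer internally:
    let v = FlatVecDeque::from_vecs([vec![1i64, 2, 3], vec![4, 5]]);
    assert_eq!(2, v.num_entries());
    assert_eq!(5, v.num_values());

    // Iterating yields each entry back as a contiguous slice:
    let entries = v.iter().collect_vec();
    assert_eq!(vec![&[1i64, 2, 3][..], &[4, 5][..]], entries);
}
```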
diff --git a/crates/re_query_cache2/src/lib.rs b/crates/re_query_cache2/src/lib.rs
deleted file mode 100644
index cb995ab447aa..000000000000
--- a/crates/re_query_cache2/src/lib.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-//! Caching datastructures for `re_query`.
-
-mod cache;
-mod cache_stats;
-mod flat_vec_deque;
-mod latest_at;
-mod range;
-
-pub use self::cache::{CacheKey, Caches};
-pub use self::cache_stats::{CachedComponentStats, CachesStats};
-pub use self::flat_vec_deque::{ErasedFlatVecDeque, FlatVecDeque};
-pub use self::latest_at::{
-    CachedLatestAtComponentResults, CachedLatestAtMonoResult, CachedLatestAtResults,
-};
-pub use self::range::{CachedRangeComponentResults, CachedRangeData, CachedRangeResults};
-
-pub(crate) use self::latest_at::LatestAtCache;
-pub(crate) use self::range::{CachedRangeComponentResultsInner, RangeCache};
-
-pub use re_query2::{
-    clamped_zip::*, range_zip::*, ExtraQueryHistory, Promise, PromiseId, PromiseResolver,
-    PromiseResult, QueryError, Result, ToArchetype, VisibleHistory, VisibleHistoryBoundary,
-};
-
-pub mod external {
-    pub use re_query2;
-
-    pub use paste;
-    pub use seq_macro;
-}
-
-// ---
-
-use re_data_store::{LatestAtQuery, RangeQuery};
-
-#[derive(Debug)]
-pub enum CachedResults {
-    LatestAt(LatestAtQuery, CachedLatestAtResults),
-    Range(RangeQuery, CachedRangeResults),
-}
-
-impl From<(LatestAtQuery, CachedLatestAtResults)> for CachedResults {
-    #[inline]
-    fn from((query, results): (LatestAtQuery, CachedLatestAtResults)) -> Self {
-        Self::LatestAt(query, results)
-    }
-}
-
-impl From<(RangeQuery, CachedRangeResults)> for CachedResults {
-    #[inline]
-    fn from((query, results): (RangeQuery, CachedRangeResults)) -> Self {
-        Self::Range(query, results)
-    }
-}
diff --git a/crates/re_query_cache2/tests/latest_at.rs b/crates/re_query_cache2/tests/latest_at.rs
deleted file mode 100644
index 016f1c3d5c80..000000000000
--- a/crates/re_query_cache2/tests/latest_at.rs
+++ /dev/null
@@ -1,526 +0,0 @@
-//! Contains:
-//! - A 1:1 port of the tests in `crates/re_query/tests/archetype_query_tests.rs`, with caching enabled.
-//! - Invalidation tests.
- -use re_data_store::{DataStore, LatestAtQuery, StoreSubscriber}; -use re_log_types::{ - build_frame_nr, - example_components::{MyColor, MyPoint, MyPoints}, - DataRow, EntityPath, RowId, TimePoint, -}; -use re_query2::PromiseResolver; -use re_query_cache2::Caches; -use re_types::Archetype as _; -use re_types_core::{components::InstanceKey, Loggable as _}; - -// --- - -#[test] -fn simple_query() { - let mut store = DataStore::new( - re_log_types::StoreId::random(re_log_types::StoreKind::Recording), - InstanceKey::name(), - Default::default(), - ); - let mut caches = Caches::new(&store); - - let entity_path = "point"; - let timepoint = [build_frame_nr(123)]; - - // Create some positions with implicit instances - let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path, timepoint, 2, positions).unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - // Assign one of them a color with an explicit instance - let color_instances = vec![InstanceKey(1)]; - let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = DataRow::from_cells2_sized( - RowId::new(), - entity_path, - timepoint, - 1, - (color_instances, colors), - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - let query = re_data_store::LatestAtQuery::new(timepoint[0].0, timepoint[0].1); - query_and_compare(&caches, &store, &query, &entity_path.into()); -} - -#[test] -fn static_query() { - let mut store = DataStore::new( - re_log_types::StoreId::random(re_log_types::StoreKind::Recording), - InstanceKey::name(), - Default::default(), - ); - let mut caches = Caches::new(&store); - - let entity_path = "point"; - let timepoint = [build_frame_nr(123)]; - - // Create some positions with implicit instances - let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path, timepoint, 2, positions).unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - // Assign one of them a color with an explicit instance.. statically! 
- let color_instances = vec![InstanceKey(1)]; - let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = DataRow::from_cells2_sized( - RowId::new(), - entity_path, - TimePoint::default(), - 1, - (color_instances, colors), - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - let query = re_data_store::LatestAtQuery::new(timepoint[0].0, timepoint[0].1); - query_and_compare(&caches, &store, &query, &entity_path.into()); -} - -#[test] -fn no_instance_join_query() { - let mut store = DataStore::new( - re_log_types::StoreId::random(re_log_types::StoreKind::Recording), - InstanceKey::name(), - Default::default(), - ); - let mut caches = Caches::new(&store); - - let entity_path = "point"; - let timepoint = [build_frame_nr(123)]; - - // Create some positions with an implicit instance - let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path, timepoint, 2, positions).unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - // Assign them colors with explicit instances - let colors = vec![MyColor::from_rgb(255, 0, 0), MyColor::from_rgb(0, 255, 0)]; - let row = DataRow::from_cells1_sized(RowId::new(), entity_path, timepoint, 2, colors).unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - let query = re_data_store::LatestAtQuery::new(timepoint[0].0, timepoint[0].1); - query_and_compare(&caches, &store, &query, &entity_path.into()); -} - -#[test] -fn missing_column_join_query() { - let mut store = DataStore::new( - re_log_types::StoreId::random(re_log_types::StoreKind::Recording), - InstanceKey::name(), - Default::default(), - ); - let mut caches = Caches::new(&store); - - let entity_path = "point"; - let timepoint = [build_frame_nr(123)]; - - // Create some positions with an implicit instance - let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path, timepoint, 2, positions).unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - let query = re_data_store::LatestAtQuery::new(timepoint[0].0, timepoint[0].1); - query_and_compare(&caches, &store, &query, &entity_path.into()); -} - -#[test] -fn splatted_query() { - let mut store = DataStore::new( - re_log_types::StoreId::random(re_log_types::StoreKind::Recording), - InstanceKey::name(), - Default::default(), - ); - let mut caches = Caches::new(&store); - - let entity_path = "point"; - let timepoint = [build_frame_nr(123)]; - - // Create some positions with implicit instances - let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path, timepoint, 2, positions).unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - // Assign all of them a color via splat - let color_instances = vec![InstanceKey::SPLAT]; - let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = DataRow::from_cells2_sized( - RowId::new(), - entity_path, - timepoint, - 1, - (color_instances, colors), - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - let query = re_data_store::LatestAtQuery::new(timepoint[0].0, timepoint[0].1); - query_and_compare(&caches, &store, &query, &entity_path.into()); -} - -#[test] -fn invalidation_xxx() { - let entity_path = "point"; - - let test_invalidation = |query: LatestAtQuery, - present_data_timepoint: TimePoint, - past_data_timepoint: TimePoint, - future_data_timepoint: TimePoint| { - let mut store = DataStore::new( - 
re_log_types::StoreId::random(re_log_types::StoreKind::Recording), - InstanceKey::name(), - Default::default(), - ); - let mut caches = Caches::new(&store); - - // Create some positions with implicit instances - let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path, - present_data_timepoint.clone(), - 2, - positions, - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - // Assign one of them a color with an explicit instance - let color_instances = vec![InstanceKey(1)]; - let colors = vec![MyColor::from_rgb(1, 2, 3)]; - let row = DataRow::from_cells2_sized( - RowId::new(), - entity_path, - present_data_timepoint.clone(), - 1, - (color_instances, colors), - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - // --- Modify present --- - - // Modify the PoV component - let positions = vec![MyPoint::new(10.0, 20.0), MyPoint::new(30.0, 40.0)]; - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path, - present_data_timepoint.clone(), - 2, - positions, - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - // Modify the optional component - let colors = vec![MyColor::from_rgb(4, 5, 6), MyColor::from_rgb(7, 8, 9)]; - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path, - present_data_timepoint, - 2, - colors, - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - // --- Modify past --- - - // Modify the PoV component - let positions = vec![MyPoint::new(100.0, 200.0), MyPoint::new(300.0, 400.0)]; - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path, - past_data_timepoint.clone(), - 2, - positions, - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - // Modify the optional component - let colors = vec![MyColor::from_rgb(10, 11, 12), MyColor::from_rgb(13, 14, 15)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path, past_data_timepoint, 2, colors) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - // --- Modify future --- - - // Modify the PoV component - let positions = vec![MyPoint::new(1000.0, 2000.0), MyPoint::new(3000.0, 4000.0)]; - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path, - future_data_timepoint.clone(), - 2, - positions, - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - // Modify the optional component - let colors = vec![MyColor::from_rgb(16, 17, 18)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path, future_data_timepoint, 1, colors) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - }; - - let timeless = TimePoint::default(); - let frame_122 = build_frame_nr(122); - let frame_123 = build_frame_nr(123); - let frame_124 = build_frame_nr(124); - - test_invalidation( - LatestAtQuery::new(frame_123.0, frame_123.1), - [frame_123].into(), - [frame_122].into(), - [frame_124].into(), - ); - - test_invalidation( - LatestAtQuery::new(frame_123.0, frame_123.1), - [frame_123].into(), - timeless, - 
[frame_124].into(), - ); -} - -// Test the following scenario: -// ```py -// rr.log("points", rr.Points3D([1, 2, 3]), static=True) -// -// # Do first query here: LatestAt(+inf) -// # Expected: points=[[1,2,3]] colors=[] -// -// rr.set_time(2) -// rr.log_components("points", rr.components.MyColor(0xFF0000)) -// -// # Do second query here: LatestAt(+inf) -// # Expected: points=[[1,2,3]] colors=[0xFF0000] -// -// rr.set_time(3) -// rr.log_components("points", rr.components.MyColor(0x0000FF)) -// -// # Do third query here: LatestAt(+inf) -// # Expected: points=[[1,2,3]] colors=[0x0000FF] -// -// rr.set_time(3) -// rr.log_components("points", rr.components.MyColor(0x00FF00)) -// -// # Do fourth query here: LatestAt(+inf) -// # Expected: points=[[1,2,3]] colors=[0x00FF00] -// ``` -#[test] -fn invalidation_of_future_optionals() { - let mut store = DataStore::new( - re_log_types::StoreId::random(re_log_types::StoreKind::Recording), - InstanceKey::name(), - Default::default(), - ); - let mut caches = Caches::new(&store); - - let entity_path = "points"; - - let timeless = TimePoint::default(); - let frame2 = [build_frame_nr(2)]; - let frame3 = [build_frame_nr(3)]; - - let query_time = [build_frame_nr(9999)]; - - let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path, timeless, 2, positions).unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - let query = re_data_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); - query_and_compare(&caches, &store, &query, &entity_path.into()); - - let color_instances = vec![InstanceKey::SPLAT]; - let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = DataRow::from_cells2_sized( - RowId::new(), - entity_path, - frame2, - 1, - (color_instances, colors), - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - let query = re_data_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); - query_and_compare(&caches, &store, &query, &entity_path.into()); - - let color_instances = vec![InstanceKey::SPLAT]; - let colors = vec![MyColor::from_rgb(0, 0, 255)]; - let row = DataRow::from_cells2_sized( - RowId::new(), - entity_path, - frame3, - 1, - (color_instances, colors), - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - let query = re_data_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); - query_and_compare(&caches, &store, &query, &entity_path.into()); - - let color_instances = vec![InstanceKey::SPLAT]; - let colors = vec![MyColor::from_rgb(0, 255, 0)]; - let row = DataRow::from_cells2_sized( - RowId::new(), - entity_path, - frame3, - 1, - (color_instances, colors), - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - let query = re_data_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); - query_and_compare(&caches, &store, &query, &entity_path.into()); -} - -#[test] -fn static_invalidation() { - let mut store = DataStore::new( - re_log_types::StoreId::random(re_log_types::StoreKind::Recording), - InstanceKey::name(), - Default::default(), - ); - let mut caches = Caches::new(&store); - - let entity_path = "points"; - - let timeless = TimePoint::default(); - - let query_time = [build_frame_nr(9999)]; - - let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let row = DataRow::from_cells1_sized(RowId::new(), entity_path, timeless.clone(), 2, positions) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - let query = 
re_data_store::LatestAtQuery::new(query_time[0].0, query_time[0].1);
-    query_and_compare(&caches, &store, &query, &entity_path.into());
-
-    let color_instances = vec![InstanceKey::SPLAT];
-    let colors = vec![MyColor::from_rgb(255, 0, 0)];
-    let row = DataRow::from_cells2_sized(
-        RowId::new(),
-        entity_path,
-        timeless.clone(),
-        1,
-        (color_instances, colors),
-    )
-    .unwrap();
-    insert_and_react(&mut store, &mut caches, &row);
-
-    let query = re_data_store::LatestAtQuery::new(query_time[0].0, query_time[0].1);
-    query_and_compare(&caches, &store, &query, &entity_path.into());
-
-    let color_instances = vec![InstanceKey::SPLAT];
-    let colors = vec![MyColor::from_rgb(0, 0, 255)];
-    let row = DataRow::from_cells2_sized(
-        RowId::new(),
-        entity_path,
-        timeless,
-        1,
-        (color_instances, colors),
-    )
-    .unwrap();
-    insert_and_react(&mut store, &mut caches, &row);
-
-    let query = re_data_store::LatestAtQuery::new(query_time[0].0, query_time[0].1);
-    query_and_compare(&caches, &store, &query, &entity_path.into());
-}
-
-// ---
-
-fn insert_and_react(store: &mut DataStore, caches: &mut Caches, row: &DataRow) {
-    caches.on_events(&[store.insert_row(row).unwrap()]);
-}
-
-fn query_and_compare(
-    caches: &Caches,
-    store: &DataStore,
-    query: &LatestAtQuery,
-    entity_path: &EntityPath,
-) {
-    re_log::setup_logging();
-
-    let resolver = PromiseResolver::default();
-
-    for _ in 0..3 {
-        let cached = caches.latest_at(
-            store,
-            query,
-            entity_path,
-            MyPoints::all_components().iter().copied(),
-        );
-
-        let cached_points = cached.get_required(MyPoint::name()).unwrap();
-        let cached_point_data = cached_points
-            .to_dense::<MyPoint>(&resolver)
-            .flatten()
-            .unwrap();
-
-        let cached_colors = cached.get_or_empty(MyColor::name());
-        let cached_color_data = cached_colors
-            .to_dense::<MyColor>(&resolver)
-            .flatten()
-            .unwrap();
-
-        let expected = re_query2::latest_at(
-            store,
-            query,
-            entity_path,
-            MyPoints::all_components().iter().copied(),
-        );
-
-        let expected_points = expected.get_required(MyPoint::name()).unwrap();
-        let expected_point_data = expected_points
-            .to_dense::<MyPoint>(&resolver)
-            .flatten()
-            .unwrap();
-
-        let expected_colors = expected.get_or_empty(MyColor::name());
-        let expected_color_data = expected_colors
-            .to_dense::<MyColor>(&resolver)
-            .flatten()
-            .unwrap();
-
-        // eprintln!("{}", store.to_data_table().unwrap());
-
-        similar_asserts::assert_eq!(expected.compound_index, cached.compound_index);
-        similar_asserts::assert_eq!(expected_point_data, cached_point_data);
-        similar_asserts::assert_eq!(expected_color_data, cached_color_data);
-    }
-}
diff --git a/crates/re_query_cache2/tests/range.rs b/crates/re_query_cache2/tests/range.rs
deleted file mode 100644
index d06a7a8d8b9a..000000000000
--- a/crates/re_query_cache2/tests/range.rs
+++ /dev/null
@@ -1,586 +0,0 @@
-use itertools::{izip, Itertools as _};
-
-use re_data_store::{DataStore, RangeQuery, StoreSubscriber as _, TimeInt, TimeRange};
-use re_log_types::{
-    build_frame_nr,
-    example_components::{MyColor, MyPoint, MyPoints},
-    DataRow, EntityPath, RowId, TimePoint,
-};
-use re_query_cache2::{Caches, PromiseResolver, PromiseResult};
-use re_types::{components::InstanceKey, Archetype};
-use re_types_core::Loggable as _;
-
-// ---
-
-#[test]
-fn simple_range() -> anyhow::Result<()> {
-    let mut store = DataStore::new(
-        re_log_types::StoreId::random(re_log_types::StoreKind::Recording),
-        InstanceKey::name(),
-        Default::default(),
-    );
-    let mut caches = Caches::new(&store);
-
-    let entity_path: EntityPath = "point".into();
-
-    let timepoint1 =
[build_frame_nr(123)]; - { - let points = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint1, 2, points)?; - insert_and_react(&mut store, &mut caches, &row); - - let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint1, 1, colors)?; - insert_and_react(&mut store, &mut caches, &row); - } - - let timepoint2 = [build_frame_nr(223)]; - { - let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint2, 1, colors)?; - insert_and_react(&mut store, &mut caches, &row); - } - - let timepoint3 = [build_frame_nr(323)]; - { - let points = vec![MyPoint::new(10.0, 20.0), MyPoint::new(30.0, 40.0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint3, 2, points)?; - insert_and_react(&mut store, &mut caches, &row); - } - - // --- First test: `(timepoint1, timepoint3]` --- - - let query = re_data_store::RangeQuery::new( - timepoint1[0].0, - TimeRange::new(timepoint1[0].1.as_i64() + 1, timepoint3[0].1), - ); - - query_and_compare(&caches, &store, &query, &entity_path); - - // --- Second test: `[timepoint1, timepoint3]` --- - - let query = re_data_store::RangeQuery::new( - timepoint1[0].0, - TimeRange::new(timepoint1[0].1, timepoint3[0].1), - ); - - query_and_compare(&caches, &store, &query, &entity_path); - - Ok(()) -} - -#[test] -fn static_range() { - let mut store = DataStore::new( - re_log_types::StoreId::random(re_log_types::StoreKind::Recording), - InstanceKey::name(), - Default::default(), - ); - let mut caches = Caches::new(&store); - - let entity_path: EntityPath = "point".into(); - - let timepoint1 = [build_frame_nr(123)]; - { - let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint1, 2, positions) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path.clone(), - timepoint1, - 1, - colors.clone(), - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - // Insert statically too! - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path.clone(), - TimePoint::default(), - 1, - colors, - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - } - - let timepoint2 = [build_frame_nr(223)]; - { - let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path.clone(), - timepoint2, - 1, - colors.clone(), - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - // Insert statically too! 
- let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path.clone(), - TimePoint::default(), - 1, - colors, - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - } - - let timepoint3 = [build_frame_nr(323)]; - { - // Create some Positions with implicit instances - let positions = vec![MyPoint::new(10.0, 20.0), MyPoint::new(30.0, 40.0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint3, 2, positions) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - } - - // --- First test: `(timepoint1, timepoint3]` --- - - let query = re_data_store::RangeQuery::new( - timepoint1[0].0, - TimeRange::new(timepoint1[0].1.as_i64() + 1, timepoint3[0].1), - ); - - query_and_compare(&caches, &store, &query, &entity_path); - - // --- Second test: `[timepoint1, timepoint3]` --- - - // The inclusion of `timepoint1` means latest-at semantics will fall back to timeless data! - - let query = re_data_store::RangeQuery::new( - timepoint1[0].0, - TimeRange::new(timepoint1[0].1, timepoint3[0].1), - ); - - query_and_compare(&caches, &store, &query, &entity_path); - - // --- Third test: `[-inf, +inf]` --- - - let query = - re_data_store::RangeQuery::new(timepoint1[0].0, TimeRange::new(TimeInt::MIN, TimeInt::MAX)); - - query_and_compare(&caches, &store, &query, &entity_path); -} - -#[test] -fn simple_splatted_range() { - let mut store = DataStore::new( - re_log_types::StoreId::random(re_log_types::StoreKind::Recording), - InstanceKey::name(), - Default::default(), - ); - let mut caches = Caches::new(&store); - - let entity_path: EntityPath = "point".into(); - - let timepoint1 = [build_frame_nr(123)]; - { - let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint1, 2, positions) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - // Assign one of them a color with an explicit instance - let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint1, 1, colors) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - } - - let timepoint2 = [build_frame_nr(223)]; - { - let colors = vec![MyColor::from_rgb(0, 255, 0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint2, 1, colors) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - } - - let timepoint3 = [build_frame_nr(323)]; - { - let positions = vec![MyPoint::new(10.0, 20.0), MyPoint::new(30.0, 40.0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path.clone(), timepoint3, 2, positions) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - } - - // --- First test: `(timepoint1, timepoint3]` --- - - let query = re_data_store::RangeQuery::new( - timepoint1[0].0, - TimeRange::new(timepoint1[0].1.as_i64() + 1, timepoint3[0].1), - ); - - query_and_compare(&caches, &store, &query, &entity_path); - - // --- Second test: `[timepoint1, timepoint3]` --- - - let query = re_data_store::RangeQuery::new( - timepoint1[0].0, - TimeRange::new(timepoint1[0].1, timepoint3[0].1), - ); - - query_and_compare(&caches, &store, &query, &entity_path); -} - -#[test] -fn invalidation() { - let entity_path = "point"; - - let test_invalidation = |query: RangeQuery, - present_data_timepoint: TimePoint, - past_data_timepoint: TimePoint, - future_data_timepoint: TimePoint| { - let mut store = DataStore::new( - 
re_log_types::StoreId::random(re_log_types::StoreKind::Recording), - InstanceKey::name(), - Default::default(), - ); - let mut caches = Caches::new(&store); - - let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path, - present_data_timepoint.clone(), - 2, - positions, - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - let colors = vec![MyColor::from_rgb(1, 2, 3)]; - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path, - present_data_timepoint.clone(), - 1, - colors, - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - // --- Modify present --- - - // Modify the PoV component - let positions = vec![MyPoint::new(10.0, 20.0), MyPoint::new(30.0, 40.0)]; - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path, - present_data_timepoint.clone(), - 2, - positions, - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - // Modify the optional component - let colors = vec![MyColor::from_rgb(4, 5, 6), MyColor::from_rgb(7, 8, 9)]; - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path, - present_data_timepoint, - 2, - colors, - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - // --- Modify past --- - - // Modify the PoV component - let positions = vec![MyPoint::new(100.0, 200.0), MyPoint::new(300.0, 400.0)]; - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path, - past_data_timepoint.clone(), - 2, - positions, - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - // Modify the optional component - let colors = vec![MyColor::from_rgb(10, 11, 12), MyColor::from_rgb(13, 14, 15)]; - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path, - past_data_timepoint.clone(), - 2, - colors, - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - // --- Modify future --- - - // Modify the PoV component - let positions = vec![MyPoint::new(1000.0, 2000.0), MyPoint::new(3000.0, 4000.0)]; - let row = DataRow::from_cells1_sized( - RowId::new(), - entity_path, - future_data_timepoint.clone(), - 2, - positions, - ) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - // Modify the optional component - let colors = vec![MyColor::from_rgb(16, 17, 18)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path, future_data_timepoint, 1, colors) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - }; - - let timeless = TimePoint::default(); - let frame_122 = build_frame_nr(122); - let frame_123 = build_frame_nr(123); - let frame_124 = build_frame_nr(124); - - test_invalidation( - RangeQuery::new(frame_123.0, TimeRange::EVERYTHING), - [frame_123].into(), - [frame_122].into(), - [frame_124].into(), - ); - - test_invalidation( - RangeQuery::new(frame_123.0, TimeRange::EVERYTHING), - [frame_123].into(), - timeless, - [frame_124].into(), - ); -} - -// Test the following scenario: -// ```py -// rr.log("points", rr.Points3D([1, 2, 3]), static=True) -// -// 
# Do first query here: LatestAt(+inf) -// # Expected: points=[[1,2,3]] colors=[] -// -// rr.set_time(2) -// rr.log_components("points", rr.components.MyColor(0xFF0000)) -// -// # Do second query here: LatestAt(+inf) -// # Expected: points=[[1,2,3]] colors=[0xFF0000] -// -// rr.set_time(3) -// rr.log_components("points", rr.components.MyColor(0x0000FF)) -// -// # Do third query here: LatestAt(+inf) -// # Expected: points=[[1,2,3]] colors=[0x0000FF] -// -// rr.set_time(3) -// rr.log_components("points", rr.components.MyColor(0x00FF00)) -// -// # Do fourth query here: LatestAt(+inf) -// # Expected: points=[[1,2,3]] colors=[0x00FF00] -// ``` -#[test] -fn invalidation_of_future_optionals() { - let mut store = DataStore::new( - re_log_types::StoreId::random(re_log_types::StoreKind::Recording), - InstanceKey::name(), - Default::default(), - ); - let mut caches = Caches::new(&store); - - let entity_path = "points"; - - let timeless = TimePoint::default(); - let frame2 = [build_frame_nr(2)]; - let frame3 = [build_frame_nr(3)]; - - let query = re_data_store::RangeQuery::new(frame2[0].0, TimeRange::EVERYTHING); - - let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path, timeless, 2, positions).unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = DataRow::from_cells1_sized(RowId::new(), entity_path, frame2, 1, colors).unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - let colors = vec![MyColor::from_rgb(0, 0, 255)]; - let row = DataRow::from_cells1_sized(RowId::new(), entity_path, frame3, 1, colors).unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - let colors = vec![MyColor::from_rgb(0, 255, 0)]; - let row = DataRow::from_cells1_sized(RowId::new(), entity_path, frame3, 1, colors).unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); -} - -#[test] -fn invalidation_static() { - let mut store = DataStore::new( - re_log_types::StoreId::random(re_log_types::StoreKind::Recording), - InstanceKey::name(), - Default::default(), - ); - let mut caches = Caches::new(&store); - - let entity_path = "points"; - - let timeless = TimePoint::default(); - - let frame0 = [build_frame_nr(TimeInt::ZERO)]; - let query = re_data_store::RangeQuery::new(frame0[0].0, TimeRange::EVERYTHING); - - let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; - let row = DataRow::from_cells1_sized(RowId::new(), entity_path, timeless.clone(), 2, positions) - .unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - let colors = vec![MyColor::from_rgb(255, 0, 0)]; - let row = - DataRow::from_cells1_sized(RowId::new(), entity_path, timeless.clone(), 1, colors).unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); - - let colors = vec![MyColor::from_rgb(0, 0, 255)]; - let row = DataRow::from_cells1_sized(RowId::new(), entity_path, timeless, 1, colors).unwrap(); - insert_and_react(&mut store, &mut caches, &row); - - query_and_compare(&caches, &store, &query, &entity_path.into()); -} - -// --- - -fn 
insert_and_react(store: &mut DataStore, caches: &mut Caches, row: &DataRow) {
-    caches.on_events(&[store.insert_row(row).unwrap()]);
-}
-
-fn query_and_compare(
-    caches: &Caches,
-    store: &DataStore,
-    query: &RangeQuery,
-    entity_path: &EntityPath,
-) {
-    re_log::setup_logging();
-
-    let resolver = PromiseResolver::default();
-
-    for _ in 0..3 {
-        let cached = caches.range(
-            store,
-            query,
-            entity_path,
-            MyPoints::all_components().iter().copied(),
-        );
-
-        let cached_all_points = cached
-            .get_required(MyPoint::name())
-            .unwrap()
-            .to_dense::<MyPoint>(&resolver);
-        assert!(matches!(
-            cached_all_points.status(),
-            (PromiseResult::Ready(()), PromiseResult::Ready(())),
-        ));
-        let cached_all_points_indexed = cached_all_points.range_indexed();
-
-        let cached_all_colors = cached
-            .get_or_empty(MyColor::name())
-            .to_dense::<MyColor>(&resolver);
-        assert!(matches!(
-            cached_all_colors.status(),
-            (PromiseResult::Ready(()), PromiseResult::Ready(())),
-        ));
-        let cached_all_colors_indexed = cached_all_colors.range_indexed();
-
-        let expected = re_query2::range(
-            store,
-            query,
-            entity_path,
-            MyPoints::all_components().iter().copied(),
-        );
-
-        let expected_all_points = expected.get_required(MyPoint::name()).unwrap();
-        let expected_all_points_indices = expected_all_points.indices();
-        let expected_all_points_data = expected_all_points
-            .to_dense::<MyPoint>(&resolver)
-            .into_iter()
-            .map(|batch| batch.flatten().unwrap())
-            .collect_vec();
-        let expected_all_points_indexed =
-            izip!(expected_all_points_indices, expected_all_points_data);
-
-        let expected_all_colors = expected.get_or_empty(MyColor::name());
-        let expected_all_colors_indices = expected_all_colors.indices();
-        let expected_all_colors_data = expected_all_colors
-            .to_dense::<MyColor>(&resolver)
-            .into_iter()
-            .map(|batch| batch.flatten().unwrap())
-            .collect_vec();
-        let expected_all_colors_indexed =
-            izip!(expected_all_colors_indices, expected_all_colors_data);
-
-        eprintln!("{query:?}");
-        eprintln!("{}", store.to_data_table().unwrap());
-
-        similar_asserts::assert_eq!(
-            expected_all_points_indexed
-                .map(|(index, data)| (*index, data))
-                .collect_vec(),
-            cached_all_points_indexed
-                .map(|(index, data)| (*index, data.to_vec()))
-                .collect_vec(),
-        );
-
-        similar_asserts::assert_eq!(
-            expected_all_colors_indexed
-                .map(|(index, data)| (*index, data))
-                .collect_vec(),
-            cached_all_colors_indexed
-                .map(|(index, data)| (*index, data.to_vec()))
-                .collect_vec(),
-        );
-    }
-}
diff --git a/crates/re_space_view/src/sub_archetypes.rs b/crates/re_space_view/src/sub_archetypes.rs
index f1fff5423f9b..5838139635a4 100644
--- a/crates/re_space_view/src/sub_archetypes.rs
+++ b/crates/re_space_view/src/sub_archetypes.rs
@@ -1,6 +1,6 @@
 use re_data_store::LatestAtQuery;
 use re_entity_db::{
-    external::re_query_cache2::{CachedLatestAtResults, PromiseResult, ToArchetype},
+    external::re_query_cache::{CachedLatestAtResults, PromiseResult, ToArchetype},
     EntityDb,
 };
 use re_log_types::EntityPath;
diff --git a/crates/re_space_view_dataframe/src/space_view_class.rs b/crates/re_space_view_dataframe/src/space_view_class.rs
index b6189eb53312..06b619a9bc55 100644
--- a/crates/re_space_view_dataframe/src/space_view_class.rs
+++ b/crates/re_space_view_dataframe/src/space_view_class.rs
@@ -160,7 +160,7 @@ impl SpaceViewClass for DataframeSpaceView {
                         // TODO(#4466): make it explicit if that value results
                         // from a splat joint.
- let results = ctx.recording().query_caches2().latest_at( + let results = ctx.recording().query_caches().latest_at( ctx.recording_store(), &latest_at_query, &instance.entity_path, diff --git a/crates/re_space_view_spatial/Cargo.toml b/crates/re_space_view_spatial/Cargo.toml index 4f45589b01fc..2bf7b4745914 100644 --- a/crates/re_space_view_spatial/Cargo.toml +++ b/crates/re_space_view_spatial/Cargo.toml @@ -28,7 +28,6 @@ re_log_types.workspace = true re_log.workspace = true re_query.workspace = true re_query_cache.workspace = true -re_query_cache2.workspace = true re_renderer = { workspace = true, features = [ "import-gltf", "import-obj", diff --git a/crates/re_space_view_spatial/src/visualizers/arrows2d.rs b/crates/re_space_view_spatial/src/visualizers/arrows2d.rs index 4f5792346a3b..f7ed6472e5de 100644 --- a/crates/re_space_view_spatial/src/visualizers/arrows2d.rs +++ b/crates/re_space_view_spatial/src/visualizers/arrows2d.rs @@ -1,5 +1,5 @@ use re_entity_db::{EntityPath, InstancePathHash}; -use re_query_cache2::range_zip_1x6; +use re_query_cache::range_zip_1x6; use re_renderer::{renderer::LineStripFlags, LineDrawableBuilder, PickingLayerInstanceId}; use re_types::{ archetypes::Arrows2D, diff --git a/crates/re_space_view_spatial/src/visualizers/arrows3d.rs b/crates/re_space_view_spatial/src/visualizers/arrows3d.rs index e5e61d400514..05fdb1ef9764 100644 --- a/crates/re_space_view_spatial/src/visualizers/arrows3d.rs +++ b/crates/re_space_view_spatial/src/visualizers/arrows3d.rs @@ -1,5 +1,5 @@ use re_entity_db::{EntityPath, InstancePathHash}; -use re_query_cache2::range_zip_1x6; +use re_query_cache::range_zip_1x6; use re_renderer::{renderer::LineStripFlags, LineDrawableBuilder, PickingLayerInstanceId}; use re_types::{ archetypes::Arrows3D, diff --git a/crates/re_space_view_spatial/src/visualizers/boxes2d.rs b/crates/re_space_view_spatial/src/visualizers/boxes2d.rs index a22b9ed8ebf8..07fb7d9b9acd 100644 --- a/crates/re_space_view_spatial/src/visualizers/boxes2d.rs +++ b/crates/re_space_view_spatial/src/visualizers/boxes2d.rs @@ -1,5 +1,5 @@ use re_entity_db::{EntityPath, InstancePathHash}; -use re_query_cache2::range_zip_1x6; +use re_query_cache::range_zip_1x6; use re_renderer::{LineDrawableBuilder, PickingLayerInstanceId}; use re_types::{ archetypes::Boxes2D, diff --git a/crates/re_space_view_spatial/src/visualizers/boxes3d.rs b/crates/re_space_view_spatial/src/visualizers/boxes3d.rs index 9df41ce52207..8819231a5aea 100644 --- a/crates/re_space_view_spatial/src/visualizers/boxes3d.rs +++ b/crates/re_space_view_spatial/src/visualizers/boxes3d.rs @@ -1,5 +1,5 @@ use re_entity_db::{EntityPath, InstancePathHash}; -use re_query_cache2::range_zip_1x7; +use re_query_cache::range_zip_1x7; use re_renderer::{LineDrawableBuilder, PickingLayerInstanceId}; use re_types::{ archetypes::Boxes3D, diff --git a/crates/re_space_view_spatial/src/visualizers/entity_iterator.rs b/crates/re_space_view_spatial/src/visualizers/entity_iterator.rs index c9eec8176a6c..f63691dc3753 100644 --- a/crates/re_space_view_spatial/src/visualizers/entity_iterator.rs +++ b/crates/re_space_view_spatial/src/visualizers/entity_iterator.rs @@ -3,7 +3,7 @@ use re_data_store::{LatestAtQuery, RangeQuery}; use re_entity_db::{EntityDb, EntityProperties}; use re_log_types::{EntityPath, TimeInt, Timeline}; use re_query::{ArchetypeView, QueryError}; -use re_query_cache2::{CachedResults, ExtraQueryHistory}; +use re_query_cache::{CachedResults, ExtraQueryHistory}; use re_renderer::DepthOffset; use re_space_view::query_visual_history; use 
re_types::Archetype; @@ -54,7 +54,7 @@ pub fn query_archetype_with_history( let time_range = visible_history.time_range(*time); let store = entity_db.store(); - let caches = entity_db.query_caches2(); + let caches = entity_db.query_caches(); if !history.enabled || time_range.min() == time_range.max() { let latest_query = LatestAtQuery::new(*timeline, time_range.min()); diff --git a/crates/re_space_view_spatial/src/visualizers/lines2d.rs b/crates/re_space_view_spatial/src/visualizers/lines2d.rs index bca1670ef291..e7c688eab4a7 100644 --- a/crates/re_space_view_spatial/src/visualizers/lines2d.rs +++ b/crates/re_space_view_spatial/src/visualizers/lines2d.rs @@ -1,5 +1,5 @@ use re_entity_db::{EntityPath, InstancePathHash}; -use re_query_cache2::range_zip_1x5; +use re_query_cache::range_zip_1x5; use re_renderer::{LineDrawableBuilder, PickingLayerInstanceId}; use re_types::{ archetypes::LineStrips2D, diff --git a/crates/re_space_view_spatial/src/visualizers/lines3d.rs b/crates/re_space_view_spatial/src/visualizers/lines3d.rs index 442c38dd9724..9bbb657425f9 100644 --- a/crates/re_space_view_spatial/src/visualizers/lines3d.rs +++ b/crates/re_space_view_spatial/src/visualizers/lines3d.rs @@ -1,5 +1,5 @@ use re_entity_db::{EntityPath, InstancePathHash}; -use re_query_cache2::range_zip_1x5; +use re_query_cache::range_zip_1x5; use re_renderer::PickingLayerInstanceId; use re_types::{ archetypes::LineStrips3D, diff --git a/crates/re_space_view_spatial/src/visualizers/points2d.rs b/crates/re_space_view_spatial/src/visualizers/points2d.rs index 9fe4f1a69039..31bc5389316c 100644 --- a/crates/re_space_view_spatial/src/visualizers/points2d.rs +++ b/crates/re_space_view_spatial/src/visualizers/points2d.rs @@ -1,7 +1,7 @@ use itertools::Itertools as _; use re_entity_db::{EntityPath, InstancePathHash}; -use re_query_cache2::range_zip_1x5; +use re_query_cache::range_zip_1x5; use re_renderer::{LineDrawableBuilder, PickingLayerInstanceId, PointCloudBuilder}; use re_types::{ archetypes::Points2D, diff --git a/crates/re_space_view_spatial/src/visualizers/points3d.rs b/crates/re_space_view_spatial/src/visualizers/points3d.rs index a0b62e66b133..2d70126c2c5b 100644 --- a/crates/re_space_view_spatial/src/visualizers/points3d.rs +++ b/crates/re_space_view_spatial/src/visualizers/points3d.rs @@ -1,7 +1,7 @@ use itertools::Itertools as _; use re_entity_db::{EntityPath, InstancePathHash}; -use re_query_cache2::range_zip_1x5; +use re_query_cache::range_zip_1x5; use re_renderer::{LineDrawableBuilder, PickingLayerInstanceId, PointCloudBuilder}; use re_types::{ archetypes::Points3D, diff --git a/crates/re_space_view_spatial/src/visualizers/results_ext.rs b/crates/re_space_view_spatial/src/visualizers/results_ext.rs index 5ef852651964..27474a9cc886 100644 --- a/crates/re_space_view_spatial/src/visualizers/results_ext.rs +++ b/crates/re_space_view_spatial/src/visualizers/results_ext.rs @@ -1,4 +1,4 @@ -use re_query_cache2::{ +use re_query_cache::{ CachedLatestAtResults, CachedRangeData, CachedRangeResults, CachedResults, PromiseResolver, PromiseResult, }; @@ -14,19 +14,19 @@ pub trait CachedRangeResultsExt { fn get_dense<'a, C: Component>( &'a self, resolver: &PromiseResolver, - ) -> Option>>; + ) -> Option>>; fn get_or_empty_dense<'a, C: Component>( &'a self, resolver: &PromiseResolver, - ) -> re_query_cache2::Result>; + ) -> re_query_cache::Result>; } impl CachedRangeResultsExt for CachedResults { fn get_dense<'a, C: Component>( &'a self, resolver: &PromiseResolver, - ) -> Option>> { + ) -> Option>> { match self { 
CachedResults::LatestAt(_, results) => results.get_dense(resolver), CachedResults::Range(_, results) => results.get_dense(resolver), @@ -36,7 +36,7 @@ impl CachedRangeResultsExt for CachedResults { fn get_or_empty_dense<'a, C: Component>( &'a self, resolver: &PromiseResolver, - ) -> re_query_cache2::Result> { + ) -> re_query_cache::Result> { match self { CachedResults::LatestAt(_, results) => results.get_or_empty_dense(resolver), CachedResults::Range(_, results) => results.get_or_empty_dense(resolver), @@ -49,20 +49,20 @@ impl CachedRangeResultsExt for CachedRangeResults { fn get_dense<'a, C: Component>( &'a self, resolver: &PromiseResolver, - ) -> Option>> { + ) -> Option>> { let results = self.get(C::name())?.to_dense(resolver); // TODO(#5607): what should happen if the promise is still pending? let (front_status, back_status) = results.status(); match front_status { PromiseResult::Error(err) => { - return Some(Err(re_query_cache2::QueryError::Other(err.into()))) + return Some(Err(re_query_cache::QueryError::Other(err.into()))) } PromiseResult::Pending | PromiseResult::Ready(_) => {} } match back_status { PromiseResult::Error(err) => { - return Some(Err(re_query_cache2::QueryError::Other(err.into()))) + return Some(Err(re_query_cache::QueryError::Other(err.into()))) } PromiseResult::Pending | PromiseResult::Ready(_) => {} } @@ -74,21 +74,17 @@ impl CachedRangeResultsExt for CachedRangeResults { fn get_or_empty_dense<'a, C: Component>( &'a self, resolver: &PromiseResolver, - ) -> re_query_cache2::Result> { + ) -> re_query_cache::Result> { let results = self.get_or_empty(C::name()).to_dense(resolver); // TODO(#5607): what should happen if the promise is still pending? let (front_status, back_status) = results.status(); match front_status { - PromiseResult::Error(err) => { - return Err(re_query_cache2::QueryError::Other(err.into())) - } + PromiseResult::Error(err) => return Err(re_query_cache::QueryError::Other(err.into())), PromiseResult::Pending | PromiseResult::Ready(_) => {} } match back_status { - PromiseResult::Error(err) => { - return Err(re_query_cache2::QueryError::Other(err.into())) - } + PromiseResult::Error(err) => return Err(re_query_cache::QueryError::Other(err.into())), PromiseResult::Pending | PromiseResult::Ready(_) => {} } @@ -101,7 +97,7 @@ impl CachedRangeResultsExt for CachedLatestAtResults { fn get_dense<'a, C: Component>( &'a self, resolver: &PromiseResolver, - ) -> Option>> { + ) -> Option>> { let results = self.get(C::name())?; let data = CachedRangeData::from_latest_at(resolver, results); @@ -109,13 +105,13 @@ impl CachedRangeResultsExt for CachedLatestAtResults { let (front_status, back_status) = data.status(); match front_status { PromiseResult::Error(err) => { - return Some(Err(re_query_cache2::QueryError::Other(err.into()))) + return Some(Err(re_query_cache::QueryError::Other(err.into()))) } PromiseResult::Pending | PromiseResult::Ready(_) => {} } match back_status { PromiseResult::Error(err) => { - return Some(Err(re_query_cache2::QueryError::Other(err.into()))) + return Some(Err(re_query_cache::QueryError::Other(err.into()))) } PromiseResult::Pending | PromiseResult::Ready(_) => {} } @@ -127,22 +123,18 @@ impl CachedRangeResultsExt for CachedLatestAtResults { fn get_or_empty_dense<'a, C: Component>( &'a self, resolver: &PromiseResolver, - ) -> re_query_cache2::Result> { + ) -> re_query_cache::Result> { let results = self.get_or_empty(C::name()); let data = CachedRangeData::from_latest_at(resolver, results); // TODO(#5607): what should happen if the promise is 
still pending? let (front_status, back_status) = data.status(); match front_status { - PromiseResult::Error(err) => { - return Err(re_query_cache2::QueryError::Other(err.into())) - } + PromiseResult::Error(err) => return Err(re_query_cache::QueryError::Other(err.into())), PromiseResult::Pending | PromiseResult::Ready(_) => {} } match back_status { - PromiseResult::Error(err) => { - return Err(re_query_cache2::QueryError::Other(err.into())) - } + PromiseResult::Error(err) => return Err(re_query_cache::QueryError::Other(err.into())), PromiseResult::Pending | PromiseResult::Ready(_) => {} } diff --git a/crates/re_space_view_tensor/src/visualizer_system.rs b/crates/re_space_view_tensor/src/visualizer_system.rs index 9842a64a8e7f..b22578d5da31 100644 --- a/crates/re_space_view_tensor/src/visualizer_system.rs +++ b/crates/re_space_view_tensor/src/visualizer_system.rs @@ -1,5 +1,5 @@ use re_data_store::LatestAtQuery; -use re_entity_db::{external::re_query_cache2::CachedLatestAtMonoResult, EntityPath}; +use re_entity_db::{external::re_query_cache::CachedLatestAtMonoResult, EntityPath}; use re_log_types::RowId; use re_types::{archetypes::Tensor, components::TensorData, tensor_data::DecodedTensor}; use re_viewer_context::{ diff --git a/crates/re_space_view_text_log/Cargo.toml b/crates/re_space_view_text_log/Cargo.toml index f73a87deb47f..d1343494859e 100644 --- a/crates/re_space_view_text_log/Cargo.toml +++ b/crates/re_space_view_text_log/Cargo.toml @@ -25,7 +25,6 @@ re_entity_db.workspace = true re_log_types.workspace = true re_log.workspace = true re_query_cache.workspace = true -re_query_cache2.workspace = true re_renderer.workspace = true re_tracing.workspace = true re_types.workspace = true diff --git a/crates/re_space_view_text_log/src/visualizer_system.rs b/crates/re_space_view_text_log/src/visualizer_system.rs index c9cb50dffd7c..a06212b65488 100644 --- a/crates/re_space_view_text_log/src/visualizer_system.rs +++ b/crates/re_space_view_text_log/src/visualizer_system.rs @@ -1,7 +1,7 @@ use re_data_store::TimeRange; use re_entity_db::EntityPath; use re_log_types::{RowId, TimeInt}; -use re_query_cache2::{clamped_zip_1x2, range_zip_1x2, CachedRangeData, PromiseResult}; +use re_query_cache::{clamped_zip_1x2, range_zip_1x2, CachedRangeData, PromiseResult}; use re_types::{ archetypes::TextLog, components::{Color, Text, TextLogLevel}, @@ -57,7 +57,7 @@ impl VisualizerSystem for TextLogSystem { for data_result in view_query.iter_visible_data_results(ctx, Self::identifier()) { re_tracing::profile_scope!("primary", &data_result.entity_path.to_string()); - let results = ctx.recording().query_caches2().range( + let results = ctx.recording().query_caches().range( ctx.recording_store(), &query, &data_result.entity_path, @@ -129,16 +129,16 @@ impl VisualizerSystem for TextLogSystem { #[inline] fn check_range<'a, C: Component>( results: &'a CachedRangeData<'a, C>, -) -> re_query_cache2::Result<()> { +) -> re_query_cache::Result<()> { let (front_status, back_status) = results.status(); match front_status { PromiseResult::Pending => return Ok(()), - PromiseResult::Error(err) => return Err(re_query_cache2::QueryError::Other(err.into())), + PromiseResult::Error(err) => return Err(re_query_cache::QueryError::Other(err.into())), PromiseResult::Ready(_) => {} } match back_status { PromiseResult::Pending => return Ok(()), - PromiseResult::Error(err) => return Err(re_query_cache2::QueryError::Other(err.into())), + PromiseResult::Error(err) => return Err(re_query_cache::QueryError::Other(err.into())), 
         PromiseResult::Ready(_) => {}
     }

diff --git a/crates/re_space_view_time_series/Cargo.toml b/crates/re_space_view_time_series/Cargo.toml
index 6a7f0691aba4..16faadc51266 100644
--- a/crates/re_space_view_time_series/Cargo.toml
+++ b/crates/re_space_view_time_series/Cargo.toml
@@ -25,7 +25,6 @@ re_log.workspace = true
 re_log_types.workspace = true
 re_query.workspace = true
 re_query_cache.workspace = true
-re_query_cache2.workspace = true
 re_renderer.workspace = true
 re_space_view.workspace = true
 re_tracing.workspace = true
diff --git a/crates/re_space_view_time_series/src/line_visualizer_system.rs b/crates/re_space_view_time_series/src/line_visualizer_system.rs
index 83008a3f169e..814f7369d249 100644
--- a/crates/re_space_view_time_series/src/line_visualizer_system.rs
+++ b/crates/re_space_view_time_series/src/line_visualizer_system.rs
@@ -1,5 +1,5 @@
 use itertools::Itertools as _;
-use re_query_cache2::{PromiseResult, QueryError};
+use re_query_cache::{PromiseResult, QueryError};
 use re_types::archetypes;
 use re_types::{
     archetypes::SeriesLine,
@@ -195,7 +195,7 @@ fn load_series(
     let entity_path = &data_result.entity_path;
     let query = re_data_store::RangeQuery::new(query.timeline, time_range);

-    let results = ctx.recording().query_caches2().range(
+    let results = ctx.recording().query_caches().range(
         ctx.recording_store(),
         &query,
         entity_path,
@@ -259,7 +259,7 @@ fn load_series(
             .map(|index| (index, ()));

         let all_frames =
-            re_query_cache2::range_zip_1x1(all_scalars_indexed, all_colors.range_indexed())
+            re_query_cache::range_zip_1x1(all_scalars_indexed, all_colors.range_indexed())
                 .enumerate();

         for (i, (_index, _scalars, colors)) in all_frames {
@@ -296,7 +296,7 @@ fn load_series(
             .range_indices(all_scalars_entry_range.clone())
             .map(|index| (index, ()));

-        let all_frames = re_query_cache2::range_zip_1x1(
+        let all_frames = re_query_cache::range_zip_1x1(
             all_scalars_indexed,
             all_stroke_widths.range_indexed(),
         )
diff --git a/crates/re_space_view_time_series/src/point_visualizer_system.rs b/crates/re_space_view_time_series/src/point_visualizer_system.rs
index 5aba1e13589c..6b5b0c997ed7 100644
--- a/crates/re_space_view_time_series/src/point_visualizer_system.rs
+++ b/crates/re_space_view_time_series/src/point_visualizer_system.rs
@@ -1,6 +1,6 @@
 use itertools::Itertools as _;

-use re_query_cache2::{PromiseResult, QueryError};
+use re_query_cache::{PromiseResult, QueryError};
 use re_types::{
     archetypes::{self, SeriesPoint},
     components::{Color, MarkerShape, MarkerSize, Name, Scalar},
@@ -149,7 +149,7 @@ impl SeriesPointSystem {
             let entity_path = &data_result.entity_path;
             let query = re_data_store::RangeQuery::new(query.timeline, time_range);

-            let results = ctx.recording().query_caches2().range(
+            let results = ctx.recording().query_caches().range(
                 ctx.recording_store(),
                 &query,
                 entity_path,
@@ -217,7 +217,7 @@ impl SeriesPointSystem {
                 .range_indices(all_scalars_entry_range.clone())
                 .map(|index| (index, ()));

-            let all_frames = re_query_cache2::range_zip_1x1(
+            let all_frames = re_query_cache::range_zip_1x1(
                 all_scalars_indexed,
                 all_colors.range_indexed(),
             )
@@ -257,7 +257,7 @@ impl SeriesPointSystem {
                 .range_indices(all_scalars_entry_range.clone())
                 .map(|index| (index, ()));

-            let all_frames = re_query_cache2::range_zip_1x1(
+            let all_frames = re_query_cache::range_zip_1x1(
                 all_scalars_indexed,
                 all_marker_sizes.range_indexed(),
             )
@@ -289,7 +289,7 @@ impl SeriesPointSystem {
                 .range_indices(all_scalars_entry_range.clone())
                 .map(|index| (index, ()));

-            let all_frames = re_query_cache2::range_zip_1x1(
+            let all_frames = re_query_cache::range_zip_1x1(
                 all_scalars_indexed,
                 all_marker_shapes.range_indexed(),
             )
diff --git a/crates/re_types_builder/src/codegen/rust/to_archetype.rs b/crates/re_types_builder/src/codegen/rust/to_archetype.rs
index 740f8ce97090..fac93e1fed22 100644
--- a/crates/re_types_builder/src/codegen/rust/to_archetype.rs
+++ b/crates/re_types_builder/src/codegen/rust/to_archetype.rs
@@ -1,4 +1,4 @@
-//! Generates code in `re_query_cache2` so that cached results can easily be converted to
+//! Generates code in `re_query_cache` so that cached results can easily be converted to
 //! ready-to-use archetypes.
 //!
 //! That code needs to be generated directly in the caching crates as it needs access to the cached
@@ -35,7 +35,7 @@ fn generate_mod(
     files_to_write: &mut BTreeMap<Utf8PathBuf, String>,
 ) {
     let generated_path =
-        Utf8PathBuf::from("crates/re_query_cache2/src/latest_at/to_archetype/mod.rs");
+        Utf8PathBuf::from("crates/re_query_cache/src/latest_at/to_archetype/mod.rs");

     let mut code = String::new();
     code.push_str(&format!("// {}\n\n", crate::codegen::autogen_warning!()));
@@ -62,7 +62,7 @@ fn generate_impls(
     objects: &Objects,
     files_to_write: &mut BTreeMap<Utf8PathBuf, String>,
 ) {
-    let generated_path = Utf8PathBuf::from("crates/re_query_cache2/src/latest_at/to_archetype");
+    let generated_path = Utf8PathBuf::from("crates/re_query_cache/src/latest_at/to_archetype");

     let quoted_imports = quote! {
         use std::sync::Arc;
diff --git a/crates/re_viewer/Cargo.toml b/crates/re_viewer/Cargo.toml
index 51a91c365277..2168bec9fe30 100644
--- a/crates/re_viewer/Cargo.toml
+++ b/crates/re_viewer/Cargo.toml
@@ -54,7 +54,6 @@ re_log_encoding = { workspace = true, features = [
 re_log_types.workspace = true
 re_memory.workspace = true
 re_query_cache.workspace = true
-re_query_cache2.workspace = true
 re_renderer = { workspace = true, default-features = false }
 re_smart_channel.workspace = true
 re_space_view.workspace = true
diff --git a/crates/re_viewer/src/app.rs b/crates/re_viewer/src/app.rs
index 58e2c612b769..f86b13586241 100644
--- a/crates/re_viewer/src/app.rs
+++ b/crates/re_viewer/src/app.rs
@@ -691,7 +691,7 @@ impl App {
             #[cfg(not(target_arch = "wasm32"))]
             UICommand::PrintPrimaryCache => {
                 if let Some(ctx) = store_context {
-                    let text = format!("{:?}", ctx.recording.query_caches2());
+                    let text = format!("{:?}", ctx.recording.query_caches());
                     self.re_ui
                         .egui_ctx
                         .output_mut(|o| o.copied_text = text.clone());
diff --git a/crates/re_viewer/src/ui/memory_panel.rs b/crates/re_viewer/src/ui/memory_panel.rs
index 36a89ef0e34e..d60b680732b3 100644
--- a/crates/re_viewer/src/ui/memory_panel.rs
+++ b/crates/re_viewer/src/ui/memory_panel.rs
@@ -2,7 +2,7 @@ use itertools::Itertools;

 use re_data_store::{DataStoreConfig, DataStoreRowStats, DataStoreStats};
 use re_format::{format_bytes, format_uint};
 use re_memory::{util::sec_since_start, MemoryHistory, MemoryLimit, MemoryUse};
-use re_query_cache2::{CachedComponentStats, CachesStats};
+use re_query_cache::{CachedComponentStats, CachesStats};
 use re_renderer::WgpuResourcePoolStatistics;
 use re_viewer_context::store_hub::StoreHubStats;
diff --git a/crates/re_viewer/src/ui/override_ui.rs b/crates/re_viewer/src/ui/override_ui.rs
index 5c34fdbbecea..6ff2c4b7b794 100644
--- a/crates/re_viewer/src/ui/override_ui.rs
+++ b/crates/re_viewer/src/ui/override_ui.rs
@@ -142,7 +142,7 @@ pub fn override_ui(
                 let query = ctx.blueprint_query;
                 ctx.store_context
                     .blueprint
-                    .query_caches2()
+                    .query_caches()
                     .latest_at(store, query, entity_path, [*component_name])
                     .components
                     .get(component_name)
@@ -150,7 +150,7 @@
             }
             StoreKind::Recording => {
                 ctx.recording()
-                    .query_caches2()
+                    .query_caches()
                     .latest_at(
                         ctx.recording_store(),
                         &query,
diff --git a/crates/re_viewer_context/Cargo.toml b/crates/re_viewer_context/Cargo.toml
index 16de5cba4b51..7a328f3578ad 100644
--- a/crates/re_viewer_context/Cargo.toml
+++ b/crates/re_viewer_context/Cargo.toml
@@ -25,7 +25,6 @@ re_entity_db = { workspace = true, features = ["serde"] }
 re_log_types.workspace = true
 re_log.workspace = true
 re_query_cache.workspace = true
-re_query_cache2.workspace = true
 re_query.workspace = true
 re_query2.workspace = true
 re_renderer.workspace = true
diff --git a/crates/re_viewer_context/src/component_ui_registry.rs b/crates/re_viewer_context/src/component_ui_registry.rs
index f687e5ec6168..85acff5c5f77 100644
--- a/crates/re_viewer_context/src/component_ui_registry.rs
+++ b/crates/re_viewer_context/src/component_ui_registry.rs
@@ -2,7 +2,7 @@ use std::collections::BTreeMap;

 use re_data_store::LatestAtQuery;
 use re_entity_db::{
-    external::re_query_cache2::CachedLatestAtComponentResults, EntityDb, EntityPath,
+    external::re_query_cache::CachedLatestAtComponentResults, EntityDb, EntityPath,
 };
 use re_log_types::DataCell;
 use re_types::{components::InstanceKey, ComponentName, Loggable as _};
diff --git a/crates/re_viewer_context/src/store_hub.rs b/crates/re_viewer_context/src/store_hub.rs
index 5c3a454babcd..f90c08ee3611 100644
--- a/crates/re_viewer_context/src/store_hub.rs
+++ b/crates/re_viewer_context/src/store_hub.rs
@@ -7,7 +7,7 @@
 use re_data_store::StoreGeneration;
 use re_data_store::{DataStoreConfig, DataStoreStats};
 use re_entity_db::{EntityDb, StoreBundle};
 use re_log_types::{ApplicationId, StoreId, StoreKind};
-use re_query_cache2::CachesStats;
+use re_query_cache::CachesStats;

 use crate::StoreContext;
@@ -715,7 +715,7 @@ impl StoreHub {
             .unwrap_or_default();

         let recording_cached_stats = recording
-            .map(|entity_db| entity_db.query_caches2().stats())
+            .map(|entity_db| entity_db.query_caches().stats())
             .unwrap_or_default();

         let recording_config = recording
diff --git a/examples/rust/extend_viewer_ui/src/main.rs b/examples/rust/extend_viewer_ui/src/main.rs
index f7cb1533392e..883ca0bdd316 100644
--- a/examples/rust/extend_viewer_ui/src/main.rs
+++ b/examples/rust/extend_viewer_ui/src/main.rs
@@ -150,7 +150,7 @@ fn component_ui(
     // just show the last value logged for each component:
     let query = re_data_store::LatestAtQuery::latest(timeline);

-    let results = entity_db.query_caches2().latest_at(
+    let results = entity_db.query_caches().latest_at(
         entity_db.store(),
         &query,
         entity_path,
diff --git a/scripts/lint.py b/scripts/lint.py
index f2f4cad8073d..51cf334d17db 100755
--- a/scripts/lint.py
+++ b/scripts/lint.py
@@ -980,7 +980,7 @@ def lint_crate_docs(should_ignore: Callable[[Any], bool]) -> int:
     """Make sure ARCHITECTURE.md talks about every single crate we have."""

     # These crates will replace existing ones and won't ever be published as-is.
-    tmp_crates = ["re_query2", "re_query_cache2"]
+    tmp_crates = ["re_query2"]

     crates_dir = Path("crates")
     architecture_md_file = Path("ARCHITECTURE.md")