From 1cd97aa0eedb60f28f0bde564077bea03e64ebd7 Mon Sep 17 00:00:00 2001
From: Clement Rey
Date: Fri, 26 Apr 2024 12:40:20 +0200
Subject: [PATCH] New data APIs 10: stats and debug tools for new caches (#5990)

Title. The new cache being natively component-based makes things much smoother
than before.

---

Part of a PR series to completely revamp the data APIs in preparation for the
removal of instance keys and the introduction of promises:
- #5573
- #5574
- #5581
- #5605
- #5606
- #5633
- #5673
- #5679
- #5687
- #5755
- #5990
- #5992
- #5993
- #5994
- #6035
- #6036
- #6037

Builds on top of the static data PR series:
- #5534
---
 Cargo.lock                                    |   3 +
 crates/re_query_cache2/src/cache.rs           |  58 ++++++-
 crates/re_query_cache2/src/cache_stats.rs     | 100 ++++++++++++
 crates/re_query_cache2/src/latest_at/query.rs |  66 ++++++--
 .../re_query_cache2/src/latest_at/results.rs  |  47 ++++--
 crates/re_query_cache2/src/lib.rs             |  26 ++++
 crates/re_query_cache2/src/range/results.rs   |   6 +-
 crates/re_viewer/Cargo.toml                   |   1 +
 crates/re_viewer/src/app.rs                   |   6 +-
 crates/re_viewer/src/ui/memory_panel.rs       | 147 ++++++------------
 crates/re_viewer_context/Cargo.toml           |   2 +
 .../re_viewer_context/src/space_view/mod.rs   |   3 +
 crates/re_viewer_context/src/store_hub.rs     |   6 +-
 13 files changed, 322 insertions(+), 149 deletions(-)
 create mode 100644 crates/re_query_cache2/src/cache_stats.rs

diff --git a/Cargo.lock b/Cargo.lock
index da19fba00f29..aced4d0dead4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5084,6 +5084,7 @@ dependencies = [
  "re_log_types",
  "re_memory",
  "re_query_cache",
+ "re_query_cache2",
  "re_renderer",
  "re_smart_channel",
  "re_space_view",
@@ -5143,7 +5144,9 @@ dependencies = [
  "re_log",
  "re_log_types",
  "re_query",
+ "re_query2",
  "re_query_cache",
+ "re_query_cache2",
  "re_renderer",
  "re_smart_channel",
  "re_string_interner",

diff --git a/crates/re_query_cache2/src/cache.rs b/crates/re_query_cache2/src/cache.rs
index 6e5278a9f1e5..7f46b49dbfb0 100644
--- a/crates/re_query_cache2/src/cache.rs
+++ b/crates/re_query_cache2/src/cache.rs
@@ -1,10 +1,13 @@
-use std::{collections::BTreeSet, sync::Arc};
+use std::{
+    collections::{BTreeMap, BTreeSet},
+    sync::Arc,
+};

 use ahash::{HashMap, HashSet};
 use parking_lot::RwLock;

 use re_data_store::{DataStore, StoreDiff, StoreEvent, StoreSubscriber, TimeInt};
-use re_log_types::{EntityPath, StoreId, Timeline};
+use re_log_types::{EntityPath, StoreId, TimeRange, Timeline};
 use re_types_core::ComponentName;

 use crate::{LatestAtCache, RangeCache};

@@ -63,7 +66,6 @@ impl CacheKey {
     }
 }

-#[derive(Debug)]
 pub struct Caches {
     /// The [`StoreId`] of the associated [`DataStore`].
     pub(crate) store_id: StoreId,

@@ -75,6 +77,54 @@ pub struct Caches {
     pub(crate) range_per_cache_key: RwLock<HashMap<CacheKey, Arc<RwLock<RangeCache>>>>,
 }

+impl std::fmt::Debug for Caches {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let Self {
+            store_id,
+            latest_at_per_cache_key,
+            range_per_cache_key,
+        } = self;
+
+        let mut strings = Vec::new();
+
+        strings.push(format!("[LatestAt @ {store_id}]"));
+        {
+            let latest_at_per_cache_key = latest_at_per_cache_key.read();
+            let latest_at_per_cache_key: BTreeMap<_, _> = latest_at_per_cache_key.iter().collect();
+
+            for (cache_key, cache) in &latest_at_per_cache_key {
+                let cache = cache.read();
+                strings.push(format!(
+                    " [{cache_key:?} (pending_invalidation_min={:?})]",
+                    cache.pending_invalidations.first().map(|&t| cache_key
+                        .timeline
+                        .format_time_range_utc(&TimeRange::new(t, TimeInt::MAX))),
+                ));
+                strings.push(indent::indent_all_by(4, format!("{cache:?}")));
+            }
+        }
+
+        strings.push(format!("[Range @ {store_id}]"));
+        {
+            let range_per_cache_key = range_per_cache_key.read();
+            let range_per_cache_key: BTreeMap<_, _> = range_per_cache_key.iter().collect();
+
+            for (cache_key, cache) in &range_per_cache_key {
+                let cache = cache.read();
+                strings.push(format!(
+                    " [{cache_key:?} (pending_invalidation_min={:?})]",
+                    cache.pending_invalidation.map(|t| cache_key
+                        .timeline
+                        .format_time_range_utc(&TimeRange::new(t, TimeInt::MAX))),
+                ));
+                strings.push(indent::indent_all_by(4, format!("{cache:?}")));
+            }
+        }
+
+        f.write_str(&strings.join("\n").replace("\n\n", "\n"))
+    }
+}
+
 impl Caches {
     #[inline]
     pub fn new(store: &DataStore) -> Self {
@@ -164,7 +214,7 @@ impl StoreSubscriber for Caches {
         // running while we're updating the invalidation flags.

         {
-            re_tracing::profile_scope!("timeless");
+            re_tracing::profile_scope!("static");

             // TODO(cmc): This is horribly stupid and slow and can easily be made faster by adding
             // yet another layer of caching indirection.
diff --git a/crates/re_query_cache2/src/cache_stats.rs b/crates/re_query_cache2/src/cache_stats.rs
new file mode 100644
index 000000000000..7c205f5a8321
--- /dev/null
+++ b/crates/re_query_cache2/src/cache_stats.rs
@@ -0,0 +1,100 @@
+use std::collections::BTreeMap;
+
+use re_log_types::TimeRange;
+use re_types_core::SizeBytes as _;
+
+use crate::{CacheKey, Caches};
+
+// ---
+
+/// Stats for all primary caches.
+///
+/// Fetch them via [`Caches::stats`].
+#[derive(Default, Debug, Clone)]
+pub struct CachesStats {
+    pub latest_at: BTreeMap<CacheKey, CachedComponentStats>,
+    pub range: BTreeMap<CacheKey, (Option<TimeRange>, CachedComponentStats)>,
+}
+
+impl CachesStats {
+    #[inline]
+    pub fn total_size_bytes(&self) -> u64 {
+        re_tracing::profile_function!();
+
+        let Self { latest_at, range } = self;
+
+        let latest_at_size_bytes: u64 =
+            latest_at.values().map(|stats| stats.total_size_bytes).sum();
+        let range_size_bytes: u64 = range
+            .values()
+            .map(|(_, stats)| stats.total_size_bytes)
+            .sum();
+
+        latest_at_size_bytes + range_size_bytes
+    }
+}
+
+/// Stats for a cached component.
+#[derive(Default, Debug, Clone)]
+pub struct CachedComponentStats {
+    pub total_indices: u64,
+    pub total_instances: u64,
+    pub total_size_bytes: u64,
+}
+
+impl Caches {
+    /// Computes the stats for all primary caches.
+    pub fn stats(&self) -> CachesStats {
+        re_tracing::profile_function!();
+
+        let latest_at = {
+            let latest_at = self.latest_at_per_cache_key.read_recursive().clone();
+            // Implicitly releasing top-level cache mappings -- concurrent queries can run once again.
+
+            latest_at
+                .iter()
+                .map(|(key, cache)| {
+                    let cache = cache.read_recursive();
+                    (
+                        key.clone(),
+                        CachedComponentStats {
+                            total_indices: cache.per_data_time.len() as _,
+                            total_instances: cache
+                                .per_data_time
+                                .values()
+                                .map(|results| results.num_instances())
+                                .sum(),
+                            total_size_bytes: cache.total_size_bytes(),
+                        },
+                    )
+                })
+                .collect()
+        };
+
+        let range = {
+            let range = self.range_per_cache_key.read_recursive().clone();
+            // Implicitly releasing top-level cache mappings -- concurrent queries can run once again.
+
+            range
+                .iter()
+                .map(|(key, cache)| {
+                    let cache = cache.read_recursive();
+                    let cache = cache.per_data_time.read_recursive();
+                    (
+                        key.clone(),
+                        (
+                            cache.time_range(),
+                            CachedComponentStats {
+                                total_indices: cache.indices.len() as _,
+                                total_instances: cache.num_instances(),
+                                total_size_bytes: cache.total_size_bytes(),
+                            },
+                        ),
+                    )
+                })
+                .collect()
+        };
+
+        CachesStats { latest_at, range }
+    }
+}
diff --git a/crates/re_query_cache2/src/latest_at/query.rs b/crates/re_query_cache2/src/latest_at/query.rs
index b2e9e931d2e3..825830e9a731 100644
--- a/crates/re_query_cache2/src/latest_at/query.rs
+++ b/crates/re_query_cache2/src/latest_at/query.rs
@@ -1,11 +1,10 @@
 use std::collections::BTreeSet;
-use std::sync::atomic::AtomicU64;
-use std::sync::atomic::Ordering::Relaxed;
 use std::{collections::BTreeMap, sync::Arc};

-use ahash::HashMap;
-
+use indexmap::IndexMap;
+use itertools::Itertools;
 use parking_lot::RwLock;
+
 use re_data_store::{DataStore, LatestAtQuery, TimeInt};
 use re_log_types::EntityPath;
 use re_query2::Promise;
@@ -108,22 +107,44 @@ impl std::fmt::Debug for LatestAtCache {

         let mut strings = Vec::new();

-        let data_times_per_bucket: HashMap<_, _> = per_data_time
+        struct StatsPerBucket {
+            query_times: BTreeSet<TimeInt>,
+            data_time: TimeInt,
+            total_size_bytes: u64,
+        }
+
+        let mut buckets: IndexMap<_, _> = per_data_time
             .iter()
-            .map(|(time, bucket)| (Arc::as_ptr(bucket), *time))
+            .map(|(&data_time, bucket)| {
+                (
+                    Arc::as_ptr(bucket),
+                    StatsPerBucket {
+                        query_times: Default::default(),
+                        data_time,
+                        total_size_bytes: bucket.total_size_bytes(),
+                    },
+                )
+            })
             .collect();

-        for (query_time, bucket) in per_query_time {
-            let query_time = cache_key.timeline.typ().format_utc(*query_time);
-            let data_time = data_times_per_bucket.get(&Arc::as_ptr(bucket)).map_or_else(
-                || "MISSING?!".to_owned(),
-                |t| cache_key.timeline.typ().format_utc(*t),
-            );
+        for (&query_time, bucket) in per_query_time {
+            if let Some(bucket) = buckets.get_mut(&Arc::as_ptr(bucket)) {
+                bucket.query_times.insert(query_time);
+            }
+        }
+
+        for bucket in buckets.values() {
             strings.push(format!(
-                "query_time={query_time} -> data_time={data_time} ({})",
-                re_format::format_bytes(bucket.cached_heap_size_bytes.load(Relaxed) as _),
+                "query_times=[{}] -> data_time={:?} ({})",
+                bucket
+                    .query_times
+                    .iter()
+                    .map(|t| cache_key.timeline.typ().format_utc(*t))
+                    .collect_vec()
+                    .join(", "),
+                bucket.data_time.as_i64(),
+                re_format::format_bytes(bucket.total_size_bytes as _),
             ));
-            strings.push(indent::indent_all_by(2, format!("{bucket:?}")));
         }

         if strings.is_empty() {
@@ -148,7 +169,19 @@ impl SizeBytes for LatestAtCache {
             .keys()
             .map(|k| k.total_size_bytes())
             .sum::<u64>();
-        let per_data_time = per_data_time.total_size_bytes();
+        // NOTE: per query time buckets are just pointers, don't count them.
+
+        let per_data_time_keys = per_data_time
+            .keys()
+            .map(|k| k.total_size_bytes())
+            .sum::<u64>();
+        let per_data_time_values = per_data_time
+            .values()
+            // NOTE: make sure to dereference the Arc, else this will account for zero (assumed amortized!)
+            .map(|arc| (**arc).total_size_bytes())
+            .sum::<u64>();
+
+        let per_data_time = per_data_time_keys + per_data_time_values;
         let pending_invalidations = pending_invalidations.total_size_bytes();

         per_query_time + per_data_time + pending_invalidations
@@ -217,7 +250,6 @@ impl LatestAtCache {
                     index: (data_time, row_id),
                     promise: Some(Promise::new(cell)),
                     cached_dense: Default::default(),
-                    cached_heap_size_bytes: AtomicU64::new(0),
                 });

                 // Slowest path: this is a complete cache miss.
diff --git a/crates/re_query_cache2/src/latest_at/results.rs b/crates/re_query_cache2/src/latest_at/results.rs
index 4597807f3ec5..0a1bbd714b2d 100644
--- a/crates/re_query_cache2/src/latest_at/results.rs
+++ b/crates/re_query_cache2/src/latest_at/results.rs
@@ -1,7 +1,4 @@
-use std::sync::{
-    atomic::{AtomicU64, Ordering::Relaxed},
-    Arc, OnceLock,
-};
+use std::sync::{Arc, OnceLock};

 use nohash_hasher::IntMap;

@@ -130,8 +127,6 @@ pub struct CachedLatestAtComponentResults {

     /// The resolved, converted, deserialized dense data.
     pub(crate) cached_dense: OnceLock<Box<dyn ErasedFlatVecDeque + Send + Sync>>,
-
-    pub(crate) cached_heap_size_bytes: AtomicU64,
 }

 impl CachedLatestAtComponentResults {
@@ -141,7 +136,6 @@ impl CachedLatestAtComponentResults {
             index: (TimeInt::STATIC, RowId::ZERO),
             promise: None,
             cached_dense: OnceLock::new(),
-            cached_heap_size_bytes: AtomicU64::new(0),
         }
     }

@@ -159,12 +153,37 @@ impl CachedLatestAtComponentResults {
     pub fn is_static(&self) -> bool {
         self.index.0 == TimeInt::STATIC
     }
+
+    /// How many _indices_ across this entire cache?
+    #[inline]
+    pub fn num_indices(&self) -> u64 {
+        _ = self;
+        1
+    }
+
+    /// How many _instances_ across this entire cache?
+    #[inline]
+    pub fn num_instances(&self) -> u64 {
+        self.cached_dense
+            .get()
+            .map_or(0u64, |cached| cached.dyn_num_values() as _)
+    }
 }

 impl SizeBytes for CachedLatestAtComponentResults {
     #[inline]
     fn heap_size_bytes(&self) -> u64 {
-        self.cached_heap_size_bytes.load(Relaxed)
+        let Self {
+            index,
+            promise,
+            cached_dense,
+        } = self;
+
+        index.total_size_bytes()
+            + promise.total_size_bytes()
+            + cached_dense
+                .get()
+                .map_or(0, |data| data.dyn_total_size_bytes())
     }
 }

@@ -174,14 +193,13 @@ impl std::fmt::Debug for CachedLatestAtComponentResults {
             index,
             promise: _,
             cached_dense: _, // we can't, we don't know the type
-            cached_heap_size_bytes,
         } = self;

         f.write_fmt(format_args!(
             "[{:?}#{}] {}",
             index.0,
             index.1,
-            re_format::format_bytes(cached_heap_size_bytes.load(Relaxed) as _)
+            re_format::format_bytes(self.total_size_bytes() as _)
         ))
     }
 }
@@ -256,12 +274,9 @@ impl CachedLatestAtComponentResults {
             .map_err(|err| DeserializationError::DataCellError(err.to_string()))?;

         #[allow(clippy::borrowed_box)]
-        let cached: &Box<dyn ErasedFlatVecDeque + Send + Sync> =
-            self.cached_dense.get_or_init(move || {
-                self.cached_heap_size_bytes
-                    .fetch_add(data.total_size_bytes(), Relaxed);
-                Box::new(FlatVecDeque::from(data))
-            });
+        let cached: &Box<dyn ErasedFlatVecDeque + Send + Sync> = self
+            .cached_dense
+            .get_or_init(move || Box::new(FlatVecDeque::from(data)));

         downcast(&**cached)
     }
diff --git a/crates/re_query_cache2/src/lib.rs b/crates/re_query_cache2/src/lib.rs
index d8965db3cc81..cb995ab447aa 100644
--- a/crates/re_query_cache2/src/lib.rs
+++ b/crates/re_query_cache2/src/lib.rs
@@ -1,11 +1,13 @@
 //! Caching datastructures for `re_query`.

 mod cache;
+mod cache_stats;
 mod flat_vec_deque;
 mod latest_at;
 mod range;

 pub use self::cache::{CacheKey, Caches};
+pub use self::cache_stats::{CachedComponentStats, CachesStats};
 pub use self::flat_vec_deque::{ErasedFlatVecDeque, FlatVecDeque};
 pub use self::latest_at::{
     CachedLatestAtComponentResults, CachedLatestAtMonoResult, CachedLatestAtResults,
@@ -26,3 +28,27 @@ pub mod external {
     pub use paste;
     pub use seq_macro;
 }
+
+// ---
+
+use re_data_store::{LatestAtQuery, RangeQuery};
+
+#[derive(Debug)]
+pub enum CachedResults {
+    LatestAt(LatestAtQuery, CachedLatestAtResults),
+    Range(RangeQuery, CachedRangeResults),
+}
+
+impl From<(LatestAtQuery, CachedLatestAtResults)> for CachedResults {
+    #[inline]
+    fn from((query, results): (LatestAtQuery, CachedLatestAtResults)) -> Self {
+        Self::LatestAt(query, results)
+    }
+}
+
+impl From<(RangeQuery, CachedRangeResults)> for CachedResults {
+    #[inline]
+    fn from((query, results): (RangeQuery, CachedRangeResults)) -> Self {
+        Self::Range(query, results)
+    }
+}
diff --git a/crates/re_query_cache2/src/range/results.rs b/crates/re_query_cache2/src/range/results.rs
index cfd804055fc0..98a39eebed52 100644
--- a/crates/re_query_cache2/src/range/results.rs
+++ b/crates/re_query_cache2/src/range/results.rs
@@ -567,9 +567,9 @@ impl SizeBytes for CachedRangeComponentResultsInner {
             cached_dense,
         } = self;

-        indices.heap_size_bytes()
-            + promises_front.heap_size_bytes()
-            + promises_back.heap_size_bytes()
+        indices.total_size_bytes()
+            + promises_front.total_size_bytes()
+            + promises_back.total_size_bytes()
             + cached_dense
                 .as_ref()
                 .map_or(0, |data| data.dyn_total_size_bytes())
diff --git a/crates/re_viewer/Cargo.toml b/crates/re_viewer/Cargo.toml
index 2168bec9fe30..51a91c365277 100644
--- a/crates/re_viewer/Cargo.toml
+++ b/crates/re_viewer/Cargo.toml
@@ -54,6 +54,7 @@ re_log_encoding = { workspace = true, features = [
 re_log_types.workspace = true
 re_memory.workspace = true
 re_query_cache.workspace = true
+re_query_cache2.workspace = true
 re_renderer = { workspace = true, default-features = false }
 re_smart_channel.workspace = true
 re_space_view.workspace = true
diff --git a/crates/re_viewer/src/app.rs b/crates/re_viewer/src/app.rs
index fdf4fa9a5cc0..58e2c612b769 100644
--- a/crates/re_viewer/src/app.rs
+++ b/crates/re_viewer/src/app.rs
@@ -691,7 +691,7 @@ impl App {
             #[cfg(not(target_arch = "wasm32"))]
             UICommand::PrintPrimaryCache => {
                 if let Some(ctx) = store_context {
-                    let text = format!("{:?}", ctx.recording.query_caches());
+                    let text = format!("{:?}", ctx.recording.query_caches2());
                     self.re_ui
                         .egui_ctx
                         .output_mut(|o| o.copied_text = text.clone());
@@ -1415,9 +1415,7 @@ impl eframe::App for App {

         // NOTE: Store and caching stats are very costly to compute: only do so if the memory panel
         // is opened.
-        let store_stats = self
-            .memory_panel_open
-            .then(|| store_hub.stats(self.memory_panel.primary_cache_detailed_stats_enabled()));
+        let store_stats = self.memory_panel_open.then(|| store_hub.stats());

         // do early, before doing too many allocations
         self.memory_panel
diff --git a/crates/re_viewer/src/ui/memory_panel.rs b/crates/re_viewer/src/ui/memory_panel.rs
index 44d9abf02e5a..36a89ef0e34e 100644
--- a/crates/re_viewer/src/ui/memory_panel.rs
+++ b/crates/re_viewer/src/ui/memory_panel.rs
@@ -1,9 +1,8 @@
-use std::sync::atomic::AtomicBool;
-
+use itertools::Itertools;
 use re_data_store::{DataStoreConfig, DataStoreRowStats, DataStoreStats};
 use re_format::{format_bytes, format_uint};
 use re_memory::{util::sec_since_start, MemoryHistory, MemoryLimit, MemoryUse};
-use re_query_cache::{CachedComponentStats, CachedEntityStats, CachesStats};
+use re_query_cache2::{CachedComponentStats, CachesStats};
 use re_renderer::WgpuResourcePoolStatistics;
 use re_viewer_context::store_hub::StoreHubStats;

@@ -15,13 +14,6 @@ use crate::env_vars::RERUN_TRACK_ALLOCATIONS;
 pub struct MemoryPanel {
     history: MemoryHistory,
     memory_purge_times: Vec<f64>,
-
-    /// If `true`, enables the much-more-costly-to-compute per-component stats for the primary
-    /// cache.
-    prim_cache_detailed_stats: AtomicBool,
-
-    /// If `true`, will show stats about empty primary caches too, which likely indicates a bug (dangling bucket).
-    prim_cache_show_empty: AtomicBool,
 }

 impl MemoryPanel {
@@ -49,12 +41,6 @@ impl MemoryPanel {
         self.memory_purge_times.push(sec_since_start());
     }

-    #[inline]
-    pub fn primary_cache_detailed_stats_enabled(&self) -> bool {
-        self.prim_cache_detailed_stats
-            .load(std::sync::atomic::Ordering::Relaxed)
-    }
-
     #[allow(clippy::too_many_arguments)]
     pub fn ui(
         &self,
@@ -74,7 +60,7 @@ impl MemoryPanel {
             .min_width(250.0)
             .default_width(300.0)
             .show_inside(ui, |ui| {
-                self.left_side(ui, re_ui, limit, gpu_resource_stats, store_stats);
+                Self::left_side(ui, re_ui, limit, gpu_resource_stats, store_stats);
             });

         egui::CentralPanel::default().show_inside(ui, |ui| {
@@ -84,7 +70,6 @@ impl MemoryPanel {
     }

     fn left_side(
-        &self,
         ui: &mut egui::Ui,
         re_ui: &re_ui::ReUi,
         limit: &MemoryLimit,
@@ -115,7 +100,7 @@ impl MemoryPanel {

         ui.separator();
         ui.collapsing("Primary Cache Resources", |ui| {
-            self.caches_stats(ui, re_ui, &store_stats.recording_cached_stats);
+            Self::caches_stats(ui, &store_stats.recording_cached_stats);
         });

         ui.separator();
@@ -319,27 +304,15 @@ impl MemoryPanel {
         });
     }

-    fn caches_stats(&self, ui: &mut egui::Ui, re_ui: &re_ui::ReUi, caches_stats: &CachesStats) {
-        use std::sync::atomic::Ordering::Relaxed;
-
-        let mut detailed_stats = self.prim_cache_detailed_stats.load(Relaxed);
-        re_ui
-            .checkbox(ui, &mut detailed_stats, "Detailed stats")
-            .on_hover_text("Show detailed statistics when hovering entity paths below.\nThis will slow down the program.");
-        self.prim_cache_detailed_stats
-            .store(detailed_stats, Relaxed);
-
-        let mut show_empty = self.prim_cache_show_empty.load(Relaxed);
-        re_ui
-            .checkbox(ui, &mut show_empty, "Show empty caches")
-            .on_hover_text(
-                "Show empty caches too.\nDangling buckets are generally the result of a bug.",
-            );
-        self.prim_cache_show_empty.store(show_empty, Relaxed);
-
+    fn caches_stats(ui: &mut egui::Ui, caches_stats: &CachesStats) {
         let CachesStats { latest_at, range } = caches_stats;

-        if show_empty || !latest_at.is_empty() {
+        let latest_at = latest_at
+            .iter()
+            .filter(|(_, stats)| stats.total_indices > 0)
+            .collect_vec();
+
+        if !latest_at.is_empty() {
             ui.separator();
             ui.strong("LatestAt");
             egui::ScrollArea::vertical()
                 .num_columns(3)
                 .show(ui, |ui| {
                     ui.label(egui::RichText::new("Entity").underline());
-                    ui.label(egui::RichText::new("Rows").underline())
-                        .on_hover_text(
-                            "How many distinct data timestamps have been cached?",
-                        );
+                    ui.label(egui::RichText::new("Component").underline());
+                    ui.label(egui::RichText::new("Indices").underline());
+                    ui.label(egui::RichText::new("Instances").underline());
                     ui.label(egui::RichText::new("Size").underline());
                     ui.end_row();

-                    for (entity_path, stats) in latest_at {
-                        if !show_empty && stats.is_empty() {
-                            continue;
-                        }
+                    for (cache_key, stats) in latest_at {
+                        let &CachedComponentStats {
+                            total_indices,
+                            total_instances,
+                            total_size_bytes,
+                        } = stats;

-                        let res = ui.label(entity_path.to_string());
-                        entity_stats_ui(ui, res, stats);
+                        ui.label(cache_key.entity_path.to_string());
+                        ui.label(cache_key.component_name.to_string());
+                        ui.label(re_format::format_uint(total_indices));
+                        ui.label(re_format::format_uint(total_instances));
+                        ui.label(re_format::format_bytes(total_size_bytes as _));
                         ui.end_row();
                     }
                 });
             });
         }

-        if show_empty || !latest_at.is_empty() {
+        let range = range
+            .iter()
+            .filter(|(_, (_, stats))| stats.total_indices > 0)
+            .collect_vec();
+
+        if !range.is_empty() {
             ui.separator();
             ui.strong("Range");
             egui::ScrollArea::vertical()
                 .num_columns(4)
                 .show(ui, |ui| {
                     ui.label(egui::RichText::new("Entity").underline());
-                    ui.label(egui::RichText::new("Time range").underline());
-                    ui.label(egui::RichText::new("Rows").underline())
-                        .on_hover_text(
-                            "How many distinct data timestamps have been cached?",
-                        );
-                    ui.label(egui::RichText::new("Size").underline());
-                    ui.end_row();
-
-                    for (entity_path, stats_per_range) in range {
-                        for (timeline, time_range, stats) in stats_per_range {
-                            if !show_empty && stats.is_empty() {
-                                continue;
-                            }
-
-                            let res = ui.label(entity_path.to_string());
-                            ui.label(format!(
-                                "{}({})",
-                                timeline.name(),
-                                timeline.format_time_range_utc(time_range)
-                            ));
-                            entity_stats_ui(ui, res, stats);
-                            ui.end_row();
-                        }
-                    }
-                });
-            });
-    }
-
-    fn entity_stats_ui(
-        ui: &mut egui::Ui,
-        hover_response: egui::Response,
-        entity_stats: &CachedEntityStats,
-    ) {
-        let CachedEntityStats {
-            total_size_bytes,
-            total_rows,
-            per_component,
-        } = entity_stats;
-
-        if let Some(per_component) = per_component.as_ref() {
-            hover_response.on_hover_ui_at_pointer(|ui| {
-                egui::Grid::new("component cache stats grid")
-                    .num_columns(3)
-                    .show(ui, |ui| {
                         ui.label(egui::RichText::new("Component").underline());
-                        ui.label(egui::RichText::new("Rows").underline());
+                        ui.label(egui::RichText::new("Indices").underline());
                         ui.label(egui::RichText::new("Instances").underline());
                         ui.label(egui::RichText::new("Size").underline());
+                        ui.label(egui::RichText::new("Time range").underline());
                         ui.end_row();

-                        for (component_name, stats) in per_component {
+                        for (cache_key, (time_range, stats)) in range {
                             let &CachedComponentStats {
-                                total_rows,
+                                total_indices,
                                 total_instances,
                                 total_size_bytes,
                             } = stats;

-                            ui.label(component_name.to_string());
-                            ui.label(re_format::format_uint(total_rows));
+                            ui.label(cache_key.entity_path.to_string());
+                            ui.label(cache_key.component_name.to_string());
+                            ui.label(re_format::format_uint(total_indices));
                             ui.label(re_format::format_uint(total_instances));
                             ui.label(re_format::format_bytes(total_size_bytes as _));
+                            ui.label(format!(
+                                "{}({})",
+                                cache_key.timeline.name(),
+                                time_range.map_or("".to_owned(), |time_range| {
+                                    cache_key.timeline.format_time_range_utc(&time_range)
+                                })
+                            ));
                             ui.end_row();
                         }
                     });
             });
-        }
-
-        ui.label(re_format::format_uint(*total_rows));
-        ui.label(re_format::format_bytes(*total_size_bytes as _));
     }
 }
diff --git a/crates/re_viewer_context/Cargo.toml b/crates/re_viewer_context/Cargo.toml
index 835b3c5aec4c..16de5cba4b51 100644
--- a/crates/re_viewer_context/Cargo.toml
+++ b/crates/re_viewer_context/Cargo.toml
@@ -25,7 +25,9 @@ re_entity_db = { workspace = true, features = ["serde"] }
 re_log_types.workspace = true
 re_log.workspace = true
 re_query_cache.workspace = true
+re_query_cache2.workspace = true
 re_query.workspace = true
+re_query2.workspace = true
 re_renderer.workspace = true
 re_smart_channel.workspace = true
 re_string_interner.workspace = true
diff --git a/crates/re_viewer_context/src/space_view/mod.rs b/crates/re_viewer_context/src/space_view/mod.rs
index b4da1ba5f82b..61c60dfadf77 100644
--- a/crates/re_viewer_context/src/space_view/mod.rs
+++ b/crates/re_viewer_context/src/space_view/mod.rs
@@ -48,6 +48,9 @@ pub enum SpaceViewSystemExecutionError {
     #[error(transparent)]
     QueryError(#[from] re_query::QueryError),

+    #[error(transparent)]
+    QueryError2(#[from] re_query2::QueryError),
+
     #[error(transparent)]
     DeserializationError(#[from] re_types::DeserializationError),

diff --git a/crates/re_viewer_context/src/store_hub.rs b/crates/re_viewer_context/src/store_hub.rs
index 2e6c9050a92f..5c3a454babcd 100644
--- a/crates/re_viewer_context/src/store_hub.rs
+++ b/crates/re_viewer_context/src/store_hub.rs
@@ -7,7 +7,7 @@ use re_data_store::StoreGeneration;
 use re_data_store::{DataStoreConfig, DataStoreStats};
 use re_entity_db::{EntityDb, StoreBundle};
 use re_log_types::{ApplicationId, StoreId, StoreKind};
-use re_query_cache::CachesStats;
+use re_query_cache2::CachesStats;

 use crate::StoreContext;

@@ -687,7 +687,7 @@ impl StoreHub {
     //
     // TODO(jleibs): We probably want stats for all recordings, not just
     // the active recording.
-    pub fn stats(&self, detailed_cache_stats: bool) -> StoreHubStats {
+    pub fn stats(&self) -> StoreHubStats {
         re_tracing::profile_function!();

         // If we have an app-id, then use it to look up the blueprint.
@@ -715,7 +715,7 @@ impl StoreHub {
             .unwrap_or_default();

         let recording_cached_stats = recording
-            .map(|entity_db| entity_db.query_caches().stats(detailed_cache_stats))
+            .map(|entity_db| entity_db.query_caches2().stats())
             .unwrap_or_default();

         let recording_config = recording
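---

Reviewer note (not part of the patch): a minimal sketch of how the new per-(entity path,
component) stats introduced above might be consumed outside the memory panel. It only uses
items added in this diff (`Caches::stats`, `CachesStats`, `CachedComponentStats`, the public
`CacheKey` fields) plus `re_format`; the `caches` handle itself is assumed to come from
whatever accessor the caller already has (e.g. the `query_caches2()` used in `store_hub.rs`),
and the function name below is hypothetical.

    // Hypothetical usage sketch: summarize the latest-at side of the new cache stats.
    use re_query_cache2::{Caches, CachesStats};

    fn print_latest_at_cache_summary(caches: &Caches) {
        // One pass over all primary caches; cheap compared to the old per-entity stats.
        let stats: CachesStats = caches.stats();

        // The new stats are keyed directly by (entity path, component), no nested maps.
        for (cache_key, component_stats) in &stats.latest_at {
            println!(
                "{} / {}: {} indices, {} instances, {}",
                cache_key.entity_path,
                cache_key.component_name,
                component_stats.total_indices,
                component_stats.total_instances,
                re_format::format_bytes(component_stats.total_size_bytes as _),
            );
        }

        println!(
            "total cached: {}",
            re_format::format_bytes(stats.total_size_bytes() as _)
        );
    }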