Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Primary caching 13: stats & memory panel integration for range queries #4785

Merged
merged 5 commits into from
Jan 15, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion crates/re_data_store/src/store_format.rs
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,11 @@ impl std::fmt::Display for IndexedBucket {
let time_range = {
let time_range = &self.inner.read().time_range;
if time_range.min.as_i64() != i64::MAX && time_range.max.as_i64() != i64::MIN {
self.timeline.format_time_range_utc(time_range)
format!(
" - {}: {}",
self.timeline.name(),
self.timeline.format_time_range_utc(time_range)
)
} else {
"time range: N/A\n".to_owned()
}
Expand Down
5 changes: 1 addition & 4 deletions crates/re_data_store/src/store_write.rs
Original file line number Diff line number Diff line change
Expand Up @@ -384,10 +384,7 @@ impl IndexedTable {
if 0 < config.indexed_bucket_num_rows {
let bucket_time_range = bucket.inner.read().time_range;

re_log::debug_once!(
"Failed to split bucket on timeline {}",
bucket.timeline.format_time_range_utc(&bucket_time_range)
);
re_log::debug_once!("Failed to split bucket on timeline {}", timeline.name());

if 1 < config.indexed_bucket_num_rows
&& bucket_time_range.min == bucket_time_range.max
Expand Down
3 changes: 1 addition & 2 deletions crates/re_log_types/src/time_point/timeline.rs
Original file line number Diff line number Diff line change
Expand Up @@ -101,8 +101,7 @@ impl Timeline {
time_zone_for_timestamps: TimeZone,
) -> String {
format!(
" - {}: from {} to {} (all inclusive)",
self.name,
"{}..={}",
self.typ.format(time_range.min, time_zone_for_timestamps),
self.typ.format(time_range.max, time_zone_for_timestamps),
)
Expand Down
9 changes: 8 additions & 1 deletion crates/re_query_cache/src/cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ use seq_macro::seq;
use re_data_store::{
LatestAtQuery, RangeQuery, StoreDiff, StoreEvent, StoreSubscriber, StoreSubscriberHandle,
};
use re_log_types::{EntityPath, RowId, StoreId, TimeInt, Timeline};
use re_log_types::{EntityPath, RowId, StoreId, TimeInt, TimeRange, Timeline};
use re_query::ArchetypeView;
use re_types_core::{
components::InstanceKey, Archetype, ArchetypeName, Component, ComponentName, SizeBytes as _,
Expand Down Expand Up @@ -425,6 +425,13 @@ impl CacheBucket {
self.data_times.iter()
}

/// The inclusive time span covered by the data in this bucket, if any.
///
/// Returns `None` when the bucket holds no data at all; otherwise the range
/// runs from the earliest to the latest cached data time (both inclusive).
#[inline]
pub fn time_range(&self) -> Option<TimeRange> {
    // `_` never binds, so only the (Copy) time component is moved out.
    let &(min, _) = self.data_times.front()?;
    let &(max, _) = self.data_times.back()?;
    Some(TimeRange::new(min, max))
}

#[inline]
pub fn contains_data_time(&self, data_time: TimeInt) -> bool {
let first_time = self.data_times.front().map_or(&TimeInt::MAX, |(t, _)| t);
Expand Down
101 changes: 76 additions & 25 deletions crates/re_query_cache/src/cache_stats.rs
Original file line number Diff line number Diff line change
@@ -1,24 +1,9 @@
use std::{collections::BTreeMap, sync::atomic::AtomicBool};
use std::collections::BTreeMap;

use re_log_types::EntityPath;
use re_log_types::{EntityPath, TimeRange, Timeline};
use re_types_core::ComponentName;

use crate::{Caches, LatestAtCache};

// ---

/// If `true`, enables the much-more-costly-to-compute per-component stats.
static ENABLE_DETAILED_STATS: AtomicBool = AtomicBool::new(false);

#[inline]
pub fn detailed_stats() -> bool {
ENABLE_DETAILED_STATS.load(std::sync::atomic::Ordering::Relaxed)
}

#[inline]
pub fn set_detailed_stats(b: bool) {
ENABLE_DETAILED_STATS.store(b, std::sync::atomic::Ordering::Relaxed);
}
use crate::{Caches, LatestAtCache, RangeCache};

// ---

Expand All @@ -28,19 +13,28 @@ pub fn set_detailed_stats(b: bool) {
#[derive(Default, Debug, Clone)]
pub struct CachesStats {
pub latest_at: BTreeMap<EntityPath, CachedEntityStats>,
pub range: BTreeMap<EntityPath, Vec<(Timeline, TimeRange, CachedEntityStats)>>,
}

impl CachesStats {
#[inline]
pub fn total_size_bytes(&self) -> u64 {
re_tracing::profile_function!();

let Self { latest_at } = self;
let Self { latest_at, range } = self;

let latest_at_size_bytes: u64 =
latest_at.values().map(|stats| stats.total_size_bytes).sum();

latest_at_size_bytes
let range_size_bytes: u64 = range
.values()
.flat_map(|all_ranges| {
all_ranges
.iter()
.map(|(_, _, stats)| stats.total_size_bytes)
})
.sum();

latest_at_size_bytes + range_size_bytes
}
}

Expand All @@ -50,10 +44,19 @@ pub struct CachedEntityStats {
pub total_rows: u64,
pub total_size_bytes: u64,

/// Only if [`detailed_stats`] returns `true` (see [`set_detailed_stats`]).
/// Only if `detailed_stats` is `true` (see [`Caches::stats`]).
pub per_component: Option<BTreeMap<ComponentName, CachedComponentStats>>,
}

impl CachedEntityStats {
    /// Returns `true` iff this entry holds neither rows nor bytes.
    ///
    /// Both counters are checked on purpose: a bugged cache might report rows
    /// without any bytes (or the reverse), and we want such an inconsistent
    /// state to show up as non-empty so it can be noticed.
    #[inline]
    pub fn is_empty(&self) -> bool {
        let Self {
            total_rows,
            total_size_bytes,
            ..
        } = self;
        *total_rows == 0 && *total_size_bytes == 0
    }
}

/// Stats for a cached component.
#[derive(Default, Debug, Clone)]
pub struct CachedComponentStats {
Expand All @@ -65,7 +68,7 @@ impl Caches {
/// Computes the stats for all primary caches.
///
/// `per_component` toggles per-component stats.
pub fn stats() -> CachesStats {
pub fn stats(detailed_stats: bool) -> CachesStats {
re_tracing::profile_function!();

Self::with(|caches| {
Expand All @@ -77,7 +80,7 @@ impl Caches {
(key.entity_path.clone(), {
let mut total_size_bytes = 0u64;
let mut total_rows = 0u64;
let mut per_component = detailed_stats().then(BTreeMap::default);
let mut per_component = detailed_stats.then(BTreeMap::default);

for latest_at_cache in
caches_per_arch.latest_at_per_archetype.read().values()
Expand All @@ -93,6 +96,8 @@ impl Caches {
total_rows = per_data_time.len() as u64 + timeless.is_some() as u64;

if let Some(per_component) = per_component.as_mut() {
re_tracing::profile_scope!("detailed");

for bucket in per_data_time.values() {
for (component_name, data) in &bucket.read().components {
let stats: &mut CachedComponentStats =
Expand Down Expand Up @@ -123,7 +128,53 @@ impl Caches {
})
.collect();

CachesStats { latest_at }
let range = caches
.0
.read()
.iter()
.map(|(key, caches_per_arch)| {
(key.entity_path.clone(), {
caches_per_arch
.range_per_archetype
.read()
.values()
.map(|range_cache| {
let RangeCache {
bucket,
total_size_bytes,
} = &*range_cache.read();

let total_rows = bucket.data_times.len() as u64;

let mut per_component = detailed_stats.then(BTreeMap::default);
if let Some(per_component) = per_component.as_mut() {
re_tracing::profile_scope!("detailed");

for (component_name, data) in &bucket.components {
let stats: &mut CachedComponentStats =
per_component.entry(*component_name).or_default();
stats.total_rows += data.dyn_num_entries() as u64;
stats.total_instances += data.dyn_num_values() as u64;
}
}

(
key.timeline,
bucket.time_range().unwrap_or(TimeRange::EMPTY),
CachedEntityStats {
total_size_bytes: *total_size_bytes,
total_rows,

per_component,
},
)
})
.collect()
})
})
.collect();

CachesStats { latest_at, range }
})
}
}
4 changes: 1 addition & 3 deletions crates/re_query_cache/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,7 @@ mod query;
mod range;

pub use self::cache::{AnyQuery, Caches};
pub use self::cache_stats::{
detailed_stats, set_detailed_stats, CachedComponentStats, CachedEntityStats, CachesStats,
};
pub use self::cache_stats::{CachedComponentStats, CachedEntityStats, CachesStats};
pub use self::flat_vec_deque::{ErasedFlatVecDeque, FlatVecDeque};
pub use self::query::{
query_archetype_pov1, query_archetype_with_history_pov1, MaybeCachedComponentData,
Expand Down
3 changes: 2 additions & 1 deletion crates/re_viewer/src/app.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1128,7 +1128,8 @@ impl eframe::App for App {
};

let store_stats = store_hub.stats();
let caches_stats = re_query_cache::Caches::stats();
let caches_stats =
re_query_cache::Caches::stats(self.memory_panel.primary_cache_detailed_stats_enabled());

// do early, before doing too many allocations
self.memory_panel
Expand Down
Loading
Loading