From a05b905f6ddc601f5f2038211203da645edb8678 Mon Sep 17 00:00:00 2001 From: MrCroxx Date: Thu, 18 Jan 2024 11:50:16 +0800 Subject: [PATCH 01/11] chore: make block data private Signed-off-by: MrCroxx --- src/storage/src/hummock/sstable/block.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/storage/src/hummock/sstable/block.rs b/src/storage/src/hummock/sstable/block.rs index d21869f8ba09d..a1baffd6b8728 100644 --- a/src/storage/src/hummock/sstable/block.rs +++ b/src/storage/src/hummock/sstable/block.rs @@ -142,7 +142,7 @@ impl RestartPoint { #[derive(Clone)] pub struct Block { /// Uncompressed entries data, with restart encoded restart points info. - pub data: Bytes, + data: Bytes, /// Uncompressed entried data length. data_len: usize, From 321b3863800c8f871f80b891dea3b1f072e3b632 Mon Sep 17 00:00:00 2001 From: MrCroxx Date: Thu, 18 Jan 2024 11:56:04 +0800 Subject: [PATCH 02/11] chore: rename Block::raw_data to Block::raw Signed-off-by: MrCroxx --- src/storage/src/hummock/file_cache/store.rs | 12 ++++++------ src/storage/src/hummock/sstable/block.rs | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/storage/src/hummock/file_cache/store.rs b/src/storage/src/hummock/file_cache/store.rs index 79342e8a074ac..3435227bd317b 100644 --- a/src/storage/src/hummock/file_cache/store.rs +++ b/src/storage/src/hummock/file_cache/store.rs @@ -455,7 +455,7 @@ impl Value for CachedBlock { fn serialized_len(&self) -> usize { 1 /* type */ + match self { - CachedBlock::Loaded { block } => block.raw_data().len(), + CachedBlock::Loaded { block } => block.raw().len(), CachedBlock::Fetched { bytes, uncompressed_capacity: _ } => 8 + bytes.len(), } } @@ -506,7 +506,7 @@ impl std::io::Read for CachedBlockCursor { if self.pos < 1 { self.pos += copy([0], &mut buf); } - self.pos += copy(&block.raw_data()[self.pos - 1..], &mut buf); + self.pos += copy(&block.raw()[self.pos - 1..], &mut buf); } CachedBlock::Fetched { bytes, @@ -541,7 +541,7 @@ impl Value for Box { type Cursor = BoxBlockCursor; fn serialized_len(&self) -> usize { - self.raw_data().len() + self.raw().len() } fn read(buf: &[u8]) -> CodingResult { @@ -571,7 +571,7 @@ impl BoxBlockCursor { impl std::io::Read for BoxBlockCursor { fn read(&mut self, buf: &mut [u8]) -> std::io::Result { let pos = self.pos; - self.pos += copy(&self.inner.raw_data()[self.pos..], buf); + self.pos += copy(&self.inner.raw()[self.pos..], buf); let n = self.pos - pos; Ok(n) } @@ -748,7 +748,7 @@ mod tests { std::io::copy(&mut cursor, &mut buf).unwrap(); let target = cursor.into_inner(); let block = Box::::read(&buf[..]).unwrap(); - assert_eq!(target.raw_data(), block.raw_data()); + assert_eq!(target.raw(), block.raw()); } { @@ -779,7 +779,7 @@ mod tests { CachedBlock::Loaded { block } => block, CachedBlock::Fetched { .. } => panic!(), }; - assert_eq!(target.raw_data(), block.raw_data()); + assert_eq!(target.raw(), block.raw()); } { diff --git a/src/storage/src/hummock/sstable/block.rs b/src/storage/src/hummock/sstable/block.rs index a1baffd6b8728..51aa521383ada 100644 --- a/src/storage/src/hummock/sstable/block.rs +++ b/src/storage/src/hummock/sstable/block.rs @@ -314,7 +314,7 @@ impl Block { &self.data[..self.data_len] } - pub fn raw_data(&self) -> &[u8] { + pub fn raw(&self) -> &[u8] { &self.data[..] 
} } From 20c5ade55cf8e95985ae109b57057692728a1665 Mon Sep 17 00:00:00 2001 From: MrCroxx Date: Thu, 18 Jan 2024 17:12:44 +0800 Subject: [PATCH 03/11] feat(stats): introduce block hitmap Signed-off-by: MrCroxx --- src/common/src/range.rs | 8 +- src/storage/src/hummock/sstable/block.rs | 182 +++++++++++++++++- .../src/hummock/sstable/block_iterator.rs | 10 +- src/storage/src/lib.rs | 2 + 4 files changed, 191 insertions(+), 11 deletions(-) diff --git a/src/common/src/range.rs b/src/common/src/range.rs index 323a9d7057283..6d2ef88518297 100644 --- a/src/common/src/range.rs +++ b/src/common/src/range.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::ops::{Add, Bound, RangeBounds, Sub}; +use std::ops::{Add, Bound, Range, RangeBounds, Sub}; mod private { @@ -75,6 +75,12 @@ pub trait RangeBoundsExt: RangeBounds { } } + fn bounds(&self, start: T, end: T) -> Range { + let start = self.start().unwrap_or(start); + let end = self.end().unwrap_or(end); + start..end + } + fn len(&self) -> Option { let start = self.start()?; let end = self.end()?; diff --git a/src/storage/src/hummock/sstable/block.rs b/src/storage/src/hummock/sstable/block.rs index 51aa521383ada..4f3c1d2ec8203 100644 --- a/src/storage/src/hummock/sstable/block.rs +++ b/src/storage/src/hummock/sstable/block.rs @@ -15,11 +15,13 @@ use std::cmp::Ordering; use std::fmt::Debug; use std::io::{Read, Write}; -use std::mem::size_of; -use std::ops::Range; +use std::mem::{size_of, MaybeUninit}; +use std::ops::{Range, RangeBounds}; +use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; use bytes::{Buf, BufMut, Bytes, BytesMut}; use risingwave_common::catalog::TableId; +use risingwave_common::range::RangeBoundsExt; use risingwave_hummock_sdk::key::FullKey; use risingwave_hummock_sdk::KeyComparator; use {lz4, zstd}; @@ -139,7 +141,96 @@ impl RestartPoint { } } -#[derive(Clone)] +#[derive(Debug)] +pub struct Hitmap { + /// For [`Block`] is rarely access in multi-thread pattern, + /// the cons of false-sharing can be ignored. + data: [AtomicU64; N], +} + +impl Default for Hitmap { + fn default() -> Self { + let mut data: [MaybeUninit; N] = MaybeUninit::uninit_array(); + for elem in &mut data[..] 
{ + elem.write(AtomicU64::new(0)); + } + let data = unsafe { MaybeUninit::array_assume_init(data) }; + Self { data } + } +} + +impl Hitmap { + pub const fn bits() -> usize { + N * 64 + } + + pub const fn bytes() -> usize { + N * 8 + } + + pub fn reset(&self) { + for elem in &self.data { + elem.store(0, AtomicOrdering::Relaxed); + } + } + + pub fn fill(&self, start_bit: usize, end_bit: usize) { + const MASK: usize = (1 << 6) - 1; + + let end_bit = std::cmp::min(end_bit, Self::bits()); + + let head_bits = start_bit & MASK; + let tail_bits_rev = end_bit & MASK; + + let head_elem = start_bit >> 6; + let tail_elem = end_bit >> 6; + + for i in head_elem..=std::cmp::min(tail_elem, N - 1) { + let elem = &self.data[i]; + let mut umask = 0u64; + if i == head_elem { + umask |= (1u64 << head_bits) - 1; + } + if i == tail_elem { + umask |= !((1u64 << tail_bits_rev) - 1); + } + elem.fetch_or(!umask, AtomicOrdering::Relaxed); + } + } + + pub fn fill_with_ratio(&self, start: f64, end: f64) { + let start_bit = (Self::bits() as f64 * start) as usize; + let end_bit = (Self::bits() as f64 * end) as usize; + self.fill(start_bit, end_bit) + } + + pub fn ones(&self) -> usize { + let mut res = 0; + for elem in &self.data { + res += elem.load(AtomicOrdering::Relaxed).count_ones() as usize; + } + res + } + + pub fn zeros(&self) -> usize { + Self::bits() - self.ones() + } + + pub fn ratio(self) -> f64 { + self.ones() as f64 / Self::bits() as f64 + } + + #[cfg(test)] + pub fn to_hex_vec(&self) -> Vec { + use itertools::Itertools; + self.data + .iter() + .map(|elem| elem.load(AtomicOrdering::Relaxed)) + .map(|v| format!("{v:016x}")) + .collect_vec() + } +} + pub struct Block { /// Uncompressed entries data, with restart encoded restart points info. data: Bytes, @@ -151,6 +242,20 @@ pub struct Block { /// Restart points. 
restart_points: Vec, + + hitmap: Hitmap<4>, +} + +impl Clone for Block { + fn clone(&self) -> Self { + Self { + data: self.data.clone(), + data_len: self.data_len, + table_id: self.table_id, + restart_points: self.restart_points.clone(), + hitmap: Hitmap::default(), + } + } } impl Debug for Block { @@ -272,6 +377,7 @@ impl Block { data_len, restart_points, table_id: TableId::new(table_id), + hitmap: Hitmap::default(), } } @@ -310,8 +416,13 @@ impl Block { self.restart_points.partition_point(pred) } - pub fn data(&self) -> &[u8] { - &self.data[..self.data_len] + pub fn slice(&self, range: impl RangeBounds) -> &[u8] { + let range = range.bounds(0, self.data.len()); + self.hitmap.fill_with_ratio( + range.start as f64 / self.data_len as f64, + range.end as f64 / self.data_len as f64, + ); + &self.data[range] } pub fn raw(&self) -> &[u8] { @@ -929,4 +1040,65 @@ mod tests { builder.clear(); } } + + #[test] + fn test_hitmap() { + // hex: high <== low + let h = Hitmap::<4>::default(); + assert_eq!( + h.to_hex_vec(), + vec![ + "0000000000000000", + "0000000000000000", + "0000000000000000", + "0000000000000000", + ] + ); + assert_eq!(h.ones(), 0); + h.fill(16, 24); + assert_eq!( + h.to_hex_vec(), + vec![ + "0000000000ff0000", + "0000000000000000", + "0000000000000000", + "0000000000000000", + ] + ); + assert_eq!(h.ones(), 8); + h.fill(32, 64); + assert_eq!( + h.to_hex_vec(), + vec![ + "ffffffff00ff0000", + "0000000000000000", + "0000000000000000", + "0000000000000000", + ] + ); + assert_eq!(h.ones(), 40); + h.fill(96, 224); + assert_eq!( + h.to_hex_vec(), + vec![ + "ffffffff00ff0000", + "ffffffff00000000", + "ffffffffffffffff", + "00000000ffffffff", + ] + ); + assert_eq!(h.ones(), 168); + h.fill(0, 256); + assert_eq!( + h.to_hex_vec(), + vec![ + "ffffffffffffffff", + "ffffffffffffffff", + "ffffffffffffffff", + "ffffffffffffffff", + ] + ); + assert_eq!(h.ones(), 256); + h.reset(); + } } diff --git a/src/storage/src/hummock/sstable/block_iterator.rs b/src/storage/src/hummock/sstable/block_iterator.rs index b1a46b7595d2a..9cf58e552eeee 100644 --- a/src/storage/src/hummock/sstable/block_iterator.rs +++ b/src/storage/src/hummock/sstable/block_iterator.rs @@ -87,7 +87,7 @@ impl BlockIterator { pub fn value(&self) -> &[u8] { assert!(self.is_valid()); - &self.block.data()[self.value_range.clone()] + self.block.slice(self.value_range.clone()) } pub fn is_valid(&self) -> bool { @@ -166,7 +166,7 @@ impl BlockIterator { self.decode_prefix_at(offset, self.last_key_len_type, self.last_value_len_type); self.key.truncate(prefix.overlap_len()); self.key - .extend_from_slice(&self.block.data()[prefix.diff_key_range()]); + .extend_from_slice(self.block.slice(prefix.diff_key_range())); self.value_range = prefix.value_range(); self.offset = offset; @@ -233,7 +233,7 @@ impl BlockIterator { value_len_type: LenType, ) -> KeyPrefix { KeyPrefix::decode( - &mut &self.block.data()[offset..], + &mut self.block.slice(offset..), offset, key_len_type, value_len_type, @@ -252,7 +252,7 @@ impl BlockIterator { }| { let prefix = self.decode_prefix_at(probe as usize, key_len_type, value_len_type); - let probe_key = &self.block.data()[prefix.diff_key_range()]; + let probe_key = self.block.slice(prefix.diff_key_range()); let full_probe_key = FullKey::from_slice_without_table_id(self.block.table_id(), probe_key); match full_probe_key.cmp(&key) { @@ -280,7 +280,7 @@ impl BlockIterator { restart_point.value_len_type, ); - self.key = BytesMut::from(&self.block.data()[prefix.diff_key_range()]); + self.key = 
BytesMut::from(self.block.slice(prefix.diff_key_range())); self.value_range = prefix.value_range(); self.offset = offset; self.entry_len = prefix.entry_len(); diff --git a/src/storage/src/lib.rs b/src/storage/src/lib.rs index 8bf78347d1803..8e6efa63e3545 100644 --- a/src/storage/src/lib.rs +++ b/src/storage/src/lib.rs @@ -40,6 +40,8 @@ #![feature(associated_type_bounds)] #![feature(exclusive_range_pattern)] #![feature(impl_trait_in_assoc_type)] +#![feature(maybe_uninit_uninit_array)] +#![feature(maybe_uninit_array_assume_init)] pub mod hummock; pub mod memory; From 36c522537dee9b2fbb456edc3e4af4255484e9ed Mon Sep 17 00:00:00 2001 From: MrCroxx Date: Thu, 18 Jan 2024 17:50:44 +0800 Subject: [PATCH 04/11] feat(stats): introduce block efficiency metrics Signed-off-by: MrCroxx --- src/storage/src/hummock/sstable/block.rs | 6 +++++- src/storage/src/hummock/sstable_store.rs | 10 +++++++++- src/storage/src/monitor/hummock_metrics.rs | 13 +++++++++++++ 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/src/storage/src/hummock/sstable/block.rs b/src/storage/src/hummock/sstable/block.rs index 4f3c1d2ec8203..43be006419808 100644 --- a/src/storage/src/hummock/sstable/block.rs +++ b/src/storage/src/hummock/sstable/block.rs @@ -216,7 +216,7 @@ impl Hitmap { Self::bits() - self.ones() } - pub fn ratio(self) -> f64 { + pub fn ratio(&self) -> f64 { self.ones() as f64 / Self::bits() as f64 } @@ -428,6 +428,10 @@ impl Block { pub fn raw(&self) -> &[u8] { &self.data[..] } + + pub fn efficiency(&self) -> f64 { + self.hitmap.ratio() + } } /// [`KeyPrefix`] contains info for prefix compression. diff --git a/src/storage/src/hummock/sstable_store.rs b/src/storage/src/hummock/sstable_store.rs index 9918274b8ee91..dd72bc169b18b 100644 --- a/src/storage/src/hummock/sstable_store.rs +++ b/src/storage/src/hummock/sstable_store.rs @@ -50,7 +50,9 @@ use crate::hummock::multi_builder::UploadJoinHandle; use crate::hummock::{ BlockHolder, CacheableEntry, HummockError, HummockResult, LruCache, MemoryLimiter, }; -use crate::monitor::{MemoryCollector, StoreLocalStatistic}; +use crate::monitor::{ + HummockMetrics, MemoryCollector, StoreLocalStatistic, GLOBAL_HUMMOCK_METRICS, +}; const MAX_META_CACHE_SHARD_BITS: usize = 2; const MAX_CACHE_SHARD_BITS: usize = 6; // It means that there will be 64 shards lru-cache to avoid lock conflict. 
@@ -101,6 +103,7 @@ impl From for TracedCachePolicy { struct BlockCacheEventListener { data_file_cache: FileCache, + metrics: HummockMetrics, } impl LruCacheEventListener for BlockCacheEventListener { @@ -112,6 +115,9 @@ impl LruCacheEventListener for BlockCacheEventListener { sst_id: key.0, block_idx: key.1, }; + self.metrics + .block_efficiency_histogram + .observe(value.efficiency()); // temporarily avoid spawn task while task drop with madsim // FYI: https://github.com/madsim-rs/madsim/issues/182 #[cfg(not(madsim))] @@ -194,8 +200,10 @@ impl SstableStore { while (meta_cache_capacity >> shard_bits) < MIN_BUFFER_SIZE_PER_SHARD && shard_bits > 0 { shard_bits -= 1; } + let metrics = GLOBAL_HUMMOCK_METRICS.clone(); let block_cache_listener = Arc::new(BlockCacheEventListener { data_file_cache: data_file_cache.clone(), + metrics, }); let meta_cache_listener = Arc::new(MetaCacheEventListener(meta_file_cache.clone())); Self { diff --git a/src/storage/src/monitor/hummock_metrics.rs b/src/storage/src/monitor/hummock_metrics.rs index 4585fa8010257..9d03ce8bb8cea 100644 --- a/src/storage/src/monitor/hummock_metrics.rs +++ b/src/storage/src/monitor/hummock_metrics.rs @@ -28,6 +28,9 @@ pub struct HummockMetrics { pub report_compaction_task_counts: GenericCounter, pub get_new_sst_ids_latency: Histogram, pub report_compaction_task_latency: Histogram, + + // block statistics + pub block_efficiency_histogram: Histogram, } pub static GLOBAL_HUMMOCK_METRICS: LazyLock = @@ -72,11 +75,21 @@ impl HummockMetrics { register_histogram_with_registry!(report_compaction_task_latency_opts, registry) .unwrap(); + let block_efficiency_histogram = register_histogram_with_registry!( + "block_efficiency_histogram", + "block efficiency histogram", + exponential_buckets(0.001, 2.0, 11).unwrap(), + registry, + ) + .unwrap(); + Self { get_new_sst_ids_counts, report_compaction_task_counts, get_new_sst_ids_latency, report_compaction_task_latency, + + block_efficiency_histogram, } } From a1563d1da9a7017fc0e5905c1b2ad15ba7d856b4 Mon Sep 17 00:00:00 2001 From: MrCroxx Date: Thu, 18 Jan 2024 23:18:15 +0800 Subject: [PATCH 05/11] feat(stats): add table id info to block efficiency metrics Signed-off-by: MrCroxx --- src/ctl/src/common/hummock_service.rs | 35 +++++----- src/jni_core/src/hummock_iterator.rs | 33 +++++----- src/storage/benches/bench_compactor.rs | 28 ++++---- src/storage/benches/bench_multi_builder.rs | 31 ++++----- .../hummock_test/src/bin/replay/main.rs | 37 +++++------ .../src/hummock/iterator/test_utils.rs | 26 ++++---- src/storage/src/hummock/sstable_store.rs | 65 ++++++++++--------- src/storage/src/monitor/hummock_metrics.rs | 13 ---- .../monitor/hummock_state_store_metrics.rs | 18 +++++ src/storage/src/store_impl.rs | 21 +++--- .../src/delete_range_runner.rs | 26 ++++---- 11 files changed, 178 insertions(+), 155 deletions(-) diff --git a/src/ctl/src/common/hummock_service.rs b/src/ctl/src/common/hummock_service.rs index 7eb8af52b51ae..b88dd265d382b 100644 --- a/src/ctl/src/common/hummock_service.rs +++ b/src/ctl/src/common/hummock_service.rs @@ -17,14 +17,14 @@ use std::sync::Arc; use std::time::Duration; use anyhow::{anyhow, bail, Result}; -use risingwave_common::config::ObjectStoreConfig; +use risingwave_common::config::{MetricLevel, ObjectStoreConfig}; use risingwave_object_store::object::build_remote_object_store; use risingwave_rpc_client::MetaClient; use risingwave_storage::hummock::hummock_meta_client::MonitoredHummockMetaClient; -use risingwave_storage::hummock::{FileCache, HummockStorage, SstableStore}; 
+use risingwave_storage::hummock::{FileCache, HummockStorage, SstableStore, SstableStoreConfig}; use risingwave_storage::monitor::{ - CompactorMetrics, HummockMetrics, HummockStateStoreMetrics, MonitoredStateStore, - MonitoredStorageMetrics, ObjectStoreMetrics, + global_hummock_state_store_metrics, CompactorMetrics, HummockMetrics, HummockStateStoreMetrics, + MonitoredStateStore, MonitoredStorageMetrics, ObjectStoreMetrics, }; use risingwave_storage::opts::StorageOpts; use risingwave_storage::{StateStore, StateStoreImpl}; @@ -162,17 +162,20 @@ impl HummockServiceOpts { let opts = self.get_storage_opts(); - Ok(Arc::new(SstableStore::new( - Arc::new(object_store), - opts.data_directory, - opts.block_cache_capacity_mb * (1 << 20), - opts.meta_cache_capacity_mb * (1 << 20), - 0, - opts.block_cache_capacity_mb * (1 << 20), - opts.max_prefetch_block_number, - FileCache::none(), - FileCache::none(), - None, - ))) + Ok(Arc::new(SstableStore::new(SstableStoreConfig { + store: Arc::new(object_store), + path: opts.data_directory, + block_cache_capacity: opts.block_cache_capacity_mb * (1 << 20), + meta_cache_capacity: opts.meta_cache_capacity_mb * (1 << 20), + high_priority_ratio: 0, + prefetch_buffer_capacity: opts.block_cache_capacity_mb * (1 << 20), + max_prefetch_block_number: opts.max_prefetch_block_number, + data_file_cache: FileCache::none(), + meta_file_cache: FileCache::none(), + recent_filter: None, + state_store_metrics: Arc::new(global_hummock_state_store_metrics( + MetricLevel::Disabled, + )), + }))) } } diff --git a/src/jni_core/src/hummock_iterator.rs b/src/jni_core/src/hummock_iterator.rs index c66669d559154..ee2084b6ecf81 100644 --- a/src/jni_core/src/hummock_iterator.rs +++ b/src/jni_core/src/hummock_iterator.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use bytes::Bytes; use futures::TryStreamExt; use risingwave_common::catalog::ColumnDesc; -use risingwave_common::config::ObjectStoreConfig; +use risingwave_common::config::{MetricLevel, ObjectStoreConfig}; use risingwave_common::hash::VirtualNode; use risingwave_common::row::OwnedRow; use risingwave_common::util::value_encoding::column_aware_row_encoding::ColumnAwareSerde; @@ -33,9 +33,9 @@ use risingwave_storage::hummock::local_version::pinned_version::PinnedVersion; use risingwave_storage::hummock::store::version::HummockVersionReader; use risingwave_storage::hummock::store::HummockStorageIterator; use risingwave_storage::hummock::{ - get_committed_read_version_tuple, CachePolicy, FileCache, SstableStore, + get_committed_read_version_tuple, CachePolicy, FileCache, SstableStore, SstableStoreConfig, }; -use risingwave_storage::monitor::HummockStateStoreMetrics; +use risingwave_storage::monitor::{global_hummock_state_store_metrics, HummockStateStoreMetrics}; use risingwave_storage::row_serde::value_serde::ValueRowSerdeNew; use risingwave_storage::store::{ReadOptions, StateStoreReadIterStream, StreamTypeOfIter}; use rw_futures_util::select_all; @@ -66,18 +66,21 @@ impl HummockJavaBindingIterator { ) .await, ); - let sstable_store = Arc::new(SstableStore::new( - object_store, - read_plan.data_dir, - 1 << 10, - 1 << 10, - 0, - 1 << 10, - 16, - FileCache::none(), - FileCache::none(), - None, - )); + let sstable_store = Arc::new(SstableStore::new(SstableStoreConfig { + store: object_store, + path: read_plan.data_dir, + block_cache_capacity: 1 << 10, + meta_cache_capacity: 1 << 10, + high_priority_ratio: 0, + prefetch_buffer_capacity: 1 << 10, + max_prefetch_block_number: 16, + data_file_cache: FileCache::none(), + meta_file_cache: 
FileCache::none(), + recent_filter: None, + state_store_metrics: Arc::new(global_hummock_state_store_metrics( + MetricLevel::Disabled, + )), + })); let reader = HummockVersionReader::new( sstable_store, Arc::new(HummockStateStoreMetrics::unused()), diff --git a/src/storage/benches/bench_compactor.rs b/src/storage/benches/bench_compactor.rs index 29d2696d8323b..5f9ee1c8e8524 100644 --- a/src/storage/benches/bench_compactor.rs +++ b/src/storage/benches/bench_compactor.rs @@ -19,6 +19,7 @@ use criterion::async_executor::FuturesExecutor; use criterion::{criterion_group, criterion_main, Criterion}; use risingwave_common::cache::CachePriority; use risingwave_common::catalog::TableId; +use risingwave_common::config::MetricLevel; use risingwave_common::hash::VirtualNode; use risingwave_hummock_sdk::key::FullKey; use risingwave_hummock_sdk::key_range::KeyRange; @@ -41,26 +42,29 @@ use risingwave_storage::hummock::sstable_store::SstableStoreRef; use risingwave_storage::hummock::value::HummockValue; use risingwave_storage::hummock::{ CachePolicy, CompactionDeleteRangeIterator, FileCache, SstableBuilder, SstableBuilderOptions, - SstableIterator, SstableStore, SstableWriterOptions, Xor16FilterBuilder, + SstableIterator, SstableStore, SstableStoreConfig, SstableWriterOptions, Xor16FilterBuilder, +}; +use risingwave_storage::monitor::{ + global_hummock_state_store_metrics, CompactorMetrics, StoreLocalStatistic, }; -use risingwave_storage::monitor::{CompactorMetrics, StoreLocalStatistic}; pub fn mock_sstable_store() -> SstableStoreRef { let store = InMemObjectStore::new().monitored(Arc::new(ObjectStoreMetrics::unused())); let store = Arc::new(ObjectStoreImpl::InMem(store)); let path = "test".to_string(); - Arc::new(SstableStore::new( + Arc::new(SstableStore::new(SstableStoreConfig { store, path, - 64 << 20, - 128 << 20, - 0, - 64 << 20, - 16, - FileCache::none(), - FileCache::none(), - None, - )) + block_cache_capacity: 64 << 20, + meta_cache_capacity: 128 << 20, + high_priority_ratio: 0, + prefetch_buffer_capacity: 64 << 20, + max_prefetch_block_number: 16, + data_file_cache: FileCache::none(), + meta_file_cache: FileCache::none(), + recent_filter: None, + state_store_metrics: Arc::new(global_hummock_state_store_metrics(MetricLevel::Disabled)), + })) } pub fn default_writer_opts() -> SstableWriterOptions { diff --git a/src/storage/benches/bench_multi_builder.rs b/src/storage/benches/bench_multi_builder.rs index a55cb24fff801..7d1abf67ec857 100644 --- a/src/storage/benches/bench_multi_builder.rs +++ b/src/storage/benches/bench_multi_builder.rs @@ -23,17 +23,17 @@ use criterion::{criterion_group, criterion_main, Criterion}; use futures::future::try_join_all; use itertools::Itertools; use risingwave_common::catalog::TableId; -use risingwave_common::config::ObjectStoreConfig; +use risingwave_common::config::{MetricLevel, ObjectStoreConfig}; use risingwave_hummock_sdk::key::{FullKey, UserKey}; use risingwave_object_store::object::{ObjectStore, ObjectStoreImpl, S3ObjectStore}; use risingwave_storage::hummock::multi_builder::{CapacitySplitTableBuilder, TableBuilderFactory}; use risingwave_storage::hummock::value::HummockValue; use risingwave_storage::hummock::{ BatchSstableWriterFactory, CachePolicy, FileCache, HummockResult, MemoryLimiter, - SstableBuilder, SstableBuilderOptions, SstableStore, SstableWriterFactory, + SstableBuilder, SstableBuilderOptions, SstableStore, SstableStoreConfig, SstableWriterFactory, SstableWriterOptions, StreamingSstableWriterFactory, Xor16FilterBuilder, }; -use 
risingwave_storage::monitor::ObjectStoreMetrics; +use risingwave_storage::monitor::{global_hummock_state_store_metrics, ObjectStoreMetrics}; const RANGE: Range = 0..1500000; const VALUE: &[u8] = &[0; 400]; @@ -141,18 +141,19 @@ fn bench_builder( .monitored(metrics) }); let object_store = Arc::new(ObjectStoreImpl::S3(object_store)); - let sstable_store = Arc::new(SstableStore::new( - object_store, - "test".to_string(), - 64 << 20, - 128 << 20, - 0, - 64 << 20, - 16, - FileCache::none(), - FileCache::none(), - None, - )); + let sstable_store = Arc::new(SstableStore::new(SstableStoreConfig { + store: object_store, + path: "test".to_string(), + block_cache_capacity: 64 << 20, + meta_cache_capacity: 128 << 20, + high_priority_ratio: 0, + prefetch_buffer_capacity: 64 << 20, + max_prefetch_block_number: 16, + data_file_cache: FileCache::none(), + meta_file_cache: FileCache::none(), + recent_filter: None, + state_store_metrics: Arc::new(global_hummock_state_store_metrics(MetricLevel::Disabled)), + })); let mut group = c.benchmark_group("bench_multi_builder"); group diff --git a/src/storage/hummock_test/src/bin/replay/main.rs b/src/storage/hummock_test/src/bin/replay/main.rs index e8637de487734..0f2d12a89dd4f 100644 --- a/src/storage/hummock_test/src/bin/replay/main.rs +++ b/src/storage/hummock_test/src/bin/replay/main.rs @@ -40,7 +40,7 @@ use risingwave_object_store::object::build_remote_object_store; use risingwave_storage::filter_key_extractor::{ FakeRemoteTableAccessor, RpcFilterKeyExtractorManager, }; -use risingwave_storage::hummock::{FileCache, HummockStorage, SstableStore}; +use risingwave_storage::hummock::{FileCache, HummockStorage, SstableStore, SstableStoreConfig}; use risingwave_storage::monitor::{CompactorMetrics, HummockStateStoreMetrics, ObjectStoreMetrics}; use risingwave_storage::opts::StorageOpts; use serde::{Deserialize, Serialize}; @@ -94,33 +94,32 @@ async fn create_replay_hummock(r: Record, args: &Args) -> Result Result` with human-readable format. 
#[macro_export] @@ -59,18 +60,19 @@ pub fn mock_sstable_store() -> SstableStoreRef { pub fn mock_sstable_store_with_object_store(store: ObjectStoreRef) -> SstableStoreRef { let path = "test".to_string(); - Arc::new(SstableStore::new( + Arc::new(SstableStore::new(SstableStoreConfig { store, path, - 64 << 20, - 64 << 20, - 0, - 64 << 20, - 16, - FileCache::none(), - FileCache::none(), - None, - )) + block_cache_capacity: 64 << 20, + meta_cache_capacity: 64 << 20, + high_priority_ratio: 0, + prefetch_buffer_capacity: 64 << 20, + max_prefetch_block_number: 16, + data_file_cache: FileCache::none(), + meta_file_cache: FileCache::none(), + recent_filter: None, + state_store_metrics: Arc::new(global_hummock_state_store_metrics(MetricLevel::Disabled)), + })) } pub fn iterator_test_table_key_of(idx: usize) -> Vec { diff --git a/src/storage/src/hummock/sstable_store.rs b/src/storage/src/hummock/sstable_store.rs index dd72bc169b18b..5d4509355ed95 100644 --- a/src/storage/src/hummock/sstable_store.rs +++ b/src/storage/src/hummock/sstable_store.rs @@ -50,9 +50,7 @@ use crate::hummock::multi_builder::UploadJoinHandle; use crate::hummock::{ BlockHolder, CacheableEntry, HummockError, HummockResult, LruCache, MemoryLimiter, }; -use crate::monitor::{ - HummockMetrics, MemoryCollector, StoreLocalStatistic, GLOBAL_HUMMOCK_METRICS, -}; +use crate::monitor::{HummockStateStoreMetrics, MemoryCollector, StoreLocalStatistic}; const MAX_META_CACHE_SHARD_BITS: usize = 2; const MAX_CACHE_SHARD_BITS: usize = 6; // It means that there will be 64 shards lru-cache to avoid lock conflict. @@ -103,7 +101,7 @@ impl From for TracedCachePolicy { struct BlockCacheEventListener { data_file_cache: FileCache, - metrics: HummockMetrics, + metrics: Arc, } impl LruCacheEventListener for BlockCacheEventListener { @@ -117,6 +115,7 @@ impl LruCacheEventListener for BlockCacheEventListener { }; self.metrics .block_efficiency_histogram + .with_label_values(&[&value.table_id().to_string()]) .observe(value.efficiency()); // temporarily avoid spawn task while task drop with madsim // FYI: https://github.com/madsim-rs/madsim/issues/182 @@ -164,6 +163,20 @@ where } } +pub struct SstableStoreConfig { + pub store: ObjectStoreRef, + pub path: String, + pub block_cache_capacity: usize, + pub meta_cache_capacity: usize, + pub high_priority_ratio: usize, + pub prefetch_buffer_capacity: usize, + pub max_prefetch_block_number: usize, + pub data_file_cache: FileCache, + pub meta_file_cache: FileCache, + pub recent_filter: Option>>, + pub state_store_metrics: Arc, +} + pub struct SstableStore { path: String, store: ObjectStoreRef, @@ -182,53 +195,43 @@ pub struct SstableStore { } impl SstableStore { - pub fn new( - store: ObjectStoreRef, - path: String, - block_cache_capacity: usize, - meta_cache_capacity: usize, - high_priority_ratio: usize, - prefetch_buffer_capacity: usize, - max_prefetch_block_number: usize, - data_file_cache: FileCache, - meta_file_cache: FileCache, - recent_filter: Option>>, - ) -> Self { + pub fn new(config: SstableStoreConfig) -> Self { // TODO: We should validate path early. Otherwise object store won't report invalid path // error until first write attempt. 
let mut shard_bits = MAX_META_CACHE_SHARD_BITS; - while (meta_cache_capacity >> shard_bits) < MIN_BUFFER_SIZE_PER_SHARD && shard_bits > 0 { + while (config.meta_cache_capacity >> shard_bits) < MIN_BUFFER_SIZE_PER_SHARD + && shard_bits > 0 + { shard_bits -= 1; } - let metrics = GLOBAL_HUMMOCK_METRICS.clone(); let block_cache_listener = Arc::new(BlockCacheEventListener { - data_file_cache: data_file_cache.clone(), - metrics, + data_file_cache: config.data_file_cache.clone(), + metrics: config.state_store_metrics, }); - let meta_cache_listener = Arc::new(MetaCacheEventListener(meta_file_cache.clone())); + let meta_cache_listener = Arc::new(MetaCacheEventListener(config.meta_file_cache.clone())); Self { - path, - store, + path: config.path, + store: config.store, block_cache: BlockCache::with_event_listener( - block_cache_capacity, + config.block_cache_capacity, MAX_CACHE_SHARD_BITS, - high_priority_ratio, + config.high_priority_ratio, block_cache_listener, ), meta_cache: Arc::new(LruCache::with_event_listener( shard_bits, - meta_cache_capacity, + config.meta_cache_capacity, 0, meta_cache_listener, )), - data_file_cache, - meta_file_cache, + data_file_cache: config.data_file_cache, + meta_file_cache: config.meta_file_cache, - recent_filter, + recent_filter: config.recent_filter, prefetch_buffer_usage: Arc::new(AtomicUsize::new(0)), - prefetch_buffer_capacity, - max_prefetch_block_number, + prefetch_buffer_capacity: config.prefetch_buffer_capacity, + max_prefetch_block_number: config.max_prefetch_block_number, } } diff --git a/src/storage/src/monitor/hummock_metrics.rs b/src/storage/src/monitor/hummock_metrics.rs index 9d03ce8bb8cea..4585fa8010257 100644 --- a/src/storage/src/monitor/hummock_metrics.rs +++ b/src/storage/src/monitor/hummock_metrics.rs @@ -28,9 +28,6 @@ pub struct HummockMetrics { pub report_compaction_task_counts: GenericCounter, pub get_new_sst_ids_latency: Histogram, pub report_compaction_task_latency: Histogram, - - // block statistics - pub block_efficiency_histogram: Histogram, } pub static GLOBAL_HUMMOCK_METRICS: LazyLock = @@ -75,21 +72,11 @@ impl HummockMetrics { register_histogram_with_registry!(report_compaction_task_latency_opts, registry) .unwrap(); - let block_efficiency_histogram = register_histogram_with_registry!( - "block_efficiency_histogram", - "block efficiency histogram", - exponential_buckets(0.001, 2.0, 11).unwrap(), - registry, - ) - .unwrap(); - Self { get_new_sst_ids_counts, report_compaction_task_counts, get_new_sst_ids_latency, report_compaction_task_latency, - - block_efficiency_histogram, } } diff --git a/src/storage/src/monitor/hummock_state_store_metrics.rs b/src/storage/src/monitor/hummock_state_store_metrics.rs index 45b72bd1eee86..4764c48fff59b 100644 --- a/src/storage/src/monitor/hummock_state_store_metrics.rs +++ b/src/storage/src/monitor/hummock_state_store_metrics.rs @@ -76,6 +76,9 @@ pub struct HummockStateStoreMetrics { // memory pub mem_table_spill_counts: RelabeledCounterVec, + + // block statistics + pub block_efficiency_histogram: RelabeledHistogramVec, } pub static GLOBAL_HUMMOCK_STATE_STORE_METRICS: OnceLock = OnceLock::new(); @@ -371,6 +374,19 @@ impl HummockStateStoreMetrics { metric_level, ); + let opts = histogram_opts!( + "block_efficiency_histogram", + "Access ratio of in-memory block.", + exponential_buckets(0.001, 2.0, 11).unwrap(), + ); + let block_efficiency_histogram = + register_histogram_vec_with_registry!(opts, &["table_id"], registry).unwrap(); + let block_efficiency_histogram = 
RelabeledHistogramVec::with_metric_level( + MetricLevel::Debug, + block_efficiency_histogram, + metric_level, + ); + Self { bloom_filter_true_negative_counts, bloom_filter_check_counts, @@ -396,6 +412,8 @@ impl HummockStateStoreMetrics { spill_task_size_from_unsealed: spill_task_size.with_label_values(&["unsealed"]), uploader_uploading_task_size, mem_table_spill_counts, + + block_efficiency_histogram, } } diff --git a/src/storage/src/store_impl.rs b/src/storage/src/store_impl.rs index 86adbe13b01bd..e4ee411d3ad21 100644 --- a/src/storage/src/store_impl.rs +++ b/src/storage/src/store_impl.rs @@ -28,7 +28,7 @@ use crate::hummock::file_cache::preclude::*; use crate::hummock::hummock_meta_client::MonitoredHummockMetaClient; use crate::hummock::{ set_foyer_metrics_registry, FileCache, FileCacheConfig, HummockError, HummockStorage, - RecentFilter, SstableStore, + RecentFilter, SstableStore, SstableStoreConfig, }; use crate::memory::sled::SledStateStore; use crate::memory::MemoryStateStore; @@ -610,18 +610,19 @@ impl StateStoreImpl { ) .await; - let sstable_store = Arc::new(SstableStore::new( - Arc::new(object_store), - opts.data_directory.to_string(), - opts.block_cache_capacity_mb * (1 << 20), - opts.meta_cache_capacity_mb * (1 << 20), - opts.high_priority_ratio, - opts.prefetch_buffer_capacity_mb * (1 << 20), - opts.max_prefetch_block_number, + let sstable_store = Arc::new(SstableStore::new(SstableStoreConfig { + store: Arc::new(object_store), + path: opts.data_directory.to_string(), + block_cache_capacity: opts.block_cache_capacity_mb * (1 << 20), + meta_cache_capacity: opts.meta_cache_capacity_mb * (1 << 20), + high_priority_ratio: opts.high_priority_ratio, + prefetch_buffer_capacity: opts.prefetch_buffer_capacity_mb * (1 << 20), + max_prefetch_block_number: opts.max_prefetch_block_number, data_file_cache, meta_file_cache, recent_filter, - )); + state_store_metrics: state_store_metrics.clone(), + })); let notification_client = RpcNotificationClient::new(hummock_meta_client.get_inner().clone()); let key_filter_manager = Arc::new(RpcFilterKeyExtractorManager::new(Box::new( diff --git a/src/tests/compaction_test/src/delete_range_runner.rs b/src/tests/compaction_test/src/delete_range_runner.rs index ab65973dcf00e..26638dde1d8d6 100644 --- a/src/tests/compaction_test/src/delete_range_runner.rs +++ b/src/tests/compaction_test/src/delete_range_runner.rs @@ -55,6 +55,7 @@ use risingwave_storage::hummock::sstable_store::SstableStoreRef; use risingwave_storage::hummock::utils::cmp_delete_range_left_bounds; use risingwave_storage::hummock::{ CachePolicy, FileCache, HummockStorage, MemoryLimiter, SstableObjectIdManager, SstableStore, + SstableStoreConfig, }; use risingwave_storage::monitor::{CompactorMetrics, HummockStateStoreMetrics}; use risingwave_storage::opts::StorageOpts; @@ -212,18 +213,19 @@ async fn compaction_test( ObjectStoreConfig::default(), ) .await; - let sstable_store = Arc::new(SstableStore::new( - Arc::new(remote_object_store), - system_params.data_directory().to_string(), - storage_memory_config.block_cache_capacity_mb * (1 << 20), - storage_memory_config.meta_cache_capacity_mb * (1 << 20), - 0, - storage_memory_config.prefetch_buffer_capacity_mb * (1 << 20), - storage_opts.max_prefetch_block_number, - FileCache::none(), - FileCache::none(), - None, - )); + let sstable_store = Arc::new(SstableStore::new(SstableStoreConfig { + store: Arc::new(remote_object_store), + path: system_params.data_directory().to_string(), + block_cache_capacity: storage_memory_config.block_cache_capacity_mb * 
(1 << 20), + meta_cache_capacity: storage_memory_config.meta_cache_capacity_mb * (1 << 20), + high_priority_ratio: 0, + prefetch_buffer_capacity: storage_memory_config.prefetch_buffer_capacity_mb * (1 << 20), + max_prefetch_block_number: storage_opts.max_prefetch_block_number, + data_file_cache: FileCache::none(), + meta_file_cache: FileCache::none(), + recent_filter: None, + state_store_metrics: state_store_metrics.clone(), + })); let store = HummockStorage::new( storage_opts.clone(), From 69f3b9aedb26078b64a0118db10179cd3cad387e Mon Sep 17 00:00:00 2001 From: MrCroxx Date: Fri, 19 Jan 2024 14:19:37 +0800 Subject: [PATCH 06/11] chore(stats): advance block efficiency metrics level to INFO Signed-off-by: MrCroxx --- src/storage/src/monitor/hummock_state_store_metrics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/storage/src/monitor/hummock_state_store_metrics.rs b/src/storage/src/monitor/hummock_state_store_metrics.rs index 4764c48fff59b..bc8ffde58cbf2 100644 --- a/src/storage/src/monitor/hummock_state_store_metrics.rs +++ b/src/storage/src/monitor/hummock_state_store_metrics.rs @@ -382,7 +382,7 @@ impl HummockStateStoreMetrics { let block_efficiency_histogram = register_histogram_vec_with_registry!(opts, &["table_id"], registry).unwrap(); let block_efficiency_histogram = RelabeledHistogramVec::with_metric_level( - MetricLevel::Debug, + MetricLevel::Info, block_efficiency_histogram, metric_level, ); From fe9f44d3b0096674da1487dabff53282ee57dee6 Mon Sep 17 00:00:00 2001 From: MrCroxx Date: Tue, 23 Jan 2024 12:43:22 +0800 Subject: [PATCH 07/11] chore: resolve comments Signed-off-by: MrCroxx --- src/common/src/range.rs | 8 +------- src/storage/src/hummock/sstable/block.rs | 26 +++++++++++------------- 2 files changed, 13 insertions(+), 21 deletions(-) diff --git a/src/common/src/range.rs b/src/common/src/range.rs index 6d2ef88518297..323a9d7057283 100644 --- a/src/common/src/range.rs +++ b/src/common/src/range.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::ops::{Add, Bound, Range, RangeBounds, Sub}; +use std::ops::{Add, Bound, RangeBounds, Sub}; mod private { @@ -75,12 +75,6 @@ pub trait RangeBoundsExt: RangeBounds { } } - fn bounds(&self, start: T, end: T) -> Range { - let start = self.start().unwrap_or(start); - let end = self.end().unwrap_or(end); - start..end - } - fn len(&self) -> Option { let start = self.start()?; let end = self.end()?; diff --git a/src/storage/src/hummock/sstable/block.rs b/src/storage/src/hummock/sstable/block.rs index 43be006419808..da600852f0a35 100644 --- a/src/storage/src/hummock/sstable/block.rs +++ b/src/storage/src/hummock/sstable/block.rs @@ -15,7 +15,7 @@ use std::cmp::Ordering; use std::fmt::Debug; use std::io::{Read, Write}; -use std::mem::{size_of, MaybeUninit}; +use std::mem::size_of; use std::ops::{Range, RangeBounds}; use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; @@ -145,23 +145,20 @@ impl RestartPoint { pub struct Hitmap { /// For [`Block`] is rarely access in multi-thread pattern, /// the cons of false-sharing can be ignored. - data: [AtomicU64; N], + data: Box<[AtomicU64; N]>, } impl Default for Hitmap { fn default() -> Self { - let mut data: [MaybeUninit; N] = MaybeUninit::uninit_array(); - for elem in &mut data[..] 
{
-            elem.write(AtomicU64::new(0));
-        }
-        let data = unsafe { MaybeUninit::array_assume_init(data) };
+        let data = [(); N].map(|_| AtomicU64::default());
+        let data = Box::new(data);
         Self { data }
     }
 }
 
 impl<const N: usize> Hitmap<N> {
     pub const fn bits() -> usize {
-        N * 64
+        N * u64::BITS as usize
     }
 
     pub const fn bytes() -> usize {
@@ -169,7 +166,7 @@ impl<const N: usize> Hitmap<N> {
     }
 
     pub fn reset(&self) {
-        for elem in &self.data {
+        for elem in &*self.data {
             elem.store(0, AtomicOrdering::Relaxed);
         }
     }
@@ -206,7 +203,7 @@ impl<const N: usize> Hitmap<N> {
     pub fn ones(&self) -> usize {
         let mut res = 0;
-        for elem in &self.data {
+        for elem in &*self.data {
             res += elem.load(AtomicOrdering::Relaxed).count_ones() as usize;
         }
         res
@@ -417,12 +414,13 @@ impl Block {
     }
 
     pub fn slice(&self, range: impl RangeBounds<usize>) -> &[u8] {
-        let range = range.bounds(0, self.data.len());
+        let start = range.start().unwrap_or_default();
+        let end = range.end().unwrap_or(self.data_len);
         self.hitmap.fill_with_ratio(
-            range.start as f64 / self.data_len as f64,
-            range.end as f64 / self.data_len as f64,
+            start as f64 / self.data_len as f64,
+            end as f64 / self.data_len as f64,
         );
-        &self.data[range]
+        &self.data[start..end]
     }
 
     pub fn raw(&self) -> &[u8] {

From 44fc5ff0aeb6cbd4377b33d80df380f9387e541a Mon Sep 17 00:00:00 2001
From: MrCroxx
Date: Tue, 23 Jan 2024 15:38:42 +0800
Subject: [PATCH 08/11] refactor: introduce local hitmap

- the local hitmap is updated during iteration
- the global hitmap is updated when the local hitmap is dropped
- metrics are updated when the block is evicted from the block cache

Signed-off-by: MrCroxx
---
 src/storage/src/hummock/sstable/block.rs      | 162 +----------
 .../src/hummock/sstable/block_iterator.rs     |  21 +-
 src/storage/src/monitor/hitmap.rs             | 268 ++++++++++++++++++
 src/storage/src/monitor/mod.rs                |   3 +
 4 files changed, 296 insertions(+), 158 deletions(-)
 create mode 100644 src/storage/src/monitor/hitmap.rs

diff --git a/src/storage/src/hummock/sstable/block.rs b/src/storage/src/hummock/sstable/block.rs
index da600852f0a35..53f490cc65722 100644
--- a/src/storage/src/hummock/sstable/block.rs
+++ b/src/storage/src/hummock/sstable/block.rs
@@ -17,7 +17,6 @@ use std::fmt::Debug;
 use std::io::{Read, Write};
 use std::mem::size_of;
 use std::ops::{Range, RangeBounds};
-use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering};
 
 use bytes::{Buf, BufMut, Bytes, BytesMut};
 use risingwave_common::catalog::TableId;
@@ -30,11 +29,14 @@ use super::utils::{bytes_diff_below_max_key_length, xxhash64_verify, Compression
 use crate::hummock::sstable::utils;
 use crate::hummock::sstable::utils::xxhash64_checksum;
 use crate::hummock::{HummockError, HummockResult};
+use crate::monitor::Hitmap;
 
 pub const DEFAULT_BLOCK_SIZE: usize = 4 * 1024;
 pub const DEFAULT_RESTART_INTERVAL: usize = 16;
 pub const DEFAULT_ENTRY_SIZE: usize = 24; // table_id(u64) + primary_key(u64) + epoch(u64)
+pub const HITMAP_ELEMS: usize = 4;
+
 #[allow(non_camel_case_types)]
 #[derive(Clone, Copy, PartialEq, Debug)]
 pub enum LenType {
@@ -141,93 +143,6 @@ impl RestartPoint {
     }
 }
 
-#[derive(Debug)]
-pub struct Hitmap<const N: usize> {
-    /// For [`Block`] is rarely access in multi-thread pattern,
-    /// the cons of false-sharing can be ignored.
- data: Box<[AtomicU64; N]>, -} - -impl Default for Hitmap { - fn default() -> Self { - let data = [(); N].map(|_| AtomicU64::default()); - let data = Box::new(data); - Self { data } - } -} - -impl Hitmap { - pub const fn bits() -> usize { - N * u64::BITS as usize - } - - pub const fn bytes() -> usize { - N * 8 - } - - pub fn reset(&self) { - for elem in &*self.data { - elem.store(0, AtomicOrdering::Relaxed); - } - } - - pub fn fill(&self, start_bit: usize, end_bit: usize) { - const MASK: usize = (1 << 6) - 1; - - let end_bit = std::cmp::min(end_bit, Self::bits()); - - let head_bits = start_bit & MASK; - let tail_bits_rev = end_bit & MASK; - - let head_elem = start_bit >> 6; - let tail_elem = end_bit >> 6; - - for i in head_elem..=std::cmp::min(tail_elem, N - 1) { - let elem = &self.data[i]; - let mut umask = 0u64; - if i == head_elem { - umask |= (1u64 << head_bits) - 1; - } - if i == tail_elem { - umask |= !((1u64 << tail_bits_rev) - 1); - } - elem.fetch_or(!umask, AtomicOrdering::Relaxed); - } - } - - pub fn fill_with_ratio(&self, start: f64, end: f64) { - let start_bit = (Self::bits() as f64 * start) as usize; - let end_bit = (Self::bits() as f64 * end) as usize; - self.fill(start_bit, end_bit) - } - - pub fn ones(&self) -> usize { - let mut res = 0; - for elem in &*self.data { - res += elem.load(AtomicOrdering::Relaxed).count_ones() as usize; - } - res - } - - pub fn zeros(&self) -> usize { - Self::bits() - self.ones() - } - - pub fn ratio(&self) -> f64 { - self.ones() as f64 / Self::bits() as f64 - } - - #[cfg(test)] - pub fn to_hex_vec(&self) -> Vec { - use itertools::Itertools; - self.data - .iter() - .map(|elem| elem.load(AtomicOrdering::Relaxed)) - .map(|v| format!("{v:016x}")) - .collect_vec() - } -} - pub struct Block { /// Uncompressed entries data, with restart encoded restart points info. data: Bytes, @@ -240,7 +155,7 @@ pub struct Block { /// Restart points. restart_points: Vec, - hitmap: Hitmap<4>, + hitmap: Hitmap, } impl Clone for Block { @@ -416,10 +331,6 @@ impl Block { pub fn slice(&self, range: impl RangeBounds) -> &[u8] { let start = range.start().unwrap_or_default(); let end = range.end().unwrap_or(self.data_len); - self.hitmap.fill_with_ratio( - start as f64 / self.data_len as f64, - end as f64 / self.data_len as f64, - ); &self.data[start..end] } @@ -427,6 +338,10 @@ impl Block { &self.data[..] 
 }
 
+    pub fn hitmap(&self) -> &Hitmap<HITMAP_ELEMS> {
+        &self.hitmap
+    }
+
     pub fn efficiency(&self) -> f64 {
         self.hitmap.ratio()
     }
@@ -1042,65 +957,4 @@ mod tests {
             builder.clear();
         }
     }
-
-    #[test]
-    fn test_hitmap() {
-        // hex: high <== low
-        let h = Hitmap::<4>::default();
-        assert_eq!(
-            h.to_hex_vec(),
-            vec![
-                "0000000000000000",
-                "0000000000000000",
-                "0000000000000000",
-                "0000000000000000",
-            ]
-        );
-        assert_eq!(h.ones(), 0);
-        h.fill(16, 24);
-        assert_eq!(
-            h.to_hex_vec(),
-            vec![
-                "0000000000ff0000",
-                "0000000000000000",
-                "0000000000000000",
-                "0000000000000000",
-            ]
-        );
-        assert_eq!(h.ones(), 8);
-        h.fill(32, 64);
-        assert_eq!(
-            h.to_hex_vec(),
-            vec![
-                "ffffffff00ff0000",
-                "0000000000000000",
-                "0000000000000000",
-                "0000000000000000",
-            ]
-        );
-        assert_eq!(h.ones(), 40);
-        h.fill(96, 224);
-        assert_eq!(
-            h.to_hex_vec(),
-            vec![
-                "ffffffff00ff0000",
-                "ffffffff00000000",
-                "ffffffffffffffff",
-                "00000000ffffffff",
-            ]
-        );
-        assert_eq!(h.ones(), 168);
-        h.fill(0, 256);
-        assert_eq!(
-            h.to_hex_vec(),
-            vec![
-                "ffffffffffffffff",
-                "ffffffffffffffff",
-                "ffffffffffffffff",
-                "ffffffffffffffff",
-            ]
-        );
-        assert_eq!(h.ones(), 256);
-        h.reset();
-    }
 }
diff --git a/src/storage/src/hummock/sstable/block_iterator.rs b/src/storage/src/hummock/sstable/block_iterator.rs
index 9cf58e552eeee..1afc46adc6c8e 100644
--- a/src/storage/src/hummock/sstable/block_iterator.rs
+++ b/src/storage/src/hummock/sstable/block_iterator.rs
@@ -19,8 +19,9 @@ use bytes::BytesMut;
 use risingwave_common::catalog::TableId;
 use risingwave_hummock_sdk::key::FullKey;
 
-use super::{KeyPrefix, LenType, RestartPoint};
+use super::{KeyPrefix, LenType, RestartPoint, HITMAP_ELEMS};
 use crate::hummock::BlockHolder;
+use crate::monitor::LocalHitmap;
 
 /// [`BlockIterator`] is used to read kv pairs in a block.
 pub struct BlockIterator {
@@ -39,10 +40,14 @@ pub struct BlockIterator {
 
     last_key_len_type: LenType,
     last_value_len_type: LenType,
+
+    /// NOTE: `hitmap` is supposed to be updated every time `value_range` is updated.
+    hitmap: LocalHitmap<HITMAP_ELEMS>,
 }
 
 impl BlockIterator {
     pub fn new(block: BlockHolder) -> Self {
+        let hitmap = block.hitmap().local();
         Self {
             block,
             offset: usize::MAX,
@@ -52,6 +57,7 @@ impl BlockIterator {
             entry_len: 0,
             last_key_len_type: LenType::u8,
             last_value_len_type: LenType::u8,
+            hitmap,
         }
     }
 
@@ -81,7 +87,6 @@ impl BlockIterator {
     pub fn key(&self) -> FullKey<&[u8]> {
         assert!(self.is_valid());
-
         FullKey::from_slice_without_table_id(self.table_id(), &self.key[..])
     }
 
@@ -105,13 +110,11 @@ impl BlockIterator {
     pub fn seek(&mut self, key: FullKey<&[u8]>) {
         self.seek_restart_point_by_key(key);
-
         self.next_until_key(key);
     }
 
     pub fn seek_le(&mut self, key: FullKey<&[u8]>) {
         self.seek_restart_point_by_key(key);
-
         self.next_until_key(key);
         if !self.is_valid() {
             self.seek_to_last();
@@ -172,6 +175,11 @@ impl BlockIterator {
         self.offset = offset;
         self.entry_len = prefix.entry_len();
 
+        self.hitmap.fill_with_ratio(
+            self.offset as f64 / self.block.len() as f64,
+            self.value_range.end as f64 / self.block.len() as f64,
+        );
+
         true
     }
 
@@ -285,6 +293,11 @@ impl BlockIterator {
         self.offset = offset;
         self.entry_len = prefix.entry_len();
         self.update_restart_point(index);
+
+        self.hitmap.fill_with_ratio(
+            self.offset as f64 / self.block.len() as f64,
+            self.value_range.end as f64 / self.block.len() as f64,
+        );
     }
 
     fn update_restart_point(&mut self, index: usize) {
diff --git a/src/storage/src/monitor/hitmap.rs b/src/storage/src/monitor/hitmap.rs
new file mode 100644
index 0000000000000..58fbe555a95a4
--- /dev/null
+++ b/src/storage/src/monitor/hitmap.rs
@@ -0,0 +1,268 @@
+// Copyright 2024 RisingWave Labs
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::Arc;
+
+use risingwave_common::util::iter_util::ZipEqFast;
+
+#[derive(Debug, Clone)]
+pub struct Hitmap<const N: usize> {
+    /// Since a [`Block`] is rarely accessed by multiple threads,
+    /// the cost of false sharing can be ignored.
+ data: Arc<[AtomicU64; N]>, +} + +impl Default for Hitmap { + fn default() -> Self { + let data = [(); N].map(|_| AtomicU64::default()); + let data = Arc::new(data); + Self { data } + } +} + +impl Hitmap { + pub const fn bits() -> usize { + N * u64::BITS as usize + } + + pub const fn bytes() -> usize { + N * 8 + } + + pub fn reset(&self) { + for elem in &*self.data { + elem.store(0, Ordering::Relaxed); + } + } + + pub fn fill(&self, start_bit: usize, end_bit: usize) { + const MASK: usize = (1 << 6) - 1; + + let end_bit = std::cmp::min(end_bit, Self::bits()); + + let head_bits = start_bit & MASK; + let tail_bits_rev = end_bit & MASK; + + let head_elem = start_bit >> 6; + let tail_elem = end_bit >> 6; + + for i in head_elem..=std::cmp::min(tail_elem, N - 1) { + let elem = &self.data[i]; + let mut umask = 0u64; + if i == head_elem { + umask |= (1u64 << head_bits) - 1; + } + if i == tail_elem { + umask |= !((1u64 << tail_bits_rev) - 1); + } + elem.fetch_or(!umask, Ordering::Relaxed); + } + } + + pub fn fill_with_ratio(&self, start: f64, end: f64) { + let start_bit = (Self::bits() as f64 * start) as usize; + let end_bit = (Self::bits() as f64 * end) as usize; + self.fill(start_bit, end_bit) + } + + pub fn local(&self) -> LocalHitmap { + LocalHitmap::new(self) + } + + pub fn apply(&self, local: &LocalHitmap) { + for (global, local) in self.data.iter().zip_eq_fast(local.data.iter()) { + global.fetch_add(*local, Ordering::Relaxed); + } + } + + pub fn ones(&self) -> usize { + let mut res = 0; + for elem in &*self.data { + res += elem.load(Ordering::Relaxed).count_ones() as usize; + } + res + } + + pub fn zeros(&self) -> usize { + Self::bits() - self.ones() + } + + pub fn ratio(&self) -> f64 { + self.ones() as f64 / Self::bits() as f64 + } + + #[cfg(test)] + pub fn to_hex_vec(&self) -> Vec { + use itertools::Itertools; + self.data + .iter() + .map(|elem| elem.load(Ordering::Relaxed)) + .map(|v| format!("{v:016x}")) + .collect_vec() + } +} + +#[derive(Debug)] +pub struct LocalHitmap { + owner: Hitmap, + data: Box<[u64; N]>, +} + +impl LocalHitmap { + pub const fn bits() -> usize { + N * u64::BITS as usize + } + + pub const fn bytes() -> usize { + N * 8 + } + + pub fn new(owner: &Hitmap) -> Self { + Self { + owner: owner.clone(), + data: Box::new([0; N]), + } + } + + pub fn reset(&mut self) { + for elem in &mut *self.data { + *elem = 0; + } + } + + pub fn fill(&mut self, start_bit: usize, end_bit: usize) { + const MASK: usize = (1 << 6) - 1; + + let end_bit = std::cmp::min(end_bit, Self::bits()); + + let head_bits = start_bit & MASK; + let tail_bits_rev = end_bit & MASK; + + let head_elem = start_bit >> 6; + let tail_elem = end_bit >> 6; + + for i in head_elem..=std::cmp::min(tail_elem, N - 1) { + let elem = &mut self.data[i]; + let mut umask = 0u64; + if i == head_elem { + umask |= (1u64 << head_bits) - 1; + } + if i == tail_elem { + umask |= !((1u64 << tail_bits_rev) - 1); + } + *elem |= !umask; + } + } + + pub fn fill_with_ratio(&mut self, start: f64, end: f64) { + let start_bit = (Self::bits() as f64 * start) as usize; + let end_bit = (Self::bits() as f64 * end) as usize; + self.fill(start_bit, end_bit) + } + + pub fn ones(&self) -> usize { + let mut res = 0; + for elem in &*self.data { + res += elem.count_ones() as usize; + } + res + } + + pub fn zeros(&self) -> usize { + Self::bits() - self.ones() + } + + pub fn ratio(&self) -> f64 { + self.ones() as f64 / Self::bits() as f64 + } + + #[cfg(test)] + pub fn to_hex_vec(&self) -> Vec { + use itertools::Itertools; + self.data.iter().map(|v| 
format!("{v:016x}")).collect_vec()
+    }
+}
+
+impl<const N: usize> Drop for LocalHitmap<N> {
+    fn drop(&mut self) {
+        self.owner.apply(self);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_hitmap() {
+        // hex: high <== low
+        let h = Hitmap::<4>::default();
+        assert_eq!(
+            h.to_hex_vec(),
+            vec![
+                "0000000000000000",
+                "0000000000000000",
+                "0000000000000000",
+                "0000000000000000",
+            ]
+        );
+        assert_eq!(h.ones(), 0);
+        h.fill(16, 24);
+        assert_eq!(
+            h.to_hex_vec(),
+            vec![
+                "0000000000ff0000",
+                "0000000000000000",
+                "0000000000000000",
+                "0000000000000000",
+            ]
+        );
+        assert_eq!(h.ones(), 8);
+        h.fill(32, 64);
+        assert_eq!(
+            h.to_hex_vec(),
+            vec![
+                "ffffffff00ff0000",
+                "0000000000000000",
+                "0000000000000000",
+                "0000000000000000",
+            ]
+        );
+        assert_eq!(h.ones(), 40);
+        h.fill(96, 224);
+        assert_eq!(
+            h.to_hex_vec(),
+            vec![
+                "ffffffff00ff0000",
+                "ffffffff00000000",
+                "ffffffffffffffff",
+                "00000000ffffffff",
+            ]
+        );
+        assert_eq!(h.ones(), 168);
+        h.fill(0, 256);
+        assert_eq!(
+            h.to_hex_vec(),
+            vec![
+                "ffffffffffffffff",
+                "ffffffffffffffff",
+                "ffffffffffffffff",
+                "ffffffffffffffff",
+            ]
+        );
+        assert_eq!(h.ones(), 256);
+        h.reset();
+    }
+}
diff --git a/src/storage/src/monitor/mod.rs b/src/storage/src/monitor/mod.rs
index 053cc72cf8130..1849f272d8473 100644
--- a/src/storage/src/monitor/mod.rs
+++ b/src/storage/src/monitor/mod.rs
@@ -28,6 +28,9 @@ pub use compactor_metrics::*;
 
 mod local_metrics;
 pub use local_metrics::*;
+
+mod hitmap;
+pub use hitmap::*;
 pub use risingwave_object_store::object::object_metrics::{
     ObjectStoreMetrics, GLOBAL_OBJECT_STORE_METRICS,
 };

From 8d12ba4055090464ced0675287526f060c6d5b24 Mon Sep 17 00:00:00 2001
From: MrCroxx
Date: Tue, 23 Jan 2024 16:05:19 +0800
Subject: [PATCH 09/11] chore: fix doc

Signed-off-by: MrCroxx
---
 src/storage/src/monitor/hitmap.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/storage/src/monitor/hitmap.rs b/src/storage/src/monitor/hitmap.rs
index 58fbe555a95a4..35a6adafc7807 100644
--- a/src/storage/src/monitor/hitmap.rs
+++ b/src/storage/src/monitor/hitmap.rs
@@ -19,7 +19,7 @@ use risingwave_common::util::iter_util::ZipEqFast;
 
 #[derive(Debug, Clone)]
 pub struct Hitmap<const N: usize> {
-    /// Since a [`Block`] is rarely accessed by multiple threads,
+    /// Since a [`Hitmap`] is rarely accessed by multiple threads,
     /// the cost of false sharing can be ignored.
data: Arc<[AtomicU64; N]>, } From 007fa1a3f2ca4196b2c2f622183ee3572357f388 Mon Sep 17 00:00:00 2001 From: MrCroxx Date: Tue, 23 Jan 2024 16:30:05 +0800 Subject: [PATCH 10/11] chore: resolve some comments Signed-off-by: MrCroxx --- src/storage/src/hummock/sstable/block.rs | 9 +++------ src/storage/src/hummock/sstable/block_iterator.rs | 10 +++++----- src/storage/src/monitor/hitmap.rs | 2 +- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/src/storage/src/hummock/sstable/block.rs b/src/storage/src/hummock/sstable/block.rs index 53f490cc65722..82f21d7c971b1 100644 --- a/src/storage/src/hummock/sstable/block.rs +++ b/src/storage/src/hummock/sstable/block.rs @@ -16,11 +16,10 @@ use std::cmp::Ordering; use std::fmt::Debug; use std::io::{Read, Write}; use std::mem::size_of; -use std::ops::{Range, RangeBounds}; +use std::ops::Range; use bytes::{Buf, BufMut, Bytes, BytesMut}; use risingwave_common::catalog::TableId; -use risingwave_common::range::RangeBoundsExt; use risingwave_hummock_sdk::key::FullKey; use risingwave_hummock_sdk::KeyComparator; use {lz4, zstd}; @@ -328,10 +327,8 @@ impl Block { self.restart_points.partition_point(pred) } - pub fn slice(&self, range: impl RangeBounds) -> &[u8] { - let start = range.start().unwrap_or_default(); - let end = range.end().unwrap_or(self.data_len); - &self.data[start..end] + pub fn data(&self) -> &[u8] { + &self.data[..self.data_len] } pub fn raw(&self) -> &[u8] { diff --git a/src/storage/src/hummock/sstable/block_iterator.rs b/src/storage/src/hummock/sstable/block_iterator.rs index 1afc46adc6c8e..174ac402810a6 100644 --- a/src/storage/src/hummock/sstable/block_iterator.rs +++ b/src/storage/src/hummock/sstable/block_iterator.rs @@ -92,7 +92,7 @@ impl BlockIterator { pub fn value(&self) -> &[u8] { assert!(self.is_valid()); - self.block.slice(self.value_range.clone()) + &self.block.data()[self.value_range.clone()] } pub fn is_valid(&self) -> bool { @@ -169,7 +169,7 @@ impl BlockIterator { self.decode_prefix_at(offset, self.last_key_len_type, self.last_value_len_type); self.key.truncate(prefix.overlap_len()); self.key - .extend_from_slice(self.block.slice(prefix.diff_key_range())); + .extend_from_slice(&self.block.data()[prefix.diff_key_range()]); self.value_range = prefix.value_range(); self.offset = offset; @@ -241,7 +241,7 @@ impl BlockIterator { value_len_type: LenType, ) -> KeyPrefix { KeyPrefix::decode( - &mut self.block.slice(offset..), + &mut &self.block.data()[offset..], offset, key_len_type, value_len_type, @@ -260,7 +260,7 @@ impl BlockIterator { }| { let prefix = self.decode_prefix_at(probe as usize, key_len_type, value_len_type); - let probe_key = self.block.slice(prefix.diff_key_range()); + let probe_key = &self.block.data()[prefix.diff_key_range()]; let full_probe_key = FullKey::from_slice_without_table_id(self.block.table_id(), probe_key); match full_probe_key.cmp(&key) { @@ -288,7 +288,7 @@ impl BlockIterator { restart_point.value_len_type, ); - self.key = BytesMut::from(self.block.slice(prefix.diff_key_range())); + self.key = BytesMut::from(&self.block.data()[prefix.diff_key_range()]); self.value_range = prefix.value_range(); self.offset = offset; self.entry_len = prefix.entry_len(); diff --git a/src/storage/src/monitor/hitmap.rs b/src/storage/src/monitor/hitmap.rs index 35a6adafc7807..99e0a48724d50 100644 --- a/src/storage/src/monitor/hitmap.rs +++ b/src/storage/src/monitor/hitmap.rs @@ -83,7 +83,7 @@ impl Hitmap { pub fn apply(&self, local: &LocalHitmap) { for (global, local) in 
self.data.iter().zip_eq_fast(local.data.iter()) { - global.fetch_add(*local, Ordering::Relaxed); + global.fetch_or(*local, Ordering::Relaxed); } } From aa6bfb64184bd882fbe390742e581b0595fead6d Mon Sep 17 00:00:00 2001 From: MrCroxx Date: Tue, 23 Jan 2024 16:52:17 +0800 Subject: [PATCH 11/11] chore: resolve comments Signed-off-by: MrCroxx --- .../src/hummock/sstable/block_iterator.rs | 16 ++--- src/storage/src/monitor/hitmap.rs | 64 +++++-------------- 2 files changed, 25 insertions(+), 55 deletions(-) diff --git a/src/storage/src/hummock/sstable/block_iterator.rs b/src/storage/src/hummock/sstable/block_iterator.rs index 174ac402810a6..4c467adc8f589 100644 --- a/src/storage/src/hummock/sstable/block_iterator.rs +++ b/src/storage/src/hummock/sstable/block_iterator.rs @@ -175,10 +175,7 @@ impl BlockIterator { self.offset = offset; self.entry_len = prefix.entry_len(); - self.hitmap.fill_with_ratio( - self.offset as f64 / self.block.len() as f64, - self.value_range.end as f64 / self.block.len() as f64, - ); + self.update_hitmap(); true } @@ -294,10 +291,7 @@ impl BlockIterator { self.entry_len = prefix.entry_len(); self.update_restart_point(index); - self.hitmap.fill_with_ratio( - self.offset as f64 / self.block.len() as f64, - self.value_range.end as f64 / self.block.len() as f64, - ); + self.update_hitmap(); } fn update_restart_point(&mut self, index: usize) { @@ -307,6 +301,12 @@ impl BlockIterator { self.last_key_len_type = restart_point.key_len_type; self.last_value_len_type = restart_point.value_len_type; } + + /// Update the local hitmap of the block based on the current iterator position. + fn update_hitmap(&mut self) { + self.hitmap + .fill_with_range(self.offset, self.value_range.end, self.block.len()); + } } #[cfg(test)] diff --git a/src/storage/src/monitor/hitmap.rs b/src/storage/src/monitor/hitmap.rs index 99e0a48724d50..6afb79b53b332 100644 --- a/src/storage/src/monitor/hitmap.rs +++ b/src/storage/src/monitor/hitmap.rs @@ -41,42 +41,6 @@ impl Hitmap { N * 8 } - pub fn reset(&self) { - for elem in &*self.data { - elem.store(0, Ordering::Relaxed); - } - } - - pub fn fill(&self, start_bit: usize, end_bit: usize) { - const MASK: usize = (1 << 6) - 1; - - let end_bit = std::cmp::min(end_bit, Self::bits()); - - let head_bits = start_bit & MASK; - let tail_bits_rev = end_bit & MASK; - - let head_elem = start_bit >> 6; - let tail_elem = end_bit >> 6; - - for i in head_elem..=std::cmp::min(tail_elem, N - 1) { - let elem = &self.data[i]; - let mut umask = 0u64; - if i == head_elem { - umask |= (1u64 << head_bits) - 1; - } - if i == tail_elem { - umask |= !((1u64 << tail_bits_rev) - 1); - } - elem.fetch_or(!umask, Ordering::Relaxed); - } - } - - pub fn fill_with_ratio(&self, start: f64, end: f64) { - let start_bit = (Self::bits() as f64 * start) as usize; - let end_bit = (Self::bits() as f64 * end) as usize; - self.fill(start_bit, end_bit) - } - pub fn local(&self) -> LocalHitmap { LocalHitmap::new(self) } @@ -136,12 +100,6 @@ impl LocalHitmap { } } - pub fn reset(&mut self) { - for elem in &mut *self.data { - *elem = 0; - } - } - pub fn fill(&mut self, start_bit: usize, end_bit: usize) { const MASK: usize = (1 << 6) - 1; @@ -166,9 +124,9 @@ impl LocalHitmap { } } - pub fn fill_with_ratio(&mut self, start: f64, end: f64) { - let start_bit = (Self::bits() as f64 * start) as usize; - let end_bit = (Self::bits() as f64 * end) as usize; + pub fn fill_with_range(&mut self, start: usize, end: usize, len: usize) { + let start_bit = Self::bits() * start / len; + let end_bit = Self::bits() * end / 
len; self.fill(start_bit, end_bit) } @@ -208,7 +166,9 @@ mod tests { #[test] fn test_hitmap() { // hex: high <== low - let h = Hitmap::<4>::default(); + let g = Hitmap::<4>::default(); + + let mut h = g.local(); assert_eq!( h.to_hex_vec(), vec![ @@ -263,6 +223,16 @@ mod tests { ] ); assert_eq!(h.ones(), 256); - h.reset(); + drop(h); + assert_eq!( + g.to_hex_vec(), + vec![ + "ffffffffffffffff", + "ffffffffffffffff", + "ffffffffffffffff", + "ffffffffffffffff", + ] + ); + assert_eq!(g.ones(), 256); } }
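
To make the bit arithmetic in `Hitmap::fill` (patch 03) easier to follow, here is a simplified, self-contained sketch: plain `u64` words stand in for the `AtomicU64` array and a slice replaces the const-generic array, but the mask construction is the same. For every word touched by `start_bit..end_bit`, an "unset" mask `umask` collects the bits below `start_bit` and at or above `end_bit`, and the complement is ORed into the word.

fn fill(data: &mut [u64], start_bit: usize, end_bit: usize) {
    // Each u64 word covers 64 bits of the map; bit index >> 6 picks the word,
    // bit index & 63 picks the bit within the word.
    let bits = data.len() * 64;
    let end_bit = end_bit.min(bits);
    let (head_elem, head_bits) = (start_bit >> 6, start_bit & 63);
    let (tail_elem, tail_bits) = (end_bit >> 6, end_bit & 63);
    for i in head_elem..=tail_elem.min(data.len() - 1) {
        let mut umask = 0u64;
        if i == head_elem {
            umask |= (1u64 << head_bits) - 1; // keep bits below `start_bit` unset
        }
        if i == tail_elem {
            umask |= !((1u64 << tail_bits) - 1); // keep bits at/above `end_bit` unset
        }
        data[i] |= !umask;
    }
}

fn main() {
    let mut data = [0u64; 4];
    fill(&mut data, 16, 24);
    assert_eq!(data[0], 0x0000_0000_00ff_0000); // same value as the unit test above
    fill(&mut data, 32, 64);
    assert_eq!(data[0], 0xffff_ffff_00ff_0000);
    assert_eq!(data[1], 0); // `end_bit` is exclusive, so word 1 stays untouched
}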
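
The lifecycle that patches 08-10 set up can be summarized with a deliberately reduced sketch: a hypothetical one-word hitmap instead of the real `N`-word `Hitmap`/`LocalHitmap` in `src/storage/src/monitor/hitmap.rs`. The shared map is owned by the cached block; each `BlockIterator` records accesses into a plain-integer local map with no atomic operations on the hot path; the local map is ORed into the shared one exactly once, when the iterator is dropped; and the eviction listener finally observes the hit ratio into the per-table `block_efficiency_histogram`.

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

#[derive(Clone, Default)]
struct Hitmap(Arc<AtomicU64>); // shared, one per cached block

struct LocalHitmap {
    owner: Hitmap,
    data: u64, // plain integer: no atomic RMW while iterating
}

impl Hitmap {
    fn local(&self) -> LocalHitmap {
        LocalHitmap { owner: self.clone(), data: 0 }
    }

    fn ratio(&self) -> f64 {
        self.0.load(Ordering::Relaxed).count_ones() as f64 / 64.0
    }
}

impl LocalHitmap {
    // Map the accessed byte range `start..end` of a `len`-byte block onto
    // bits, like `fill_with_range` in patch 11.
    fn fill_with_range(&mut self, start: usize, end: usize, len: usize) {
        let (start_bit, end_bit) = (64 * start / len, 64 * end / len);
        for bit in start_bit..end_bit.min(64) {
            self.data |= 1 << bit;
        }
    }
}

impl Drop for LocalHitmap {
    // Merge into the shared map exactly once, when the iterator drops
    // (patch 10 switched this merge from `fetch_add` to `fetch_or`).
    fn drop(&mut self) {
        self.owner.0.fetch_or(self.data, Ordering::Relaxed);
    }
}

fn main() {
    let global = Hitmap::default();
    {
        let mut local = global.local();
        local.fill_with_range(0, 1024, 4096); // the iterator read the first quarter
    } // drop merges `local` into `global`
    assert_eq!(global.ratio(), 0.25); // what the eviction listener would observe
}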