From 6626d76f87ef87815fcc75c0a42a9f1e2ebf9e16 Mon Sep 17 00:00:00 2001
From: Adam Reichold
Date: Sat, 4 May 2024 10:40:57 +0200
Subject: [PATCH] Avoid double-indirection of Arc<Vec<u8>> by using Arc<[u8]>
 directly

---
 src/tree_store/btree_base.rs             |  6 +--
 src/tree_store/btree_mutator.rs          | 10 ++--
 src/tree_store/page_store/base.rs        |  4 +-
 src/tree_store/page_store/cached_file.rs | 66 +++++++++++-------------
 4 files changed, 40 insertions(+), 46 deletions(-)

diff --git a/src/tree_store/btree_base.rs b/src/tree_store/btree_base.rs
index d6eae791..65a6564c 100644
--- a/src/tree_store/btree_base.rs
+++ b/src/tree_store/btree_base.rs
@@ -123,7 +123,7 @@ enum EitherPage {
     Immutable(PageImpl),
     Mutable(PageMut),
     OwnedMemory(Vec<u8>),
-    ArcMemory(Arc<Vec<u8>>),
+    ArcMemory(Arc<[u8]>),
 }
 
 impl EitherPage {
@@ -132,7 +132,7 @@ impl EitherPage {
             EitherPage::Immutable(page) => page.memory(),
             EitherPage::Mutable(page) => page.memory(),
             EitherPage::OwnedMemory(mem) => mem.as_slice(),
-            EitherPage::ArcMemory(mem) => mem.as_slice(),
+            EitherPage::ArcMemory(mem) => mem,
         }
     }
 }
@@ -159,7 +159,7 @@ impl<'a, V: Value + 'static> AccessGuard<'a, V> {
         }
     }
 
-    pub(crate) fn with_arc_page(page: Arc<Vec<u8>>, range: Range<usize>) -> Self {
+    pub(crate) fn with_arc_page(page: Arc<[u8]>, range: Range<usize>) -> Self {
         Self {
             page: EitherPage::ArcMemory(page),
             offset: range.start,
diff --git a/src/tree_store/btree_mutator.rs b/src/tree_store/btree_mutator.rs
index c457b031..4cf14ace 100644
--- a/src/tree_store/btree_mutator.rs
+++ b/src/tree_store/btree_mutator.rs
@@ -24,7 +24,7 @@ enum DeletionResult {
     DeletedLeaf,
     // A leaf with fewer entries than desired
     PartialLeaf {
-        page: Arc<Vec<u8>>,
+        page: Arc<[u8]>,
         deleted_pair: usize,
     },
     // A branch page subtree with fewer children than desired
@@ -316,7 +316,7 @@ impl<'a, 'b, K: Key, V: Value> MutateHelper<'a, 'b, K, V> {
         let existing_value = if found {
             let (start, end) = accessor.value_range(position).unwrap();
             if self.modify_uncommitted && self.mem.uncommitted(page_number) {
-                let arc = page.to_arc_vec();
+                let arc = page.to_arc();
                 drop(page);
                 self.mem.free(page_number);
                 Some(AccessGuard::with_arc_page(arc, start..end))
@@ -350,7 +350,7 @@ impl<'a, 'b, K: Key, V: Value> MutateHelper<'a, 'b, K, V> {
         let existing_value = if found {
             let (start, end) = accessor.value_range(position).unwrap();
             if self.modify_uncommitted && self.mem.uncommitted(page_number) {
-                let arc = page.to_arc_vec();
+                let arc = page.to_arc();
                 drop(page);
                 self.mem.free(page_number);
                 Some(AccessGuard::with_arc_page(arc, start..end))
@@ -546,7 +546,7 @@ impl<'a, 'b, K: Key, V: Value> MutateHelper<'a, 'b, K, V> {
             // Merge when less than 33% full. Splits occur when a page is full and produce two 50%
             // full pages, so we use 33% instead of 50% to avoid oscillating
             PartialLeaf {
-                page: page.to_arc_vec(),
+                page: page.to_arc(),
                 deleted_pair: position,
             }
         } else {
@@ -570,7 +570,7 @@ impl<'a, 'b, K: Key, V: Value> MutateHelper<'a, 'b, K, V> {
         drop(accessor);
         let guard = if uncommitted && self.modify_uncommitted {
             let page_number = page.get_page_number();
-            let arc = page.to_arc_vec();
+            let arc = page.to_arc();
             drop(page);
             self.mem.free(page_number);
             Some(AccessGuard::with_arc_page(arc, start..end))
diff --git a/src/tree_store/page_store/base.rs b/src/tree_store/page_store/base.rs
index 0a245881..bcef6944 100644
--- a/src/tree_store/page_store/base.rs
+++ b/src/tree_store/page_store/base.rs
@@ -149,14 +149,14 @@ pub(crate) trait Page {
 }
 
 pub struct PageImpl {
-    pub(super) mem: Arc<Vec<u8>>,
+    pub(super) mem: Arc<[u8]>,
     pub(super) page_number: PageNumber,
     #[cfg(debug_assertions)]
     pub(super) open_pages: Arc<Mutex<HashMap<PageNumber, u64>>>,
 }
 
 impl PageImpl {
-    pub(crate) fn to_arc_vec(&self) -> Arc<Vec<u8>> {
+    pub(crate) fn to_arc(&self) -> Arc<[u8]> {
         self.mem.clone()
     }
 }
diff --git a/src/tree_store/page_store/cached_file.rs b/src/tree_store/page_store/cached_file.rs
index afd0348c..747609ba 100644
--- a/src/tree_store/page_store/cached_file.rs
+++ b/src/tree_store/page_store/cached_file.rs
@@ -3,7 +3,6 @@ use crate::tree_store::LEAF;
 use crate::{DatabaseError, Result, StorageBackend, StorageError};
 use std::collections::BTreeMap;
 use std::io;
-use std::mem;
 use std::ops::{Index, IndexMut};
 use std::slice::SliceIndex;
 #[cfg(feature = "cache_metrics")]
@@ -31,27 +30,27 @@ impl CachePriority {
 pub(super) struct WritablePage {
     buffer: Arc<Mutex<PrioritizedWriteCache>>,
     offset: u64,
-    data: Vec<u8>,
+    data: Option<Arc<[u8]>>,
     priority: CachePriority,
 }
 
 impl WritablePage {
     pub(super) fn mem(&self) -> &[u8] {
-        &self.data
+        self.data.as_ref().unwrap()
     }
 
     pub(super) fn mem_mut(&mut self) -> &mut [u8] {
-        &mut self.data
+        Arc::get_mut(self.data.as_mut().unwrap()).unwrap()
     }
 }
 
 impl Drop for WritablePage {
     fn drop(&mut self) {
-        let data = mem::take(&mut self.data);
+        let data = self.data.take().unwrap();
         self.buffer
             .lock()
             .unwrap()
-            .return_value(&self.offset, Arc::new(data), self.priority);
+            .return_value(&self.offset, data, self.priority);
     }
 }
 
@@ -59,20 +58,20 @@ impl<I: SliceIndex<[u8]>> Index<I> for WritablePage {
     type Output = I::Output;
 
     fn index(&self, index: I) -> &Self::Output {
-        self.data.index(index)
+        self.mem().index(index)
     }
 }
 
 impl<I: SliceIndex<[u8]>> IndexMut<I> for WritablePage {
     fn index_mut(&mut self, index: I) -> &mut Self::Output {
-        self.data.index_mut(index)
+        self.mem_mut().index_mut(index)
     }
 }
 
 #[derive(Default)]
 struct PrioritizedCache {
-    cache: BTreeMap<u64, Arc<Vec<u8>>>,
-    low_pri_cache: BTreeMap<u64, Arc<Vec<u8>>>,
+    cache: BTreeMap<u64, Arc<[u8]>>,
+    low_pri_cache: BTreeMap<u64, Arc<[u8]>>,
 }
 
 impl PrioritizedCache {
@@ -83,12 +82,7 @@ impl PrioritizedCache {
         }
     }
 
-    fn insert(
-        &mut self,
-        key: u64,
-        value: Arc<Vec<u8>>,
-        priority: CachePriority,
-    ) -> Option<Arc<Vec<u8>>> {
+    fn insert(&mut self, key: u64, value: Arc<[u8]>, priority: CachePriority) -> Option<Arc<[u8]>> {
         if matches!(priority, CachePriority::Low) {
             debug_assert!(!self.cache.contains_key(&key));
             self.low_pri_cache.insert(key, value)
@@ -98,7 +92,7 @@ impl PrioritizedCache {
         }
     }
 
-    fn remove(&mut self, key: &u64) -> Option<Arc<Vec<u8>>> {
+    fn remove(&mut self, key: &u64) -> Option<Arc<[u8]>> {
         let result = self.cache.remove(key);
         if result.is_some() {
             return result;
@@ -106,7 +100,7 @@ impl PrioritizedCache {
         self.low_pri_cache.remove(key)
     }
 
-    fn get(&self, key: &u64) -> Option<&Arc<Vec<u8>>> {
+    fn get(&self, key: &u64) -> Option<&Arc<[u8]>> {
         let result = self.cache.get(key);
         if result.is_some() {
             return result;
@@ -114,7 +108,7 @@ impl PrioritizedCache {
         self.low_pri_cache.get(key)
    }
 
-    fn pop_lowest_priority(&mut self) -> Option<(u64, Arc<Vec<u8>>)> {
+    fn pop_lowest_priority(&mut self) -> Option<(u64, Arc<[u8]>)> {
         let result = self.low_pri_cache.pop_first();
         if result.is_some() {
             return result;
@@ -125,8 +119,8 @@ impl PrioritizedCache {
 
 #[derive(Default)]
 struct PrioritizedWriteCache {
-    cache: BTreeMap<u64, Option<Arc<Vec<u8>>>>,
-    low_pri_cache: BTreeMap<u64, Option<Arc<Vec<u8>>>>,
+    cache: BTreeMap<u64, Option<Arc<[u8]>>>,
+    low_pri_cache: BTreeMap<u64, Option<Arc<[u8]>>>,
 }
 
 impl PrioritizedWriteCache {
@@ -137,7 +131,7 @@ impl PrioritizedWriteCache {
         }
     }
 
-    fn insert(&mut self, key: u64, value: Arc<Vec<u8>>, priority: CachePriority) {
+    fn insert(&mut self, key: u64, value: Arc<[u8]>, priority: CachePriority) {
         if matches!(priority, CachePriority::Low) {
             assert!(self.low_pri_cache.insert(key, Some(value)).is_none());
             debug_assert!(!self.cache.contains_key(&key));
@@ -147,7 +141,7 @@ impl PrioritizedWriteCache {
         }
     }
 
-    fn get(&self, key: &u64) -> Option<&Arc<Vec<u8>>> {
+    fn get(&self, key: &u64) -> Option<&Arc<[u8]>> {
         let result = self.cache.get(key);
         if result.is_some() {
             return result.map(|x| x.as_ref().unwrap());
@@ -155,7 +149,7 @@ impl PrioritizedWriteCache {
         self.low_pri_cache.get(key).map(|x| x.as_ref().unwrap())
     }
 
-    fn remove(&mut self, key: &u64) -> Option<Arc<Vec<u8>>> {
+    fn remove(&mut self, key: &u64) -> Option<Arc<[u8]>> {
         if let Some(value) = self.cache.remove(key) {
             assert!(value.is_some());
             return value;
@@ -167,7 +161,7 @@ impl PrioritizedWriteCache {
         None
     }
 
-    fn return_value(&mut self, key: &u64, value: Arc<Vec<u8>>, priority: CachePriority) {
+    fn return_value(&mut self, key: &u64, value: Arc<[u8]>, priority: CachePriority) {
         if matches!(priority, CachePriority::Low) {
             assert!(self
                 .low_pri_cache
@@ -180,7 +174,7 @@ impl PrioritizedWriteCache {
         }
     }
 
-    fn take_value(&mut self, key: &u64) -> Option<Arc<Vec<u8>>> {
+    fn take_value(&mut self, key: &u64) -> Option<Arc<[u8]>> {
         if let Some(value) = self.cache.get_mut(key) {
             let result = value.take().unwrap();
             return Some(result);
@@ -192,7 +186,7 @@ impl PrioritizedWriteCache {
         None
     }
 
-    fn pop_lowest_priority(&mut self) -> Option<(u64, Arc<Vec<u8>>, CachePriority)> {
+    fn pop_lowest_priority(&mut self) -> Option<(u64, Arc<[u8]>, CachePriority)> {
         for (k, v) in self.low_pri_cache.range(..) {
             if v.is_some() {
                 let key = *k;
@@ -344,7 +338,7 @@ impl PagedCachedFile {
         len: usize,
         hint: PageHint,
         cache_policy: impl Fn(&[u8]) -> CachePriority,
-    ) -> Result<Arc<Vec<u8>>> {
+    ) -> Result<Arc<[u8]>> {
         self.check_fsync_failure()?;
         debug_assert_eq!(0, offset % self.page_size);
         #[cfg(feature = "cache_metrics")]
@@ -371,7 +365,7 @@ impl PagedCachedFile {
             }
         }
 
-        let buffer = Arc::new(self.read_direct(offset, len)?);
+        let buffer: Arc<[u8]> = self.read_direct(offset, len)?.into();
         let cache_size = self.read_cache_bytes.fetch_add(len, Ordering::AcqRel);
         let mut write_lock = self.read_cache[cache_slot].write().unwrap();
         write_lock.insert(offset, buffer.clone(), cache_policy(&buffer));
@@ -450,14 +444,14 @@ impl PagedCachedFile {
                 );
                 self.read_cache_bytes
                     .fetch_sub(removed.len(), Ordering::AcqRel);
-                Some(Arc::try_unwrap(removed).unwrap())
+                Some(removed)
             } else {
                 None
             }
         };
 
         let data = if let Some(removed) = lock.take_value(&offset) {
-            Arc::try_unwrap(removed).unwrap()
+            removed
         } else {
             let previous = self.write_buffer_bytes.fetch_add(len, Ordering::AcqRel);
             if previous + len > self.max_write_buffer_bytes {
@@ -481,19 +475,19 @@ impl PagedCachedFile {
             let result = if let Some(data) = existing {
                 data
             } else if overwrite {
-                vec![0; len]
+                vec![0; len].into()
             } else {
-                self.read_direct(offset, len)?
+                self.read_direct(offset, len)?.into()
             };
             let priority = cache_policy(&result);
-            lock.insert(offset, Arc::new(result), priority);
-            Arc::try_unwrap(lock.take_value(&offset).unwrap()).unwrap()
+            lock.insert(offset, result, priority);
+            lock.take_value(&offset).unwrap()
         };
         let priority = cache_policy(&data);
         Ok(WritablePage {
             buffer: self.write_buffer.clone(),
             offset,
-            data,
+            data: Some(data),
             priority,
         })
     }
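
Why the change helps: an Arc<Vec<u8>> is a pointer to a reference-counted heap cell
that holds a Vec's (pointer, length, capacity) triple, which in turn points at a
second allocation with the actual bytes, so every memory() call chases two pointers.
An Arc<[u8]> keeps the refcounts and the bytes in a single allocation, and the fat
pointer carries the length inline. A standalone sketch of the two layouts (not part
of the redb sources):

    use std::sync::Arc;

    fn main() {
        // Two allocations, two pointer hops: Arc -> Vec header -> bytes.
        let nested: Arc<Vec<u8>> = Arc::new(vec![0u8; 4096]);

        // One allocation: refcounts followed by the bytes; the fat pointer
        // itself carries the slice length.
        let flat: Arc<[u8]> = vec![0u8; 4096].into();

        // Both deref to &[u8], which is why EitherPage::memory() can return
        // the ArcMemory variant directly instead of calling as_slice().
        assert_eq!(nested.as_slice(), &flat[..]);
    }

The trade-off sits at construction: converting with Arc<[u8]>::from(Vec<u8>) copies
the bytes into the combined allocation, which is why the hunks above call .into()
exactly once, at the point where a freshly read or zeroed buffer enters the cache.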
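The subtlest hunk is WritablePage::mem_mut: holding &mut Arc<[u8]> does not by itself
grant write access to the bytes, so the code goes through Arc::get_mut, which returns
a mutable slice only while the reference count is exactly one. A minimal sketch of the
pattern, with a toy type standing in for WritablePage (illustrative, not redb's code):

    use std::sync::Arc;

    struct Page {
        data: Option<Arc<[u8]>>,
    }

    impl Page {
        fn mem_mut(&mut self) -> &mut [u8] {
            // get_mut succeeds only when this Arc is the sole owner, so the
            // unwrap encodes the invariant that a checked-out page is not
            // shared with the read cache or another reader.
            Arc::get_mut(self.data.as_mut().unwrap()).unwrap()
        }
    }

    fn main() {
        let mut page = Page { data: Some(vec![0u8; 8].into()) };
        page.mem_mut()[0] = 42;
        assert_eq!(page.data.as_deref().unwrap()[0], 42);

        // While another clone is alive, Arc::get_mut returns None, so a
        // mem_mut call here would panic instead of silently aliasing.
        let alias = page.data.clone();
        assert!(Arc::get_mut(page.data.as_mut().unwrap()).is_none());
        drop(alias);
    }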
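The data field becomes Option<Arc<[u8]>> for the sake of Drop, which receives only
&mut self and therefore cannot move a field out directly. The old code side-stepped
that with mem::take on the Vec (hence the removed "use std::mem"); the new code uses
Option::take. A sketch of the idiom, with a hypothetical pool standing in for
PrioritizedWriteCache:

    use std::sync::{Arc, Mutex};

    struct Guard {
        data: Option<Arc<[u8]>>,
        pool: Arc<Mutex<Vec<Arc<[u8]>>>>,
    }

    impl Drop for Guard {
        fn drop(&mut self) {
            // Option::take swaps None into the field and yields the Arc by
            // value, so it can be handed back to the owning cache.
            let data = self.data.take().unwrap();
            self.pool.lock().unwrap().push(data);
        }
    }

    fn main() {
        let pool = Arc::new(Mutex::new(Vec::new()));
        {
            let guard = Guard {
                data: Some(vec![1u8, 2, 3].into()),
                pool: pool.clone(),
            };
            drop(guard); // the buffer is returned to the pool here
        }
        assert_eq!(pool.lock().unwrap().len(), 1);
    }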
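Finally, the two Arc::try_unwrap(..).unwrap() calls disappear from the write path.
With Arc<Vec<u8>>, recovering an owned Vec<u8> for WritablePage meant unwrapping the
Arc, which panics if any clone is still alive; with Arc<[u8]> the page stores the Arc
itself, so the cached value is simply moved out. An illustrative before/after:

    use std::sync::Arc;

    fn main() {
        // Before: the cache stored Arc<Vec<u8>> but the page needed Vec<u8>,
        // so the refcount had to be exactly one at this point.
        let cached: Arc<Vec<u8>> = Arc::new(vec![0u8; 16]);
        let owned: Vec<u8> = Arc::try_unwrap(cached).unwrap();

        // After: the page holds an Arc<[u8]>, so taking a buffer out of the
        // cache is a plain move of the Arc, valid at any refcount.
        let cached: Arc<[u8]> = owned.into();
        let page_data: Arc<[u8]> = cached;
        assert_eq!(page_data.len(), 16);
    }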