diff --git a/fuzz/fuzz_targets/fuzz_redb.rs b/fuzz/fuzz_targets/fuzz_redb.rs index da8265e1..fd40a9cd 100644 --- a/fuzz/fuzz_targets/fuzz_redb.rs +++ b/fuzz/fuzz_targets/fuzz_redb.rs @@ -523,7 +523,9 @@ fn exec_table_crash_support(config: &FuzzConfig, apply: fn(WriteTransa } txn.commit().unwrap(); db.begin_write().unwrap().commit().unwrap(); - let baseline_allocated_pages = db.begin_write().unwrap().stats().unwrap().allocated_pages(); + let txn = db.begin_write().unwrap(); + let baseline_allocated_pages = txn.stats().unwrap().allocated_pages(); + txn.abort().unwrap(); countdown.store(old_countdown, Ordering::SeqCst); let txn = db.begin_write().unwrap(); @@ -680,7 +682,9 @@ fn exec_table_crash_support(config: &FuzzConfig, apply: fn(WriteTransa } } - let allocated_pages = db.begin_write().unwrap().stats().unwrap().allocated_pages(); + let txn = db.begin_write().unwrap(); + let allocated_pages = txn.stats().unwrap().allocated_pages(); + txn.abort().unwrap(); assert_eq!(allocated_pages, baseline_allocated_pages, "Found {} allocated pages at shutdown, expected {}", allocated_pages, baseline_allocated_pages); // TODO: enable this assert diff --git a/src/transactions.rs b/src/transactions.rs index b9681782..f761506d 100644 --- a/src/transactions.rs +++ b/src/transactions.rs @@ -24,6 +24,8 @@ use std::collections::{HashMap, HashSet}; use std::fmt::{Debug, Display, Formatter}; use std::marker::PhantomData; use std::ops::RangeBounds; +#[cfg(any(test, fuzzing))] +use std::ops::RangeFull; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; use std::{panic, thread}; @@ -506,6 +508,82 @@ impl WriteTransaction { }) } + #[cfg(any(test, fuzzing))] + pub fn print_allocated_page_debug(&self) { + let mut all_allocated: HashSet<PageNumber> = + HashSet::from_iter(self.mem.all_allocated_pages()); + + let tracker = self.mem.tracker_page(); + all_allocated.remove(&tracker); + println!("Tracker page"); + println!("{tracker:?}"); + + let table_allocators = self + .tables 
.lock() .unwrap() .table_tree .all_referenced_pages() .unwrap(); let mut table_pages = vec![]; for (i, allocator) in table_allocators.iter().enumerate() { allocator.get_allocated_pages(i.try_into().unwrap(), &mut table_pages); } println!("Tables"); for p in table_pages { all_allocated.remove(&p); println!("{p:?}") } let system_table_allocators = self .system_tables .lock() .unwrap() .table_tree .all_referenced_pages() .unwrap(); let mut system_table_pages = vec![]; for (i, allocator) in system_table_allocators.iter().enumerate() { allocator.get_allocated_pages(i.try_into().unwrap(), &mut system_table_pages); } println!("System tables"); for p in system_table_pages { all_allocated.remove(&p); println!("{p:?}") } println!("Free table"); if let Some(freed_iter) = self.freed_tree.lock().unwrap().all_pages_iter().unwrap() { for p in freed_iter { let p = p.unwrap(); all_allocated.remove(&p); println!("{p:?}") } } println!("Pending free (i.e. in freed table)"); for entry in self .freed_tree .lock() .unwrap() .range::<RangeFull, FreedTableKey>(&(..)) .unwrap() { let entry = entry.unwrap(); let value = entry.value(); for i in 0..value.len() { let p = value.get(i); all_allocated.remove(&p); println!("{p:?}") } } if !all_allocated.is_empty() { println!("Leaked pages"); for p in all_allocated { println!("{p:?}"); } } } /// Creates a snapshot of the current database state, which can be used to rollback the database. /// This savepoint will exist until it is deleted with `[delete_savepoint()]`. 
/// diff --git a/src/tree_store/page_store/bitmap.rs b/src/tree_store/page_store/bitmap.rs index 8b7bb192..26c52e3f 100644 --- a/src/tree_store/page_store/bitmap.rs +++ b/src/tree_store/page_store/bitmap.rs @@ -346,6 +346,7 @@ impl U64GroupedBitmap { U64GroupedBitmapDifference::new(&self.data, &exclusion.data) } + #[allow(dead_code)] pub fn iter(&self) -> U64GroupedBitmapIter { U64GroupedBitmapIter::new(self.len, &self.data) } diff --git a/src/tree_store/page_store/buddy_allocator.rs b/src/tree_store/page_store/buddy_allocator.rs index 3b2adfb9..8c967e29 100644 --- a/src/tree_store/page_store/buddy_allocator.rs +++ b/src/tree_store/page_store/buddy_allocator.rs @@ -245,6 +245,7 @@ impl BuddyAllocator { } } + #[cfg(any(test, fuzzing))] pub(crate) fn get_allocated_pages(&self, region: u32, output: &mut Vec<PageNumber>) { for order in 0..=self.max_order { let allocated = self.get_order_allocated(order); diff --git a/src/tree_store/page_store/page_manager.rs b/src/tree_store/page_store/page_manager.rs index d00c030c..190bf952 100644 --- a/src/tree_store/page_store/page_manager.rs +++ b/src/tree_store/page_store/page_manager.rs @@ -261,6 +261,16 @@ impl TransactionalMemory { }) } + #[cfg(any(test, fuzzing))] + pub(crate) fn all_allocated_pages(&self) -> Vec<PageNumber> { + self.state.lock().unwrap().allocators.all_allocated() + } + + #[cfg(any(test, fuzzing))] + pub(crate) fn tracker_page(&self) -> PageNumber { + self.state.lock().unwrap().header.region_tracker() + } + pub(crate) fn clear_read_cache(&self) { self.storage.invalidate_cache_all() } diff --git a/src/tree_store/page_store/region.rs b/src/tree_store/page_store/region.rs index e746877c..67993bac 100644 --- a/src/tree_store/page_store/region.rs +++ b/src/tree_store/page_store/region.rs @@ -151,14 +151,13 @@ impl Allocators { } } - // TODO: remove this at some point. It is useful for debugging though. 
- #[allow(dead_code)] - pub(super) fn print_all_allocated(&self) { + #[cfg(any(test, fuzzing))] + pub(super) fn all_allocated(&self) -> Vec<PageNumber> { let mut pages = vec![]; for (i, allocator) in self.region_allocators.iter().enumerate() { allocator.get_allocated_pages(i.try_into().unwrap(), &mut pages); } - println!("Allocated pages: {pages:?}"); + pages } pub(crate) fn xxh3_hash(&self) -> u128 {