Commit
Add some useful debug code to fuzzer
cberner committed Aug 17, 2024
1 parent e9ddf8f commit 2bb0084
Showing 6 changed files with 99 additions and 6 deletions.
fuzz/fuzz_targets/fuzz_redb.rs (8 changes: 6 additions & 2 deletions)
@@ -523,7 +523,9 @@ fn exec_table_crash_support<T: Clone>(config: &FuzzConfig, apply: fn(WriteTransa
     }
     txn.commit().unwrap();
     db.begin_write().unwrap().commit().unwrap();
-    let baseline_allocated_pages = db.begin_write().unwrap().stats().unwrap().allocated_pages();
+    let txn = db.begin_write().unwrap();
+    let baseline_allocated_pages = txn.stats().unwrap().allocated_pages();
+    txn.abort().unwrap();
     countdown.store(old_countdown, Ordering::SeqCst);

     let txn = db.begin_write().unwrap();
@@ -680,7 +682,9 @@ fn exec_table_crash_support<T: Clone>(config: &FuzzConfig, apply: fn(WriteTransa
         }
     }

-    let allocated_pages = db.begin_write().unwrap().stats().unwrap().allocated_pages();
+    let txn = db.begin_write().unwrap();
+    let allocated_pages = txn.stats().unwrap().allocated_pages();
+    txn.abort().unwrap();
     assert_eq!(allocated_pages, baseline_allocated_pages, "Found {} allocated pages at shutdown, expected {}", allocated_pages, baseline_allocated_pages);

     // TODO: enable this assert
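The fuzz-target change above swaps the one-liner page-count read for an explicit begin_write / stats / abort sequence, presumably so the measurement transaction is aborted cleanly rather than dropped as an unnamed temporary. Below is a minimal sketch (not part of this commit) of how that measurement and the new cfg-gated debug printer from src/transactions.rs could be combined in a test or fuzzing build; the helper name assert_page_count_stable and the closure-based workload are hypothetical, and only calls already visible in this diff (begin_write, stats, allocated_pages, abort, print_allocated_page_debug) are assumed.

// Sketch only: measure the allocated-page count around a workload and dump
// page ownership when the counts diverge. Assumes redb's Database/WriteTransaction
// API as used in the diff above.
use redb::Database;

fn assert_page_count_stable(db: &Database, workload: impl FnOnce(&Database)) {
    let txn = db.begin_write().unwrap();
    let baseline = txn.stats().unwrap().allocated_pages();
    txn.abort().unwrap();

    workload(db);

    let txn = db.begin_write().unwrap();
    let after = txn.stats().unwrap().allocated_pages();
    if after != baseline {
        // Hypothetical use of the new debug helper; it is only compiled in
        // under cfg(test) or cfg(fuzzing).
        #[cfg(fuzzing)]
        txn.print_allocated_page_debug();
    }
    txn.abort().unwrap();
    assert_eq!(after, baseline, "found {after} allocated pages, expected {baseline}");
}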
src/transactions.rs (78 changes: 78 additions & 0 deletions)
@@ -24,6 +24,8 @@ use std::collections::{HashMap, HashSet};
 use std::fmt::{Debug, Display, Formatter};
 use std::marker::PhantomData;
 use std::ops::RangeBounds;
+#[cfg(any(test, fuzzing))]
+use std::ops::RangeFull;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, Mutex};
 use std::{panic, thread};
@@ -506,6 +508,82 @@ impl WriteTransaction {
         })
     }

+    #[cfg(any(test, fuzzing))]
+    pub fn print_allocated_page_debug(&self) {
+        let mut all_allocated: HashSet<PageNumber> =
+            HashSet::from_iter(self.mem.all_allocated_pages());
+
+        let tracker = self.mem.tracker_page();
+        all_allocated.remove(&tracker);
+        println!("Tracker page");
+        println!("{tracker:?}");
+
+        let table_allocators = self
+            .tables
+            .lock()
+            .unwrap()
+            .table_tree
+            .all_referenced_pages()
+            .unwrap();
+        let mut table_pages = vec![];
+        for (i, allocator) in table_allocators.iter().enumerate() {
+            allocator.get_allocated_pages(i.try_into().unwrap(), &mut table_pages);
+        }
+        println!("Tables");
+        for p in table_pages {
+            all_allocated.remove(&p);
+            println!("{p:?}")
+        }
+
+        let system_table_allocators = self
+            .system_tables
+            .lock()
+            .unwrap()
+            .table_tree
+            .all_referenced_pages()
+            .unwrap();
+        let mut system_table_pages = vec![];
+        for (i, allocator) in system_table_allocators.iter().enumerate() {
+            allocator.get_allocated_pages(i.try_into().unwrap(), &mut system_table_pages);
+        }
+        println!("System tables");
+        for p in system_table_pages {
+            all_allocated.remove(&p);
+            println!("{p:?}")
+        }
+
+        println!("Free table");
+        if let Some(freed_iter) = self.freed_tree.lock().unwrap().all_pages_iter().unwrap() {
+            for p in freed_iter {
+                let p = p.unwrap();
+                all_allocated.remove(&p);
+                println!("{p:?}")
+            }
+        }
+        println!("Pending free (i.e. in freed table)");
+        for entry in self
+            .freed_tree
+            .lock()
+            .unwrap()
+            .range::<RangeFull, FreedTableKey>(&(..))
+            .unwrap()
+        {
+            let entry = entry.unwrap();
+            let value = entry.value();
+            for i in 0..value.len() {
+                let p = value.get(i);
+                all_allocated.remove(&p);
+                println!("{p:?}")
+            }
+        }
+        if !all_allocated.is_empty() {
+            println!("Leaked pages");
+            for p in all_allocated {
+                println!("{p:?}");
+            }
+        }
+    }
+
     /// Creates a snapshot of the current database state, which can be used to rollback the database.
     /// This savepoint will exist until it is deleted with `[delete_savepoint()]`.
     ///
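Conceptually, the new print_allocated_page_debug above is a set-difference pass: start from every page the allocator considers allocated, subtract the tracker page, the pages referenced by user tables, system tables, the freed tree's own pages, and the pending-free entries stored in it, and whatever remains is reported as leaked. A self-contained illustration of that accounting idea follows (u64 stands in for redb's PageNumber; the function name leaked_pages is hypothetical).

use std::collections::HashSet;

// Illustration only: remove every page some structure still claims from the set
// of all allocated pages; anything remaining is unreachable and thus a leak.
fn leaked_pages(all_allocated: &HashSet<u64>, claimed: &[Vec<u64>]) -> Vec<u64> {
    let mut remaining = all_allocated.clone();
    for group in claimed {
        for page in group {
            remaining.remove(page);
        }
    }
    remaining.into_iter().collect()
}

fn main() {
    let all: HashSet<u64> = [1, 2, 3, 4, 5].into_iter().collect();
    // e.g. pages owned by the tracker, tables, and freed tree
    let claimed = vec![vec![1, 2], vec![3]];
    let mut leaked = leaked_pages(&all, &claimed);
    leaked.sort();
    assert_eq!(leaked, vec![4, 5]); // pages 4 and 5 are allocated but unclaimed
}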
src/tree_store/page_store/bitmap.rs (1 change: 1 addition & 0 deletions)
@@ -346,6 +346,7 @@ impl U64GroupedBitmap {
         U64GroupedBitmapDifference::new(&self.data, &exclusion.data)
     }

+    #[allow(dead_code)]
     pub fn iter(&self) -> U64GroupedBitmapIter {
         U64GroupedBitmapIter::new(self.len, &self.data)
     }
src/tree_store/page_store/buddy_allocator.rs (1 change: 1 addition & 0 deletions)
@@ -245,6 +245,7 @@ impl BuddyAllocator {
         }
     }

+    #[cfg(any(test, fuzzing))]
     pub(crate) fn get_allocated_pages(&self, region: u32, output: &mut Vec<PageNumber>) {
         for order in 0..=self.max_order {
             let allocated = self.get_order_allocated(order);
src/tree_store/page_store/page_manager.rs (10 changes: 10 additions & 0 deletions)
@@ -261,6 +261,16 @@ impl TransactionalMemory {
         })
     }

+    #[cfg(any(test, fuzzing))]
+    pub(crate) fn all_allocated_pages(&self) -> Vec<PageNumber> {
+        self.state.lock().unwrap().allocators.all_allocated()
+    }
+
+    #[cfg(any(test, fuzzing))]
+    pub(crate) fn tracker_page(&self) -> PageNumber {
+        self.state.lock().unwrap().header.region_tracker()
+    }
+
     pub(crate) fn clear_read_cache(&self) {
         self.storage.invalidate_cache_all()
     }
src/tree_store/page_store/region.rs (7 changes: 3 additions & 4 deletions)
@@ -151,14 +151,13 @@ impl Allocators {
         }
     }

-    // TODO: remove this at some point. It is useful for debugging though.
-    #[allow(dead_code)]
-    pub(super) fn print_all_allocated(&self) {
+    #[cfg(any(test, fuzzing))]
+    pub(super) fn all_allocated(&self) -> Vec<PageNumber> {
         let mut pages = vec![];
         for (i, allocator) in self.region_allocators.iter().enumerate() {
             allocator.get_allocated_pages(i.try_into().unwrap(), &mut pages);
         }
-        println!("Allocated pages: {pages:?}");
+        pages
     }

     pub(crate) fn xxh3_hash(&self) -> u128 {
