From 78825c78209e85e3dbfca07a8f7448eb2b6153e4 Mon Sep 17 00:00:00 2001
From: Christopher Berner <me@cberner.com>
Date: Sun, 22 Oct 2023 19:45:43 -0700
Subject: [PATCH 1/5] Rename create_backend() to create_with_backend()

---
 src/db.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/db.rs b/src/db.rs
index d42f4b4f..9dd37cf1 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -853,7 +853,10 @@ impl Builder {
     }
 
     /// Open an existing or create a new database with the given backend.
-    pub fn create_backend(&self, backend: impl StorageBackend) -> Result<Database, DatabaseError> {
+    pub fn create_with_backend(
+        &self,
+        backend: impl StorageBackend,
+    ) -> Result<Database, DatabaseError> {
         Database::new(
             Box::new(backend),
             self.page_size,

From 8cf6cc88360caa7637dcc6058da26a65da355b49 Mon Sep 17 00:00:00 2001
From: Christopher Berner <me@cberner.com>
Date: Sun, 22 Oct 2023 19:49:01 -0700
Subject: [PATCH 2/5] Move InMemoryBackend and FileBackend exports into
 backends module

---
 src/backends.rs                     | 2 ++
 src/db.rs                           | 7 ++++---
 src/lib.rs                          | 4 ++--
 src/tree_store/page_store/header.rs | 3 ++-
 4 files changed, 10 insertions(+), 6 deletions(-)
 create mode 100644 src/backends.rs

diff --git a/src/backends.rs b/src/backends.rs
new file mode 100644
index 00000000..bdbf2e57
--- /dev/null
+++ b/src/backends.rs
@@ -0,0 +1,2 @@
+pub use crate::tree_store::file_backend::FileBackend;
+pub use crate::tree_store::InMemoryBackend;
diff --git a/src/db.rs b/src/db.rs
index 9dd37cf1..a5b7c44d 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -25,6 +25,7 @@ use crate::error::TransactionError;
 use crate::multimap_table::{parse_subtree_roots, DynamicCollection};
 use crate::sealed::Sealed;
 use crate::transactions::SAVEPOINT_TABLE;
+use crate::tree_store::file_backend::FileBackend;
 #[cfg(feature = "logging")]
 use log::{info, warn};
 
@@ -814,7 +815,7 @@
             .open(path)?;
 
         Database::new(
-            Box::new(crate::FileBackend::new(file)?),
+            Box::new(FileBackend::new(file)?),
             self.page_size,
             self.region_size,
             self.read_cache_size_bytes,
@@ -831,7 +832,7 @@
         }
 
         Database::new(
-            Box::new(crate::FileBackend::new(file)?),
+            Box::new(FileBackend::new(file)?),
             self.page_size,
             None,
             self.read_cache_size_bytes,
@@ -844,7 +845,7 @@
     /// The file must be empty or contain a valid database.
    pub fn create_file(&self, file: File) -> Result<Database, DatabaseError> {
         Database::new(
-            Box::new(crate::FileBackend::new(file)?),
+            Box::new(FileBackend::new(file)?),
             self.page_size,
             self.region_size,
             self.read_cache_size_bytes,
diff --git a/src/lib.rs b/src/lib.rs
index 83d17b52..0e58a2d6 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -66,8 +66,7 @@ pub use multimap_table::{
 };
 pub use table::{Drain, DrainFilter, Range, ReadOnlyTable, ReadableTable, Table};
 pub use transactions::{DatabaseStats, Durability, ReadTransaction, WriteTransaction};
-pub use tree_store::file_backend::FileBackend;
-pub use tree_store::{AccessGuard, AccessGuardMut, InMemoryBackend, Savepoint};
+pub use tree_store::{AccessGuard, AccessGuardMut, Savepoint};
 pub use types::{RedbKey, RedbValue, TypeName};
 
 type Result<T = (), E = StorageError> = std::result::Result<T, E>;
@@ -75,6 +74,7 @@ type Result<T = (), E = StorageError> = std::result::Result<T, E>;
 #[cfg(feature = "python")]
 pub use crate::python::redb;
 
+pub mod backends;
 mod complex_types;
 mod db;
 mod error;
diff --git a/src/tree_store/page_store/header.rs b/src/tree_store/page_store/header.rs
index 119d991f..6e76f5b1 100644
--- a/src/tree_store/page_store/header.rs
+++ b/src/tree_store/page_store/header.rs
@@ -405,6 +405,7 @@ impl TransactionHeader {
 
 #[cfg(test)]
 mod test {
+    use crate::backends::FileBackend;
     use crate::db::TableDefinition;
     use crate::tree_store::page_store::header::{
         GOD_BYTE_OFFSET, MAGICNUMBER, PAGE_SIZE, PRIMARY_BIT, RECOVERY_REQUIRED,
@@ -413,7 +414,7 @@ mod test {
     use crate::tree_store::page_store::TransactionalMemory;
     #[cfg(not(target_os = "windows"))]
     use crate::StorageError;
-    use crate::{Database, FileBackend, ReadableTable};
+    use crate::{Database, ReadableTable};
     use std::fs::OpenOptions;
     use std::io::{Read, Seek, SeekFrom, Write};
     use std::mem::size_of;

From dc0b54f1f9168b3db9c62658391aab9398329c1f Mon Sep 17 00:00:00 2001
From: Christopher Berner <me@cberner.com>
Date: Sun, 22 Oct 2023 19:50:20 -0700
Subject: [PATCH 3/5] Add test for InMemoryBackend

---
 tests/basic_tests.rs | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/tests/basic_tests.rs b/tests/basic_tests.rs
index 9192b7a4..ff9746cb 100644
--- a/tests/basic_tests.rs
+++ b/tests/basic_tests.rs
@@ -1,3 +1,4 @@
+use redb::backends::InMemoryBackend;
 use redb::{
     Database, MultimapTableDefinition, MultimapTableHandle, Range, ReadableTable, RedbKey,
     RedbValue, TableDefinition, TableHandle, TypeName,
@@ -36,6 +37,25 @@ fn len() {
     assert_eq!(table.len().unwrap(), 3);
 }
 
+#[test]
+fn in_memory() {
+    let db = Database::builder()
+        .create_with_backend(InMemoryBackend::new())
+        .unwrap();
+    let write_txn = db.begin_write().unwrap();
+    {
+        let mut table = write_txn.open_table(STR_TABLE).unwrap();
+        table.insert("hello", "world").unwrap();
+        table.insert("hello2", "world2").unwrap();
+        table.insert("hi", "world").unwrap();
+    }
+    write_txn.commit().unwrap();
+
+    let read_txn = db.begin_read().unwrap();
+    let table = read_txn.open_table(STR_TABLE).unwrap();
+    assert_eq!(table.len().unwrap(), 3);
+}
+
 #[test]
 fn first_last() {
     let tmpfile = create_tempfile();

From a7d00e820b2852d24fb2202fb05a6acc8e620f8b Mon Sep 17 00:00:00 2001
From: Christopher Berner <me@cberner.com>
Date: Sun, 22 Oct 2023 19:54:44 -0700
Subject: [PATCH 4/5] Replace redb::Result with std::result::Result in
 StorageBackend trait

---
 src/db.rs | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/db.rs b/src/db.rs
index a5b7c44d..81b00092 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -33,19 +33,19 @@ use log::{info, warn};
 /// Implements persistent storage for a database.
 pub trait StorageBackend: 'static + Debug + Send + Sync {
     /// Gets the current length of the storage.
-    fn len(&self) -> Result<u64, io::Error>;
+    fn len(&self) -> std::result::Result<u64, io::Error>;
 
     /// Reads the specified array of bytes from the storage.
-    fn read(&self, offset: u64, len: usize) -> Result<Vec<u8>, io::Error>;
+    fn read(&self, offset: u64, len: usize) -> std::result::Result<Vec<u8>, io::Error>;
 
     /// Sets the length of the storage.
-    fn set_len(&self, len: u64) -> Result<(), io::Error>;
+    fn set_len(&self, len: u64) -> std::result::Result<(), io::Error>;
 
     /// Syncs all buffered data with the persistent storage.
-    fn sync_data(&self, eventual: bool) -> Result<(), io::Error>;
+    fn sync_data(&self, eventual: bool) -> std::result::Result<(), io::Error>;
 
     /// Writes the specified array to the storage.
-    fn write(&self, offset: u64, data: &[u8]) -> Result<(), io::Error>;
+    fn write(&self, offset: u64, data: &[u8]) -> std::result::Result<(), io::Error>;
 }
 
 struct AtomicTransactionId {

From d51179ecffb8a181be5ae62a6cc3caad4514319d Mon Sep 17 00:00:00 2001
From: Christopher Berner <me@cberner.com>
Date: Mon, 23 Oct 2023 21:14:51 -0700
Subject: [PATCH 5/5] Refactor fuzzer to use a custom StorageBackend

This removes a bunch of cfg(fuzzing) statements from the main codebase
---
 fuzz/fuzz_targets/fuzz_redb.rs            | 104 +++++++++++--
 src/db.rs                                 | 176 ++++++++++++++--------
 src/error.rs                              |  16 --
 src/tree_store/page_store/cached_file.rs  |  44 +-----
 src/tree_store/page_store/page_manager.rs |   5 -
 5 files changed, 214 insertions(+), 131 deletions(-)

diff --git a/fuzz/fuzz_targets/fuzz_redb.rs b/fuzz/fuzz_targets/fuzz_redb.rs
index 304c2bee..4aafd210 100644
--- a/fuzz/fuzz_targets/fuzz_redb.rs
+++ b/fuzz/fuzz_targets/fuzz_redb.rs
@@ -1,13 +1,16 @@
 #![no_main]
 
 use libfuzzer_sys::fuzz_target;
-use redb::{AccessGuard, Database, Durability, Error, MultimapTable, MultimapTableDefinition, MultimapValue, ReadableMultimapTable, ReadableTable, Savepoint, Table, TableDefinition, WriteTransaction};
+use redb::{AccessGuard, Database, Durability, Error, MultimapTable, MultimapTableDefinition, MultimapValue, ReadableMultimapTable, ReadableTable, Savepoint, StorageBackend, Table, TableDefinition, WriteTransaction};
 use std::collections::{BTreeMap, BTreeSet, HashSet};
-use std::io::{Read, Seek, SeekFrom};
+use std::fmt::Debug;
+use std::io::{ErrorKind, Read, Seek, SeekFrom};
+use std::sync::atomic::{AtomicU64, Ordering};
 use tempfile::NamedTempFile;
 
 mod common;
 use common::*;
+use redb::backends::FileBackend;
 use crate::FuzzerSavepoint::{Ephemeral, NotYetDurablePersistent, Persistent};
 
 // These slow down the fuzzer, so don't create too many
@@ -15,6 +18,62 @@ const TABLE_DEF: TableDefinition<u64, &[u8]> = TableDefinition::new("fuzz_table"
 const MULTIMAP_TABLE_DEF: MultimapTableDefinition<u64, &[u8]> =
     MultimapTableDefinition::new("fuzz_multimap_table");
 
+#[derive(Debug)]
+struct FuzzerBackend {
+    inner: FileBackend,
+    countdown: AtomicU64,
+}
+
+impl FuzzerBackend {
+    fn new(backend: FileBackend, countdown: u64) -> Self {
+        Self {
+            inner: backend,
+            countdown: AtomicU64::new(countdown),
+        }
+    }
+
+    fn check_countdown(&self) -> Result<(), std::io::Error> {
+        if self.countdown.load(Ordering::SeqCst) == 0 {
+            return Err(std::io::Error::from(ErrorKind::Other));
+        }
+
+        Ok(())
+    }
+
+    fn decrement_countdown(&self) -> Result<(), std::io::Error> {
+        if self.countdown.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| if x > 0 { Some(x - 1) } else { None }).is_err() {
+            return Err(std::io::Error::from(ErrorKind::Other));
+        }
+
+        Ok(())
+    }
+}
+
+impl StorageBackend for FuzzerBackend {
+    fn len(&self) -> Result<u64, std::io::Error> {
+        self.inner.len()
+    }
+
+    fn read(&self, offset: u64, len: usize) -> Result<Vec<u8>, std::io::Error> {
+        self.check_countdown()?;
+        self.inner.read(offset, len)
+    }
+
+    fn set_len(&self, len: u64) -> Result<(), std::io::Error> {
+        self.inner.set_len(len)
+    }
+
+    fn sync_data(&self, _eventual: bool) -> Result<(), std::io::Error> {
+        // No-op. The fuzzer doesn't test crashes, so fsync is unnecessary
+        Ok(())
+    }
+
+    fn write(&self, offset: u64, data: &[u8]) -> Result<(), std::io::Error> {
+        self.decrement_countdown()?;
+        self.inner.write(offset, data)
+    }
+}
+
 enum FuzzerSavepoint<T: Clone> {
     Ephemeral(Savepoint, BTreeMap<u64, T>),
     Persistent(u64, BTreeMap<u64, T>),
@@ -355,7 +414,7 @@ fn handle_table_op(op: &FuzzOperation, reference: &mut BTreeMap<u64, usize>, tab
             }
             drop(reference_iter);
             reference.retain(|x, _| (*x < start || *x >= end) || *x % modulus != 0);
-            // This is basically assert!(iter.next().is_none()), but we also allow an Err such as SimulatedIOFailure
+            // This is basically assert!(iter.next().is_none()), but we also allow an Err such as a simulated IO error
            if let Some(Ok((_, _))) = iter.next() {
                 panic!();
             }
@@ -390,16 +449,35 @@ fn handle_table_op(op: &FuzzOperation, reference: &mut BTreeMap<u64, usize>, tab
     Ok(())
 }
 
+fn is_simulated_io_error(err: &redb::Error) -> bool {
+    match err {
+        Error::Io(io_err) => {
+            matches!(io_err.kind(), ErrorKind::Other)
+        },
+        _ => false
+    }
+}
+
 fn exec_table_crash_support<T: Clone>(config: &FuzzConfig, apply: fn(&Database, &mut BTreeMap<u64, T>, &FuzzTransaction, &mut SavepointManager<T>) -> Result<(), redb::Error>) -> Result<(), redb::Error> {
     let mut redb_file: NamedTempFile = NamedTempFile::new().unwrap();
+    let backend = FuzzerBackend::new(FileBackend::new(redb_file.as_file().try_clone().unwrap())?, config.crash_after_ops.value);
 
-    let mut db = Database::builder()
+    let result = Database::builder()
         .set_page_size(config.page_size.value)
         .set_cache_size(config.cache_size.value)
         .set_region_size(config.region_size.value as u64)
-        .create(redb_file.path())
-        .unwrap();
-    db.set_crash_countdown(config.crash_after_ops.value);
+        .create_with_backend(backend);
+    let mut db = match result {
+        Ok(db) => db,
+        Err(err) => {
+            let err: redb::Error = err.into();
+            if is_simulated_io_error(&err) {
+                return Ok(());
+            } else {
+                return Err(err);
+            }
+        }
+    };
 
     let mut savepoint_manager = SavepointManager::new();
     let mut reference = BTreeMap::new();
@@ -414,7 +492,7 @@ fn exec_table_crash_support<T: Clone>(config: &FuzzConfig, apply: fn(&Database,
             }
         }
         Err(err) => {
-            if matches!(err, Error::SimulatedIOFailure) {
+            if is_simulated_io_error(&err) {
                 drop(db);
                 savepoint_manager.crash();
                 non_durable_reference = reference.clone();
@@ -426,11 +504,12 @@ fn exec_table_crash_support<T: Clone>(config: &FuzzConfig, apply: fn(&Database,
                 assert_ne!(god_byte[0] & 2, 0);
 
                 // Repair the database
+                let backend = FuzzerBackend::new(FileBackend::new(redb_file.as_file().try_clone().unwrap()).unwrap(), u64::MAX);
                 db = Database::builder()
                     .set_page_size(config.page_size.value)
                     .set_cache_size(config.cache_size.value)
                     .set_region_size(config.region_size.value as u64)
-                    .create(redb_file.path())
+                    .create_with_backend(backend)
                     .unwrap();
             } else {
                 return Err(err);
@@ -440,7 +519,7 @@ fn exec_table_crash_support<T: Clone>(config: &FuzzConfig, apply: fn(&Database,
 
        let result = apply(&db, &mut non_durable_reference, transaction, &mut savepoint_manager);
        if result.is_err() {
-            if matches!(result, Err(Error::SimulatedIOFailure)) {
+            if is_simulated_io_error(result.as_ref().err().unwrap()) {
                 drop(db);
                 savepoint_manager.crash();
                 non_durable_reference = reference.clone();
@@ -452,11 +531,12 @@ fn exec_table_crash_support<T: Clone>(config: &FuzzConfig, apply: fn(&Database,
                 assert_ne!(god_byte[0] & 2, 0);
 
                 // Repair the database
+                let backend = FuzzerBackend::new(FileBackend::new(redb_file.as_file().try_clone().unwrap()).unwrap(), u64::MAX);
                 db = Database::builder()
                     .set_page_size(config.page_size.value)
                     .set_cache_size(config.cache_size.value)
                     .set_region_size(config.region_size.value as u64)
-                    .create(redb_file.path())
+                    .create_with_backend(backend)
                     .unwrap();
             } else {
                 return result;
@@ -469,7 +549,7 @@ fn exec_table_crash_support<T: Clone>(config: &FuzzConfig, apply: fn(&Database,
     match run_compaction(&mut db, &mut savepoint_manager) {
         Ok(_) => {}
         Err(err) => {
-            if !matches!(err, Error::SimulatedIOFailure) {
+            if !is_simulated_io_error(&err) {
                 return Err(err);
             }
         }
diff --git a/src/db.rs b/src/db.rs
index 81b00092..cf8d0b1b 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -36,12 +36,20 @@ pub trait StorageBackend: 'static + Debug + Send + Sync {
     fn len(&self) -> std::result::Result<u64, io::Error>;
 
     /// Reads the specified array of bytes from the storage.
+    ///
+    /// If `len` + `offset` exceeds the length of the storage, an appropriate `Error` should be returned or a panic may occur.
     fn read(&self, offset: u64, len: usize) -> std::result::Result<Vec<u8>, io::Error>;
 
     /// Sets the length of the storage.
+    ///
+    /// When extending the storage, the new positions should be zero-initialized.
     fn set_len(&self, len: u64) -> std::result::Result<(), io::Error>;
 
     /// Syncs all buffered data with the persistent storage.
+    ///
+    /// If `eventual` is true, data may become persistent at some point after this call returns,
+    /// but the storage must guarantee that a write barrier is inserted: i.e., all writes before this
+    /// call to `sync_data()` will become persistent before any writes that occur after.
     fn sync_data(&self, eventual: bool) -> std::result::Result<(), io::Error>;
 
     /// Writes the specified array to the storage.
@@ -304,11 +312,6 @@ impl Database {
         &self.mem
     }
 
-    #[cfg(any(fuzzing, test))]
-    pub fn set_crash_countdown(&self, value: u64) {
-        self.mem.set_crash_countdown(value);
-    }
-
     pub(crate) fn verify_primary_checksums(mem: &TransactionalMemory) -> Result<bool> {
         let fake_freed_pages = Arc::new(Mutex::new(vec![]));
         let table_tree = TableTree::new(mem.get_data_root(), mem, fake_freed_pages.clone());
@@ -876,10 +879,117 @@ impl std::fmt::Debug for Database {
 
 #[cfg(test)]
 mod test {
+    use crate::backends::FileBackend;
     use crate::{
-        Database, DatabaseError, Durability, ReadableTable, StorageError, TableDefinition,
+        Database, DatabaseError, Durability, ReadableTable, StorageBackend, StorageError,
+        TableDefinition,
     };
     use std::io::ErrorKind;
+    use std::sync::atomic::{AtomicU64, Ordering};
+
+    #[derive(Debug)]
+    struct FailingBackend {
+        inner: FileBackend,
+        countdown: AtomicU64,
+    }
+
+    impl FailingBackend {
+        fn new(backend: FileBackend, countdown: u64) -> Self {
+            Self {
+                inner: backend,
+                countdown: AtomicU64::new(countdown),
+            }
+        }
+
+        fn check_countdown(&self) -> Result<(), std::io::Error> {
+            if self.countdown.load(Ordering::SeqCst) == 0 {
+                return Err(std::io::Error::from(ErrorKind::Other));
+            }
+
+            Ok(())
+        }
+
+        fn decrement_countdown(&self) -> Result<(), std::io::Error> {
+            if self
+                .countdown
+                .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
+                    if x > 0 {
+                        Some(x - 1)
+                    } else {
+                        None
+                    }
+                })
+                .is_err()
+            {
+                return Err(std::io::Error::from(ErrorKind::Other));
+            }
+
+            Ok(())
+        }
+    }
+
+    impl StorageBackend for FailingBackend {
+        fn len(&self) -> Result<u64, std::io::Error> {
+            self.inner.len()
+        }
+
+        fn read(&self, offset: u64, len: usize) -> Result<Vec<u8>, std::io::Error> {
+            self.check_countdown()?;
+            self.inner.read(offset, len)
+        }
+
+        fn set_len(&self, len: u64) -> Result<(), std::io::Error> {
+            self.inner.set_len(len)
+        }
+
+        fn sync_data(&self, eventual: bool) -> Result<(), std::io::Error> {
+            self.check_countdown()?;
+            self.inner.sync_data(eventual)
+        }
+
+        fn write(&self, offset: u64, data: &[u8]) -> Result<(), std::io::Error> {
+            self.decrement_countdown()?;
+            self.inner.write(offset, data)
+        }
+    }
+
+    #[test]
+    fn crash_regression4() {
+        let tmpfile = crate::create_tempfile();
+
+        let backend = FailingBackend::new(
+            FileBackend::new(tmpfile.as_file().try_clone().unwrap()).unwrap(),
+            23,
+        );
+        let db = Database::builder()
+            .set_cache_size(12686)
+            .set_page_size(8 * 1024)
+            .set_region_size(32 * 4096)
+            .create_with_backend(backend)
+            .unwrap();
+
+        let table_def: TableDefinition<u64, &[u8]> = TableDefinition::new("x");
+
+        let tx = db.begin_write().unwrap();
+        let _savepoint = tx.ephemeral_savepoint().unwrap();
+        let _persistent_savepoint = tx.persistent_savepoint().unwrap();
+        tx.commit().unwrap();
+        let tx = db.begin_write().unwrap();
+        {
+            let mut table = tx.open_table(table_def).unwrap();
+            let _ = table.insert_reserve(118821, 360).unwrap();
+        }
+        let result = tx.commit();
+        assert!(result.is_err());
+
+        drop(db);
+        Database::builder()
+            .set_cache_size(1024 * 1024)
+            .set_page_size(8 * 1024)
+            .set_region_size(32 * 4096)
+            .create(tmpfile.path())
+            .unwrap();
+    }
 
     #[test]
     fn small_pages() {
@@ -1062,60 +1172,6 @@ mod test {
         tx.abort().unwrap();
     }
 
-    #[test]
-    fn crash_regression3() {
-        let tmpfile = crate::create_tempfile();
-
-        let db = Database::builder()
-            .set_cache_size(1024 * 1024)
-            .set_page_size(16 * 1024)
-            .set_region_size(32 * 4096)
-            .create(tmpfile.path())
-            .unwrap();
-
-        let tx = db.begin_write().unwrap();
-        let savepoint = tx.ephemeral_savepoint().unwrap();
-        tx.commit().unwrap();
-
-        let mut tx = db.begin_write().unwrap();
-        tx.restore_savepoint(&savepoint).unwrap();
-        tx.commit().unwrap();
-    }
-
-    #[test]
-    fn crash_regression4() {
-        let tmpfile = crate::create_tempfile();
-
-        let db = Database::builder()
-            .set_cache_size(12686)
-            .set_page_size(8 * 1024)
-            .set_region_size(32 * 4096)
-            .create(tmpfile.path())
-            .unwrap();
-        db.set_crash_countdown(10);
-
-        let table_def: TableDefinition<u64, &[u8]> = TableDefinition::new("x");
-
-        let tx = db.begin_write().unwrap();
-        let _savepoint = tx.ephemeral_savepoint().unwrap();
-        let _persistent_savepoint = tx.persistent_savepoint().unwrap();
-        tx.commit().unwrap();
-        let tx = db.begin_write().unwrap();
-        {
-            let mut table = tx.open_table(table_def).unwrap();
-            let _ = table.insert_reserve(118821, 360);
-        }
-
-        drop(tx);
-        drop(db);
-        Database::builder()
-            .set_cache_size(1024 * 1024)
-            .set_page_size(8 * 1024)
-            .set_region_size(32 * 4096)
-            .create(tmpfile.path())
-            .unwrap();
-    }
-
     #[test]
     fn dynamic_shrink() {
         let tmpfile = crate::create_tempfile();
diff --git a/src/error.rs b/src/error.rs
index 639daf19..12490a05 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -8,9 +8,6 @@ use std::{io, panic};
 #[derive(Debug)]
 #[non_exhaustive]
 pub enum StorageError {
-    /// For use by fuzzer only
-    #[cfg(any(fuzzing, test))]
-    SimulatedIOFailure,
     /// The Database is corrupted
     Corrupted(String),
     /// The value being inserted exceeds the maximum of 3GiB
@@ -34,8 +31,6 @@ impl From<io::Error> for StorageError {
 impl From<StorageError> for Error {
     fn from(err: StorageError) -> Error {
         match err {
-            #[cfg(any(fuzzing, test))]
-            StorageError::SimulatedIOFailure => Error::SimulatedIOFailure,
             StorageError::Corrupted(msg) => Error::Corrupted(msg),
             StorageError::ValueTooLarge(x) => Error::ValueTooLarge(x),
             StorageError::Io(x) => Error::Io(x),
@@ -47,10 +42,6 @@ impl Display for StorageError {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
         match self {
-            #[cfg(any(fuzzing, test))]
-            StorageError::SimulatedIOFailure => {
-                write!(f, "Fuzzer: Simulated I/O error")
-            }
             StorageError::Corrupted(msg) => {
                 write!(f, "DB corrupted: {msg}")
             }
@@ -415,9 +406,6 @@ impl std::error::Error for CommitError {}
 #[derive(Debug)]
 #[non_exhaustive]
 pub enum Error {
-    /// For use by fuzzer only
-    #[cfg(any(fuzzing, test))]
-    SimulatedIOFailure,
     /// The Database is already open. Cannot acquire lock.
     DatabaseAlreadyOpen,
     /// This savepoint is invalid or cannot be created.
@@ -474,10 +462,6 @@ impl From<io::Error> for Error {
 impl Display for Error {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
         match self {
-            #[cfg(any(fuzzing, test))]
-            Error::SimulatedIOFailure => {
-                write!(f, "Fuzzer: Simulated I/O error")
-            }
             Error::Corrupted(msg) => {
                 write!(f, "DB corrupted: {msg}")
             }
diff --git a/src/tree_store/page_store/cached_file.rs b/src/tree_store/page_store/cached_file.rs
index 1a950baf..749fb1f6 100644
--- a/src/tree_store/page_store/cached_file.rs
+++ b/src/tree_store/page_store/cached_file.rs
@@ -6,7 +6,7 @@ use std::io;
 use std::mem;
 use std::ops::{Index, IndexMut};
 use std::slice::SliceIndex;
-#[cfg(any(fuzzing, test, feature = "cache_metrics"))]
+#[cfg(feature = "cache_metrics")]
 use std::sync::atomic::AtomicU64;
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex, RwLock};
@@ -235,8 +235,6 @@ pub(super) struct PagedCachedFile {
     read_cache: Vec<RwLock<PrioritizedCache>>,
     // TODO: maybe move this cache to WriteTransaction?
     write_buffer: Mutex<PrioritizedWriteCache>,
-    #[cfg(any(fuzzing, test))]
-    crash_countdown: AtomicU64,
 }
 
 impl PagedCachedFile {
@@ -265,8 +263,6 @@ impl PagedCachedFile {
             fsync_failed: Default::default(),
             read_cache,
             write_buffer: Mutex::new(PrioritizedWriteCache::new()),
-            #[cfg(any(fuzzing, test))]
-            crash_countdown: AtomicU64::new(u64::MAX),
         })
     }
 
@@ -274,11 +270,6 @@ impl PagedCachedFile {
         self.file.len().map_err(StorageError::from)
     }
 
-    #[cfg(any(fuzzing, test))]
-    pub(crate) fn set_crash_countdown(&self, value: u64) {
-        self.crash_countdown.store(value, Ordering::Release);
-    }
-
     const fn lock_stripes() -> usize {
         131
     }
@@ -293,18 +284,11 @@ impl PagedCachedFile {
     }
 
     #[inline]
-    #[cfg(not(fuzzing))]
     fn set_fsync_failed(&self, failed: bool) {
         self.fsync_failed.store(failed, Ordering::Release);
     }
 
     fn flush_write_buffer(&self) -> Result {
-        #[cfg(any(fuzzing, test))]
-        {
-            if self.crash_countdown.load(Ordering::Acquire) == 0 {
-                return Err(StorageError::SimulatedIOFailure);
-            }
-        }
         self.check_fsync_failure()?;
         let mut write_buffer = self.write_buffer.lock().unwrap();
@@ -331,14 +315,11 @@ impl PagedCachedFile {
     pub(super) fn flush(&self, #[allow(unused_variables)] eventual: bool) -> Result {
         self.check_fsync_failure()?;
         self.flush_write_buffer()?;
-        // Disable fsync when fuzzing, since it doesn't test crash consistency
-        #[cfg(not(fuzzing))]
-        {
-            let res = self.file.sync_data(eventual).map_err(StorageError::from);
-            if res.is_err() {
-                self.set_fsync_failed(true);
-                return res;
-            }
+
+        let res = self.file.sync_data(eventual).map_err(StorageError::from);
+        if res.is_err() {
+            self.set_fsync_failed(true);
+            return res;
         }
 
         Ok(())
@@ -351,12 +332,6 @@ impl PagedCachedFile {
 
     // Read directly from the file, ignoring any cached data
     pub(super) fn read_direct(&self, offset: u64, len: usize) -> Result<Vec<u8>> {
-        #[cfg(any(fuzzing, test))]
-        {
-            if self.crash_countdown.load(Ordering::Acquire) == 0 {
-                return Err(StorageError::SimulatedIOFailure);
-            }
-        }
         self.check_fsync_failure()?;
         Ok(self.file.read(offset, len)?)
     }
@@ -486,13 +461,6 @@ impl PagedCachedFile {
         } else {
             let previous = self.write_buffer_bytes.fetch_add(len, Ordering::AcqRel);
             if previous + len > self.max_write_buffer_bytes {
-                #[cfg(any(fuzzing, test))]
-                {
-                    if self.crash_countdown.load(Ordering::Acquire) == 0 {
-                        return Err(StorageError::SimulatedIOFailure);
-                    }
-                    self.crash_countdown.fetch_sub(1, Ordering::AcqRel);
-                }
                 let mut removed_bytes = 0;
                 while removed_bytes < len {
                     if let Some((offset, buffer, removed_priority)) = lock.pop_lowest_priority() {
diff --git a/src/tree_store/page_store/page_manager.rs b/src/tree_store/page_store/page_manager.rs
index 126501ce..2254897e 100644
--- a/src/tree_store/page_store/page_manager.rs
+++ b/src/tree_store/page_store/page_manager.rs
@@ -272,11 +272,6 @@ impl TransactionalMemory {
         })
     }
 
-    #[cfg(any(fuzzing, test))]
-    pub(crate) fn set_crash_countdown(&self, value: u64) {
-        self.storage.set_crash_countdown(value);
-    }
-
     pub(crate) fn clear_read_cache(&mut self) {
         self.storage.invalidate_cache_all()
    }
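
Note for readers: the pattern this series enables — wrapping redb::backends::FileBackend in a caller-defined StorageBackend and opening the database with Builder::create_with_backend() — can be used entirely outside the crate, which is why set_crash_countdown() and StorageError::SimulatedIOFailure could be deleted. Below is a minimal standalone sketch condensed from the FuzzerBackend/FailingBackend code in patch 5. It assumes a redb build with this series applied; the names CountdownBackend and open_with_write_limit are illustrative and not part of redb's API.

    use std::io::ErrorKind;
    use std::sync::atomic::{AtomicU64, Ordering};

    use redb::backends::FileBackend;
    use redb::{Database, DatabaseError, StorageBackend};

    // Delegates everything to FileBackend, but fails each write once the
    // countdown is exhausted (the same idea as FuzzerBackend above).
    #[derive(Debug)]
    struct CountdownBackend {
        inner: FileBackend,
        countdown: AtomicU64,
    }

    impl StorageBackend for CountdownBackend {
        fn len(&self) -> std::result::Result<u64, std::io::Error> {
            self.inner.len()
        }

        fn read(&self, offset: u64, len: usize) -> std::result::Result<Vec<u8>, std::io::Error> {
            self.inner.read(offset, len)
        }

        fn set_len(&self, len: u64) -> std::result::Result<(), std::io::Error> {
            self.inner.set_len(len)
        }

        fn sync_data(&self, eventual: bool) -> std::result::Result<(), std::io::Error> {
            self.inner.sync_data(eventual)
        }

        fn write(&self, offset: u64, data: &[u8]) -> std::result::Result<(), std::io::Error> {
            // Atomically decrement; fetch_update returns Err once the count hits zero.
            if self
                .countdown
                .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| x.checked_sub(1))
                .is_err()
            {
                return Err(std::io::Error::from(ErrorKind::Other));
            }
            self.inner.write(offset, data)
        }
    }

    // Illustrative helper: open a database whose backend fails after `writes` writes.
    fn open_with_write_limit(file: std::fs::File, writes: u64) -> Result<Database, DatabaseError> {
        let backend = CountdownBackend {
            inner: FileBackend::new(file)?,
            countdown: AtomicU64::new(writes),
        };
        Database::builder().create_with_backend(backend)
    }

Unlike the fuzzer's backend, sync_data() is forwarded here rather than made a no-op, so the write-barrier guarantee documented in patch 5's StorageBackend doc comments still holds for this wrapper.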