refactor(dbs): Fix unreachable_pub lints
This also affected a single method in `node` in order to prevent
'private type in public interface' errors.
Chris Connelly authored and oetyng committed Jul 6, 2021
1 parent feb2a1b commit 712810f
Showing 9 changed files with 49 additions and 39 deletions.
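
For context, the `unreachable_pub` lint fires on items declared `pub` that can never actually be reached from outside the crate (typically because a parent module is private), and its suggested fix is to state the effective visibility with `pub(crate)` or `pub(super)`, which is what this commit does throughout `dbs`. A minimal sketch of the lint, with hypothetical names rather than this repository's code:

// main.rs — compiles, but warns under the lint this commit fixes
#![warn(unreachable_pub)]

mod data_store {
    // warning: unreachable `pub` item — `data_store` is private,
    // so this function is not actually visible outside the crate.
    pub fn put() {}

    // The suggested fix: declare what the visibility really is.
    pub(crate) fn get() {}
}

fn main() {
    data_store::put();
    data_store::get();
}
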
4 changes: 2 additions & 2 deletions src/dbs/data_store/data.rs
@@ -10,11 +10,11 @@ use super::ToDbKey;
use crate::types::DataAddress;
use serde::{de::DeserializeOwned, Serialize};

-pub trait Data: Serialize + DeserializeOwned {
+pub(crate) trait Data: Serialize + DeserializeOwned {
type Id: DataId;
fn id(&self) -> &Self::Id;
}

-pub trait DataId: ToDbKey + PartialEq + Eq + DeserializeOwned {
+pub(crate) trait DataId: ToDbKey + PartialEq + Eq + DeserializeOwned {
fn to_data_address(&self) -> DataAddress;
}
28 changes: 14 additions & 14 deletions src/dbs/data_store/mod.rs
@@ -8,17 +8,17 @@

//! A simple, persistent, disk-based key-value store.
-mod data;
+pub(super) mod data;
#[cfg(test)]
mod tests;
-mod to_db_key;
-mod used_space;
+pub(super) mod to_db_key;
+pub(super) mod used_space;

-pub use to_db_key::ToDbKey;
-pub use used_space::UsedSpace;
+use to_db_key::ToDbKey;
+use used_space::UsedSpace;

use super::{encoding::serialise, Error, Result}; // TODO: FIX
-pub use data::{Data, DataId};
+use data::{Data, DataId};
use sled::Db;
use std::{marker::PhantomData, path::Path};
use tracing::info;
@@ -46,7 +46,7 @@ where
///
/// The maximum storage space is defined by `max_capacity`. This specifies the max usable by
/// _all_ `DataStores`, not per `DataStore`.
-pub async fn new<P: AsRef<Path>>(root: P, used_space: UsedSpace) -> Result<Self> {
+pub(crate) async fn new<P: AsRef<Path>>(root: P, used_space: UsedSpace) -> Result<Self> {
let dir = root.as_ref().join(DB_DIR).join(Self::subdir());

used_space.add_dir(&dir);
@@ -63,12 +63,12 @@

impl<T: Data> DataStore<T> {
///
-pub async fn total_used_space(&self) -> u64 {
+pub(crate) async fn total_used_space(&self) -> u64 {
self.used_space.total().await
}

/// Tests if a data chunk has been previously stored under `id`.
-pub async fn has(&self, id: &T::Id) -> Result<bool> {
+pub(crate) async fn has(&self, id: &T::Id) -> Result<bool> {
let key = id.to_db_key()?;
self.sled.contains_key(key).map_err(Error::from)
}
@@ -77,7 +77,7 @@ impl<T: Data> DataStore<T> {
///
/// If the data doesn't exist, it does nothing and returns `Ok`. In the case of an IO error, it
/// returns `Error::Io`.
-pub async fn delete(&self, id: &T::Id) -> Result<()> {
+pub(crate) async fn delete(&self, id: &T::Id) -> Result<()> {
let key = id.to_db_key()?;
self.sled.remove(key).map_err(Error::from).map(|_| ())
}
@@ -88,7 +88,7 @@ impl<T: Data> DataStore<T> {
/// an IO error, it returns `Error::Io`.
///
/// If a chunk with the same id already exists, it will be overwritten.
-pub async fn put(&self, chunk: &T) -> Result<()> {
+pub(crate) async fn put(&self, chunk: &T) -> Result<()> {
info!("Writing chunk");

let serialised_chunk = serialise(chunk)?.to_vec();
@@ -116,7 +116,7 @@ impl<T: Data> DataStore<T> {
/// Returns a data chunk previously stored under `id`.
///
/// If the data file can't be accessed, it returns `Error::NoSuchData`.
-pub async fn get(&self, id: &T::Id) -> Result<T> {
+pub(crate) async fn get(&self, id: &T::Id) -> Result<T> {
let key = id.to_db_key()?;
let res = self
.sled
@@ -135,7 +135,7 @@ impl<T: Data> DataStore<T> {
}

/// Used space to max capacity ratio.
-pub async fn used_space_ratio(&self) -> f64 {
+pub(crate) async fn used_space_ratio(&self) -> f64 {
let used = self.total_used_space().await;
let max_capacity = self.used_space.max_capacity();
let used_space_ratio = used as f64 / max_capacity as f64;
@@ -147,7 +147,7 @@ impl<T: Data> DataStore<T> {

/// Lists all keys of currently stored data.
#[cfg_attr(not(test), allow(unused))]
-pub async fn keys(&self) -> Result<Vec<T::Id>> {
+pub(crate) async fn keys(&self) -> Result<Vec<T::Id>> {
let keys = self
.sled
.iter()
4 changes: 2 additions & 2 deletions src/dbs/data_store/to_db_key.rs
@@ -13,7 +13,7 @@ use crate::types::{
use serde::{de::DeserializeOwned, Serialize};
use xor_name::XorName;

-pub trait ToDbKey: Serialize {
+pub(crate) trait ToDbKey: Serialize {
/// The encoded string representation of an identifier, used as a key in the context of a
/// PickleDB <key,value> store.
fn to_db_key(&self) -> Result<String> {
@@ -22,7 +22,7 @@ pub trait ToDbKey: Serialize {
}
}

-pub fn from_db_key<T: DeserializeOwned>(key: &str) -> Result<T> {
+pub(crate) fn from_db_key<T: DeserializeOwned>(key: &str) -> Result<T> {
let decoded = hex::decode(key).map_err(|e| Error::InvalidOperation(e.to_string()))?;
deserialise(&decoded)
}
14 changes: 7 additions & 7 deletions src/dbs/data_store/used_space.rs
@@ -15,40 +15,40 @@ use std::sync::Arc;
use tokio::fs;

#[derive(Clone, Debug)]
-pub struct UsedSpace {
+pub(crate) struct UsedSpace {
/// the maximum (inclusive) allocated space for storage
max_capacity: u64,
dirs: Arc<DashSet<PathBuf>>,
}

impl UsedSpace {
-pub fn new(max_capacity: u64) -> Self {
+pub(crate) fn new(max_capacity: u64) -> Self {
Self {
max_capacity,
dirs: Arc::new(DashSet::new()),
}
}

-pub fn add_dir(&self, dir: &Path) {
+pub(crate) fn add_dir(&self, dir: &Path) {
let _ = self.dirs.insert(dir.to_path_buf());
}

-// pub fn remove_dir(&self, dir: &Path) {
+// pub(crate) fn remove_dir(&self, dir: &Path) {
// let _ = self.dirs.remove(&dir.to_path_buf());
// }

-pub fn max_capacity(&self) -> u64 {
+pub(crate) fn max_capacity(&self) -> u64 {
self.max_capacity
}

-pub async fn can_consume(&self, space: u64) -> bool {
+pub(crate) async fn can_consume(&self, space: u64) -> bool {
self.total()
.await
.checked_add(space)
.map_or(false, |new_total| self.max_capacity >= new_total)
}

-pub async fn total(&self) -> u64 {
+pub(crate) async fn total(&self) -> u64 {
// todo: handle the error
let handles = self
.dirs
2 changes: 1 addition & 1 deletion src/dbs/encoding.rs
@@ -10,7 +10,7 @@ use super::{Error, Result};
use serde::{Deserialize, Serialize};

/// Wrapper for raw bincode::serialise.
-pub fn serialise<T: Serialize>(data: &T) -> Result<Vec<u8>> {
+pub(super) fn serialise<T: Serialize>(data: &T) -> Result<Vec<u8>> {
bincode::serialize(data).map_err(|err| Error::Serialize(err.as_ref().to_string()))
}

2 changes: 1 addition & 1 deletion src/dbs/errors.rs
@@ -10,7 +10,7 @@ use std::io;
use thiserror::Error;

/// Specialisation of `std::Result` for dbs.
-pub type Result<T, E = Error> = std::result::Result<T, E>;
+pub(super) type Result<T, E = Error> = std::result::Result<T, E>;

#[allow(clippy::large_enum_variant)]
#[derive(Error, Debug)]
14 changes: 7 additions & 7 deletions src/dbs/event_store.rs
@@ -21,18 +21,18 @@ use xor_name::XorName;
const DB_EXTENSION: &str = ".db";

/// Disk storage for transfers.
-pub struct EventStore<TEvent: Debug + Serialize + DeserializeOwned> {
+pub(crate) struct EventStore<TEvent: Debug + Serialize + DeserializeOwned> {
db: PickleDb,
db_path: PathBuf,
_phantom: PhantomData<TEvent>,
}

-pub struct DeletableStore {
+pub(crate) struct DeletableStore {
db_path: PathBuf,
}

impl DeletableStore {
-pub async fn delete(&self) -> Result<()> {
+pub(crate) async fn delete(&self) -> Result<()> {
fs::remove_file(self.db_path.as_path())
.await
.map_err(Error::Io)
@@ -43,7 +43,7 @@ impl<'a, TEvent: Debug + Serialize + DeserializeOwned> EventStore<TEvent>
where
TEvent: 'a,
{
-pub async fn new(id: XorName, db_dir: &Path) -> Result<Self> {
+pub(crate) async fn new(id: XorName, db_dir: &Path) -> Result<Self> {
let db_name = format!("{}{}", id.to_db_key()?, DB_EXTENSION);
let db_path = db_dir.join(db_name.clone());
Ok(Self {
@@ -53,14 +53,14 @@
})
}

-pub fn as_deletable(&self) -> DeletableStore {
+pub(crate) fn as_deletable(&self) -> DeletableStore {
DeletableStore {
db_path: self.db_path.clone(),
}
}

///
-pub fn get_all(&self) -> Vec<TEvent> {
+pub(crate) fn get_all(&self) -> Vec<TEvent> {
let keys = self.db.get_all();

let mut events: Vec<(usize, TEvent)> = keys
@@ -86,7 +86,7 @@
}

///
-pub fn append(&mut self, event: TEvent) -> Result<()> {
+pub(crate) fn append(&mut self, event: TEvent) -> Result<()> {
let key = &self.db.total_keys().to_string();
if self.db.exists(key) {
return Err(Error::InvalidOperation(format!(
14 changes: 10 additions & 4 deletions src/dbs/mod.rs
@@ -11,14 +11,20 @@ mod encoding;
mod errors;
mod event_store;

-pub use data_store::*;
-pub use errors::{Error, Result};
-pub use event_store::EventStore;
+use data_store::to_db_key::ToDbKey;
+pub(crate) use data_store::{
+data::{Data, DataId},
+used_space::UsedSpace,
+DataStore, Subdir,
+};
+pub(crate) use errors::Error;
+use errors::Result;
+pub(crate) use event_store::EventStore;
use pickledb::{PickleDb, PickleDbDumpPolicy};
use std::path::Path;
use tokio::fs;
///
-pub async fn new_auto_dump_db<D: AsRef<Path>, N: AsRef<Path>>(
+pub(crate) async fn new_auto_dump_db<D: AsRef<Path>, N: AsRef<Path>>(
db_dir: D,
db_name: N,
) -> Result<PickleDb> {
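
The re-export changes in `src/dbs/mod.rs` above follow directly from the visibility changes: once an item is `pub(crate)`, a plain `pub use` of it is rejected, so each re-export has to be narrowed to match. A minimal sketch, with an assumed module layout rather than the real one:

mod data_store {
    pub(crate) struct DataStore;
}

// A fully public re-export would fail here, roughly:
// error[E0365]: `DataStore` is only public within the crate,
//               and cannot be re-exported outside of it
// pub use data_store::DataStore;

// Narrowing the re-export to the item's visibility compiles fine.
pub(crate) use data_store::DataStore;

fn main() {
    let _store = DataStore; // still reachable anywhere in the crate
}
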
6 changes: 5 additions & 1 deletion src/node/metadata/mod.rs
@@ -51,7 +51,11 @@ pub struct Metadata {
}

impl Metadata {
-pub async fn new(path: &Path, used_space: UsedSpace, capacity: Capacity) -> Result<Self> {
+pub(crate) async fn new(
+path: &Path,
+used_space: UsedSpace,
+capacity: Capacity,
+) -> Result<Self> {
let chunk_records = ChunkRecords::new(capacity);
let map_storage = MapStorage::new(path, used_space.max_capacity()); // to be removed so we don't care to implement this
let sequence_storage = SequenceStorage::new(path, used_space.max_capacity()); // to be removed so we don't care to implement this
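
The `node` change the commit message mentions is forced by the same narrowing: `UsedSpace` is now `pub(crate)`, so a fully public `Metadata::new` taking it as a parameter would expose a crate-private type, which the compiler of the time rejected as an error (newer compilers report it through the `private_interfaces` lint instead). A hypothetical reduction, not the real types:

pub(crate) struct UsedSpace; // crate-visible only, as in this commit

pub struct Metadata;

impl Metadata {
    // Keeping this `pub` would be rejected, roughly:
    // error[E0446]: private type `UsedSpace` in public interface
    // pub fn new(used_space: UsedSpace) -> Self { ... }

    // Narrowing the method to the parameter type's visibility fixes it.
    pub(crate) fn new(used_space: UsedSpace) -> Self {
        let _ = used_space; // placeholder: the real method stores it
        Metadata
    }
}

fn main() {
    let _meta = Metadata::new(UsedSpace);
}
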
