diff --git a/Cargo.toml b/Cargo.toml
index 19099b9..6ad5919 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -16,6 +16,8 @@ chacha20poly1305 = { version = "0.10", default-features = false, features = ["he
 serde = { version = "1.0.160", default-features = false, features = ["derive"] }
 rand_core = { version = "0.6.4", default-features = false }
 delog = "0.1.6"
+littlefs2 = "0.4.0"
+serde-byte-array = "0.1.2"
 
 [dev-dependencies]
 trussed = { version = "0.1.0", default-features = false, features = ["serde-extensions", "virt"] }
@@ -24,6 +26,8 @@ trussed = { version = "0.1.0", default-features = false, features = ["serde-exte
 default = []
 wrap-key-to-file = ["chacha20poly1305"]
+chunked = []
+encrypted-chunked = ["chunked", "chacha20poly1305/stream"]
 virt = ["std", "trussed/virt"]
 std = []
diff --git a/Makefile b/Makefile
index 4291166..834a01b 100644
--- a/Makefile
+++ b/Makefile
@@ -9,7 +9,7 @@ check:
 lint:
 	cargo clippy --all-features --all-targets -- --deny warnings
 	cargo fmt -- --check
-	RUSTDOCFLAGS='-Dwarnings' cargo doc --no-deps
+	RUSTDOCFLAGS='-Dwarnings' cargo doc --no-deps --all-features
 	reuse lint
 
 .PHONY: test
diff --git a/src/lib.rs b/src/lib.rs
index 8f1bd46..919dbf4 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -2,14 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0 or MIT
 
 #![cfg_attr(not(any(test, feature = "std")), no_std)]
-#![warn(
-    missing_debug_implementations,
-    // missing_docs,
-    non_ascii_idents,
-    trivial_casts,
-    unused,
-    unused_qualifications
-)]
+#![warn(non_ascii_idents, trivial_casts, unused, unused_qualifications)]
 #![deny(unsafe_code)]
 
 delog::generate_macros!();
@@ -22,6 +15,9 @@ pub mod virt;
 #[cfg(feature = "wrap-key-to-file")]
 pub mod wrap_key_to_file;
 
+#[cfg(feature = "chunked")]
+pub mod streaming;
+
 #[derive(Clone, Debug, Default)]
 #[non_exhaustive]
 pub struct StagingBackend {}
@@ -32,9 +28,12 @@ impl StagingBackend {
     }
 }
 
-#[derive(Default, Debug)]
+#[derive(Default)]
 #[non_exhaustive]
-pub struct StagingContext {}
+pub struct StagingContext {
+    #[cfg(feature = "chunked")]
+    chunked_io_state: Option<crate::streaming::ChunkedIoState>,
+}
 
 impl Backend for StagingBackend {
     type Context = StagingContext;
diff --git a/src/streaming/mod.rs b/src/streaming/mod.rs
new file mode 100644
index 0000000..c8e22e1
--- /dev/null
+++ b/src/streaming/mod.rs
@@ -0,0 +1,836 @@
+// Copyright (C) Nitrokey GmbH
+// SPDX-License-Identifier: Apache-2.0 or MIT
+
+mod store;
+use store::OpenSeekFrom;
+
+pub mod utils;
+
+#[cfg(feature = "encrypted-chunked")]
+use chacha20poly1305::{
+    aead::stream::{DecryptorLE31, EncryptorLE31, Nonce as StreamNonce, StreamLE31},
+    ChaCha8Poly1305, KeyInit,
+};
+use rand_core::RngCore;
+use serde::{Deserialize, Serialize};
+use serde_byte_array::ByteArray;
+use trussed::{
+    client::FilesystemClient,
+    config::MAX_MESSAGE_LENGTH,
+    key::{Kind, Secrecy},
+    serde_extensions::{Extension, ExtensionClient, ExtensionImpl, ExtensionResult},
+    service::{Filestore, Keystore, ServiceResources},
+    store::Store,
+    types::{CoreContext, KeyId, Location, Message, Path, PathBuf, UserAttribute},
+    Bytes, Error,
+};
+
+use crate::StagingContext;
+
+#[derive(Debug)]
+pub struct ChunkedReadState {
+    pub path: PathBuf,
+    pub location: Location,
+    pub offset: usize,
+}
+
+#[derive(Debug)]
+pub struct ChunkedWriteState {
+    pub path: PathBuf,
+    pub location: Location,
+}
+
+#[cfg(feature = "encrypted-chunked")]
+pub struct EncryptedChunkedReadState {
+    pub path: PathBuf,
+    pub location: Location,
+    pub offset: usize,
+    pub decryptor: DecryptorLE31<ChaCha8Poly1305>,
+}
+
+#[cfg(feature = "encrypted-chunked")]
+pub struct 
EncryptedChunkedWriteState { + pub path: PathBuf, + pub location: Location, + pub encryptor: EncryptorLE31, +} + +#[non_exhaustive] +pub enum ChunkedIoState { + Read(ChunkedReadState), + Write(ChunkedWriteState), + #[cfg(feature = "encrypted-chunked")] + EncryptedRead(EncryptedChunkedReadState), + #[cfg(feature = "encrypted-chunked")] + EncryptedWrite(EncryptedChunkedWriteState), +} + +#[derive(Debug, Default)] +pub struct ChunkedExtension; + +impl Extension for ChunkedExtension { + type Request = ChunkedRequest; + type Reply = ChunkedReply; +} + +#[derive(Debug, Deserialize, Serialize, PartialEq, Eq)] +#[allow(missing_docs, clippy::large_enum_variant)] +pub enum ChunkedRequest { + StartChunkedWrite(request::StartChunkedWrite), + #[cfg(feature = "encrypted-chunked")] + StartEncryptedChunkedWrite(request::StartEncryptedChunkedWrite), + StartChunkedRead(request::StartChunkedRead), + #[cfg(feature = "encrypted-chunked")] + StartEncryptedChunkedRead(request::StartEncryptedChunkedRead), + ReadChunk(request::ReadChunk), + WriteChunk(request::WriteChunk), + AbortChunkedWrite(request::AbortChunkedWrite), +} + +#[derive(Debug, Deserialize, Serialize)] +#[allow(missing_docs)] +pub enum ChunkedReply { + ReadChunk(reply::ReadChunk), + StartChunkedWrite(reply::StartChunkedWrite), + #[cfg(feature = "encrypted-chunked")] + StartEncryptedChunkedWrite(reply::StartEncryptedChunkedWrite), + StartChunkedRead(reply::StartChunkedRead), + #[cfg(feature = "encrypted-chunked")] + StartEncryptedChunkedRead(reply::StartEncryptedChunkedRead), + WriteChunk(reply::WriteChunk), + AbortChunkedWrite(reply::AbortChunkedWrite), +} + +mod request { + use super::*; + use serde::{Deserialize, Serialize}; + use serde_byte_array::ByteArray; + use trussed::types::{KeyId, Location, Message, PathBuf, UserAttribute}; + use trussed::Error; + + #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] + pub struct ReadChunk {} + + impl TryFrom for ReadChunk { + type Error = Error; + fn try_from(request: ChunkedRequest) -> Result { + match request { + ChunkedRequest::ReadChunk(request) => Ok(request), + _ => Err(Error::InternalError), + } + } + } + + impl From for ChunkedRequest { + fn from(request: ReadChunk) -> Self { + Self::ReadChunk(request) + } + } + + #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] + pub struct StartChunkedWrite { + pub location: Location, + pub path: PathBuf, + pub user_attribute: Option, + } + + impl TryFrom for StartChunkedWrite { + type Error = Error; + fn try_from(request: ChunkedRequest) -> Result { + match request { + ChunkedRequest::StartChunkedWrite(request) => Ok(request), + _ => Err(Error::InternalError), + } + } + } + + impl From for ChunkedRequest { + fn from(request: StartChunkedWrite) -> Self { + Self::StartChunkedWrite(request) + } + } + + #[cfg(feature = "encrypted-chunked")] + #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] + pub struct StartEncryptedChunkedWrite { + pub location: Location, + pub path: PathBuf, + pub user_attribute: Option, + pub key: KeyId, + pub nonce: Option>, + } + + #[cfg(feature = "encrypted-chunked")] + impl TryFrom for StartEncryptedChunkedWrite { + type Error = Error; + fn try_from(request: ChunkedRequest) -> Result { + match request { + ChunkedRequest::StartEncryptedChunkedWrite(request) => Ok(request), + _ => Err(Error::InternalError), + } + } + } + + #[cfg(feature = "encrypted-chunked")] + impl From for ChunkedRequest { + fn from(request: StartEncryptedChunkedWrite) -> Self { + Self::StartEncryptedChunkedWrite(request) + } + } + + #[derive(Debug, 
PartialEq, Eq, Deserialize, Serialize)] + pub struct StartChunkedRead { + pub location: Location, + pub path: PathBuf, + } + + impl TryFrom for StartChunkedRead { + type Error = Error; + fn try_from(request: ChunkedRequest) -> Result { + match request { + ChunkedRequest::StartChunkedRead(request) => Ok(request), + _ => Err(Error::InternalError), + } + } + } + + impl From for ChunkedRequest { + fn from(request: StartChunkedRead) -> Self { + Self::StartChunkedRead(request) + } + } + + #[cfg(feature = "encrypted-chunked")] + #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] + pub struct StartEncryptedChunkedRead { + pub location: Location, + pub path: PathBuf, + pub key: KeyId, + } + + #[cfg(feature = "encrypted-chunked")] + impl TryFrom for StartEncryptedChunkedRead { + type Error = Error; + fn try_from(request: ChunkedRequest) -> Result { + match request { + ChunkedRequest::StartEncryptedChunkedRead(request) => Ok(request), + _ => Err(Error::InternalError), + } + } + } + + #[cfg(feature = "encrypted-chunked")] + impl From for ChunkedRequest { + fn from(request: StartEncryptedChunkedRead) -> Self { + Self::StartEncryptedChunkedRead(request) + } + } + + #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] + pub struct WriteChunk { + pub data: Message, + } + + impl TryFrom for WriteChunk { + type Error = Error; + fn try_from(request: ChunkedRequest) -> Result { + match request { + ChunkedRequest::WriteChunk(request) => Ok(request), + _ => Err(Error::InternalError), + } + } + } + + impl From for ChunkedRequest { + fn from(request: WriteChunk) -> Self { + Self::WriteChunk(request) + } + } + + #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] + pub struct AbortChunkedWrite {} + + impl TryFrom for AbortChunkedWrite { + type Error = Error; + fn try_from(request: ChunkedRequest) -> Result { + match request { + ChunkedRequest::AbortChunkedWrite(request) => Ok(request), + _ => Err(Error::InternalError), + } + } + } + + impl From for ChunkedRequest { + fn from(request: AbortChunkedWrite) -> Self { + Self::AbortChunkedWrite(request) + } + } +} + +mod reply { + use super::*; + use serde::{Deserialize, Serialize}; + use trussed::types::Message; + use trussed::Error; + + #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] + pub struct ReadChunk { + pub data: Message, + pub len: usize, + } + + impl TryFrom for ReadChunk { + type Error = Error; + fn try_from(reply: ChunkedReply) -> Result { + match reply { + ChunkedReply::ReadChunk(reply) => Ok(reply), + _ => Err(Error::InternalError), + } + } + } + + impl From for ChunkedReply { + fn from(reply: ReadChunk) -> Self { + Self::ReadChunk(reply) + } + } + + #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] + pub struct StartChunkedWrite {} + + impl TryFrom for StartChunkedWrite { + type Error = Error; + fn try_from(reply: ChunkedReply) -> Result { + match reply { + ChunkedReply::StartChunkedWrite(reply) => Ok(reply), + _ => Err(Error::InternalError), + } + } + } + + impl From for ChunkedReply { + fn from(reply: StartChunkedWrite) -> Self { + Self::StartChunkedWrite(reply) + } + } + + #[cfg(feature = "encrypted-chunked")] + #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] + pub struct StartEncryptedChunkedWrite {} + + #[cfg(feature = "encrypted-chunked")] + impl TryFrom for StartEncryptedChunkedWrite { + type Error = Error; + fn try_from(reply: ChunkedReply) -> Result { + match reply { + ChunkedReply::StartEncryptedChunkedWrite(reply) => Ok(reply), + _ => Err(Error::InternalError), + } + } + } + + #[cfg(feature = 
"encrypted-chunked")] + impl From for ChunkedReply { + fn from(reply: StartEncryptedChunkedWrite) -> Self { + Self::StartEncryptedChunkedWrite(reply) + } + } + + #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] + pub struct StartChunkedRead { + pub data: Message, + pub len: usize, + } + + impl TryFrom for StartChunkedRead { + type Error = Error; + fn try_from(reply: ChunkedReply) -> Result { + match reply { + ChunkedReply::StartChunkedRead(reply) => Ok(reply), + _ => Err(Error::InternalError), + } + } + } + + impl From for ChunkedReply { + fn from(reply: StartChunkedRead) -> Self { + Self::StartChunkedRead(reply) + } + } + + #[cfg(feature = "encrypted-chunked")] + #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] + pub struct StartEncryptedChunkedRead {} + + #[cfg(feature = "encrypted-chunked")] + impl TryFrom for StartEncryptedChunkedRead { + type Error = Error; + fn try_from(reply: ChunkedReply) -> Result { + match reply { + ChunkedReply::StartEncryptedChunkedRead(reply) => Ok(reply), + _ => Err(Error::InternalError), + } + } + } + + #[cfg(feature = "encrypted-chunked")] + impl From for ChunkedReply { + fn from(reply: StartEncryptedChunkedRead) -> Self { + Self::StartEncryptedChunkedRead(reply) + } + } + + #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] + pub struct WriteChunk {} + + impl TryFrom for WriteChunk { + type Error = Error; + fn try_from(reply: ChunkedReply) -> Result { + match reply { + ChunkedReply::WriteChunk(reply) => Ok(reply), + _ => Err(Error::InternalError), + } + } + } + + impl From for ChunkedReply { + fn from(reply: WriteChunk) -> Self { + Self::WriteChunk(reply) + } + } + + #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] + pub struct AbortChunkedWrite { + pub aborted: bool, + } + + impl TryFrom for AbortChunkedWrite { + type Error = Error; + fn try_from(reply: ChunkedReply) -> Result { + match reply { + ChunkedReply::AbortChunkedWrite(reply) => Ok(reply), + _ => Err(Error::InternalError), + } + } + } + + impl From for ChunkedReply { + fn from(reply: AbortChunkedWrite) -> Self { + Self::AbortChunkedWrite(reply) + } + } +} + +impl ExtensionImpl for super::StagingBackend { + fn extension_request( + &mut self, + core_ctx: &mut CoreContext, + backend_ctx: &mut Self::Context, + request: &ChunkedRequest, + resources: &mut ServiceResources

, + ) -> Result { + let rng = &mut resources.rng()?; + let keystore = &mut resources.keystore(core_ctx)?; + let filestore = &mut resources.filestore(core_ctx); + let client_id = &core_ctx.path; + let store = resources.platform_mut().store(); + match request { + ChunkedRequest::ReadChunk(_) => { + let read_state = match &mut backend_ctx.chunked_io_state { + Some(ChunkedIoState::Read(read_state)) => read_state, + #[cfg(feature = "encrypted-chunked")] + Some(ChunkedIoState::EncryptedRead(_)) => { + return read_encrypted_chunk(store, client_id, backend_ctx) + } + _ => return Err(Error::MechanismNotAvailable), + }; + let (data, len) = store::filestore_read_chunk( + store, + client_id, + &read_state.path, + read_state.location, + OpenSeekFrom::Start(read_state.offset as u32), + )?; + + read_state.offset += data.len(); + + Ok(reply::ReadChunk { data, len }.into()) + } + ChunkedRequest::StartChunkedRead(request) => { + clear_chunked_state(store, client_id, backend_ctx)?; + let (data, len) = store::filestore_read_chunk( + store, + client_id, + &request.path, + request.location, + OpenSeekFrom::Start(0), + )?; + backend_ctx.chunked_io_state = Some(ChunkedIoState::Read(ChunkedReadState { + path: request.path.clone(), + location: request.location, + offset: data.len(), + })); + Ok(reply::StartChunkedRead { data, len }.into()) + } + ChunkedRequest::WriteChunk(request) => { + let is_last = !request.data.is_full(); + if is_last { + write_last_chunk(store, client_id, backend_ctx, &request.data)?; + } else { + write_chunk(store, client_id, backend_ctx, &request.data)?; + } + Ok(reply::WriteChunk {}.into()) + } + ChunkedRequest::AbortChunkedWrite(_request) => { + let Some(ChunkedIoState::Write(ref write_state)) = backend_ctx.chunked_io_state else { + return Ok(reply::AbortChunkedWrite { aborted: false }.into()); + }; + let aborted = store::abort_chunked_write( + store, + client_id, + &write_state.path, + write_state.location, + ); + Ok(reply::AbortChunkedWrite { aborted }.into()) + } + ChunkedRequest::StartChunkedWrite(request) => { + backend_ctx.chunked_io_state = Some(ChunkedIoState::Write(ChunkedWriteState { + path: request.path.clone(), + location: request.location, + })); + store::start_chunked_write(store, client_id, &request.path, request.location, &[])?; + Ok(reply::StartChunkedWrite {}.into()) + } + #[cfg(feature = "encrypted-chunked")] + ChunkedRequest::StartEncryptedChunkedWrite(request) => { + clear_chunked_state(store, client_id, backend_ctx)?; + let key = keystore.load_key( + Secrecy::Secret, + Some(Kind::Symmetric(CHACHA8_KEY_LEN)), + &request.key, + )?; + let nonce = request.nonce.map(|n| *n).unwrap_or_else(|| { + let mut nonce = [0; CHACHA8_STREAM_NONCE_LEN]; + rng.fill_bytes(&mut nonce); + nonce + }); + let nonce: &StreamNonce> = + (&nonce).into(); + let aead = ChaCha8Poly1305::new((&*key.material).into()); + let encryptor = EncryptorLE31::::from_aead(aead, nonce); + store::start_chunked_write( + store, + client_id, + &request.path, + request.location, + nonce, + )?; + backend_ctx.chunked_io_state = + Some(ChunkedIoState::EncryptedWrite(EncryptedChunkedWriteState { + path: request.path.clone(), + location: request.location, + encryptor, + })); + Ok(reply::StartEncryptedChunkedWrite {}.into()) + } + #[cfg(feature = "encrypted-chunked")] + ChunkedRequest::StartEncryptedChunkedRead(request) => { + clear_chunked_state(store, client_id, backend_ctx)?; + let key = keystore.load_key( + Secrecy::Secret, + Some(Kind::Symmetric(CHACHA8_KEY_LEN)), + &request.key, + )?; + let nonce: Bytes = + 
filestore.read(&request.path, request.location)?; + let nonce: &StreamNonce> = (&**nonce) + .try_into() + .map_err(|_| Error::WrongMessageLength)?; + let aead = ChaCha8Poly1305::new((&*key.material).into()); + let decryptor = DecryptorLE31::::from_aead(aead, nonce); + backend_ctx.chunked_io_state = + Some(ChunkedIoState::EncryptedRead(EncryptedChunkedReadState { + path: request.path.clone(), + location: request.location, + decryptor, + offset: CHACHA8_STREAM_NONCE_LEN, + })); + Ok(reply::StartEncryptedChunkedRead {}.into()) + } + } + } +} + +fn clear_chunked_state( + store: impl Store, + client_id: &Path, + ctx: &mut StagingContext, +) -> Result<(), Error> { + match ctx.chunked_io_state.take() { + Some(ChunkedIoState::Read(_)) | None => {} + Some(ChunkedIoState::Write(write_state)) => { + info!("Automatically cancelling write"); + store::abort_chunked_write(store, client_id, &write_state.path, write_state.location); + } + #[cfg(feature = "encrypted-chunked")] + Some(ChunkedIoState::EncryptedRead(_)) => {} + #[cfg(feature = "encrypted-chunked")] + Some(ChunkedIoState::EncryptedWrite(write_state)) => { + info!("Automatically cancelling encrypted write"); + store::abort_chunked_write(store, client_id, &write_state.path, write_state.location); + } + } + Ok(()) +} + +fn write_chunk( + store: impl Store, + client_id: &Path, + ctx: &mut StagingContext, + data: &Message, +) -> Result<(), Error> { + match ctx.chunked_io_state { + Some(ChunkedIoState::Write(ref write_state)) => { + store::filestore_write_chunk( + store, + client_id, + &write_state.path, + write_state.location, + data, + )?; + } + #[cfg(feature = "encrypted-chunked")] + Some(ChunkedIoState::EncryptedWrite(ref mut write_state)) => { + let mut data = + Bytes::<{ MAX_MESSAGE_LENGTH + POLY1305_TAG_LEN }>::from_slice(data).unwrap(); + write_state + .encryptor + .encrypt_next_in_place(write_state.path.as_ref().as_bytes(), &mut *data) + .map_err(|_err| { + error!("Failed to encrypt {:?}", _err); + Error::AeadError + })?; + store::filestore_write_chunk( + store, + client_id, + &write_state.path, + write_state.location, + &data, + )?; + } + _ => return Err(Error::MechanismNotAvailable), + } + Ok(()) +} + +fn write_last_chunk( + store: impl Store, + client_id: &Path, + ctx: &mut StagingContext, + data: &Message, +) -> Result<(), Error> { + match ctx.chunked_io_state.take() { + Some(ChunkedIoState::Write(write_state)) => { + store::filestore_write_chunk( + store, + client_id, + &write_state.path, + write_state.location, + data, + )?; + store::flush_chunks(store, client_id, &write_state.path, write_state.location)?; + } + #[cfg(feature = "encrypted-chunked")] + Some(ChunkedIoState::EncryptedWrite(write_state)) => { + let mut data = + Bytes::<{ MAX_MESSAGE_LENGTH + POLY1305_TAG_LEN }>::from_slice(data).unwrap(); + write_state + .encryptor + .encrypt_last_in_place(&[write_state.location as u8], &mut *data) + .map_err(|_err| { + error!("Failed to encrypt {:?}", _err); + Error::AeadError + })?; + store::filestore_write_chunk( + store, + client_id, + &write_state.path, + write_state.location, + &data, + )?; + store::flush_chunks(store, client_id, &write_state.path, write_state.location)?; + } + _ => return Err(Error::MechanismNotAvailable), + } + + Ok(()) +} + +#[cfg(feature = "encrypted-chunked")] +fn read_encrypted_chunk( + store: impl Store, + client_id: &Path, + ctx: &mut StagingContext, +) -> Result { + let Some(ChunkedIoState::EncryptedRead(ref mut read_state)) = ctx.chunked_io_state else { + unreachable!("Read encrypted chunk can only be 
called in the context of encrypted chunk reads");
+    };
+    let (mut data, len): (Bytes<{ MAX_MESSAGE_LENGTH + POLY1305_TAG_LEN }>, usize) =
+        store::filestore_read_chunk(
+            store,
+            client_id,
+            &read_state.path,
+            read_state.location,
+            OpenSeekFrom::Start(read_state.offset as _),
+        )?;
+    read_state.offset += data.len();
+
+    let is_last = !data.is_full();
+    if is_last {
+        let Some(ChunkedIoState::EncryptedRead(read_state)) = ctx.chunked_io_state.take() else {
+            unreachable!();
+        };
+
+        read_state
+            .decryptor
+            .decrypt_last_in_place(&[read_state.location as u8], &mut *data)
+            .map_err(|_err| {
+                error!("Failed to decrypt {:?}", _err);
+                Error::AeadError
+            })?;
+        let data = Bytes::from_slice(&data).expect("decryptor removes the tag");
+        Ok(reply::ReadChunk {
+            data,
+            len: chunked_decrypted_len(len)?,
+        }
+        .into())
+    } else {
+        read_state
+            .decryptor
+            .decrypt_next_in_place(read_state.path.as_ref().as_bytes(), &mut *data)
+            .map_err(|_err| {
+                error!("Failed to decrypt {:?}", _err);
+                Error::AeadError
+            })?;
+        let data = Bytes::from_slice(&data).expect("decryptor removes the tag");
+        Ok(reply::ReadChunk {
+            data,
+            len: chunked_decrypted_len(len)?,
+        }
+        .into())
+    }
+}
+
+pub const POLY1305_TAG_LEN: usize = 16;
+pub const CHACHA8_KEY_LEN: usize = 32;
+pub const CHACHA8_STREAM_NONCE_LEN: usize = 8;
+/// Calculate the decrypted length of a chunked encrypted file
+fn chunked_decrypted_len(len: usize) -> Result<usize, Error> {
+    let len = len.checked_sub(CHACHA8_STREAM_NONCE_LEN).ok_or_else(|| {
+        error!("File too small");
+        Error::FilesystemReadFailure
+    })?;
+    const CHUNK_LEN: usize = POLY1305_TAG_LEN + MAX_MESSAGE_LENGTH;
+    let chunk_count = len / CHUNK_LEN;
+    let last_chunk_len = (len % CHUNK_LEN)
+        .checked_sub(POLY1305_TAG_LEN)
+        .ok_or_else(|| {
+            error!("Incorrect last chunk length");
+            Error::FilesystemReadFailure
+        })?;
+
+    Ok(chunk_count * MAX_MESSAGE_LENGTH + last_chunk_len)
+}
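The length arithmetic in `chunked_decrypted_len` inverts the on-disk layout produced by the encrypted write path: an 8-byte STREAM nonce header, a 16-byte Poly1305 tag per chunk, and a final chunk that is strictly shorter than a full `Message`. The following is a standalone sketch of that arithmetic, assuming trussed's `MAX_MESSAGE_LENGTH` of 1024 bytes; the constants are restated locally for illustration and are not imports from this crate.

```rust
// Sketch only: restates the layout arithmetic of `chunked_decrypted_len`
// with local constants so the numbers can be checked on the host.
const POLY1305_TAG_LEN: usize = 16;
const CHACHA8_STREAM_NONCE_LEN: usize = 8;
const MAX_MESSAGE_LENGTH: usize = 1024; // assumed value of trussed's constant

// Layout: nonce || (full ciphertext chunk + tag)* || short ciphertext chunk + tag
fn decrypted_len(file_len: usize) -> Option<usize> {
    let len = file_len.checked_sub(CHACHA8_STREAM_NONCE_LEN)?;
    const CHUNK_LEN: usize = POLY1305_TAG_LEN + MAX_MESSAGE_LENGTH;
    let full_chunks = len / CHUNK_LEN;
    let last_chunk_plaintext = (len % CHUNK_LEN).checked_sub(POLY1305_TAG_LEN)?;
    Some(full_chunks * MAX_MESSAGE_LENGTH + last_chunk_plaintext)
}

fn main() {
    // A 1234-byte plaintext, as in the tests: one full 1024-byte chunk and a
    // 210-byte final chunk, each followed by a tag, plus the nonce header.
    let file_len = CHACHA8_STREAM_NONCE_LEN + (1024 + 16) + (210 + 16);
    assert_eq!(decrypted_len(file_len), Some(1234));
    // A plaintext that is an exact multiple of the chunk size ends with an
    // empty final chunk, i.e. a lone tag.
    let file_len = CHACHA8_STREAM_NONCE_LEN + (1024 + 16) + 16;
    assert_eq!(decrypted_len(file_len), Some(1024));
    // A file shorter than the nonce header is rejected.
    assert_eq!(decrypted_len(4), None);
}
```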
+type ChunkedResult<'a, R, C> = ExtensionResult<'a, ChunkedExtension, R, C>;
+
+pub trait ChunkedClient: ExtensionClient<ChunkedExtension> + FilesystemClient {
+    /// Begin writing a file that can be larger than 1KiB
+    ///
+    /// More chunks can be written with [`write_file_chunk`](ChunkedClient::write_file_chunk).
+    /// The data is flushed and becomes readable when a chunk smaller than the maximum capacity of a `Message` is transferred.
+    fn start_chunked_write(
+        &mut self,
+        location: Location,
+        path: PathBuf,
+        user_attribute: Option<UserAttribute>,
+    ) -> ChunkedResult<'_, reply::StartChunkedWrite, Self> {
+        self.extension(request::StartChunkedWrite {
+            location,
+            path,
+            user_attribute,
+        })
+    }
+
+    /// Begin writing an encrypted file that can be larger than 1KiB
+    ///
+    /// More chunks can be written with [`write_file_chunk`](ChunkedClient::write_file_chunk).
+    /// The data is flushed and becomes readable when a chunk smaller than the maximum capacity of a [`Message`](trussed::types::Message) is transferred.
+    #[cfg(feature = "encrypted-chunked")]
+    fn start_encrypted_chunked_write(
+        &mut self,
+        location: Location,
+        path: PathBuf,
+        key: KeyId,
+        nonce: Option<ByteArray<CHACHA8_STREAM_NONCE_LEN>>,
+        user_attribute: Option<UserAttribute>,
+    ) -> ChunkedResult<'_, reply::StartEncryptedChunkedWrite, Self> {
+        self.extension(request::StartEncryptedChunkedWrite {
+            location,
+            path,
+            key,
+            user_attribute,
+            nonce,
+        })
+    }
+
+    /// Begin reading a file that can be larger than 1KiB
+    ///
+    /// More chunks can be read with [`read_file_chunk`](ChunkedClient::read_file_chunk).
+    /// The read is over once a chunk smaller than the maximum capacity of a [`Message`](trussed::types::Message) is transferred.
+    fn start_chunked_read(
+        &mut self,
+        location: Location,
+        path: PathBuf,
+    ) -> ChunkedResult<'_, reply::StartChunkedRead, Self> {
+        self.extension(request::StartChunkedRead { location, path })
+    }
+
+    /// Begin reading an encrypted file that can be larger than 1KiB
+    ///
+    /// More chunks can be read with [`read_file_chunk`](ChunkedClient::read_file_chunk).
+    /// The read is over once a chunk smaller than the maximum capacity of a [`Message`](trussed::types::Message) is transferred.
+    /// Only once the entire file has been read has the data been properly authenticated.
+    #[cfg(feature = "encrypted-chunked")]
+    fn start_encrypted_chunked_read(
+        &mut self,
+        location: Location,
+        path: PathBuf,
+        key: KeyId,
+    ) -> ChunkedResult<'_, reply::StartEncryptedChunkedRead, Self> {
+        self.extension(request::StartEncryptedChunkedRead {
+            location,
+            path,
+            key,
+        })
+    }
+
+    /// Write part of a file
+    ///
+    /// See [`start_chunked_write`](ChunkedClient::start_chunked_write).
+    fn write_file_chunk(&mut self, data: Message) -> ChunkedResult<'_, reply::WriteChunk, Self> {
+        self.extension(request::WriteChunk { data })
+    }
+
+    /// Abort writes to a file opened with [`start_chunked_write`](ChunkedClient::start_chunked_write).
+    fn abort_chunked_write(&mut self) -> ChunkedResult<'_, reply::AbortChunkedWrite, Self> {
+        self.extension(request::AbortChunkedWrite {})
+    }
+
+    /// Read part of a file, up to 1KiB, continuing from the current read offset
+    fn read_file_chunk(&mut self) -> ChunkedResult<'_, reply::ReadChunk, Self> {
+        self.extension(request::ReadChunk {})
+    }
+}
+
+impl<C: ExtensionClient<ChunkedExtension> + FilesystemClient> ChunkedClient for C {}
diff --git a/src/streaming/store.rs b/src/streaming/store.rs
new file mode 100644
index 0000000..43d99a5
--- /dev/null
+++ b/src/streaming/store.rs
@@ -0,0 +1,292 @@
+// Copyright (C) Nitrokey GmbH
+// SPDX-License-Identifier: Apache-2.0 or MIT
+
+use littlefs2::driver::Storage as LfsStorage;
+use littlefs2::fs::{File, Filesystem};
+use littlefs2::io::{SeekFrom, Write};
+
+use trussed::store::{create_directories, Store};
+use trussed::types::{Bytes, Location, Path, PathBuf};
+use trussed::Error;
+
+use serde::{Deserialize, Serialize};
+
+/// Enumeration of the methods available for seeking within a file that was just opened.
+/// Used in the [`read_chunk`] and [`write_chunk`] calls, where
+/// [`SeekFrom::Current`](littlefs2::io::SeekFrom::Current) would not make sense.
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
+pub enum OpenSeekFrom {
+    Start(u32),
+    End(i32),
+}
+
+impl From<OpenSeekFrom> for SeekFrom {
+    fn from(value: OpenSeekFrom) -> Self {
+        match value {
+            OpenSeekFrom::Start(o) => Self::Start(o),
+            OpenSeekFrom::End(o) => Self::End(o),
+        }
+    }
+}
+
+pub fn fs_read_chunk<S: LfsStorage, const N: usize>(
+    fs: &Filesystem<S>,
+    path: &Path,
+    pos: OpenSeekFrom,
+) -> Result<(Bytes<N>, usize), Error> {
+    let mut contents = Bytes::default();
+    contents.resize_default(contents.capacity()).unwrap();
+    let file_len = File::open_and_then(fs, path, |file| {
+        file.seek(pos.into())?;
+        let read_n = file.read(&mut contents)?;
+        contents.truncate(read_n);
+        file.len()
+    })
+    .map_err(|_| Error::FilesystemReadFailure)?;
+    Ok((contents, file_len))
+}
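`fs_read_chunk` leaves resumption to the caller: the backend stores the next offset in `ChunkedReadState`, passes `OpenSeekFrom::Start(offset)` on each call, and treats the first chunk shorter than the buffer as the end of the transfer. Below is a host-side model of that convention using `std::io` in place of littlefs2; the `read_all_chunks` helper and the 1024-byte `CHUNK` constant are illustrative assumptions, not part of this diff.

```rust
use std::io::{Cursor, Read, Seek, SeekFrom};

const CHUNK: usize = 1024; // stands in for the `Message` capacity

fn read_all_chunks(file: &mut (impl Read + Seek)) -> std::io::Result<Vec<u8>> {
    let mut out = Vec::new();
    let mut offset = 0u64; // mirrors `ChunkedReadState::offset`
    loop {
        // Each call re-seeks to the caller-maintained offset, like
        // `OpenSeekFrom::Start(read_state.offset as u32)`.
        file.seek(SeekFrom::Start(offset))?;
        let mut buf = [0u8; CHUNK];
        let mut read = 0;
        // Fill the buffer; `std::io::Read` may return short counts mid-file,
        // so loop until the buffer is full or the file is exhausted.
        loop {
            let n = file.read(&mut buf[read..])?;
            if n == 0 {
                break;
            }
            read += n;
        }
        out.extend_from_slice(&buf[..read]);
        offset += read as u64;
        if read < CHUNK {
            // Short chunk: the transfer is complete.
            return Ok(out);
        }
    }
}

fn main() -> std::io::Result<()> {
    // 1234 bytes arrive as one full 1024-byte chunk plus a 210-byte tail.
    let data = vec![48u8; 1234];
    assert_eq!(read_all_chunks(&mut Cursor::new(data.clone()))?, data);
    Ok(())
}
```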
+/// Reads contents from path in location of store.
+#[inline(never)]
+pub fn read_chunk<const N: usize>(
+    store: impl Store,
+    location: Location,
+    path: &Path,
+    pos: OpenSeekFrom,
+) -> Result<(Bytes<N>, usize), Error> {
+    debug_now!("reading chunk {},{:?}", &path, pos);
+    match location {
+        Location::Internal => fs_read_chunk(store.ifs(), path, pos),
+        Location::External => fs_read_chunk(store.efs(), path, pos),
+        Location::Volatile => fs_read_chunk(store.vfs(), path, pos),
+    }
+}
+
+pub fn fs_write_chunk<S: LfsStorage>(
+    fs: &Filesystem<S>,
+    path: &Path,
+    contents: &[u8],
+    pos: OpenSeekFrom,
+) -> Result<(), Error> {
+    File::<S>::with_options()
+        .read(true)
+        .write(true)
+        .open_and_then(fs, path, |file| {
+            file.seek(pos.into())?;
+            file.write_all(contents)
+        })
+        .map_err(|_| Error::FilesystemWriteFailure)?;
+    Ok(())
+}
+
+/// Writes contents to path in location of store.
+#[inline(never)]
+pub fn write_chunk(
+    store: impl Store,
+    location: Location,
+    path: &Path,
+    contents: &[u8],
+    pos: OpenSeekFrom,
+) -> Result<(), Error> {
+    debug_now!("writing {}", &path);
+    match location {
+        Location::Internal => fs_write_chunk(store.ifs(), path, contents, pos),
+        Location::External => fs_write_chunk(store.efs(), path, contents, pos),
+        Location::Volatile => fs_write_chunk(store.vfs(), path, contents, pos),
+    }
+    .map_err(|_| Error::FilesystemWriteFailure)
+}
+
+pub fn move_file(
+    store: impl Store,
+    from_location: Location,
+    from_path: &Path,
+    to_location: Location,
+    to_path: &Path,
+) -> Result<(), Error> {
+    debug_now!(
+        "Moving {:?}({}) to {:?}({})",
+        from_location,
+        from_path,
+        to_location,
+        to_path
+    );
+
+    match to_location {
+        Location::Internal => create_directories(store.ifs(), to_path),
+        Location::External => create_directories(store.efs(), to_path),
+        Location::Volatile => create_directories(store.vfs(), to_path),
+    }
+    .map_err(|_err| {
+        error!("Failed to create directories for chunks: {:?}", _err);
+        Error::FilesystemWriteFailure
+    })?;
+
+    let on_fail = |_err| {
+        error!("Failed to rename file: {:?}", _err);
+        Error::FilesystemWriteFailure
+    };
+    // Fast path for moves within the same filesystem
+    match (from_location, to_location) {
+        (Location::Internal, Location::Internal) => {
+            return store.ifs().rename(from_path, to_path).map_err(on_fail)
+        }
+        (Location::External, Location::External) => {
+            return store.efs().rename(from_path, to_path).map_err(on_fail)
+        }
+        (Location::Volatile, Location::Volatile) => {
+            return store.vfs().rename(from_path, to_path).map_err(on_fail)
+        }
+        _ => {}
+    }
+
+    match from_location {
+        Location::Internal => {
+            move_file_step1(store, &**store.ifs(), from_path, to_location, to_path)
+        }
+        Location::External => {
+            move_file_step1(store, &**store.efs(), from_path, to_location, to_path)
+        }
+        Location::Volatile => {
+            move_file_step1(store, &**store.vfs(), from_path, to_location, to_path)
+        }
+    }
+}
+
+// Separate generic function to avoid 9 copies of the same code, since the filesystem types differ.
+fn move_file_step1<S: LfsStorage>(
+    store: impl Store,
+    from_fs: &Filesystem<S>,
+    from_path: &Path,
+    to_location: Location,
+    to_path: &Path,
+) -> Result<(), Error> {
+    match to_location {
+        Location::Internal => move_file_step2(from_fs, from_path, &**store.ifs(), to_path),
+        Location::External => move_file_step2(from_fs, from_path, &**store.efs(), to_path),
+        Location::Volatile => move_file_step2(from_fs, from_path, &**store.vfs(), to_path),
+    }
+}
+
+// Separate generic function to avoid 9 copies of the same code, since the filesystem types differ.
+fn move_file_step2( + from_fs: &Filesystem, + from_path: &Path, + to_fs: &Filesystem, + to_path: &Path, +) -> Result<(), Error> { + File::open_and_then(from_fs, from_path, |from_file| { + File::create_and_then(to_fs, to_path, |to_file| copy_file_data(from_file, to_file)) + }) + .map_err(|_err| { + error!("Failed to flush chunks: {:?}", _err); + Error::FilesystemWriteFailure + }) +} + +fn copy_file_data( + from: &File, + to: &File, +) -> Result<(), littlefs2::io::Error> { + let mut buf = [0; 1024]; + loop { + let read = from.read(&mut buf)?; + if read == 0 { + return Ok(()); + } + + to.write_all(&buf[..read])?; + } +} + +fn chunks_path(client_id: &Path, client_path: &Path, location: Location) -> Result { + // Clients must not escape their namespace + if client_path.as_ref().contains("..") { + return Err(Error::InvalidPath); + } + + let mut path = PathBuf::new(); + path.push(client_id); + match location { + Location::Volatile => path.push(&PathBuf::from("vfs-part")), + Location::External => path.push(&PathBuf::from("efs-part")), + Location::Internal => path.push(&PathBuf::from("ifs-part")), + } + path.push(client_path); + Ok(path) +} + +fn actual_path(client_id: &Path, client_path: &Path) -> Result { + // Clients must not escape their namespace + if client_path.as_ref().contains("..") { + return Err(Error::InvalidPath); + } + + let mut path = PathBuf::new(); + path.push(client_id); + path.push(&PathBuf::from("dat")); + path.push(client_path); + Ok(path) +} + +pub fn start_chunked_write( + store: impl Store, + client_id: &Path, + path: &PathBuf, + location: Location, + data: &[u8], +) -> Result<(), Error> { + let path = chunks_path(client_id, path, location)?; + trussed::store::store(store, Location::Volatile, &path, data) +} + +pub fn filestore_write_chunk( + store: impl Store, + client_id: &Path, + path: &Path, + location: Location, + data: &[u8], +) -> Result<(), Error> { + let path = chunks_path(client_id, path, location)?; + write_chunk(store, Location::Volatile, &path, data, OpenSeekFrom::End(0)) +} + +pub fn filestore_read_chunk( + store: impl Store, + client_id: &Path, + path: &PathBuf, + location: Location, + pos: OpenSeekFrom, +) -> Result<(Bytes, usize), Error> { + let path = actual_path(client_id, path)?; + + read_chunk(store, location, &path, pos) +} + +pub fn abort_chunked_write( + store: impl Store, + client_id: &Path, + path: &PathBuf, + location: Location, +) -> bool { + let Ok(path) = chunks_path(client_id,path, location) else { + return false; + }; + trussed::store::delete(store, Location::Volatile, &path) +} + +pub fn flush_chunks( + store: impl Store, + client_id: &Path, + path: &PathBuf, + location: Location, +) -> Result<(), Error> { + let chunk_path = chunks_path(client_id, path, location)?; + let client_path = actual_path(client_id, path)?; + move_file( + store, + Location::Volatile, + &chunk_path, + location, + &client_path, + ) +} diff --git a/src/streaming/utils.rs b/src/streaming/utils.rs new file mode 100644 index 0000000..7c5a46b --- /dev/null +++ b/src/streaming/utils.rs @@ -0,0 +1,91 @@ +// Copyright (C) Nitrokey GmbH +// SPDX-License-Identifier: Apache-2.0 or MIT + +use littlefs2::path::PathBuf; +use serde_byte_array::ByteArray; + +use trussed::{ + syscall, try_syscall, + types::{KeyId, Location, Message, UserAttribute}, + Error, +}; + +use super::{ChunkedClient, CHACHA8_STREAM_NONCE_LEN}; + +#[derive(Clone, Copy)] +pub struct EncryptionData { + pub key: KeyId, + pub nonce: Option>, +} + +/// Write a large file (can be larger than 1KiB) +/// +/// This is a 
wrapper around the [chunked writes API](ChunkedClient)
+pub fn write_all(
+    client: &mut impl ChunkedClient,
+    location: Location,
+    path: PathBuf,
+    data: &[u8],
+    user_attribute: Option<UserAttribute>,
+    encryption: Option<EncryptionData>,
+) -> Result<(), Error> {
+    if let (Ok(msg), None) = (Message::from_slice(data), encryption) {
+        // Fast path for small files
+        try_syscall!(client.write_file(location, path, msg, user_attribute))?;
+        Ok(())
+    } else {
+        write_chunked(client, location, path, data, user_attribute, encryption)
+    }
+}
+
+fn write_chunked(
+    client: &mut impl ChunkedClient,
+    location: Location,
+    path: PathBuf,
+    data: &[u8],
+    user_attribute: Option<UserAttribute>,
+    encryption: Option<EncryptionData>,
+) -> Result<(), Error> {
+    let res = write_chunked_inner(client, location, path, data, user_attribute, encryption);
+    if res.is_err() {
+        syscall!(client.abort_chunked_write());
+        return res;
+    }
+    Ok(())
+}
+
+fn write_chunked_inner(
+    client: &mut impl ChunkedClient,
+    location: Location,
+    path: PathBuf,
+    data: &[u8],
+    user_attribute: Option<UserAttribute>,
+    encryption: Option<EncryptionData>,
+) -> Result<(), Error> {
+    let msg = Message::new();
+    let chunk_size = msg.capacity();
+    let chunks = data.chunks(chunk_size).map(|chunk| {
+        Message::from_slice(chunk).expect("chunks are at most chunk_size bytes long")
+    });
+    if let Some(encryption_data) = encryption {
+        try_syscall!(client.start_encrypted_chunked_write(
+            location,
+            path,
+            encryption_data.key,
+            encryption_data.nonce,
+            user_attribute,
+        ))?;
+    } else {
+        try_syscall!(client.start_chunked_write(location, path, user_attribute))?;
+    }
+    let mut written = 0;
+    for chunk in chunks {
+        written += chunk.len();
+        try_syscall!(client.write_file_chunk(chunk))?;
+    }
+
+    // If the data length is an exact multiple of the chunk size, write an
+    // empty final chunk to mark the end of the transfer.
+    if written % chunk_size == 0 {
+        try_syscall!(client.write_file_chunk(Message::new()))?;
+    }
+    Ok(())
+}
diff --git a/src/virt.rs b/src/virt.rs
index 1b06d37..f22cf47 100644
--- a/src/virt.rs
+++ b/src/virt.rs
@@ -8,6 +8,9 @@ use crate::wrap_key_to_file::WrapKeyToFileExtension;
 
 use crate::{StagingBackend, StagingContext};
 
+#[cfg(feature = "chunked")]
+use crate::streaming::ChunkedExtension;
+
 #[derive(Default, Debug)]
 pub struct Dispatcher {
     backend: StagingBackend,
@@ -22,6 +25,8 @@ pub enum BackendIds {
 pub enum ExtensionIds {
     #[cfg(feature = "wrap-key-to-file")]
     WrapKeyToFile,
+    #[cfg(feature = "chunked")]
+    Chunked,
 }
 
 #[cfg(feature = "wrap-key-to-file")]
@@ -30,11 +35,19 @@ impl ExtensionId<WrapKeyToFileExtension> for Dispatcher {
     const ID: ExtensionIds = ExtensionIds::WrapKeyToFile;
 }
 
+#[cfg(feature = "chunked")]
+impl ExtensionId<ChunkedExtension> for Dispatcher {
+    type Id = ExtensionIds;
+    const ID: ExtensionIds = ExtensionIds::Chunked;
+}
+
 impl From<ExtensionIds> for u8 {
     fn from(value: ExtensionIds) -> Self {
         match value {
             #[cfg(feature = "wrap-key-to-file")]
             ExtensionIds::WrapKeyToFile => 0,
+            #[cfg(feature = "chunked")]
+            ExtensionIds::Chunked => 1,
         }
     }
 }
@@ -45,6 +58,8 @@ impl TryFrom<u8> for ExtensionIds {
         match value {
             #[cfg(feature = "wrap-key-to-file")]
             0 => Ok(Self::WrapKeyToFile),
+            #[cfg(feature = "chunked")]
+            1 => Ok(Self::Chunked),
             _ => Err(Error::FunctionNotSupported),
         }
     }
 }
@@ -81,12 +96,25 @@ impl ExtensionDispatch for Dispatcher {
         // See https://github.com/rust-lang/rust/issues/78123#
         match *extension {
             #[cfg(feature = "wrap-key-to-file")]
-            ExtensionIds::WrapKeyToFile => self.backend.extension_request_serialized(
+            ExtensionIds::WrapKeyToFile => <StagingBackend as ExtensionImpl<WrapKeyToFileExtension>>::extension_request_serialized(
+                &mut self.backend,
                 &mut ctx.core,
                 &mut ctx.backends,
                 request,
                 resources,
             ),
+            #[cfg(feature = "chunked")]
+            ExtensionIds::Chunked => {
+                <StagingBackend as ExtensionImpl<ChunkedExtension>>::extension_request_serialized(
+                    &mut 
self.backend, + &mut ctx.core, + &mut ctx.backends, + request, + resources, + ) + } } } } diff --git a/tests/chunked.rs b/tests/chunked.rs new file mode 100644 index 0000000..21bd712 --- /dev/null +++ b/tests/chunked.rs @@ -0,0 +1,149 @@ +// Copyright (C) Nitrokey GmbH +// SPDX-License-Identifier: Apache-2.0 or MIT + +#![cfg(all(feature = "virt", feature = "chunked"))] + +use littlefs2::path::PathBuf; +use trussed::{client::FilesystemClient, syscall, try_syscall, types::Location, Bytes}; +use trussed_staging::{ + streaming::{utils, ChunkedClient}, + virt::with_ram_client, +}; +fn test_write_all(location: Location) { + with_ram_client("test chunked", |mut client| { + let path = PathBuf::from("foo"); + utils::write_all(&mut client, location, path.clone(), &[48; 1234], None, None).unwrap(); + + let data = syscall!(client.start_chunked_read(location, path)).data; + assert_eq!(&data, &[48; 1024]); + let data = syscall!(client.read_file_chunk()).data; + assert_eq!(&data, &[48; 1234 - 1024]); + }); +} + +fn test_write_all_small(location: Location) { + with_ram_client("test chunked", |mut client| { + let path = PathBuf::from("foo2"); + utils::write_all(&mut client, location, path.clone(), &[48; 1023], None, None).unwrap(); + + let data = syscall!(client.start_chunked_read(location, path)).data; + assert_eq!(&data, &[48; 1023]); + }); +} + +#[test] +fn write_all_volatile() { + test_write_all(Location::Volatile); + test_write_all_small(Location::Volatile); +} + +#[test] +fn write_all_external() { + test_write_all(Location::External); + test_write_all_small(Location::External); +} + +#[test] +fn write_all_internal() { + test_write_all(Location::Internal); + test_write_all_small(Location::Internal); +} + +#[test] +fn filesystem() { + with_ram_client("chunked-tests", |mut client| { + assert!( + syscall!(client.entry_metadata(Location::Internal, PathBuf::from("test_file"))) + .metadata + .is_none(), + ); + + let data = Bytes::from_slice(b"test data").unwrap(); + syscall!(client.write_file( + Location::Internal, + PathBuf::from("test_file"), + data.clone(), + None, + )); + + let recv_data = + syscall!(client.read_file(Location::Internal, PathBuf::from("test_file"))).data; + assert_eq!(data, recv_data); + + // ======== CHUNKED READS ======== + let first_data = + syscall!(client.start_chunked_read(Location::Internal, PathBuf::from("test_file"),)); + assert_eq!(&first_data.data, &data); + assert_eq!(first_data.len, data.len()); + + let empty_data = syscall!(client.read_file_chunk()); + assert!(empty_data.data.is_empty()); + assert_eq!(empty_data.len, data.len()); + + let large_data = Bytes::from_slice(&[0; 1024]).unwrap(); + let large_data2 = Bytes::from_slice(&[1; 1024]).unwrap(); + let more_data = Bytes::from_slice(&[2; 42]).unwrap(); + // ======== CHUNKED WRITES ======== + syscall!(client.start_chunked_write(Location::Internal, PathBuf::from("test_file"), None)); + + syscall!(client.write_file_chunk(large_data.clone())); + syscall!(client.write_file_chunk(large_data2.clone())); + syscall!(client.write_file_chunk(more_data.clone())); + + // ======== CHUNKED READS ======== + let full_len = large_data.len() + large_data2.len() + more_data.len(); + let first_data = + syscall!(client.start_chunked_read(Location::Internal, PathBuf::from("test_file"),)); + assert_eq!(&first_data.data, &large_data); + assert_eq!(first_data.len, full_len); + + let second_data = syscall!(client.read_file_chunk()); + assert_eq!(&second_data.data, &large_data2); + assert_eq!(second_data.len, full_len); + + let third_data = 
syscall!(client.read_file_chunk()); + assert_eq!(&third_data.data, &more_data); + assert_eq!(third_data.len, full_len); + + let empty_data = syscall!(client.read_file_chunk()); + assert!(empty_data.data.is_empty()); + assert_eq!(empty_data.len, full_len); + + let metadata = + syscall!(client.entry_metadata(Location::Internal, PathBuf::from("test_file"))) + .metadata + .unwrap(); + assert!(metadata.is_file()); + + // ======== ABORTED CHUNKED WRITES ======== + syscall!(client.start_chunked_write(Location::Internal, PathBuf::from("test_file"), None)); + + syscall!(client.write_file_chunk(large_data.clone())); + syscall!(client.write_file_chunk(large_data2)); + syscall!(client.abort_chunked_write()); + + // Old data is still there after abort + let partial_data = + syscall!(client.start_chunked_read(Location::Internal, PathBuf::from("test_file"))); + assert_eq!(&partial_data.data, &large_data); + assert_eq!(partial_data.len, full_len); + + // This returns an error because the name doesn't exist + assert!( + try_syscall!(client.remove_file(Location::Internal, PathBuf::from("bad_name"))) + .is_err() + ); + let metadata = + syscall!(client.entry_metadata(Location::Internal, PathBuf::from("test_file"))) + .metadata + .unwrap(); + assert!(metadata.is_file()); + + syscall!(client.remove_file(Location::Internal, PathBuf::from("test_file"))); + assert!( + syscall!(client.entry_metadata(Location::Internal, PathBuf::from("test_file"))) + .metadata + .is_none(), + ); + }) +} diff --git a/tests/encrypted-chunked.rs b/tests/encrypted-chunked.rs new file mode 100644 index 0000000..cd351fc --- /dev/null +++ b/tests/encrypted-chunked.rs @@ -0,0 +1,191 @@ +// Copyright (C) Nitrokey GmbH +// SPDX-License-Identifier: Apache-2.0 or MIT + +#![cfg(all(feature = "virt", feature = "encrypted-chunked"))] + +use littlefs2::path::PathBuf; +use serde_byte_array::ByteArray; +use trussed::{ + client::CryptoClient, client::FilesystemClient, syscall, try_syscall, types::Location, Bytes, + Error, +}; +use trussed_staging::{ + streaming::{ + utils::{self, EncryptionData}, + ChunkedClient, + }, + virt::with_ram_client, +}; + +fn test_write_all(location: Location) { + with_ram_client("test chunked", |mut client| { + let key = syscall!(client.generate_secret_key(32, Location::Volatile)).key; + let path = PathBuf::from("foo"); + utils::write_all( + &mut client, + location, + path.clone(), + &[48; 1234], + None, + Some(EncryptionData { key, nonce: None }), + ) + .unwrap(); + + syscall!(client.start_encrypted_chunked_read(location, path, key)); + let data = syscall!(client.read_file_chunk()).data; + assert_eq!(&data, &[48; 1024]); + let data = syscall!(client.read_file_chunk()).data; + assert_eq!(&data, &[48; 1234 - 1024]); + }); +} + +fn test_write_all_small(location: Location) { + with_ram_client("test chunked", |mut client| { + let key = syscall!(client.generate_secret_key(32, Location::Volatile)).key; + let path = PathBuf::from("foo2"); + utils::write_all( + &mut client, + location, + path.clone(), + &[48; 1023], + None, + Some(EncryptionData { key, nonce: None }), + ) + .unwrap(); + + syscall!(client.start_encrypted_chunked_read(location, path, key)); + let data = syscall!(client.read_file_chunk()).data; + assert_eq!(&data, &[48; 1023]); + }); +} + +#[test] +fn write_all_volatile() { + test_write_all(Location::Volatile); + test_write_all_small(Location::Volatile); +} + +#[test] +fn write_all_external() { + test_write_all(Location::External); + test_write_all_small(Location::External); +} + +#[test] +fn write_all_internal() { + 
test_write_all(Location::Internal); + test_write_all_small(Location::Internal); +} + +#[test] +fn encrypted_filesystem() { + with_ram_client("chunked-tests", |mut client| { + let key = syscall!(client.generate_secret_key(32, Location::Volatile)).key; + + assert!( + syscall!(client.entry_metadata(Location::Internal, PathBuf::from("test_file"))) + .metadata + .is_none(), + ); + + let large_data = Bytes::from_slice(&[0; 1024]).unwrap(); + let large_data2 = Bytes::from_slice(&[1; 1024]).unwrap(); + let more_data = Bytes::from_slice(&[2; 42]).unwrap(); + // ======== CHUNKED WRITES ======== + syscall!(client.start_encrypted_chunked_write( + Location::Internal, + PathBuf::from("test_file"), + key, + Some(ByteArray::from([0; 8])), + None + )); + + syscall!(client.write_file_chunk(large_data.clone())); + syscall!(client.write_file_chunk(large_data2.clone())); + syscall!(client.write_file_chunk(more_data.clone())); + + // ======== CHUNKED READS ======== + let full_len = large_data.len() + large_data2.len() + more_data.len(); + syscall!(client.start_encrypted_chunked_read( + Location::Internal, + PathBuf::from("test_file"), + key + )); + let first_data = syscall!(client.read_file_chunk()); + assert_eq!(&first_data.data, &large_data); + assert_eq!(first_data.len, full_len); + + let second_data = syscall!(client.read_file_chunk()); + assert_eq!(&second_data.data, &large_data2); + assert_eq!(second_data.len, full_len); + + let third_data = syscall!(client.read_file_chunk()); + assert_eq!(&third_data.data, &more_data); + assert_eq!(third_data.len, full_len); + + assert_eq!( + try_syscall!(client.read_file_chunk()), + Err(Error::MechanismNotAvailable) + ); + + let metadata = + syscall!(client.entry_metadata(Location::Internal, PathBuf::from("test_file"))) + .metadata + .unwrap(); + assert!(metadata.is_file()); + + // ======== ABORTED CHUNKED WRITES ======== + syscall!(client.start_encrypted_chunked_write( + Location::Internal, + PathBuf::from("test_file"), + key, + Some(ByteArray::from([1; 8])), + None + )); + + syscall!(client.write_file_chunk(large_data.clone())); + syscall!(client.write_file_chunk(large_data2.clone())); + syscall!(client.abort_chunked_write()); + + // Old data is still there after abort + syscall!(client.start_encrypted_chunked_read( + Location::Internal, + PathBuf::from("test_file"), + key + )); + let first_data = syscall!(client.read_file_chunk()); + assert_eq!(&first_data.data, &large_data); + assert_eq!(first_data.len, full_len); + + let second_data = syscall!(client.read_file_chunk()); + assert_eq!(&second_data.data, &large_data2); + assert_eq!(second_data.len, full_len); + + let third_data = syscall!(client.read_file_chunk()); + assert_eq!(&third_data.data, &more_data); + assert_eq!(third_data.len, full_len); + + assert_eq!( + try_syscall!(client.read_file_chunk()), + Err(Error::MechanismNotAvailable) + ); + + // This returns an error because the name doesn't exist + assert!( + try_syscall!(client.remove_file(Location::Internal, PathBuf::from("bad_name"))) + .is_err() + ); + let metadata = + syscall!(client.entry_metadata(Location::Internal, PathBuf::from("test_file"))) + .metadata + .unwrap(); + assert!(metadata.is_file()); + + syscall!(client.remove_file(Location::Internal, PathBuf::from("test_file"))); + assert!( + syscall!(client.entry_metadata(Location::Internal, PathBuf::from("test_file"))) + .metadata + .is_none(), + ); + }) +} diff --git a/tests/backend.rs b/tests/wrap_key_to_file.rs similarity index 100% rename from tests/backend.rs rename to tests/wrap_key_to_file.rs
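The `utils` module only ships a write-side helper; the read side follows the same short-chunk convention and can be assembled from the `ChunkedClient` calls above. Below is a sketch of a hypothetical `read_all` counterpart: the name, the `MAX_DATA` bound, and the `heapless::Vec` buffer are assumptions for illustration, and the encrypted branch requires the `encrypted-chunked` feature. For encrypted reads, the data is only authenticated once the final chunk has been read, so a caller must not act on the returned bytes if any chunk fails.

```rust
// Hypothetical helper, not part of this diff: a read-side counterpart to
// `utils::write_all`, built only on the `ChunkedClient` trait added above.
use trussed::{
    try_syscall,
    types::{KeyId, Location, PathBuf},
    Error,
};
use trussed_staging::streaming::ChunkedClient;

const MAX_DATA: usize = 8 * 1024; // illustrative upper bound on file size

pub fn read_all(
    client: &mut impl ChunkedClient,
    location: Location,
    path: PathBuf,
    key: Option<KeyId>, // Some(_) selects the encrypted variant
) -> Result<heapless::Vec<u8, MAX_DATA>, Error> {
    let mut out = heapless::Vec::new();
    // Plain reads return the first chunk with the start call; encrypted reads
    // only set up the decryptor, and all data arrives via `read_file_chunk`.
    let mut chunk = if let Some(key) = key {
        try_syscall!(client.start_encrypted_chunked_read(location, path, key))?;
        try_syscall!(client.read_file_chunk())?.data
    } else {
        try_syscall!(client.start_chunked_read(location, path))?.data
    };
    loop {
        out.extend_from_slice(&chunk)
            .map_err(|_| Error::WrongMessageLength)?;
        // A chunk shorter than the `Message` capacity ends the transfer.
        if !chunk.is_full() {
            return Ok(out);
        }
        chunk = try_syscall!(client.read_file_chunk())?.data;
    }
}
```

With the virt client used in the tests, `read_all(&mut client, Location::Internal, PathBuf::from("test_file"), None)` would return the same bytes the chunked write produced.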