diff --git a/core/src/docs/rfcs/3911_deleter_api.md b/core/src/docs/rfcs/3911_deleter_api.md
index dfdc7a73cb33..f7ab31461285 100644
--- a/core/src/docs/rfcs/3911_deleter_api.md
+++ b/core/src/docs/rfcs/3911_deleter_api.md
@@ -128,7 +128,7 @@ And the `delete` API will be changed to return a `oio::Delete` instead:
 ```diff
 trait Accessor {
-- async fn delete(&self, path: &str, args: OpDelete) -> Result;
+- async fn delete(&self) -> Result<(RpDelete, Self::Deleter)>;
+  async fn delete(&self, args: OpDelete) -> Result<(RpDelete, Self::Deleter)>;
 }
 ```
diff --git a/core/src/layers/async_backtrace.rs b/core/src/layers/async_backtrace.rs
index 65a5c3305d7d..f9dc06b47bf1 100644
--- a/core/src/layers/async_backtrace.rs
+++ b/core/src/layers/async_backtrace.rs
@@ -65,6 +65,8 @@ impl LayeredAccess for AsyncBacktraceAccessor {
     type BlockingWriter = AsyncBacktraceWrapper;
     type Lister = AsyncBacktraceWrapper;
     type BlockingLister = AsyncBacktraceWrapper;
+    type Deleter = AsyncBacktraceWrapper;
+    type BlockingDeleter = AsyncBacktraceWrapper;
 
     fn inner(&self) -> &Self::Inner {
         &self.inner
@@ -102,8 +104,11 @@ impl LayeredAccess for AsyncBacktraceAccessor {
     }
 
     #[async_backtrace::framed]
-    async fn delete(&self, path: &str, args: OpDelete) -> Result {
-        self.inner.delete(path, args).await
+    async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> {
+        self.inner
+            .delete()
+            .await
+            .map(|(rp, r)| (rp, AsyncBacktraceWrapper::new(r)))
     }
 
     #[async_backtrace::framed]
@@ -114,11 +119,6 @@ impl LayeredAccess for AsyncBacktraceAccessor {
             .map(|(rp, r)| (rp, AsyncBacktraceWrapper::new(r)))
     }
 
-    #[async_backtrace::framed]
-    async fn batch(&self, args: OpBatch) -> Result {
-        self.inner.batch(args).await
-    }
-
     #[async_backtrace::framed]
     async fn presign(&self, path: &str, args: OpPresign) -> Result {
         self.inner.presign(path, args).await
@@ -141,6 +141,12 @@ impl LayeredAccess for AsyncBacktraceAccessor {
             .blocking_list(path, args)
             .map(|(rp, r)| (rp, AsyncBacktraceWrapper::new(r)))
     }
+
+    fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> {
+        self.inner
+            .blocking_delete()
+            .map(|(rp, r)| (rp, AsyncBacktraceWrapper::new(r)))
+    }
 }
 
 pub struct AsyncBacktraceWrapper {
@@ -173,13 +179,13 @@ impl oio::Write for AsyncBacktraceWrapper {
     }
 
     #[async_backtrace::framed]
-    async fn abort(&mut self) -> Result<()> {
-        self.inner.abort().await
+    async fn close(&mut self) -> Result<()> {
+        self.inner.close().await
     }
 
     #[async_backtrace::framed]
-    async fn close(&mut self) -> Result<()> {
-        self.inner.close().await
+    async fn abort(&mut self) -> Result<()> {
+        self.inner.abort().await
     }
 }
 
@@ -205,3 +211,24 @@ impl oio::BlockingList for AsyncBacktraceWrapper {
         self.inner.next()
     }
 }
+
+impl oio::Delete for AsyncBacktraceWrapper {
+    fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> {
+        self.inner.delete(path, args)
+    }
+
+    #[async_backtrace::framed]
+    async fn flush(&mut self) -> Result {
+        self.inner.flush().await
+    }
+}
+
+impl oio::BlockingDelete for AsyncBacktraceWrapper {
+    fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> {
+        self.inner.delete(path, args)
+    }
+
+    fn flush(&mut self) -> Result {
+        self.inner.flush()
+    }
+}
diff --git a/core/src/layers/await_tree.rs b/core/src/layers/await_tree.rs
index 3c1b17d64c01..1cf988af6ce7 100644
--- a/core/src/layers/await_tree.rs
+++ b/core/src/layers/await_tree.rs
@@ -76,6 +76,8 @@ impl LayeredAccess for AwaitTreeAccessor {
     type BlockingWriter = AwaitTreeWrapper;
    type Lister = AwaitTreeWrapper;
    type BlockingLister = AwaitTreeWrapper;
+    type Deleter
= AwaitTreeWrapper; + type BlockingDeleter = AwaitTreeWrapper; fn inner(&self) -> &Self::Inner { &self.inner @@ -118,11 +120,12 @@ impl LayeredAccess for AwaitTreeAccessor { .await } - async fn delete(&self, path: &str, args: OpDelete) -> Result { + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { self.inner - .delete(path, args) + .delete() .instrument_await(format!("opendal::{}", Operation::Delete)) .await + .map(|(rp, r)| (rp, AwaitTreeWrapper::new(r))) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -140,13 +143,6 @@ impl LayeredAccess for AwaitTreeAccessor { .await } - async fn batch(&self, args: OpBatch) -> Result { - self.inner - .batch(args) - .instrument_await(format!("opendal::{}", Operation::Batch)) - .await - } - fn blocking_read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::BlockingReader)> { self.inner .blocking_read(path, args) @@ -164,6 +160,12 @@ impl LayeredAccess for AwaitTreeAccessor { .blocking_list(path, args) .map(|(rp, r)| (rp, AwaitTreeWrapper::new(r))) } + + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.inner + .blocking_delete() + .map(|(rp, r)| (rp, AwaitTreeWrapper::new(r))) + } } pub struct AwaitTreeWrapper { @@ -235,3 +237,26 @@ impl oio::BlockingList for AwaitTreeWrapper { self.inner.next() } } + +impl oio::Delete for AwaitTreeWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + self.inner.delete(path, args) + } + + async fn flush(&mut self) -> Result { + self.inner + .flush() + .instrument_await(format!("opendal::{}", Operation::DeleterFlush)) + .await + } +} + +impl oio::BlockingDelete for AwaitTreeWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + self.inner.delete(path, args) + } + + fn flush(&mut self) -> Result { + self.inner.flush() + } +} diff --git a/core/src/layers/blocking.rs b/core/src/layers/blocking.rs index 6972849799d5..fc97a33703af 100644 --- a/core/src/layers/blocking.rs +++ b/core/src/layers/blocking.rs @@ -168,6 +168,8 @@ impl LayeredAccess for BlockingAccessor { type BlockingWriter = BlockingWrapper; type Lister = A::Lister; type BlockingLister = BlockingWrapper; + type Deleter = A::Deleter; + type BlockingDeleter = BlockingWrapper; fn inner(&self) -> &Self::Inner { &self.inner @@ -203,8 +205,8 @@ impl LayeredAccess for BlockingAccessor { self.inner.stat(path, args).await } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - self.inner.delete(path, args).await + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.inner.delete().await } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -215,10 +217,6 @@ impl LayeredAccess for BlockingAccessor { self.inner.presign(path, args).await } - async fn batch(&self, args: OpBatch) -> Result { - self.inner.batch(args).await - } - fn blocking_create_dir(&self, path: &str, args: OpCreateDir) -> Result { self.handle.block_on(self.inner.create_dir(path, args)) } @@ -252,8 +250,12 @@ impl LayeredAccess for BlockingAccessor { self.handle.block_on(self.inner.stat(path, args)) } - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - self.handle.block_on(self.inner.delete(path, args)) + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.handle.block_on(async { + let (rp, writer) = self.inner.delete().await?; + let blocking_deleter = Self::BlockingDeleter::new(self.handle.clone(), writer); + Ok((rp, blocking_deleter)) + }) } fn blocking_list(&self, 
path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { @@ -298,6 +300,16 @@ impl oio::BlockingList for BlockingWrapper { } } +impl oio::BlockingDelete for BlockingWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + self.inner.delete(path, args) + } + + fn flush(&mut self) -> Result { + self.handle.block_on(self.inner.flush()) + } +} + #[cfg(test)] mod tests { use once_cell::sync::Lazy; diff --git a/core/src/layers/capability_check.rs b/core/src/layers/capability_check.rs index 0066e031b3f1..7a5064bc6b27 100644 --- a/core/src/layers/capability_check.rs +++ b/core/src/layers/capability_check.rs @@ -82,11 +82,13 @@ impl Debug for CapabilityAccessor { impl LayeredAccess for CapabilityAccessor { type Inner = A; type Reader = A::Reader; - type BlockingReader = A::BlockingReader; type Writer = A::Writer; - type BlockingWriter = A::BlockingWriter; type Lister = A::Lister; + type Deleter = A::Deleter; + type BlockingReader = A::BlockingReader; + type BlockingWriter = A::BlockingWriter; type BlockingLister = A::BlockingLister; + type BlockingDeleter = A::BlockingDeleter; fn inner(&self) -> &Self::Inner { &self.inner @@ -123,6 +125,10 @@ impl LayeredAccess for CapabilityAccessor { self.inner.write(path, args).await } + async fn delete(&self) -> crate::Result<(RpDelete, Self::Deleter)> { + self.inner.delete().await + } + async fn list(&self, path: &str, args: OpList) -> crate::Result<(RpList, Self::Lister)> { let capability = self.info.full_capability(); if !capability.list_with_version && args.version() { @@ -175,6 +181,10 @@ impl LayeredAccess for CapabilityAccessor { self.inner.blocking_write(path, args) } + fn blocking_delete(&self) -> crate::Result<(RpDelete, Self::BlockingDeleter)> { + self.inner.blocking_delete() + } + fn blocking_list( &self, path: &str, @@ -207,9 +217,11 @@ mod tests { type Reader = oio::Reader; type Writer = oio::Writer; type Lister = oio::Lister; + type Deleter = oio::Deleter; type BlockingReader = oio::BlockingReader; type BlockingWriter = oio::BlockingWriter; type BlockingLister = oio::BlockingLister; + type BlockingDeleter = oio::BlockingDeleter; fn info(&self) -> Arc { let mut info = AccessorInfo::default(); diff --git a/core/src/layers/chaos.rs b/core/src/layers/chaos.rs index 2432d21e08e5..342e56857b92 100644 --- a/core/src/layers/chaos.rs +++ b/core/src/layers/chaos.rs @@ -105,6 +105,8 @@ impl LayeredAccess for ChaosAccessor { type BlockingWriter = A::BlockingWriter; type Lister = A::Lister; type BlockingLister = A::BlockingLister; + type Deleter = A::Deleter; + type BlockingDeleter = A::BlockingDeleter; fn inner(&self) -> &Self::Inner { &self.inner @@ -138,6 +140,14 @@ impl LayeredAccess for ChaosAccessor { fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { self.inner.blocking_list(path, args) } + + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.inner.delete().await + } + + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.inner.blocking_delete() + } } /// ChaosReader will inject error into read operations. 
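Every layer hunk in this patch repeats the same shape: the accessor's `delete` no longer takes a path and instead returns an `(RpDelete, Deleter)` pair, and each layer wraps the inner deleter with its own `oio::Delete` / `oio::BlockingDelete` implementation. As a reading aid, below is a minimal sketch of how a caller might drive the new contract directly: queue paths with `delete`, then dispatch with `flush`, which returns how many queued entries were processed. The helper name `remove_all`, the `OpDelete::default()` construction, and the exact import paths and trait bounds are illustrative assumptions, not part of this patch.

```rust
use opendal::raw::oio::Delete;
use opendal::raw::{Access, OpDelete};
use opendal::Result;

// Sketch only: queue a batch of deletions on the deleter returned by the
// accessor, then flush until everything queued has been dispatched.
async fn remove_all<A: Access>(acc: &A, paths: &[&str]) -> Result<()> {
    let (_rp, mut deleter) = acc.delete().await?;

    let mut queued = 0usize;
    for path in paths {
        // `delete` only enqueues the path; nothing is sent to the service yet.
        deleter.delete(path, OpDelete::default())?;
        queued += 1;
    }

    // `flush` reports how many queued entries were processed in this call,
    // so keep flushing until the queue drains.
    while queued > 0 {
        queued -= deleter.flush().await?;
    }
    Ok(())
}
```

The `queued`/`deleted` counters kept by `LoggingDeleter` further down follow the same accounting: each successful `flush` moves entries from queued to deleted.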
diff --git a/core/src/layers/complete.rs b/core/src/layers/complete.rs index 31a3fb03e273..cdabe3fcc035 100644 --- a/core/src/layers/complete.rs +++ b/core/src/layers/complete.rs @@ -336,6 +336,8 @@ impl LayeredAccess for CompleteAccessor { type BlockingWriter = CompleteWriter; type Lister = CompleteLister; type BlockingLister = CompleteLister; + type Deleter = A::Deleter; + type BlockingDeleter = A::BlockingDeleter; fn inner(&self) -> &Self::Inner { &self.inner @@ -373,10 +375,18 @@ impl LayeredAccess for CompleteAccessor { self.complete_stat(path, args).await } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.inner().delete().await + } + async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { self.complete_list(path, args).await } + async fn presign(&self, path: &str, args: OpPresign) -> Result { + self.inner.presign(path, args).await + } + fn blocking_create_dir(&self, path: &str, args: OpCreateDir) -> Result { self.complete_blocking_create_dir(path, args) } @@ -398,6 +408,10 @@ impl LayeredAccess for CompleteAccessor { self.complete_blocking_stat(path, args) } + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.inner().blocking_delete() + } + fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { self.complete_blocking_list(path, args) } diff --git a/core/src/layers/concurrent_limit.rs b/core/src/layers/concurrent_limit.rs index bd17666442b3..2fca982aacd9 100644 --- a/core/src/layers/concurrent_limit.rs +++ b/core/src/layers/concurrent_limit.rs @@ -84,6 +84,8 @@ impl LayeredAccess for ConcurrentLimitAccessor { type BlockingWriter = ConcurrentLimitWrapper; type Lister = ConcurrentLimitWrapper; type BlockingLister = ConcurrentLimitWrapper; + type Deleter = ConcurrentLimitWrapper; + type BlockingDeleter = ConcurrentLimitWrapper; fn inner(&self) -> &Self::Inner { &self.inner @@ -137,14 +139,18 @@ impl LayeredAccess for ConcurrentLimitAccessor { self.inner.stat(path, args).await } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - let _permit = self + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + let permit = self .semaphore - .acquire() + .clone() + .acquire_owned() .await .expect("semaphore must be valid"); - self.inner.delete(path, args).await + self.inner + .delete() + .await + .map(|(rp, w)| (rp, ConcurrentLimitWrapper::new(w, permit))) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -161,16 +167,6 @@ impl LayeredAccess for ConcurrentLimitAccessor { .map(|(rp, s)| (rp, ConcurrentLimitWrapper::new(s, permit))) } - async fn batch(&self, args: OpBatch) -> Result { - let _permit = self - .semaphore - .acquire() - .await - .expect("semaphore must be valid"); - - self.inner.batch(args).await - } - fn blocking_create_dir(&self, path: &str, args: OpCreateDir) -> Result { let _permit = self .semaphore @@ -213,13 +209,16 @@ impl LayeredAccess for ConcurrentLimitAccessor { self.inner.blocking_stat(path, args) } - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - let _permit = self + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + let permit = self .semaphore - .try_acquire() + .clone() + .try_acquire_owned() .expect("semaphore must be valid"); - self.inner.blocking_delete(path, args) + self.inner + .blocking_delete() + .map(|(rp, w)| (rp, ConcurrentLimitWrapper::new(w, permit))) } fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, 
Self::BlockingLister)> { @@ -298,3 +297,23 @@ impl oio::BlockingList for ConcurrentLimitWrapper { self.inner.next() } } + +impl oio::Delete for ConcurrentLimitWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + self.inner.delete(path, args) + } + + async fn flush(&mut self) -> Result { + self.inner.flush().await + } +} + +impl oio::BlockingDelete for ConcurrentLimitWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + self.inner.delete(path, args) + } + + fn flush(&mut self) -> Result { + self.inner.flush() + } +} diff --git a/core/src/layers/correctness_check.rs b/core/src/layers/correctness_check.rs index a2368d54d154..b3a08f8970fe 100644 --- a/core/src/layers/correctness_check.rs +++ b/core/src/layers/correctness_check.rs @@ -16,6 +16,7 @@ // under the License. use std::fmt::{Debug, Formatter}; +use std::future::Future; use std::sync::Arc; use crate::raw::*; @@ -76,11 +77,13 @@ impl Debug for CorrectnessAccessor { impl LayeredAccess for CorrectnessAccessor { type Inner = A; type Reader = A::Reader; - type BlockingReader = A::BlockingReader; type Writer = A::Writer; - type BlockingWriter = A::BlockingWriter; type Lister = A::Lister; + type Deleter = CheckWrapper; + type BlockingReader = A::BlockingReader; + type BlockingWriter = A::BlockingWriter; type BlockingLister = A::BlockingLister; + type BlockingDeleter = CheckWrapper; fn inner(&self) -> &Self::Inner { &self.inner @@ -143,17 +146,11 @@ impl LayeredAccess for CorrectnessAccessor { self.inner.stat(path, args).await } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - let capability = self.info.full_capability(); - if !capability.delete_with_version && args.version().is_some() { - return Err(new_unsupported_error( - self.info.as_ref(), - Operation::Delete, - "version", - )); - } - - self.inner.delete(path, args).await + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.inner.delete().await.map(|(rp, deleter)| { + let deleter = CheckWrapper::new(deleter, self.info.clone()); + (rp, deleter) + }) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -213,21 +210,60 @@ impl LayeredAccess for CorrectnessAccessor { self.inner.blocking_stat(path, args) } - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - let capability = self.info.full_capability(); - if !capability.delete_with_version && args.version().is_some() { + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.inner.blocking_delete().map(|(rp, deleter)| { + let deleter = CheckWrapper::new(deleter, self.info.clone()); + (rp, deleter) + }) + } + + fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { + self.inner.blocking_list(path, args) + } +} + +pub struct CheckWrapper { + info: Arc, + inner: T, +} + +impl CheckWrapper { + fn new(inner: T, info: Arc) -> Self { + Self { inner, info } + } + + fn check_delete(&self, args: &OpDelete) -> Result<()> { + if args.version().is_some() && !self.info.full_capability().delete_with_version { return Err(new_unsupported_error( - self.info.as_ref(), - Operation::BlockingDelete, + &self.info, + Operation::DeleterDelete, "version", )); } - self.inner().blocking_delete(path, args) + Ok(()) } +} - fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { - self.inner.blocking_list(path, args) +impl oio::Delete for CheckWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + 
self.check_delete(&args)?; + self.inner.delete(path, args) + } + + fn flush(&mut self) -> impl Future> + MaybeSend { + self.inner.flush() + } +} + +impl oio::BlockingDelete for CheckWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + self.check_delete(&args)?; + self.inner.delete(path, args) + } + + fn flush(&mut self) -> Result { + self.inner.flush() } } @@ -246,9 +282,11 @@ mod tests { type Reader = oio::Reader; type Writer = oio::Writer; type Lister = oio::Lister; + type Deleter = oio::Deleter; type BlockingReader = oio::BlockingReader; type BlockingWriter = oio::BlockingWriter; type BlockingLister = oio::BlockingLister; + type BlockingDeleter = oio::BlockingDeleter; fn info(&self) -> Arc { let mut info = AccessorInfo::default(); @@ -269,12 +307,24 @@ mod tests { Ok((RpWrite::new(), Box::new(()))) } - async fn delete(&self, _: &str, _: OpDelete) -> Result { - Ok(RpDelete {}) + async fn list(&self, _: &str, _: OpList) -> Result<(RpList, Self::Lister)> { + Ok((RpList::default(), Box::new(()))) } - async fn list(&self, _: &str, _: OpList) -> Result<(RpList, Self::Lister)> { - Ok((RpList {}, Box::new(()))) + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok((RpDelete::default(), Box::new(MockDeleter))) + } + } + + struct MockDeleter; + + impl oio::Delete for MockDeleter { + fn delete(&mut self, _: &str, _: OpDelete) -> Result<()> { + Ok(()) + } + + async fn flush(&mut self) -> Result { + Ok(1) } } diff --git a/core/src/layers/dtrace.rs b/core/src/layers/dtrace.rs index 7cfa8ac55dd8..b3c181ce1e04 100644 --- a/core/src/layers/dtrace.rs +++ b/core/src/layers/dtrace.rs @@ -68,34 +68,21 @@ use crate::*; /// 1. reader_read_start, arguments: path /// 2. reader_read_ok, arguments: path, length /// 3. reader_read_error, arguments: path -/// 4. reader_seek_start, arguments: path -/// 5. reader_seek_ok, arguments: path, offset -/// 6. reader_seek_error, arguments: path -/// 7. reader_next_start, arguments: path -/// 8. reader_next_ok, arguments: path, length -/// 9. reader_next_error, arguments: path -/// 10. reader_next_end, arguments: path /// /// ### For BlockingReader /// /// 1. blocking_reader_read_start, arguments: path /// 2. blocking_reader_read_ok, arguments: path, length /// 3. blocking_reader_read_error, arguments: path -/// 4. blocking_reader_seek_start, arguments: path -/// 5. blocking_reader_seek_ok, arguments: path, offset -/// 6. blocking_reader_seek_error, arguments: path -/// 7. blocking_reader_next_start, arguments: path -/// 8. blocking_reader_next_ok, arguments: path, length -/// 9. blocking_reader_next_error, arguments: path /// /// ### For Writer /// /// 1. writer_write_start, arguments: path /// 2. writer_write_ok, arguments: path, length /// 3. writer_write_error, arguments: path -/// 4. writer_poll_abort_start, arguments: path -/// 5. writer_poll_abort_ok, arguments: path -/// 6. writer_poll_abort_error, arguments: path +/// 4. writer_abort_start, arguments: path +/// 5. writer_abort_ok, arguments: path +/// 6. writer_abort_error, arguments: path /// 7. writer_close_start, arguments: path /// 8. writer_close_ok, arguments: path /// 9. 
writer_close_error, arguments: path @@ -192,6 +179,8 @@ impl LayeredAccess for DTraceAccessor { type BlockingWriter = DtraceLayerWrapper; type Lister = A::Lister; type BlockingLister = A::BlockingLister; + type Deleter = A::Deleter; + type BlockingDeleter = A::BlockingDeleter; fn inner(&self) -> &Self::Inner { &self.inner @@ -238,12 +227,8 @@ impl LayeredAccess for DTraceAccessor { result } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - let c_path = CString::new(path).unwrap(); - probe_lazy!(opendal, delete_start, c_path.as_ptr()); - let result = self.inner.delete(path, args).await; - probe_lazy!(opendal, delete_end, c_path.as_ptr()); - result + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.inner.delete().await } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -254,10 +239,6 @@ impl LayeredAccess for DTraceAccessor { result } - async fn batch(&self, args: OpBatch) -> Result { - self.inner.batch(args).await - } - async fn presign(&self, path: &str, args: OpPresign) -> Result { let c_path = CString::new(path).unwrap(); probe_lazy!(opendal, presign_start, c_path.as_ptr()); @@ -304,12 +285,8 @@ impl LayeredAccess for DTraceAccessor { result } - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - let c_path = CString::new(path).unwrap(); - probe_lazy!(opendal, blocking_delete_start, c_path.as_ptr()); - let result = self.inner.blocking_delete(path, args); - probe_lazy!(opendal, blocking_delete_end, c_path.as_ptr()); - result + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.inner.blocking_delete() } fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { diff --git a/core/src/layers/error_context.rs b/core/src/layers/error_context.rs index a160a63772ec..305fc82d35f3 100644 --- a/core/src/layers/error_context.rs +++ b/core/src/layers/error_context.rs @@ -39,6 +39,7 @@ use crate::*; /// - `size`: The size of the current write operation. /// - `written`: The already written size in given writer. /// - `listed`: The already listed size in given lister. +/// - `deleted`: The already deleted size in given deleter. 
pub struct ErrorContextLayer; impl Layer for ErrorContextLayer { @@ -70,6 +71,8 @@ impl LayeredAccess for ErrorContextAccessor { type BlockingWriter = ErrorContextWrapper; type Lister = ErrorContextWrapper; type BlockingLister = ErrorContextWrapper; + type Deleter = ErrorContextWrapper; + type BlockingDeleter = ErrorContextWrapper; fn inner(&self) -> &Self::Inner { &self.inner @@ -150,54 +153,36 @@ impl LayeredAccess for ErrorContextAccessor { }) } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - self.inner.delete(path, args).await.map_err(|err| { - err.with_operation(Operation::Delete) - .with_context("service", self.info.scheme()) - .with_context("path", path) - }) - } - - async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { self.inner - .list(path, args) + .delete() .await - .map(|(rp, p)| { + .map(|(rp, w)| { ( rp, - ErrorContextWrapper::new(self.info.scheme(), path.to_string(), p), + ErrorContextWrapper::new(self.info.scheme(), "".to_string(), w), ) }) .map_err(|err| { - err.with_operation(Operation::List) + err.with_operation(Operation::Delete) .with_context("service", self.info.scheme()) - .with_context("path", path) }) } - async fn batch(&self, args: OpBatch) -> Result { + async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { self.inner - .batch(args) + .list(path, args) .await - .map(|v| { - let res = v - .into_results() - .into_iter() - .map(|(path, res)| { - let res = res.map_err(|err| { - err.with_operation(Operation::Delete) - .with_context("service", self.info.scheme()) - .with_context("path", &path) - }); - (path, res) - }) - .collect(); - - RpBatch::new(res) + .map(|(rp, p)| { + ( + rp, + ErrorContextWrapper::new(self.info.scheme(), path.to_string(), p), + ) }) .map_err(|err| { - err.with_operation(Operation::Batch) + err.with_operation(Operation::List) .with_context("service", self.info.scheme()) + .with_context("path", path) }) } @@ -278,12 +263,19 @@ impl LayeredAccess for ErrorContextAccessor { }) } - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - self.inner.blocking_delete(path, args).map_err(|err| { - err.with_operation(Operation::BlockingDelete) - .with_context("service", self.info.scheme()) - .with_context("path", path) - }) + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.inner + .blocking_delete() + .map(|(rp, w)| { + ( + rp, + ErrorContextWrapper::new(self.info.scheme(), "".to_string(), w), + ) + }) + .map_err(|err| { + err.with_operation(Operation::BlockingDelete) + .with_context("service", self.info.scheme()) + }) } fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { @@ -463,3 +455,54 @@ impl oio::BlockingList for ErrorContextWrapper { }) } } + +impl oio::Delete for ErrorContextWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + self.inner.delete(path, args).map_err(|err| { + err.with_operation(Operation::DeleterDelete) + .with_context("service", self.scheme) + .with_context("path", path) + .with_context("deleted", self.processed.to_string()) + }) + } + + async fn flush(&mut self) -> Result { + self.inner + .flush() + .await + .map(|n| { + self.processed += n as u64; + n + }) + .map_err(|err| { + err.with_operation(Operation::DeleterFlush) + .with_context("service", self.scheme) + .with_context("deleted", self.processed.to_string()) + }) + } +} + +impl oio::BlockingDelete for ErrorContextWrapper { + fn 
delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + self.inner.delete(path, args).map_err(|err| { + err.with_operation(Operation::DeleterDelete) + .with_context("service", self.scheme) + .with_context("path", path) + .with_context("deleted", self.processed.to_string()) + }) + } + + fn flush(&mut self) -> Result { + self.inner + .flush() + .map(|n| { + self.processed += n as u64; + n + }) + .map_err(|err| { + err.with_operation(Operation::DeleterFlush) + .with_context("service", self.scheme) + .with_context("deleted", self.processed.to_string()) + }) + } +} diff --git a/core/src/layers/fastrace.rs b/core/src/layers/fastrace.rs index 51153d616cc7..56470c0f87d9 100644 --- a/core/src/layers/fastrace.rs +++ b/core/src/layers/fastrace.rs @@ -126,6 +126,8 @@ impl LayeredAccess for FastraceAccessor { type BlockingWriter = FastraceWrapper; type Lister = FastraceWrapper; type BlockingLister = FastraceWrapper; + type Deleter = FastraceWrapper; + type BlockingDeleter = FastraceWrapper; fn inner(&self) -> &Self::Inner { &self.inner @@ -136,7 +138,7 @@ impl LayeredAccess for FastraceAccessor { self.inner.info() } - #[trace(name = "create", enter_on_poll = true)] + #[trace(enter_on_poll = true)] async fn create_dir(&self, path: &str, args: OpCreateDir) -> Result { self.inner.create_dir(path, args).await } @@ -146,7 +148,10 @@ impl LayeredAccess for FastraceAccessor { self.inner.read(path, args).await.map(|(rp, r)| { ( rp, - FastraceWrapper::new(Span::enter_with_local_parent("ReadOperation"), r), + FastraceWrapper::new( + Span::enter_with_local_parent(Operation::Read.into_static()), + r, + ), ) }) } @@ -156,7 +161,10 @@ impl LayeredAccess for FastraceAccessor { self.inner.write(path, args).await.map(|(rp, r)| { ( rp, - FastraceWrapper::new(Span::enter_with_local_parent("WriteOperation"), r), + FastraceWrapper::new( + Span::enter_with_local_parent(Operation::Write.into_static()), + r, + ), ) }) } @@ -177,8 +185,16 @@ impl LayeredAccess for FastraceAccessor { } #[trace(enter_on_poll = true)] - async fn delete(&self, path: &str, args: OpDelete) -> Result { - self.inner.delete(path, args).await + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.inner.delete().await.map(|(rp, r)| { + ( + rp, + FastraceWrapper::new( + Span::enter_with_local_parent(Operation::Delete.into_static()), + r, + ), + ) + }) } #[trace(enter_on_poll = true)] @@ -186,7 +202,10 @@ impl LayeredAccess for FastraceAccessor { self.inner.list(path, args).await.map(|(rp, s)| { ( rp, - FastraceWrapper::new(Span::enter_with_local_parent("ListOperation"), s), + FastraceWrapper::new( + Span::enter_with_local_parent(Operation::List.into_static()), + s, + ), ) }) } @@ -196,11 +215,6 @@ impl LayeredAccess for FastraceAccessor { self.inner.presign(path, args).await } - #[trace(enter_on_poll = true)] - async fn batch(&self, args: OpBatch) -> Result { - self.inner.batch(args).await - } - #[trace] fn blocking_create_dir(&self, path: &str, args: OpCreateDir) -> Result { self.inner.blocking_create_dir(path, args) @@ -211,7 +225,10 @@ impl LayeredAccess for FastraceAccessor { self.inner.blocking_read(path, args).map(|(rp, r)| { ( rp, - FastraceWrapper::new(Span::enter_with_local_parent("ReadOperation"), r), + FastraceWrapper::new( + Span::enter_with_local_parent(Operation::BlockingRead.into_static()), + r, + ), ) }) } @@ -221,7 +238,10 @@ impl LayeredAccess for FastraceAccessor { self.inner.blocking_write(path, args).map(|(rp, r)| { ( rp, - FastraceWrapper::new(Span::enter_with_local_parent("WriteOperation"), r), + 
FastraceWrapper::new( + Span::enter_with_local_parent(Operation::BlockingWrite.into_static()), + r, + ), ) }) } @@ -242,8 +262,16 @@ impl LayeredAccess for FastraceAccessor { } #[trace] - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - self.inner.blocking_delete(path, args) + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.inner.blocking_delete().map(|(rp, r)| { + ( + rp, + FastraceWrapper::new( + Span::enter_with_local_parent(Operation::BlockingDelete.into_static()), + r, + ), + ) + }) } #[trace] @@ -251,7 +279,10 @@ impl LayeredAccess for FastraceAccessor { self.inner.blocking_list(path, args).map(|(rp, it)| { ( rp, - FastraceWrapper::new(Span::enter_with_local_parent("PageOperation"), it), + FastraceWrapper::new( + Span::enter_with_local_parent(Operation::BlockingList.into_static()), + it, + ), ) }) } @@ -333,3 +364,32 @@ impl oio::BlockingList for FastraceWrapper { self.inner.next() } } + +impl oio::Delete for FastraceWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + let _g = self.span.set_local_parent(); + let _span = LocalSpan::enter_with_local_parent(Operation::DeleterDelete.into_static()); + self.inner.delete(path, args) + } + + #[trace(enter_on_poll = true)] + async fn flush(&mut self) -> Result { + self.inner.flush().await + } +} + +impl oio::BlockingDelete for FastraceWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + let _g = self.span.set_local_parent(); + let _span = + LocalSpan::enter_with_local_parent(Operation::BlockingDeleterDelete.into_static()); + self.inner.delete(path, args) + } + + fn flush(&mut self) -> Result { + let _g = self.span.set_local_parent(); + let _span = + LocalSpan::enter_with_local_parent(Operation::BlockingDeleterFlush.into_static()); + self.inner.flush() + } +} diff --git a/core/src/layers/immutable_index.rs b/core/src/layers/immutable_index.rs index 4a54adb81658..5a41019d431c 100644 --- a/core/src/layers/immutable_index.rs +++ b/core/src/layers/immutable_index.rs @@ -138,11 +138,13 @@ impl ImmutableIndexAccessor { impl LayeredAccess for ImmutableIndexAccessor { type Inner = A; type Reader = A::Reader; - type BlockingReader = A::BlockingReader; type Writer = A::Writer; - type BlockingWriter = A::BlockingWriter; type Lister = ImmutableDir; + type Deleter = A::Deleter; + type BlockingReader = A::BlockingReader; + type BlockingWriter = A::BlockingWriter; type BlockingLister = ImmutableDir; + type BlockingDeleter = A::BlockingDeleter; fn inner(&self) -> &Self::Inner { &self.inner @@ -163,6 +165,10 @@ impl LayeredAccess for ImmutableIndexAccessor { self.inner.read(path, args).await } + async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { + self.inner.write(path, args).await + } + async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { let mut path = path; if path == "/" { @@ -178,12 +184,12 @@ impl LayeredAccess for ImmutableIndexAccessor { Ok((RpList::default(), ImmutableDir::new(idx))) } - fn blocking_read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::BlockingReader)> { - self.inner.blocking_read(path, args) + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.inner.delete().await } - async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { - self.inner.write(path, args).await + fn blocking_read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::BlockingReader)> { + self.inner.blocking_read(path, args) } fn 
blocking_write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::BlockingWriter)> { @@ -204,6 +210,10 @@ impl LayeredAccess for ImmutableIndexAccessor { Ok((RpList::default(), ImmutableDir::new(idx))) } + + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.inner.blocking_delete() + } } pub struct ImmutableDir { diff --git a/core/src/layers/logging.rs b/core/src/layers/logging.rs index 30ec40ebb689..f7723bc90290 100644 --- a/core/src/layers/logging.rs +++ b/core/src/layers/logging.rs @@ -261,6 +261,8 @@ impl LayeredAccess for LoggingAccessor { type BlockingWriter = LoggingWriter; type Lister = LoggingLister; type BlockingLister = LoggingLister; + type Deleter = LoggingDeleter; + type BlockingDeleter = LoggingDeleter; fn inner(&self) -> &Self::Inner { &self.inner @@ -486,36 +488,22 @@ impl LayeredAccess for LoggingAccessor { }) } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - self.logger.log( - &self.info, - Operation::Delete, - &[("path", path)], - "started", - None, - ); + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.logger + .log(&self.info, Operation::Delete, &[], "started", None); self.inner - .delete(path, args.clone()) + .delete() .await - .map(|v| { - self.logger.log( - &self.info, - Operation::Delete, - &[("path", path)], - "finished", - None, - ); - v + .map(|(rp, d)| { + self.logger + .log(&self.info, Operation::Delete, &[], "finished", None); + let d = LoggingDeleter::new(self.info.clone(), self.logger.clone(), d); + (rp, d) }) .map_err(|err| { - self.logger.log( - &self.info, - Operation::Delete, - &[("path", path)], - "failed", - Some(&err), - ); + self.logger + .log(&self.info, Operation::Delete, &[], "failed", Some(&err)); err }) } @@ -589,47 +577,6 @@ impl LayeredAccess for LoggingAccessor { }) } - async fn batch(&self, args: OpBatch) -> Result { - let (op, count) = (args.operation()[0].1.operation(), args.operation().len()); - - self.logger.log( - &self.info, - Operation::Batch, - &[("op", op.into_static()), ("count", &count.to_string())], - "started", - None, - ); - - self.inner - .batch(args) - .await - .map(|v| { - self.logger.log( - &self.info, - Operation::Batch, - &[("op", op.into_static()), ("count", &count.to_string())], - &format!( - "finished: {}, succeed: {}, failed: {}", - v.results().len(), - v.results().iter().filter(|(_, v)| v.is_ok()).count(), - v.results().iter().filter(|(_, v)| v.is_err()).count(), - ), - None, - ); - v - }) - .map_err(|err| { - self.logger.log( - &self.info, - Operation::Batch, - &[("op", op.into_static()), ("count", &count.to_string())], - "failed", - Some(&err), - ); - err - }) - } - fn blocking_create_dir(&self, path: &str, args: OpCreateDir) -> Result { self.logger.log( &self.info, @@ -830,32 +777,23 @@ impl LayeredAccess for LoggingAccessor { }) } - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - self.logger.log( - &self.info, - Operation::BlockingDelete, - &[("path", path)], - "started", - None, - ); + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.logger + .log(&self.info, Operation::BlockingDelete, &[], "started", None); self.inner - .blocking_delete(path, args) - .map(|v| { - self.logger.log( - &self.info, - Operation::BlockingDelete, - &[("path", path)], - "finished", - None, - ); - v + .blocking_delete() + .map(|(rp, d)| { + self.logger + .log(&self.info, Operation::BlockingDelete, &[], "finished", None); + let d = LoggingDeleter::new(self.info.clone(), self.logger.clone(), d); + (rp, d) }) 
.map_err(|err| { self.logger.log( &self.info, Operation::BlockingDelete, - &[("path", path)], + &[], "failed", Some(&err), ); @@ -1351,3 +1289,223 @@ impl oio::BlockingList for LoggingL res } } + +pub struct LoggingDeleter { + info: Arc, + logger: I, + + queued: usize, + deleted: usize, + inner: D, +} + +impl LoggingDeleter { + fn new(info: Arc, logger: I, inner: D) -> Self { + Self { + info, + logger, + + queued: 0, + deleted: 0, + inner, + } + } +} + +impl oio::Delete for LoggingDeleter { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + let version = args + .version() + .map(|v| v.to_string()) + .unwrap_or_else(|| "".to_string()); + + self.logger.log( + &self.info, + Operation::DeleterDelete, + &[("path", path), ("version", &version)], + "started", + None, + ); + + let res = self.inner.delete(path, args); + + match &res { + Ok(_) => { + self.queued += 1; + self.logger.log( + &self.info, + Operation::DeleterDelete, + &[ + ("path", path), + ("version", &version), + ("queued", &self.queued.to_string()), + ("deleted", &self.deleted.to_string()), + ], + "succeeded", + None, + ); + } + Err(err) => { + self.logger.log( + &self.info, + Operation::DeleterDelete, + &[ + ("path", path), + ("version", &version), + ("queued", &self.queued.to_string()), + ("deleted", &self.deleted.to_string()), + ], + "failed", + Some(err), + ); + } + }; + + res + } + + async fn flush(&mut self) -> Result { + self.logger.log( + &self.info, + Operation::DeleterFlush, + &[ + ("queued", &self.queued.to_string()), + ("deleted", &self.deleted.to_string()), + ], + "started", + None, + ); + + let res = self.inner.flush().await; + + match &res { + Ok(flushed) => { + self.queued -= flushed; + self.deleted += flushed; + self.logger.log( + &self.info, + Operation::DeleterFlush, + &[ + ("queued", &self.queued.to_string()), + ("deleted", &self.deleted.to_string()), + ], + "succeeded", + None, + ); + } + Err(err) => { + self.logger.log( + &self.info, + Operation::DeleterFlush, + &[ + ("queued", &self.queued.to_string()), + ("deleted", &self.deleted.to_string()), + ], + "failed", + Some(err), + ); + } + }; + + res + } +} + +impl oio::BlockingDelete for LoggingDeleter { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + let version = args + .version() + .map(|v| v.to_string()) + .unwrap_or_else(|| "".to_string()); + + self.logger.log( + &self.info, + Operation::BlockingDeleterDelete, + &[("path", path), ("version", &version)], + "started", + None, + ); + + let res = self.inner.delete(path, args); + + match &res { + Ok(_) => { + self.queued += 1; + self.logger.log( + &self.info, + Operation::BlockingDeleterDelete, + &[ + ("path", path), + ("version", &version), + ("queued", &self.queued.to_string()), + ("deleted", &self.deleted.to_string()), + ], + "succeeded", + None, + ); + } + Err(err) => { + self.logger.log( + &self.info, + Operation::BlockingDeleterDelete, + &[ + ("path", path), + ("version", &version), + ("queued", &self.queued.to_string()), + ("deleted", &self.deleted.to_string()), + ], + "failed", + Some(err), + ); + } + }; + + res + } + + fn flush(&mut self) -> Result { + self.logger.log( + &self.info, + Operation::BlockingDeleterFlush, + &[ + ("queued", &self.queued.to_string()), + ("deleted", &self.deleted.to_string()), + ], + "started", + None, + ); + + let res = self.inner.flush(); + + match &res { + Ok(flushed) => { + self.queued -= flushed; + self.deleted += flushed; + self.logger.log( + &self.info, + Operation::BlockingDeleterFlush, + &[ + ("queued", &self.queued.to_string()), + 
("deleted", &self.deleted.to_string()), + ], + "succeeded", + None, + ); + } + Err(err) => { + self.logger.log( + &self.info, + Operation::BlockingDeleterFlush, + &[ + ("queued", &self.queued.to_string()), + ("deleted", &self.deleted.to_string()), + ], + "failed", + Some(err), + ); + } + }; + + res + } +} diff --git a/core/src/layers/mime_guess.rs b/core/src/layers/mime_guess.rs index b9fa1dc8ccf1..245aecf55ad5 100644 --- a/core/src/layers/mime_guess.rs +++ b/core/src/layers/mime_guess.rs @@ -106,27 +106,28 @@ fn rpstat_with_mime(path: &str, rp: RpStat) -> RpStat { impl LayeredAccess for MimeGuessAccessor { type Inner = A; type Reader = A::Reader; - type BlockingReader = A::BlockingReader; type Writer = A::Writer; - type BlockingWriter = A::BlockingWriter; type Lister = A::Lister; + type Deleter = A::Deleter; + type BlockingReader = A::BlockingReader; + type BlockingWriter = A::BlockingWriter; type BlockingLister = A::BlockingLister; + type BlockingDeleter = A::BlockingDeleter; fn inner(&self) -> &Self::Inner { &self.0 } + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { + self.inner().read(path, args).await + } + async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { self.inner() .write(path, opwrite_with_mime(path, args)) .await } - fn blocking_write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::BlockingWriter)> { - self.inner() - .blocking_write(path, opwrite_with_mime(path, args)) - } - async fn stat(&self, path: &str, args: OpStat) -> Result { self.inner() .stat(path, args) @@ -134,14 +135,8 @@ impl LayeredAccess for MimeGuessAccessor { .map(|rp| rpstat_with_mime(path, rp)) } - fn blocking_stat(&self, path: &str, args: OpStat) -> Result { - self.inner() - .blocking_stat(path, args) - .map(|rp| rpstat_with_mime(path, rp)) - } - - async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { - self.inner().read(path, args).await + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.inner().delete().await } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -152,6 +147,21 @@ impl LayeredAccess for MimeGuessAccessor { self.inner().blocking_read(path, args) } + fn blocking_write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::BlockingWriter)> { + self.inner() + .blocking_write(path, opwrite_with_mime(path, args)) + } + + fn blocking_stat(&self, path: &str, args: OpStat) -> Result { + self.inner() + .blocking_stat(path, args) + .map(|rp| rpstat_with_mime(path, rp)) + } + + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.inner().blocking_delete() + } + fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { self.inner().blocking_list(path, args) } diff --git a/core/src/layers/observe/metrics.rs b/core/src/layers/observe/metrics.rs index 40d7ddf1bedb..a6562734743b 100644 --- a/core/src/layers/observe/metrics.rs +++ b/core/src/layers/observe/metrics.rs @@ -179,6 +179,8 @@ impl LayeredAccess for MetricsAccessor { type BlockingWriter = MetricsWrapper; type Lister = MetricsWrapper; type BlockingLister = MetricsWrapper; + type Deleter = MetricsWrapper; + type BlockingDeleter = MetricsWrapper; fn inner(&self) -> &Self::Inner { &self.inner @@ -396,35 +398,48 @@ impl LayeredAccess for MetricsAccessor { }) } - async fn delete(&self, path: &str, args: OpDelete) -> Result { + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { let op = Operation::Delete; let start 
= Instant::now(); - self.inner() - .delete(path, args) + let (rp, writer) = self + .inner + .delete() .await .map(|v| { self.interceptor.observe_operation_duration_seconds( self.scheme, self.namespace.clone(), self.root.clone(), - path, + "", op, start.elapsed(), ); v }) - .map_err(move |err| { + .map_err(|err| { self.interceptor.observe_operation_errors_total( self.scheme, self.namespace.clone(), self.root.clone(), - path, + "", op, err.kind(), ); err - }) + })?; + + Ok(( + rp, + MetricsWrapper::new( + writer, + self.interceptor.clone(), + self.scheme, + self.namespace.clone(), + self.root.clone(), + "".to_string(), + ), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -471,37 +486,6 @@ impl LayeredAccess for MetricsAccessor { )) } - async fn batch(&self, args: OpBatch) -> Result { - let op = Operation::Batch; - - let start = Instant::now(); - self.inner() - .batch(args) - .await - .map(|v| { - self.interceptor.observe_operation_duration_seconds( - self.scheme, - self.namespace.clone(), - self.root.clone(), - "", - op, - start.elapsed(), - ); - v - }) - .map_err(move |err| { - self.interceptor.observe_operation_errors_total( - self.scheme, - self.namespace.clone(), - self.root.clone(), - "", - op, - err.kind(), - ); - err - }) - } - async fn presign(&self, path: &str, args: OpPresign) -> Result { let op = Operation::Presign; @@ -739,34 +723,47 @@ impl LayeredAccess for MetricsAccessor { }) } - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { let op = Operation::BlockingDelete; let start = Instant::now(); - self.inner() - .blocking_delete(path, args) + let (rp, writer) = self + .inner + .blocking_delete() .map(|v| { self.interceptor.observe_operation_duration_seconds( self.scheme, self.namespace.clone(), self.root.clone(), - path, + "", op, start.elapsed(), ); v }) - .map_err(move |err| { + .map_err(|err| { self.interceptor.observe_operation_errors_total( self.scheme, self.namespace.clone(), self.root.clone(), - path, + "", op, err.kind(), ); err - }) + })?; + + Ok(( + rp, + MetricsWrapper::new( + writer, + self.interceptor.clone(), + self.scheme, + self.namespace.clone(), + self.root.clone(), + "".to_string(), + ), + )) } fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { @@ -1166,3 +1163,127 @@ impl oio::BlockingList for MetricsWra res } } + +impl oio::Delete for MetricsWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + let op = Operation::DeleterDelete; + + let start = Instant::now(); + + let res = match self.inner.delete(path, args) { + Ok(entry) => Ok(entry), + Err(err) => { + self.interceptor.observe_operation_errors_total( + self.scheme, + self.namespace.clone(), + self.root.clone(), + &self.path, + op, + err.kind(), + ); + Err(err) + } + }; + self.interceptor.observe_operation_duration_seconds( + self.scheme, + self.namespace.clone(), + self.root.clone(), + &self.path, + op, + start.elapsed(), + ); + res + } + + async fn flush(&mut self) -> Result { + let op = Operation::DeleterFlush; + + let start = Instant::now(); + + let res = match self.inner.flush().await { + Ok(entry) => Ok(entry), + Err(err) => { + self.interceptor.observe_operation_errors_total( + self.scheme, + self.namespace.clone(), + self.root.clone(), + &self.path, + op, + err.kind(), + ); + Err(err) + } + }; + self.interceptor.observe_operation_duration_seconds( + self.scheme, + self.namespace.clone(), + 
self.root.clone(), + &self.path, + op, + start.elapsed(), + ); + res + } +} + +impl oio::BlockingDelete for MetricsWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + let op = Operation::BlockingDeleterDelete; + + let start = Instant::now(); + + let res = match self.inner.delete(path, args) { + Ok(entry) => Ok(entry), + Err(err) => { + self.interceptor.observe_operation_errors_total( + self.scheme, + self.namespace.clone(), + self.root.clone(), + &self.path, + op, + err.kind(), + ); + Err(err) + } + }; + self.interceptor.observe_operation_duration_seconds( + self.scheme, + self.namespace.clone(), + self.root.clone(), + &self.path, + op, + start.elapsed(), + ); + res + } + + fn flush(&mut self) -> Result { + let op = Operation::BlockingDeleterFlush; + + let start = Instant::now(); + + let res = match self.inner.flush() { + Ok(entry) => Ok(entry), + Err(err) => { + self.interceptor.observe_operation_errors_total( + self.scheme, + self.namespace.clone(), + self.root.clone(), + &self.path, + op, + err.kind(), + ); + Err(err) + } + }; + self.interceptor.observe_operation_duration_seconds( + self.scheme, + self.namespace.clone(), + self.root.clone(), + &self.path, + op, + start.elapsed(), + ); + res + } +} diff --git a/core/src/layers/oteltrace.rs b/core/src/layers/oteltrace.rs index 58cb7e4dfd4a..bdbc80279d8e 100644 --- a/core/src/layers/oteltrace.rs +++ b/core/src/layers/oteltrace.rs @@ -72,6 +72,8 @@ impl LayeredAccess for OtelTraceAccessor { type BlockingWriter = OtelTraceWrapper; type Lister = OtelTraceWrapper; type BlockingLister = OtelTraceWrapper; + type Deleter = A::Deleter; + type BlockingDeleter = A::BlockingDeleter; fn inner(&self) -> &Self::Inner { &self.inner @@ -142,13 +144,8 @@ impl LayeredAccess for OtelTraceAccessor { self.inner().stat(path, args).with_context(cx).await } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - let tracer = global::tracer("opendal"); - let mut span = tracer.start("delete"); - span.set_attribute(KeyValue::new("path", path.to_string())); - span.set_attribute(KeyValue::new("args", format!("{:?}", args))); - let cx = TraceContext::current_with_span(span); - self.inner().delete(path, args).with_context(cx).await + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.inner().delete().await } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -162,14 +159,6 @@ impl LayeredAccess for OtelTraceAccessor { .map(|(rp, s)| (rp, OtelTraceWrapper::new(span, s))) } - async fn batch(&self, args: OpBatch) -> Result { - let tracer = global::tracer("opendal"); - let mut span = tracer.start("batch"); - span.set_attribute(KeyValue::new("args", format!("{:?}", args))); - let cx = TraceContext::current_with_span(span); - self.inner().batch(args).with_context(cx).await - } - async fn presign(&self, path: &str, args: OpPresign) -> Result { let tracer = global::tracer("opendal"); let mut span = tracer.start("presign"); @@ -241,14 +230,8 @@ impl LayeredAccess for OtelTraceAccessor { }) } - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - let tracer = global::tracer("opendal"); - tracer.in_span("blocking_delete", |cx| { - let span = cx.span(); - span.set_attribute(KeyValue::new("path", path.to_string())); - span.set_attribute(KeyValue::new("args", format!("{:?}", args))); - self.inner().blocking_delete(path, args) - }) + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.inner().blocking_delete() } fn blocking_list(&self, path: &str, args: 
OpList) -> Result<(RpList, Self::BlockingLister)> { diff --git a/core/src/layers/retry.rs b/core/src/layers/retry.rs index af1883ac845f..ee8b925b9cb0 100644 --- a/core/src/layers/retry.rs +++ b/core/src/layers/retry.rs @@ -299,6 +299,8 @@ impl LayeredAccess for RetryAccessor { type BlockingWriter = RetryWrapper; type Lister = RetryWrapper; type BlockingLister = RetryWrapper; + type Deleter = RetryWrapper; + type BlockingDeleter = RetryWrapper; fn inner(&self) -> &Self::Inner { &self.inner @@ -346,12 +348,13 @@ impl LayeredAccess for RetryAccessor { .map_err(|e| e.set_persistent()) } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - { || self.inner.delete(path, args.clone()) } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + { || self.inner.delete() } .retry(self.builder) .when(|e| e.is_temporary()) .notify(|err, dur| self.notify.intercept(err, dur)) .await + .map(|(rp, r)| (rp, RetryWrapper::new(r, self.notify.clone(), self.builder))) .map_err(|e| e.set_persistent()) } @@ -379,32 +382,10 @@ impl LayeredAccess for RetryAccessor { .when(|e| e.is_temporary()) .notify(|err, dur| self.notify.intercept(err, dur)) .await - .map(|(l, p)| { - let lister = RetryWrapper::new(p, self.notify.clone(), self.builder); - (l, lister) - }) + .map(|(rp, r)| (rp, RetryWrapper::new(r, self.notify.clone(), self.builder))) .map_err(|e| e.set_persistent()) } - async fn batch(&self, args: OpBatch) -> Result { - { - || async { - let rp = self.inner.batch(args.clone()).await?; - let mut nrp = Vec::with_capacity(rp.results().len()); - for (path, result) in rp.into_results() { - let result = result?; - nrp.push((path, Ok(result))) - } - Ok(RpBatch::new(nrp)) - } - } - .retry(self.builder) - .when(|e: &Error| e.is_temporary()) - .notify(|err, dur| self.notify.intercept(err, dur)) - .await - .map_err(|e| e.set_persistent()) - } - fn blocking_create_dir(&self, path: &str, args: OpCreateDir) -> Result { { || self.inner.blocking_create_dir(path, args.clone()) } .retry(self.builder) @@ -447,12 +428,13 @@ impl LayeredAccess for RetryAccessor { .map_err(|e| e.set_persistent()) } - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - { || self.inner.blocking_delete(path, args.clone()) } + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + { || self.inner.blocking_delete() } .retry(self.builder) .when(|e| e.is_temporary()) .notify(|err, dur| self.notify.intercept(err, dur)) .call() + .map(|(rp, r)| (rp, RetryWrapper::new(r, self.notify.clone(), self.builder))) .map_err(|e| e.set_persistent()) } @@ -751,13 +733,73 @@ impl oio::BlockingList for RetryWrapp } } +impl oio::Delete for RetryWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + { || self.inner.as_mut().unwrap().delete(path, args.clone()) } + .retry(self.builder) + .when(|e| e.is_temporary()) + .notify(|err, dur| { + self.notify.intercept(err, dur); + }) + .call() + .map_err(|e| e.set_persistent()) + } + + async fn flush(&mut self) -> Result { + use backon::RetryableWithContext; + + let inner = self.take_inner()?; + + let (inner, res) = { + |mut p: P| async move { + let res = p.flush().await; + + (p, res) + } + } + .retry(self.builder) + .when(|e| e.is_temporary()) + .context(inner) + .notify(|err, dur| self.notify.intercept(err, dur)) + .await; + + self.inner = Some(inner); + res.map_err(|err| err.set_persistent()) + } +} + +impl oio::BlockingDelete for RetryWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + { || 
self.inner.as_mut().unwrap().delete(path, args.clone()) } + .retry(self.builder) + .when(|e| e.is_temporary()) + .notify(|err, dur| { + self.notify.intercept(err, dur); + }) + .call() + .map_err(|e| e.set_persistent()) + } + + fn flush(&mut self) -> Result { + { || self.inner.as_mut().unwrap().flush() } + .retry(self.builder) + .when(|e| e.is_temporary()) + .notify(|err, dur| { + self.notify.intercept(err, dur); + }) + .call() + .map_err(|e| e.set_persistent()) + } +} + #[cfg(test)] mod tests { + use std::mem; use std::sync::Arc; use std::sync::Mutex; use bytes::Bytes; - use futures::TryStreamExt; + use futures::{stream, TryStreamExt}; use tracing_subscriber::filter::LevelFilter; use super::*; @@ -788,9 +830,11 @@ mod tests { type Reader = MockReader; type Writer = MockWriter; type Lister = MockLister; + type Deleter = MockDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -798,10 +842,11 @@ mod tests { read: true, write: true, write_can_multi: true, + delete: true, + delete_max_size: Some(10), stat: true, list: true, list_with_recursive: true, - batch: true, ..Default::default() }); @@ -825,6 +870,16 @@ mod tests { )) } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + MockDeleter { + size: 0, + attempt: self.attempt.clone(), + }, + )) + } + async fn write(&self, _: &str, _: OpWrite) -> Result<(RpWrite, Self::Writer)> { Ok((RpWrite::new(), MockWriter {})) } @@ -833,63 +888,6 @@ mod tests { let lister = MockLister::default(); Ok((RpList::default(), lister)) } - - async fn batch(&self, op: OpBatch) -> Result { - let mut attempt = self.attempt.lock().unwrap(); - *attempt += 1; - - match *attempt { - 1 => Err( - Error::new(ErrorKind::Unexpected, "retryable_error from reader") - .set_temporary(), - ), - 2 => Ok(RpBatch::new( - op.into_operation() - .into_iter() - .map(|(s, _)| { - ( - s, - Err(Error::new( - ErrorKind::Unexpected, - "retryable_error from reader", - ) - .set_temporary()), - ) - }) - .collect(), - )), - 3 => Ok(RpBatch::new( - op.into_operation() - .into_iter() - .enumerate() - .map(|(i, (s, _))| { - ( - s, - match i { - 0 => Err(Error::new( - ErrorKind::Unexpected, - "retryable_error from reader", - ) - .set_temporary()), - _ => Ok(RpDelete {}.into()), - }, - ) - }) - .collect(), - )), - 4 => Err( - Error::new(ErrorKind::Unexpected, "retryable_error from reader") - .set_temporary(), - ), - 5 => Ok(RpBatch::new( - op.into_operation() - .into_iter() - .map(|(s, _)| (s, Ok(RpDelete {}.into()))) - .collect(), - )), - _ => unreachable!(), - } - } } #[derive(Debug, Clone, Default)] @@ -985,6 +983,48 @@ mod tests { } } + #[derive(Debug, Clone, Default)] + struct MockDeleter { + size: usize, + attempt: Arc>, + } + + impl oio::Delete for MockDeleter { + fn delete(&mut self, _: &str, _: OpDelete) -> Result<()> { + self.size += 1; + Ok(()) + } + + async fn flush(&mut self) -> Result { + let mut attempt = self.attempt.lock().unwrap(); + *attempt += 1; + + match *attempt { + 1 => Err( + Error::new(ErrorKind::Unexpected, "retryable_error from deleter") + .set_temporary(), + ), + 2 => { + self.size -= 1; + Ok(1) + } + 3 => Err( + Error::new(ErrorKind::Unexpected, "retryable_error from deleter") + .set_temporary(), + ), + 4 => Err( + Error::new(ErrorKind::Unexpected, "retryable_error from deleter") + .set_temporary(), + ), + 5 => { + let s = mem::take(&mut self.size); + Ok(s) + } + _ => unreachable!(), + } + } + } + 
#[tokio::test] async fn test_retry_read() { let _ = tracing_subscriber::fmt() @@ -1083,13 +1123,8 @@ mod tests { ) .finish(); - let paths = vec![ - "hello".into(), - "world".into(), - "test".into(), - "batch".into(), - ]; - op.remove(paths).await.expect("batch must succeed"); + let paths = vec!["hello", "world", "test", "batch"]; + op.delete_stream(stream::iter(paths)).await.unwrap(); assert_eq!(*builder.attempt.lock().unwrap(), 5); } } diff --git a/core/src/layers/throttle.rs b/core/src/layers/throttle.rs index 06a2f631320d..5b7154c92aa8 100644 --- a/core/src/layers/throttle.rs +++ b/core/src/layers/throttle.rs @@ -114,11 +114,13 @@ pub struct ThrottleAccessor { impl LayeredAccess for ThrottleAccessor { type Inner = A; type Reader = ThrottleWrapper; - type BlockingReader = ThrottleWrapper; type Writer = ThrottleWrapper; - type BlockingWriter = ThrottleWrapper; type Lister = A::Lister; + type Deleter = A::Deleter; + type BlockingReader = ThrottleWrapper; + type BlockingWriter = ThrottleWrapper; type BlockingLister = A::BlockingLister; + type BlockingDeleter = A::BlockingDeleter; fn inner(&self) -> &Self::Inner { &self.inner @@ -142,6 +144,10 @@ impl LayeredAccess for ThrottleAccessor { .map(|(rp, w)| (rp, ThrottleWrapper::new(w, limiter))) } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.inner.delete().await + } + async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { self.inner.list(path, args).await } @@ -162,6 +168,10 @@ impl LayeredAccess for ThrottleAccessor { .map(|(rp, w)| (rp, ThrottleWrapper::new(w, limiter))) } + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.inner.blocking_delete() + } + fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { self.inner.blocking_list(path, args) } diff --git a/core/src/layers/timeout.rs b/core/src/layers/timeout.rs index 41f6e0ab5c35..af8e298145f0 100644 --- a/core/src/layers/timeout.rs +++ b/core/src/layers/timeout.rs @@ -220,6 +220,8 @@ impl LayeredAccess for TimeoutAccessor { type BlockingWriter = A::BlockingWriter; type Lister = TimeoutWrapper; type BlockingLister = A::BlockingLister; + type Deleter = TimeoutWrapper; + type BlockingDeleter = A::BlockingDeleter; fn inner(&self) -> &Self::Inner { &self.inner @@ -271,9 +273,10 @@ impl LayeredAccess for TimeoutAccessor { .await } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - self.timeout(Operation::Delete, self.inner.delete(path, args)) + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.timeout(Operation::Delete, self.inner.delete()) .await + .map(|(rp, r)| (rp, TimeoutWrapper::new(r, self.io_timeout))) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -282,10 +285,6 @@ impl LayeredAccess for TimeoutAccessor { .map(|(rp, r)| (rp, TimeoutWrapper::new(r, self.io_timeout))) } - async fn batch(&self, args: OpBatch) -> Result { - self.timeout(Operation::Batch, self.inner.batch(args)).await - } - async fn presign(&self, path: &str, args: OpPresign) -> Result { self.timeout(Operation::Presign, self.inner.presign(path, args)) .await @@ -302,6 +301,10 @@ impl LayeredAccess for TimeoutAccessor { fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { self.inner.blocking_list(path, args) } + + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.inner.blocking_delete() + } } pub struct TimeoutExecutor { @@ -382,6 +385,17 @@ impl oio::List for 
TimeoutWrapper { } } +impl oio::Delete for TimeoutWrapper { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + self.inner.delete(path, args) + } + + async fn flush(&mut self) -> Result { + let fut = self.inner.flush(); + Self::io_timeout(self.timeout, Operation::DeleterFlush.into_static(), fut).await + } +} + #[cfg(test)] mod tests { use std::future::pending; @@ -408,6 +422,8 @@ mod tests { type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type Deleter = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -426,10 +442,10 @@ mod tests { } /// This function will never return. - async fn delete(&self, _: &str, _: OpDelete) -> Result { + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { sleep(Duration::from_secs(u64::MAX)).await; - Ok(RpDelete::default()) + Ok((RpDelete::default(), ())) } async fn list(&self, _: &str, _: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/layers/tracing.rs b/core/src/layers/tracing.rs index c59f8d5fc204..2a33c0c2a29e 100644 --- a/core/src/layers/tracing.rs +++ b/core/src/layers/tracing.rs @@ -154,11 +154,13 @@ pub struct TracingAccessor { impl LayeredAccess for TracingAccessor { type Inner = A; type Reader = TracingWrapper; - type BlockingReader = TracingWrapper; type Writer = TracingWrapper; - type BlockingWriter = TracingWrapper; type Lister = TracingWrapper; + type Deleter = TracingWrapper; + type BlockingReader = TracingWrapper; + type BlockingWriter = TracingWrapper; type BlockingLister = TracingWrapper; + type BlockingDeleter = TracingWrapper; fn inner(&self) -> &Self::Inner { &self.inner @@ -206,8 +208,11 @@ impl LayeredAccess for TracingAccessor { } #[tracing::instrument(level = "debug", skip(self))] - async fn delete(&self, path: &str, args: OpDelete) -> Result { - self.inner.delete(path, args).await + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.inner + .delete() + .await + .map(|(rp, r)| (rp, TracingWrapper::new(Span::current(), r))) } #[tracing::instrument(level = "debug", skip(self))] @@ -223,11 +228,6 @@ impl LayeredAccess for TracingAccessor { self.inner.presign(path, args).await } - #[tracing::instrument(level = "debug", skip(self))] - async fn batch(&self, args: OpBatch) -> Result { - self.inner.batch(args).await - } - #[tracing::instrument(level = "debug", skip(self))] fn blocking_create_dir(&self, path: &str, args: OpCreateDir) -> Result { self.inner.blocking_create_dir(path, args) @@ -263,8 +263,10 @@ impl LayeredAccess for TracingAccessor { } #[tracing::instrument(level = "debug", skip(self))] - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - self.inner.blocking_delete(path, args) + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.inner + .blocking_delete() + .map(|(rp, r)| (rp, TracingWrapper::new(Span::current(), r))) } #[tracing::instrument(level = "debug", skip(self))] @@ -363,3 +365,27 @@ impl oio::BlockingList for TracingWrapper { self.inner.next() } } + +impl oio::Delete for TracingWrapper { + #[tracing::instrument(parent = &self.span, level = "debug", skip_all)] + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + self.inner.delete(path, args) + } + + #[tracing::instrument(parent = &self.span, level = "debug", skip_all)] + async fn flush(&mut self) -> Result { + self.inner.flush().await + } +} + +impl oio::BlockingDelete for TracingWrapper { + #[tracing::instrument(parent = &self.span, level = "debug", skip_all)] + fn 
delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + self.inner.delete(path, args) + } + + #[tracing::instrument(parent = &self.span, level = "debug", skip_all)] + fn flush(&mut self) -> Result { + self.inner.flush() + } +} diff --git a/core/src/layers/type_eraser.rs b/core/src/layers/type_eraser.rs index 09e0a7fb19df..cfe161aaedc1 100644 --- a/core/src/layers/type_eraser.rs +++ b/core/src/layers/type_eraser.rs @@ -15,11 +15,10 @@ // specific language governing permissions and limitations // under the License. -use std::fmt::Debug; -use std::fmt::Formatter; - use crate::raw::*; use crate::*; +use std::fmt::Debug; +use std::fmt::Formatter; /// TypeEraseLayer will erase the types on internal accessor. /// @@ -53,11 +52,13 @@ impl Debug for TypeEraseAccessor { impl LayeredAccess for TypeEraseAccessor { type Inner = A; type Reader = oio::Reader; - type BlockingReader = oio::BlockingReader; type Writer = oio::Writer; - type BlockingWriter = oio::BlockingWriter; type Lister = oio::Lister; + type Deleter = oio::Deleter; + type BlockingReader = oio::BlockingReader; + type BlockingWriter = oio::BlockingWriter; type BlockingLister = oio::BlockingLister; + type BlockingDeleter = oio::BlockingDeleter; fn inner(&self) -> &Self::Inner { &self.inner @@ -77,6 +78,13 @@ impl LayeredAccess for TypeEraseAccessor { .map(|(rp, w)| (rp, Box::new(w) as oio::Writer)) } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.inner + .delete() + .await + .map(|(rp, p)| (rp, Box::new(p) as oio::Deleter)) + } + async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { self.inner .list(path, args) @@ -96,6 +104,12 @@ impl LayeredAccess for TypeEraseAccessor { .map(|(rp, w)| (rp, Box::new(w) as oio::BlockingWriter)) } + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.inner + .blocking_delete() + .map(|(rp, p)| (rp, Box::new(p) as oio::BlockingDeleter)) + } + fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { self.inner .blocking_list(path, args) diff --git a/core/src/raw/accessor.rs b/core/src/raw/accessor.rs index e9b6e1f6e22a..bc9c40a4a98c 100644 --- a/core/src/raw/accessor.rs +++ b/core/src/raw/accessor.rs @@ -61,6 +61,8 @@ pub trait Access: Send + Sync + Debug + Unpin + 'static { type Writer: oio::Write; /// Lister is the associated lister returned in `list` operation. type Lister: oio::List; + /// Deleter is the associated deleter returned in `delete` operation. + type Deleter: oio::Delete; /// BlockingReader is the associated reader returned `blocking_read` operation. type BlockingReader: oio::BlockingRead; @@ -68,6 +70,8 @@ pub trait Access: Send + Sync + Debug + Unpin + 'static { type BlockingWriter: oio::BlockingWrite; /// BlockingLister is the associated lister returned `blocking_list` operation. type BlockingLister: oio::BlockingList; + /// BlockingDeleter is the associated deleter returned `blocking_delete` operation. + type BlockingDeleter: oio::BlockingDelete; /// Invoke the `info` operation to get metadata of accessor. /// @@ -172,13 +176,7 @@ pub trait Access: Send + Sync + Debug + Unpin + 'static { /// /// - `delete` is an idempotent operation, it's safe to call `Delete` on the same path multiple times. /// - `delete` SHOULD return `Ok(())` if the path is deleted successfully or not exist. 
- fn delete( - &self, - path: &str, - args: OpDelete, - ) -> impl Future> + MaybeSend { - let (_, _) = (path, args); - + fn delete(&self) -> impl Future> + MaybeSend { ready(Err(Error::new( ErrorKind::Unsupported, "operation is not supported", @@ -266,18 +264,6 @@ pub trait Access: Send + Sync + Debug + Unpin + 'static { ))) } - /// Invoke the `batch` operations. - /// - /// Require [`Capability::batch`] - fn batch(&self, args: OpBatch) -> impl Future> + MaybeSend { - let _ = args; - - ready(Err(Error::new( - ErrorKind::Unsupported, - "operation is not supported", - ))) - } - /// Invoke the `blocking_create` operation on the specified path. /// /// This operation is the blocking version of [`Accessor::create_dir`] @@ -339,9 +325,7 @@ pub trait Access: Send + Sync + Debug + Unpin + 'static { /// This operation is the blocking version of [`Accessor::delete`] /// /// Require [`Capability::write`] and [`Capability::blocking`] - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - let (_, _) = (path, args); - + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { Err(Error::new( ErrorKind::Unsupported, "operation is not supported", @@ -421,8 +405,7 @@ pub trait AccessDyn: Send + Sync + Debug + Unpin { args: OpWrite, ) -> BoxedFuture<'a, Result<(RpWrite, oio::Writer)>>; /// Dyn version of [`Accessor::delete`] - fn delete_dyn<'a>(&'a self, path: &'a str, args: OpDelete) - -> BoxedFuture<'a, Result>; + fn delete_dyn(&self) -> BoxedFuture>; /// Dyn version of [`Accessor::list`] fn list_dyn<'a>( &'a self, @@ -449,8 +432,6 @@ pub trait AccessDyn: Send + Sync + Debug + Unpin { path: &'a str, args: OpPresign, ) -> BoxedFuture<'a, Result>; - /// Dyn version of [`Accessor::batch`] - fn batch_dyn(&self, args: OpBatch) -> BoxedFuture<'_, Result>; /// Dyn version of [`Accessor::blocking_create_dir`] fn blocking_create_dir_dyn(&self, path: &str, args: OpCreateDir) -> Result; /// Dyn version of [`Accessor::blocking_stat`] @@ -464,7 +445,7 @@ pub trait AccessDyn: Send + Sync + Debug + Unpin { args: OpWrite, ) -> Result<(RpWrite, oio::BlockingWriter)>; /// Dyn version of [`Accessor::blocking_delete`] - fn blocking_delete_dyn(&self, path: &str, args: OpDelete) -> Result; + fn blocking_delete_dyn(&self) -> Result<(RpDelete, oio::BlockingDeleter)>; /// Dyn version of [`Accessor::blocking_list`] fn blocking_list_dyn(&self, path: &str, args: OpList) -> Result<(RpList, oio::BlockingLister)>; /// Dyn version of [`Accessor::blocking_copy`] @@ -482,6 +463,8 @@ where BlockingWriter = oio::BlockingWriter, Lister = oio::Lister, BlockingLister = oio::BlockingLister, + Deleter = oio::Deleter, + BlockingDeleter = oio::BlockingDeleter, >, { fn info_dyn(&self) -> Arc { @@ -516,12 +499,8 @@ where Box::pin(self.write(path, args)) } - fn delete_dyn<'a>( - &'a self, - path: &'a str, - args: OpDelete, - ) -> BoxedFuture<'a, Result> { - Box::pin(self.delete(path, args)) + fn delete_dyn(&self) -> BoxedFuture> { + Box::pin(self.delete()) } fn list_dyn<'a>( @@ -558,10 +537,6 @@ where Box::pin(self.presign(path, args)) } - fn batch_dyn(&self, args: OpBatch) -> BoxedFuture<'_, Result> { - Box::pin(self.batch(args)) - } - fn blocking_create_dir_dyn(&self, path: &str, args: OpCreateDir) -> Result { self.blocking_create_dir(path, args) } @@ -582,8 +557,8 @@ where self.blocking_write(path, args) } - fn blocking_delete_dyn(&self, path: &str, args: OpDelete) -> Result { - self.blocking_delete(path, args) + fn blocking_delete_dyn(&self) -> Result<(RpDelete, oio::BlockingDeleter)> { + self.blocking_delete() } fn 
blocking_list_dyn(&self, path: &str, args: OpList) -> Result<(RpList, oio::BlockingLister)> { @@ -603,9 +578,11 @@ impl Access for dyn AccessDyn { type Reader = oio::Reader; type BlockingReader = oio::BlockingReader; type Writer = oio::Writer; + type Deleter = oio::Deleter; type BlockingWriter = oio::BlockingWriter; type Lister = oio::Lister; type BlockingLister = oio::BlockingLister; + type BlockingDeleter = oio::BlockingDeleter; fn info(&self) -> Arc { self.info_dyn() @@ -627,8 +604,8 @@ impl Access for dyn AccessDyn { self.write_dyn(path, args).await } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - self.delete_dyn(path, args).await + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + self.delete_dyn().await } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -647,10 +624,6 @@ impl Access for dyn AccessDyn { self.presign_dyn(path, args).await } - fn batch(&self, args: OpBatch) -> impl Future> + MaybeSend { - self.batch_dyn(args) - } - fn blocking_create_dir(&self, path: &str, args: OpCreateDir) -> Result { self.blocking_create_dir_dyn(path, args) } @@ -667,8 +640,8 @@ impl Access for dyn AccessDyn { self.blocking_write_dyn(path, args) } - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - self.blocking_delete_dyn(path, args) + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.blocking_delete_dyn() } fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { @@ -689,9 +662,11 @@ impl Access for () { type Reader = (); type Writer = (); type Lister = (); + type Deleter = (); type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { AccessorInfo { @@ -714,9 +689,11 @@ impl Access for Arc { type Reader = T::Reader; type Writer = T::Writer; type Lister = T::Lister; + type Deleter = T::Deleter; type BlockingReader = T::BlockingReader; type BlockingWriter = T::BlockingWriter; type BlockingLister = T::BlockingLister; + type BlockingDeleter = T::BlockingDeleter; fn info(&self) -> Arc { self.as_ref().info() @@ -750,12 +727,8 @@ impl Access for Arc { async move { self.as_ref().write(path, args).await } } - fn delete( - &self, - path: &str, - args: OpDelete, - ) -> impl Future> + MaybeSend { - async move { self.as_ref().delete(path, args).await } + fn delete(&self) -> impl Future> + MaybeSend { + async move { self.as_ref().delete().await } } fn list( @@ -792,10 +765,6 @@ impl Access for Arc { async move { self.as_ref().presign(path, args).await } } - fn batch(&self, args: OpBatch) -> impl Future> + MaybeSend { - async move { self.as_ref().batch(args).await } - } - fn blocking_create_dir(&self, path: &str, args: OpCreateDir) -> Result { self.as_ref().blocking_create_dir(path, args) } @@ -812,8 +781,8 @@ impl Access for Arc { self.as_ref().blocking_write(path, args) } - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - self.as_ref().blocking_delete(path, args) + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + self.as_ref().blocking_delete() } fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { diff --git a/core/src/raw/adapters/kv/backend.rs b/core/src/raw/adapters/kv/backend.rs index a2f378a46bca..56146aef0a70 100644 --- a/core/src/raw/adapters/kv/backend.rs +++ b/core/src/raw/adapters/kv/backend.rs @@ -65,11 +65,13 @@ where impl Access for Backend { type Reader = Buffer; - type BlockingReader = 
Buffer; type Writer = KvWriter; - type BlockingWriter = KvWriter; type Lister = HierarchyLister>; + type Deleter = oio::OneShotDeleter>; + type BlockingReader = Buffer; + type BlockingWriter = KvWriter; type BlockingLister = HierarchyLister; + type BlockingDeleter = oio::OneShotDeleter>; fn info(&self) -> Arc { let kv_info = self.kv.info(); @@ -97,18 +99,25 @@ impl Access for Backend { am.into() } - async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { + async fn stat(&self, path: &str, _: OpStat) -> Result { let p = build_abs_path(&self.root, path); - let bs = match self.kv.get(&p).await? { - Some(bs) => bs, - None => return Err(Error::new(ErrorKind::NotFound, "kv doesn't have this path")), - }; - Ok((RpRead::new(), bs.slice(args.range().to_range_as_usize()))) + + if p == build_abs_path(&self.root, "") { + Ok(RpStat::new(Metadata::new(EntryMode::DIR))) + } else { + let bs = self.kv.get(&p).await?; + match bs { + Some(bs) => Ok(RpStat::new( + Metadata::new(EntryMode::FILE).with_content_length(bs.len() as u64), + )), + None => Err(Error::new(ErrorKind::NotFound, "kv doesn't have this path")), + } + } } - fn blocking_read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::BlockingReader)> { + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let p = build_abs_path(&self.root, path); - let bs = match self.kv.blocking_get(&p)? { + let bs = match self.kv.get(&p).await? { Some(bs) => bs, None => return Err(Error::new(ErrorKind::NotFound, "kv doesn't have this path")), }; @@ -121,26 +130,20 @@ impl Access for Backend { Ok((RpWrite::new(), KvWriter::new(self.kv.clone(), p))) } - fn blocking_write(&self, path: &str, _: OpWrite) -> Result<(RpWrite, Self::BlockingWriter)> { - let p = build_abs_path(&self.root, path); - - Ok((RpWrite::new(), KvWriter::new(self.kv.clone(), p))) + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(KvDeleter::new(self.kv.clone(), self.root.clone())), + )) } - async fn stat(&self, path: &str, _: OpStat) -> Result { + async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { let p = build_abs_path(&self.root, path); + let res = self.kv.scan(&p).await?; + let lister = KvLister::new(&self.root, res); + let lister = HierarchyLister::new(lister, path, args.recursive()); - if p == build_abs_path(&self.root, "") { - Ok(RpStat::new(Metadata::new(EntryMode::DIR))) - } else { - let bs = self.kv.get(&p).await?; - match bs { - Some(bs) => Ok(RpStat::new( - Metadata::new(EntryMode::FILE).with_content_length(bs.len() as u64), - )), - None => Err(Error::new(ErrorKind::NotFound, "kv doesn't have this path")), - } - } + Ok((RpList::default(), lister)) } fn blocking_stat(&self, path: &str, _: OpStat) -> Result { @@ -159,27 +162,26 @@ impl Access for Backend { } } - async fn delete(&self, path: &str, _: OpDelete) -> Result { + fn blocking_read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::BlockingReader)> { let p = build_abs_path(&self.root, path); - - self.kv.delete(&p).await?; - Ok(RpDelete::default()) + let bs = match self.kv.blocking_get(&p)? 
{ + Some(bs) => bs, + None => return Err(Error::new(ErrorKind::NotFound, "kv doesn't have this path")), + }; + Ok((RpRead::new(), bs.slice(args.range().to_range_as_usize()))) } - fn blocking_delete(&self, path: &str, _: OpDelete) -> Result { + fn blocking_write(&self, path: &str, _: OpWrite) -> Result<(RpWrite, Self::BlockingWriter)> { let p = build_abs_path(&self.root, path); - self.kv.blocking_delete(&p)?; - Ok(RpDelete::default()) + Ok((RpWrite::new(), KvWriter::new(self.kv.clone(), p))) } - async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { - let p = build_abs_path(&self.root, path); - let res = self.kv.scan(&p).await?; - let lister = KvLister::new(&self.root, res); - let lister = HierarchyLister::new(lister, path, args.recursive()); - - Ok((RpList::default(), lister)) + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(KvDeleter::new(self.kv.clone(), self.root.clone())), + )) } fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { @@ -318,3 +320,32 @@ impl oio::BlockingWrite for KvWriter { Ok(()) } } + +pub struct KvDeleter { + kv: Arc, + root: String, +} + +impl KvDeleter { + fn new(kv: Arc, root: String) -> Self { + KvDeleter { kv, root } + } +} + +impl oio::OneShotDelete for KvDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let p = build_abs_path(&self.root, &path); + + self.kv.delete(&p).await?; + Ok(()) + } +} + +impl oio::BlockingOneShotDelete for KvDeleter { + fn blocking_delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let p = build_abs_path(&self.root, &path); + + self.kv.blocking_delete(&p)?; + Ok(()) + } +} diff --git a/core/src/raw/adapters/typed_kv/backend.rs b/core/src/raw/adapters/typed_kv/backend.rs index fca684772f5e..1005811c9443 100644 --- a/core/src/raw/adapters/typed_kv/backend.rs +++ b/core/src/raw/adapters/typed_kv/backend.rs @@ -53,11 +53,13 @@ where impl Access for Backend { type Reader = Buffer; - type BlockingReader = Buffer; type Writer = KvWriter; - type BlockingWriter = KvWriter; type Lister = HierarchyLister; + type Deleter = oio::OneShotDeleter>; + type BlockingReader = Buffer; + type BlockingWriter = KvWriter; type BlockingLister = HierarchyLister; + type BlockingDeleter = oio::OneShotDeleter>; fn info(&self) -> Arc { let kv_info = self.kv.info(); @@ -98,22 +100,24 @@ impl Access for Backend { am.into() } - async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { + async fn stat(&self, path: &str, _: OpStat) -> Result { let p = build_abs_path(&self.root, path); - let bs = match self.kv.get(&p).await? { - // TODO: we can reuse the metadata in value to build content range. - Some(bs) => bs.value, - None => return Err(Error::new(ErrorKind::NotFound, "kv doesn't have this path")), - }; - - Ok((RpRead::new(), bs.slice(args.range().to_range_as_usize()))) + if p == build_abs_path(&self.root, "") { + Ok(RpStat::new(Metadata::new(EntryMode::DIR))) + } else { + let bs = self.kv.get(&p).await?; + match bs { + Some(bs) => Ok(RpStat::new(bs.metadata)), + None => Err(Error::new(ErrorKind::NotFound, "kv doesn't have this path")), + } + } } - fn blocking_read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::BlockingReader)> { + async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let p = build_abs_path(&self.root, path); - let bs = match self.kv.blocking_get(&p)? { + let bs = match self.kv.get(&p).await? 
{ // TODO: we can reuse the metadata in value to build content range. Some(bs) => bs.value, None => return Err(Error::new(ErrorKind::NotFound, "kv doesn't have this path")), @@ -128,24 +132,20 @@ impl Access for Backend { Ok((RpWrite::new(), KvWriter::new(self.kv.clone(), p, args))) } - fn blocking_write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::BlockingWriter)> { - let p = build_abs_path(&self.root, path); - - Ok((RpWrite::new(), KvWriter::new(self.kv.clone(), p, args))) + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(KvDeleter::new(self.kv.clone(), self.root.clone())), + )) } - async fn stat(&self, path: &str, _: OpStat) -> Result { + async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { let p = build_abs_path(&self.root, path); + let res = self.kv.scan(&p).await?; + let lister = KvLister::new(&self.root, res); + let lister = HierarchyLister::new(lister, path, args.recursive()); - if p == build_abs_path(&self.root, "") { - Ok(RpStat::new(Metadata::new(EntryMode::DIR))) - } else { - let bs = self.kv.get(&p).await?; - match bs { - Some(bs) => Ok(RpStat::new(bs.metadata)), - None => Err(Error::new(ErrorKind::NotFound, "kv doesn't have this path")), - } - } + Ok((RpList::default(), lister)) } fn blocking_stat(&self, path: &str, _: OpStat) -> Result { @@ -162,27 +162,29 @@ impl Access for Backend { } } - async fn delete(&self, path: &str, _: OpDelete) -> Result { + fn blocking_read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::BlockingReader)> { let p = build_abs_path(&self.root, path); - self.kv.delete(&p).await?; - Ok(RpDelete::default()) + let bs = match self.kv.blocking_get(&p)? { + // TODO: we can reuse the metadata in value to build content range. 
+ Some(bs) => bs.value, + None => return Err(Error::new(ErrorKind::NotFound, "kv doesn't have this path")), + }; + + Ok((RpRead::new(), bs.slice(args.range().to_range_as_usize()))) } - fn blocking_delete(&self, path: &str, _: OpDelete) -> Result { + fn blocking_write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::BlockingWriter)> { let p = build_abs_path(&self.root, path); - self.kv.blocking_delete(&p)?; - Ok(RpDelete::default()) + Ok((RpWrite::new(), KvWriter::new(self.kv.clone(), p, args))) } - async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { - let p = build_abs_path(&self.root, path); - let res = self.kv.scan(&p).await?; - let lister = KvLister::new(&self.root, res); - let lister = HierarchyLister::new(lister, path, args.recursive()); - - Ok((RpList::default(), lister)) + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(KvDeleter::new(self.kv.clone(), self.root.clone())), + )) } fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { @@ -331,3 +333,32 @@ impl oio::BlockingWrite for KvWriter { Ok(()) } } + +pub struct KvDeleter { + kv: Arc, + root: String, +} + +impl KvDeleter { + fn new(kv: Arc, root: String) -> Self { + KvDeleter { kv, root } + } +} + +impl oio::OneShotDelete for KvDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let p = build_abs_path(&self.root, &path); + + self.kv.delete(&p).await?; + Ok(()) + } +} + +impl oio::BlockingOneShotDelete for KvDeleter { + fn blocking_delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let p = build_abs_path(&self.root, &path); + + self.kv.blocking_delete(&p)?; + Ok(()) + } +} diff --git a/core/src/raw/http_util/header.rs b/core/src/raw/http_util/header.rs index 49dff81a0ff7..6c8ba65dbab9 100644 --- a/core/src/raw/http_util/header.rs +++ b/core/src/raw/http_util/header.rs @@ -110,6 +110,11 @@ pub fn parse_content_disposition(headers: &HeaderMap) -> Result> { parse_header_to_str(headers, CONTENT_DISPOSITION) } +/// Parse multipart boundary from header map. +pub fn parse_multipart_boundary(headers: &HeaderMap) -> Result> { + parse_header_to_str(headers, CONTENT_TYPE).map(|v| v.and_then(|v| v.split("boundary=").nth(1))) +} + /// Parse header value to string according to name. 
#[inline] pub fn parse_header_to_str(headers: &HeaderMap, name: K) -> Result> @@ -324,4 +329,24 @@ mod tests { assert_eq!(actual, expected) } } + + #[test] + fn test_parse_multipart_boundary() { + let cases = vec![ + ( + "multipart/mixed; boundary=gc0p4Jq0M2Yt08jU534c0p", + Some("gc0p4Jq0M2Yt08jU534c0p"), + ), + ("multipart/mixed", None), + ]; + + for (input, expected) in cases { + let mut headers = HeaderMap::new(); + headers.insert(CONTENT_TYPE, HeaderValue::from_str(input).unwrap()); + + let actual = parse_multipart_boundary(&headers).expect("parse must success"); + + assert_eq!(actual, expected) + } + } } diff --git a/core/src/raw/http_util/mod.rs b/core/src/raw/http_util/mod.rs index 8999b6b807f0..c90b1e485845 100644 --- a/core/src/raw/http_util/mod.rs +++ b/core/src/raw/http_util/mod.rs @@ -49,6 +49,7 @@ pub use header::parse_header_to_str; pub use header::parse_into_metadata; pub use header::parse_last_modified; pub use header::parse_location; +pub use header::parse_multipart_boundary; pub use header::parse_prefixed_headers; mod uri; diff --git a/core/src/raw/layer.rs b/core/src/raw/layer.rs index 6e6d2b802b45..f4922acfc4d1 100644 --- a/core/src/raw/layer.rs +++ b/core/src/raw/layer.rs @@ -62,6 +62,8 @@ use crate::*; /// type BlockingWriter = A::BlockingWriter; /// type Lister = A::Lister; /// type BlockingLister = A::BlockingLister; +/// type Deleter = A::Deleter; +/// type BlockingDeleter = A::BlockingDeleter; /// /// fn inner(&self) -> &Self::Inner { /// &self.inner @@ -102,6 +104,14 @@ use crate::*; /// ) -> Result<(RpList, Self::BlockingLister)> { /// self.inner.blocking_list(path, args) /// } +/// +/// async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { +/// self.inner.delete().await +/// } +/// +/// fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { +/// self.inner.blocking_delete() +/// } /// } /// /// /// The public struct that exposed to users. 
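// Editor's note: a hedged sketch (not part of this patch, shown out of context) of the
// other common shape `delete` takes across the layers in this PR: rather than handing back
// the inner deleter untouched, the layer wraps it so that per-call behaviour (tracing,
// timeouts, retries) also covers `Deleter::delete` and `Deleter::flush`. Only the two
// methods that differ from the example above are shown; `ExampleWrapper` is a placeholder name.

async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> {
    self.inner
        .delete()
        .await
        .map(|(rp, deleter)| (rp, ExampleWrapper::new(deleter)))
}

fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> {
    self.inner
        .blocking_delete()
        .map(|(rp, deleter)| (rp, ExampleWrapper::new(deleter)))
}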
@@ -130,12 +140,15 @@ pub trait Layer { #[allow(missing_docs)] pub trait LayeredAccess: Send + Sync + Debug + Unpin + 'static { type Inner: Access; + type Reader: oio::Read; - type BlockingReader: oio::BlockingRead; type Writer: oio::Write; - type BlockingWriter: oio::BlockingWrite; type Lister: oio::List; + type Deleter: oio::Delete; + type BlockingReader: oio::BlockingRead; + type BlockingWriter: oio::BlockingWrite; type BlockingLister: oio::BlockingList; + type BlockingDeleter: oio::BlockingDelete; fn inner(&self) -> &Self::Inner; @@ -185,13 +198,7 @@ pub trait LayeredAccess: Send + Sync + Debug + Unpin + 'static { self.inner().stat(path, args) } - fn delete( - &self, - path: &str, - args: OpDelete, - ) -> impl Future> + MaybeSend { - self.inner().delete(path, args) - } + fn delete(&self) -> impl Future> + MaybeSend; fn list( &self, @@ -199,10 +206,6 @@ pub trait LayeredAccess: Send + Sync + Debug + Unpin + 'static { args: OpList, ) -> impl Future> + MaybeSend; - fn batch(&self, args: OpBatch) -> impl Future> + MaybeSend { - self.inner().batch(args) - } - fn presign( &self, path: &str, @@ -231,9 +234,7 @@ pub trait LayeredAccess: Send + Sync + Debug + Unpin + 'static { self.inner().blocking_stat(path, args) } - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - self.inner().blocking_delete(path, args) - } + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)>; fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)>; } @@ -242,9 +243,12 @@ impl Access for L { type Reader = L::Reader; type Writer = L::Writer; type Lister = L::Lister; + type Deleter = L::Deleter; + type BlockingReader = L::BlockingReader; type BlockingWriter = L::BlockingWriter; type BlockingLister = L::BlockingLister; + type BlockingDeleter = L::BlockingDeleter; fn info(&self) -> Arc { LayeredAccess::info(self) @@ -274,18 +278,14 @@ impl Access for L { LayeredAccess::stat(self, path, args).await } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - LayeredAccess::delete(self, path, args).await + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + LayeredAccess::delete(self).await } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { LayeredAccess::list(self, path, args).await } - async fn batch(&self, args: OpBatch) -> Result { - LayeredAccess::batch(self, args).await - } - async fn presign(&self, path: &str, args: OpPresign) -> Result { LayeredAccess::presign(self, path, args).await } @@ -314,8 +314,8 @@ impl Access for L { LayeredAccess::blocking_stat(self, path, args) } - fn blocking_delete(&self, path: &str, args: OpDelete) -> Result { - LayeredAccess::blocking_delete(self, path, args) + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + LayeredAccess::blocking_delete(self) } fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingLister)> { @@ -336,7 +336,7 @@ mod tests { struct Test { #[allow(dead_code)] inner: Option, - deleted: Arc>, + stated: Arc>, } impl Layer for &Test { @@ -345,7 +345,7 @@ mod tests { fn layer(&self, inner: A) -> Self::LayeredAccess { Test { inner: Some(inner), - deleted: self.deleted.clone(), + stated: self.stated.clone(), } } } @@ -357,6 +357,8 @@ mod tests { type BlockingWriter = (); type Lister = (); type BlockingLister = (); + type Deleter = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -364,14 +366,14 @@ mod tests { am.into() } - async fn delete(&self, _: 
&str, _: OpDelete) -> Result<RpDelete> {
-            let mut x = self.deleted.lock().await;
+        async fn stat(&self, _: &str, _: OpStat) -> Result<RpStat> {
+            let mut x = self.stated.lock().await;
             *x = true;
 
             assert!(self.inner.is_some());
 
             // We will not call anything here to test the layer.
-            Ok(RpDelete::default())
+            Ok(RpStat::new(Metadata::new(EntryMode::DIR)))
         }
     }
 
@@ -379,7 +381,7 @@
     async fn test_layer() {
         let test = Test {
             inner: None,
-            deleted: Arc::new(Mutex::new(false)),
+            stated: Arc::new(Mutex::new(false)),
         };
 
         let op = Operator::new(Memory::default())
@@ -387,8 +389,8 @@
             .layer(&test)
             .finish();
 
-        op.delete("xxxxx").await.unwrap();
+        op.stat("xxxxx").await.unwrap();
 
-        assert!(*test.deleted.clone().lock().await);
+        assert!(*test.stated.clone().lock().await);
     }
 }
diff --git a/core/src/raw/oio/delete/api.rs b/core/src/raw/oio/delete/api.rs
new file mode 100644
index 000000000000..e0db7733b4b5
--- /dev/null
+++ b/core/src/raw/oio/delete/api.rs
@@ -0,0 +1,151 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::raw::{BoxedFuture, MaybeSend, OpDelete};
+use crate::*;
+use std::future::Future;
+use std::ops::DerefMut;
+
+/// Deleter is a type erased [`Delete`]
+pub type Deleter = Box<dyn DeleteDyn>;
+
+/// The Delete trait defines interfaces for performing deletion operations.
+pub trait Delete: Unpin + Send + Sync {
+    /// Requests deletion of a resource at the specified path with optional arguments
+    ///
+    /// # Parameters
+    /// - `path`: The path of the resource to delete
+    /// - `args`: Additional arguments for the delete operation
+    ///
+    /// # Returns
+    /// - `Ok(())`: The deletion request has been successfully queued (does not guarantee actual deletion)
+    /// - `Err(err)`: An error occurred and the deletion request was not queued
+    ///
+    /// # Notes
+    /// This method just queues the delete request. The actual deletion will be
+    /// performed when `flush` is called.
+    fn delete(&mut self, path: &str, args: OpDelete) -> Result<()>;
+
+    /// Flushes the deletion queue to ensure queued deletions are executed
+    ///
+    /// # Returns
+    /// - `Ok(0)`: All queued deletions have been processed or the queue is empty.
+    /// - `Ok(count)`: The number of resources successfully deleted. Implementations should
+    ///   return an error if the queue is non-empty but no resources were deleted
+    /// - `Err(err)`: An error occurred while performing the deletions
+    ///
+    /// # Notes
+    /// - This method is asynchronous and will wait for queued deletions to complete
+    fn flush(&mut self) -> impl Future<Output = Result<usize>> + MaybeSend;
+}
+
+impl Delete for () {
+    fn delete(&mut self, _: &str, _: OpDelete) -> Result<()> {
+        Err(Error::new(
+            ErrorKind::Unsupported,
+            "output deleter doesn't support delete",
+        ))
+    }
+
+    async fn flush(&mut self) -> Result<usize> {
+        Err(Error::new(
+            ErrorKind::Unsupported,
+            "output deleter doesn't support flush",
+        ))
+    }
+}
+
+/// The dyn version of [`Delete`]
+pub trait DeleteDyn: Unpin + Send + Sync {
+    /// The dyn version of [`Delete::delete`]
+    fn delete_dyn(&mut self, path: &str, args: OpDelete) -> Result<()>;
+
+    /// The dyn version of [`Delete::flush`]
+    fn flush_dyn(&mut self) -> BoxedFuture<Result<usize>>;
+}
+
+impl<T: Delete + ?Sized> DeleteDyn for T {
+    fn delete_dyn(&mut self, path: &str, args: OpDelete) -> Result<()> {
+        Delete::delete(self, path, args)
+    }
+
+    fn flush_dyn(&mut self) -> BoxedFuture<Result<usize>> {
+        Box::pin(self.flush())
+    }
+}
+
+impl<T: DeleteDyn + ?Sized> Delete for Box<T> {
+    fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> {
+        self.deref_mut().delete_dyn(path, args)
+    }
+
+    async fn flush(&mut self) -> Result<usize> {
+        self.deref_mut().flush_dyn().await
+    }
+}
+
+/// BlockingDeleter is a type erased [`BlockingDelete`]
+pub type BlockingDeleter = Box<dyn BlockingDelete>;
+
+/// BlockingDelete is the trait to perform delete operations.
+pub trait BlockingDelete: Send + Sync + 'static {
+    /// Delete given path with optional arguments.
+    ///
+    /// # Behavior
+    ///
+    /// - `Ok(())` means the path has been queued for deletion.
+    /// - `Err(err)` means an error happened and no deletion has been queued.
+    fn delete(&mut self, path: &str, args: OpDelete) -> Result<()>;
+
+    /// Flushes the deletion queue to ensure queued deletions are executed
+    ///
+    /// # Returns
+    /// - `Ok(0)`: All queued deletions have been processed or the queue is empty.
+    /// - `Ok(count)`: The number of resources successfully deleted. Implementations should
+    ///   return an error if the queue is non-empty but no resources were deleted
+    /// - `Err(err)`: An error occurred while performing the deletions
+    fn flush(&mut self) -> Result<usize>;
+}
+
+impl BlockingDelete for () {
+    fn delete(&mut self, _: &str, _: OpDelete) -> Result<()> {
+        Err(Error::new(
+            ErrorKind::Unsupported,
+            "output deleter doesn't support delete",
+        ))
+    }
+
+    fn flush(&mut self) -> Result<usize> {
+        Err(Error::new(
+            ErrorKind::Unsupported,
+            "output deleter doesn't support flush",
+        ))
+    }
+}
+
+/// `Box<dyn BlockingDelete>` won't implement `BlockingDelete` automatically.
+///
+/// To make BlockingDeleter work as expected, we must add this impl.
+impl<T: BlockingDelete + ?Sized> BlockingDelete for Box<T> {
+    fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> {
+        (**self).delete(path, args)
+    }
+
+    fn flush(&mut self) -> Result<usize> {
+        (**self).flush()
+    }
+}
diff --git a/core/src/raw/oio/delete/batch_delete.rs b/core/src/raw/oio/delete/batch_delete.rs
new file mode 100644
index 000000000000..0c5583bdf80a
--- /dev/null
+++ b/core/src/raw/oio/delete/batch_delete.rs
@@ -0,0 +1,126 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::raw::*;
+use crate::*;
+use std::collections::HashSet;
+use std::future::Future;
+
+/// BatchDelete is used to implement [`oio::Delete`] based on batch delete operations.
+///
+/// BatchDeleter will perform the delete operations when `flush` is called.
+pub trait BatchDelete: Send + Sync + Unpin + 'static {
+    /// delete_once deletes one path at once.
+    ///
+    /// Implementations should make sure that the data is deleted correctly at once.
+    ///
+    /// BatchDeleter may call this method when there is only one path to delete.
+    fn delete_once(
+        &self,
+        path: String,
+        args: OpDelete,
+    ) -> impl Future<Output = Result<()>> + MaybeSend;
+
+    /// delete_batch deletes multiple paths at once.
+    ///
+    /// - Implementations should make sure that the length of `batch` equals the length of the returned result.
+    /// - Implementations should return an error if no path is deleted.
+    fn delete_batch(
+        &self,
+        batch: Vec<(String, OpDelete)>,
+    ) -> impl Future<Output = Result<BatchDeleteResult>> + MaybeSend;
+}
+
+/// BatchDeleteResult is the result of a batch delete operation.
+#[derive(Default)]
+pub struct BatchDeleteResult {
+    /// Collection of successful deletions, containing tuples of (path, args)
+    pub succeeded: Vec<(String, OpDelete)>,
+    /// Collection of failed deletions, containing tuples of (path, args, error)
+    pub failed: Vec<(String, OpDelete, Error)>,
+}
+
+/// BatchDeleter is used to implement [`oio::Delete`] based on batch delete.
+pub struct BatchDeleter<D: BatchDelete> {
+    inner: D,
+    buffer: HashSet<(String, OpDelete)>,
+}
+
+impl<D: BatchDelete> BatchDeleter<D> {
+    /// Create a new batch deleter.
+    pub fn new(inner: D) -> Self {
+        Self {
+            inner,
+            buffer: HashSet::default(),
+        }
+    }
+}
+
+impl<D: BatchDelete> oio::Delete for BatchDeleter<D> {
+    fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> {
+        self.buffer.insert((path.to_string(), args));
+        Ok(())
+    }
+
+    async fn flush(&mut self) -> Result<usize> {
+        if self.buffer.is_empty() {
+            return Ok(0);
+        }
+        if self.buffer.len() == 1 {
+            let (path, args) = self
+                .buffer
+                .iter()
+                .next()
+                .expect("the delete buffer size must be 1")
+                .clone();
+            self.inner.delete_once(path, args).await?;
+            self.buffer.clear();
+            return Ok(1);
+        }
+
+        let batch = self.buffer.iter().cloned().collect();
+        let result = self.inner.delete_batch(batch).await?;
+        debug_assert!(
+            !result.succeeded.is_empty(),
+            "the number of succeeded operations must be greater than 0"
+        );
+        debug_assert_eq!(
+            result.succeeded.len() + result.failed.len(),
+            self.buffer.len(),
+            "the number of succeeded and failed operations must be equal to the input batch size"
+        );
+
+        // Remove all succeeded operations from the buffer.
+        let deleted = result.succeeded.len();
+        for i in result.succeeded {
+            self.buffer.remove(&i);
+        }
+
+        // Return directly if there are non-temporary errors.
+ for (path, op, err) in result.failed { + if !err.is_temporary() { + return Err(err + .with_context("path", path) + .with_context("version", op.version().unwrap_or(""))); + } + } + + // Return the number of succeeded operations to allow users to decide whether + // to retry or push more delete operations. + Ok(deleted) + } +} diff --git a/core/src/raw/oio/delete/mod.rs b/core/src/raw/oio/delete/mod.rs new file mode 100644 index 000000000000..192a4235b05d --- /dev/null +++ b/core/src/raw/oio/delete/mod.rs @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +mod api; +pub use api::BlockingDelete; +pub use api::BlockingDeleter; +pub use api::Delete; +pub use api::DeleteDyn; +pub use api::Deleter; + +mod batch_delete; +pub use batch_delete::BatchDelete; +pub use batch_delete::BatchDeleteResult; +pub use batch_delete::BatchDeleter; + +mod one_shot_delete; +pub use one_shot_delete::BlockingOneShotDelete; +pub use one_shot_delete::OneShotDelete; +pub use one_shot_delete::OneShotDeleter; diff --git a/core/src/raw/oio/delete/one_shot_delete.rs b/core/src/raw/oio/delete/one_shot_delete.rs new file mode 100644 index 000000000000..84edc2ad5117 --- /dev/null +++ b/core/src/raw/oio/delete/one_shot_delete.rs @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use crate::raw::*; +use crate::*; +use std::future::Future; + +/// OneShotDelete is used to implement [`oio::Delete`] based on one shot operation. +/// +/// OneShotDeleter will perform delete operation while calling `flush`. +pub trait OneShotDelete: Send + Sync + Unpin + 'static { + /// delete_once delete one path at once. + /// + /// Implementations should make sure that the data is deleted correctly at once. + fn delete_once( + &self, + path: String, + args: OpDelete, + ) -> impl Future> + MaybeSend; +} + +/// BlockingOneShotDelete is used to implement [`oio::BlockingDelete`] based on one shot operation. +/// +/// BlockingOneShotDeleter will perform delete operation while calling `flush`. 
+pub trait BlockingOneShotDelete: Send + Sync + 'static { + /// delete_once delete one path at once. + /// + /// Implementations should make sure that the data is deleted correctly at once. + fn blocking_delete_once(&self, path: String, args: OpDelete) -> Result<()>; +} + +/// OneShotDelete is used to implement [`oio::Delete`] based on one shot. +pub struct OneShotDeleter { + inner: D, + delete: Option<(String, OpDelete)>, +} + +impl OneShotDeleter { + /// Create a new one shot deleter. + pub fn new(inner: D) -> Self { + Self { + inner, + delete: None, + } + } + + fn delete_inner(&mut self, path: String, args: OpDelete) -> Result<()> { + if self.delete.is_some() { + return Err(Error::new( + ErrorKind::Unsupported, + "OneShotDeleter doesn't support batch delete", + )); + } + + self.delete = Some((path, args)); + Ok(()) + } +} + +impl oio::Delete for OneShotDeleter { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + self.delete_inner(path.to_string(), args) + } + + async fn flush(&mut self) -> Result { + let Some((path, args)) = self.delete.clone() else { + return Ok(0); + }; + + self.inner.delete_once(path, args).await?; + self.delete = None; + Ok(1) + } +} + +impl oio::BlockingDelete for OneShotDeleter { + fn delete(&mut self, path: &str, args: OpDelete) -> Result<()> { + self.delete_inner(path.to_string(), args) + } + + fn flush(&mut self) -> Result { + let Some((path, args)) = self.delete.clone() else { + return Ok(0); + }; + + self.inner.blocking_delete_once(path, args)?; + self.delete = None; + Ok(1) + } +} diff --git a/core/src/raw/oio/list/flat_list.rs b/core/src/raw/oio/list/flat_list.rs index 7a7096308762..51a9e7bd3a49 100644 --- a/core/src/raw/oio/list/flat_list.rs +++ b/core/src/raw/oio/list/flat_list.rs @@ -207,6 +207,8 @@ mod tests { type BlockingWriter = (); type Lister = (); type BlockingLister = MockLister; + type Deleter = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); diff --git a/core/src/raw/oio/mod.rs b/core/src/raw/oio/mod.rs index 466d5c7a767e..c443406466e4 100644 --- a/core/src/raw/oio/mod.rs +++ b/core/src/raw/oio/mod.rs @@ -19,8 +19,10 @@ //! output. //! //! Those types should only be used internally and we don't want users to -//! depend on them. So we should also implement trait like `AsyncRead` for -//! our `output` traits. +//! depend on them. 
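// Editor's note: a hedged sketch (not part of this patch) of how a simple service plugs
// into the `delete` module added just below: implement `oio::OneShotDelete` for a small
// deleter struct and return it from `Access::delete` wrapped in `oio::OneShotDeleter`,
// mirroring the KvDeleter and AlluxioDeleter changes elsewhere in this patch.
// `ExampleDeleter`, `ExampleClient`, and `remove` are placeholder names.

pub struct ExampleDeleter {
    client: Arc<ExampleClient>, // hypothetical service client type
}

impl oio::OneShotDelete for ExampleDeleter {
    async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> {
        // Issue exactly one delete call to the backing service; the surrounding
        // `OneShotDeleter` handles the queue-then-flush bookkeeping.
        self.client.remove(&path).await
    }
}

// In the service's `Access` implementation (sketch):
//
//     type Deleter = oio::OneShotDeleter<ExampleDeleter>;
//
//     async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> {
//         Ok((
//             RpDelete::default(),
//             oio::OneShotDeleter::new(ExampleDeleter { client: self.client.clone() }),
//         ))
//     }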
+ +mod delete; +pub use delete::*; mod read; pub use read::*; diff --git a/core/src/raw/operation.rs b/core/src/raw/operation.rs index c5ef76047404..dc03db26dfe3 100644 --- a/core/src/raw/operation.rs +++ b/core/src/raw/operation.rs @@ -47,12 +47,14 @@ pub enum Operation { Stat, /// Operation for [`crate::raw::Access::delete`] Delete, + /// Operation for [`crate::raw::oio::Delete::delete`] + DeleterDelete, + /// Operation for [`crate::raw::oio::Delete::flush`] + DeleterFlush, /// Operation for [`crate::raw::Access::list`] List, /// Operation for [`crate::raw::oio::List::next`] ListerNext, - /// Operation for [`crate::raw::Access::batch`] - Batch, /// Operation for [`crate::raw::Access::presign`] Presign, /// Operation for [`crate::raw::Access::blocking_create_dir`] @@ -75,6 +77,10 @@ pub enum Operation { BlockingStat, /// Operation for [`crate::raw::Access::blocking_delete`] BlockingDelete, + /// Operation for [`crate::raw::oio::BlockingDelete::delete`] + BlockingDeleterDelete, + /// Operation for [`crate::raw::oio::BlockingDelete::flush`] + BlockingDeleterFlush, /// Operation for [`crate::raw::Access::blocking_list`] BlockingList, /// Operation for [`crate::raw::oio::BlockingList::next`] @@ -98,9 +104,11 @@ impl Operation { Operation::ReaderRead | Operation::WriterWrite | Operation::ListerNext + | Operation::DeleterDelete | Operation::BlockingReaderRead | Operation::BlockingWriterWrite | Operation::BlockingListerNext + | Operation::BlockingDeleterDelete ) } } @@ -129,7 +137,6 @@ impl From for &'static str { Operation::List => "list", Operation::ListerNext => "List::next", Operation::Presign => "presign", - Operation::Batch => "batch", Operation::BlockingCreateDir => "blocking_create_dir", Operation::BlockingRead => "blocking_read", Operation::BlockingReaderRead => "BlockingReader::read", @@ -142,6 +149,10 @@ impl From for &'static str { Operation::BlockingDelete => "blocking_delete", Operation::BlockingList => "blocking_list", Operation::BlockingListerNext => "BlockingLister::next", + Operation::DeleterDelete => "Deleter::delete", + Operation::DeleterFlush => "Deleter::flush", + Operation::BlockingDeleterDelete => "BlockingDeleter::delete", + Operation::BlockingDeleterFlush => "BlockingDeleter::flush", } } } diff --git a/core/src/raw/ops.rs b/core/src/raw/ops.rs index 60a3b619ae04..7cdac88a588e 100644 --- a/core/src/raw/ops.rs +++ b/core/src/raw/ops.rs @@ -41,7 +41,7 @@ impl OpCreateDir { /// Args for `delete` operation. /// /// The path must be normalized. -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, Eq, Hash, PartialEq)] pub struct OpDelete { version: Option, } @@ -66,6 +66,19 @@ impl OpDelete { } } +/// Args for `delete` operation. +/// +/// The path must be normalized. +#[derive(Debug, Clone, Default)] +pub struct OpDeleter {} + +impl OpDeleter { + /// Create a new `OpDelete`. + pub fn new() -> Self { + Self::default() + } +} + /// Args for `list` operation. #[derive(Debug, Clone)] pub struct OpList { @@ -247,53 +260,6 @@ impl From for PresignOperation { } } -/// Args for `batch` operation. -#[derive(Debug, Clone)] -pub struct OpBatch { - ops: Vec<(String, BatchOperation)>, -} - -impl OpBatch { - /// Create a new batch options. - pub fn new(ops: Vec<(String, BatchOperation)>) -> Self { - Self { ops } - } - - /// Get operation from op. 
- pub fn operation(&self) -> &[(String, BatchOperation)] { - &self.ops - } - - /// Consume OpBatch into BatchOperation - pub fn into_operation(self) -> Vec<(String, BatchOperation)> { - self.ops - } -} - -/// Batch operation used for batch. -#[derive(Debug, Clone)] -#[non_exhaustive] -pub enum BatchOperation { - /// Batch delete operation. - Delete(OpDelete), -} - -impl From for BatchOperation { - fn from(op: OpDelete) -> Self { - Self::Delete(op) - } -} - -impl BatchOperation { - /// Return the operation of this batch. - pub fn operation(&self) -> Operation { - use BatchOperation::*; - match self { - Delete(_) => Operation::Delete, - } - } -} - /// Args for `read` operation. #[derive(Debug, Clone, Default)] pub struct OpRead { diff --git a/core/src/raw/rps.rs b/core/src/raw/rps.rs index a3073dbd4b74..62de328790fd 100644 --- a/core/src/raw/rps.rs +++ b/core/src/raw/rps.rs @@ -153,40 +153,6 @@ impl RpRead { } } -/// Reply for `batch` operation. -pub struct RpBatch { - results: Vec<(String, Result)>, -} - -impl RpBatch { - /// Create a new RpBatch. - pub fn new(results: Vec<(String, Result)>) -> Self { - Self { results } - } - - /// Get the results from RpBatch. - pub fn results(&self) -> &[(String, Result)] { - &self.results - } - - /// Consume RpBatch to get the batched results. - pub fn into_results(self) -> Vec<(String, Result)> { - self.results - } -} - -/// Batch results of `batch` operations. -pub enum BatchedReply { - /// results of `delete batch` operation - Delete(RpDelete), -} - -impl From for BatchedReply { - fn from(rp: RpDelete) -> Self { - Self::Delete(rp) - } -} - /// Reply for `stat` operation. #[derive(Debug, Clone)] pub struct RpStat { diff --git a/core/src/services/aliyun_drive/backend.rs b/core/src/services/aliyun_drive/backend.rs index c9103cc14a8b..5a5669c0d55a 100644 --- a/core/src/services/aliyun_drive/backend.rs +++ b/core/src/services/aliyun_drive/backend.rs @@ -29,6 +29,7 @@ use log::debug; use tokio::sync::Mutex; use super::core::*; +use super::delete::AliyunDriveDeleter; use super::error::parse_error; use super::lister::AliyunDriveLister; use super::lister::AliyunDriveParent; @@ -203,9 +204,11 @@ impl Access for AliyunDriveBackend { type Reader = HttpBody; type Writer = AliyunDriveWriter; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -376,18 +379,11 @@ impl Access for AliyunDriveBackend { } } - async fn delete(&self, path: &str, _args: OpDelete) -> Result { - let res = match self.core.get_by_path(path).await { - Ok(output) => Some(output), - Err(err) if err.kind() == ErrorKind::NotFound => None, - Err(err) => return Err(err), - }; - if let Some(res) = res { - let file: AliyunDriveFile = - serde_json::from_reader(res.reader()).map_err(new_json_serialize_error)?; - self.core.delete_path(&file.file_id).await?; - } - Ok(RpDelete::default()) + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(AliyunDriveDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/aliyun_drive/delete.rs b/core/src/services/aliyun_drive/delete.rs new file mode 100644 index 000000000000..0eb9c8be22eb --- /dev/null +++ b/core/src/services/aliyun_drive/delete.rs @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under 
one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::{AliyunDriveCore, AliyunDriveFile}; +use crate::raw::*; +use crate::*; +use bytes::Buf; +use std::sync::Arc; + +pub struct AliyunDriveDeleter { + core: Arc, +} + +impl AliyunDriveDeleter { + pub fn new(core: Arc) -> Self { + AliyunDriveDeleter { core } + } +} + +impl oio::OneShotDelete for AliyunDriveDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let res = match self.core.get_by_path(&path).await { + Ok(output) => Some(output), + Err(err) if err.kind() == ErrorKind::NotFound => None, + Err(err) => return Err(err), + }; + if let Some(res) = res { + let file: AliyunDriveFile = + serde_json::from_reader(res.reader()).map_err(new_json_serialize_error)?; + self.core.delete_path(&file.file_id).await?; + } + Ok(()) + } +} diff --git a/core/src/services/aliyun_drive/mod.rs b/core/src/services/aliyun_drive/mod.rs index 5481bf4fbe9a..19c725b423ce 100644 --- a/core/src/services/aliyun_drive/mod.rs +++ b/core/src/services/aliyun_drive/mod.rs @@ -21,6 +21,8 @@ mod core; #[cfg(feature = "services-aliyun-drive")] mod backend; #[cfg(feature = "services-aliyun-drive")] +mod delete; +#[cfg(feature = "services-aliyun-drive")] mod error; #[cfg(feature = "services-aliyun-drive")] mod lister; @@ -30,4 +32,5 @@ mod writer; pub use backend::AliyunDriveBuilder as AliyunDrive; mod config; + pub use config::AliyunDriveConfig; diff --git a/core/src/services/aliyun_drive/writer.rs b/core/src/services/aliyun_drive/writer.rs index 5021d8cffaa9..31bf74fad8c7 100644 --- a/core/src/services/aliyun_drive/writer.rs +++ b/core/src/services/aliyun_drive/writer.rs @@ -20,11 +20,11 @@ use std::sync::Arc; use bytes::Buf; use super::core::AliyunDriveCore; +use super::core::CheckNameMode; +use super::core::CreateResponse; +use super::core::CreateType; use super::core::UploadUrlResponse; use crate::raw::*; -use crate::services::aliyun_drive::core::CheckNameMode; -use crate::services::aliyun_drive::core::CreateResponse; -use crate::services::aliyun_drive::core::CreateType; use crate::*; pub struct AliyunDriveWriter { diff --git a/core/src/services/alluxio/backend.rs b/core/src/services/alluxio/backend.rs index bea21a364012..e3ff2acb170c 100644 --- a/core/src/services/alluxio/backend.rs +++ b/core/src/services/alluxio/backend.rs @@ -23,6 +23,7 @@ use http::Response; use log::debug; use super::core::AlluxioCore; +use super::delete::AlluxioDeleter; use super::error::parse_error; use super::lister::AlluxioLister; use super::writer::AlluxioWriter; @@ -144,9 +145,11 @@ impl Access for AlluxioBackend { type Reader = HttpBody; type Writer = AlluxioWriters; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> 
Arc { let mut am = AccessorInfo::default(); @@ -206,10 +209,11 @@ impl Access for AlluxioBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - self.core.delete(path).await?; - - Ok(RpDelete::default()) + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(AlluxioDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, _args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/alluxio/delete.rs b/core/src/services/alluxio/delete.rs new file mode 100644 index 000000000000..21837cb920b8 --- /dev/null +++ b/core/src/services/alluxio/delete.rs @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use crate::raw::*; +use crate::*; +use std::sync::Arc; + +pub struct AlluxioDeleter { + core: Arc, +} + +impl AlluxioDeleter { + pub fn new(core: Arc) -> Self { + AlluxioDeleter { core } + } +} + +impl oio::OneShotDelete for AlluxioDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + self.core.delete(&path).await + } +} diff --git a/core/src/services/alluxio/mod.rs b/core/src/services/alluxio/mod.rs index aa2c88b6fe35..611a8a227e2e 100644 --- a/core/src/services/alluxio/mod.rs +++ b/core/src/services/alluxio/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-alluxio")] mod core; #[cfg(feature = "services-alluxio")] +mod delete; +#[cfg(feature = "services-alluxio")] mod error; #[cfg(feature = "services-alluxio")] mod lister; @@ -30,4 +32,5 @@ mod backend; pub use backend::AlluxioBuilder as Alluxio; mod config; + pub use config::AlluxioConfig; diff --git a/core/src/services/azblob/backend.rs b/core/src/services/azblob/backend.rs index e2ab452ddf1c..20ac5ef3e188 100644 --- a/core/src/services/azblob/backend.rs +++ b/core/src/services/azblob/backend.rs @@ -22,8 +22,6 @@ use std::sync::Arc; use base64::prelude::BASE64_STANDARD; use base64::Engine; -use bytes::Buf; -use http::header::CONTENT_TYPE; use http::Response; use http::StatusCode; use log::debug; @@ -34,12 +32,13 @@ use sha2::Digest; use sha2::Sha256; use super::core::constants::X_MS_META_PREFIX; +use super::core::AzblobCore; +use super::delete::AzblobDeleter; use super::error::parse_error; use super::lister::AzblobLister; use super::writer::AzblobWriter; +use super::writer::AzblobWriters; use crate::raw::*; -use crate::services::azblob::core::AzblobCore; -use crate::services::azblob::writer::AzblobWriters; use crate::services::AzblobConfig; use crate::*; @@ -433,11 +432,6 @@ impl Builder for AzblobBuilder { let signer = AzureStorageSigner::new(); - let batch_max_operations = self - .config - .batch_max_operations - .unwrap_or(AZBLOB_BATCH_LIMIT); - Ok(AzblobBackend { core: 
Arc::new(AzblobCore { root, @@ -450,7 +444,6 @@ impl Builder for AzblobBuilder { client, loader: cred_loader, signer, - batch_max_operations, }), has_sas_token: self.config.sas_token.is_some(), }) @@ -492,9 +485,11 @@ impl Access for AzblobBackend { type Reader = HttpBody; type Writer = AzblobWriters; type Lister = oio::PageLister; + type Deleter = oio::BatchDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -523,6 +518,8 @@ impl Access for AzblobBackend { write_with_user_metadata: true, delete: true, + delete_max_size: Some(AZBLOB_BATCH_LIMIT), + copy: true, list: true, @@ -533,10 +530,6 @@ impl Access for AzblobBackend { presign_read: self.has_sas_token, presign_write: self.has_sas_token, - batch: true, - batch_delete: true, - batch_max_operations: Some(self.core.batch_max_operations), - shared: true, ..Default::default() @@ -595,15 +588,11 @@ impl Access for AzblobBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.azblob_delete_blob(path).await?; - - let status = resp.status(); - - match status { - StatusCode::ACCEPTED | StatusCode::NOT_FOUND => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::BatchDeleter::new(AzblobDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -651,77 +640,6 @@ impl Access for AzblobBackend { parts.headers, ))) } - - async fn batch(&self, args: OpBatch) -> Result { - let ops = args.into_operation(); - let paths = ops.into_iter().map(|(p, _)| p).collect::>(); - if paths.len() > AZBLOB_BATCH_LIMIT { - return Err(Error::new( - ErrorKind::Unsupported, - "batch delete limit exceeded", - )); - } - - // construct and complete batch request - let resp = self.core.azblob_batch_delete(&paths).await?; - - // check response status - if resp.status() != StatusCode::ACCEPTED { - return Err(parse_error(resp)); - } - - // get boundary from response header - let content_type = resp.headers().get(CONTENT_TYPE).ok_or_else(|| { - Error::new( - ErrorKind::Unexpected, - "response data should have CONTENT_TYPE header", - ) - })?; - let content_type = content_type - .to_str() - .map(|ty| ty.to_string()) - .map_err(|e| { - Error::new( - ErrorKind::Unexpected, - format!("get invalid CONTENT_TYPE header in response: {:?}", e), - ) - })?; - let splits = content_type.split("boundary=").collect::>(); - let boundary = splits.get(1).to_owned().ok_or_else(|| { - Error::new( - ErrorKind::Unexpected, - "No boundary message provided in CONTENT_TYPE", - ) - })?; - - let mut bs = resp.into_body(); - let bs = bs.copy_to_bytes(bs.remaining()); - - let multipart: Multipart = Multipart::new().with_boundary(boundary).parse(bs)?; - let parts = multipart.into_parts(); - - if paths.len() != parts.len() { - return Err(Error::new( - ErrorKind::Unexpected, - "invalid batch response, paths and response parts don't match", - )); - } - - let mut results = Vec::with_capacity(parts.len()); - - for (i, part) in parts.into_iter().enumerate() { - let resp = part.into_response(); - let path = paths[i].clone(); - - // deleting not existing objects is ok - if resp.status() == StatusCode::ACCEPTED || resp.status() == StatusCode::NOT_FOUND { - results.push((path, Ok(RpDelete::default().into()))); - } else { - results.push((path, 
Err(parse_error(resp)))); - } - } - Ok(RpBatch::new(results)) - } } #[cfg(test)] diff --git a/core/src/services/azblob/core.rs b/core/src/services/azblob/core.rs index 4093422423a4..1202ae8ba4bf 100644 --- a/core/src/services/azblob/core.rs +++ b/core/src/services/azblob/core.rs @@ -68,7 +68,6 @@ pub struct AzblobCore { pub client: HttpClient, pub loader: AzureStorageLoader, pub signer: AzureStorageSigner, - pub batch_max_operations: usize, } impl Debug for AzblobCore { diff --git a/core/src/services/azblob/delete.rs b/core/src/services/azblob/delete.rs new file mode 100644 index 000000000000..1cc8750ad793 --- /dev/null +++ b/core/src/services/azblob/delete.rs @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::oio::BatchDeleteResult; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct AzblobDeleter { + core: Arc, +} + +impl AzblobDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::BatchDelete for AzblobDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.azblob_delete_blob(&path).await?; + + let status = resp.status(); + + match status { + StatusCode::ACCEPTED | StatusCode::NOT_FOUND => Ok(()), + _ => Err(parse_error(resp)), + } + } + + async fn delete_batch(&self, batch: Vec<(String, OpDelete)>) -> Result { + // TODO: Add remove version support. + let paths = batch.into_iter().map(|(p, _)| p).collect::>(); + + // construct and complete batch request + let resp = self.core.azblob_batch_delete(&paths).await?; + + // check response status + if resp.status() != StatusCode::ACCEPTED { + return Err(parse_error(resp)); + } + + // get boundary from response header + let boundary = parse_multipart_boundary(resp.headers())? + .ok_or_else(|| { + Error::new( + ErrorKind::Unexpected, + "invalid response: no boundary provided in header", + ) + })? 
+ .to_string(); + + let bs = resp.into_body().to_bytes(); + let multipart: Multipart = + Multipart::new().with_boundary(&boundary).parse(bs)?; + let parts = multipart.into_parts(); + + if paths.len() != parts.len() { + return Err(Error::new( + ErrorKind::Unexpected, + "invalid batch response, paths and response parts don't match", + )); + } + + let mut batched_result = BatchDeleteResult::default(); + + for (i, part) in parts.into_iter().enumerate() { + let resp = part.into_response(); + let path = paths[i].clone(); + + // deleting not existing objects is ok + if resp.status() == StatusCode::ACCEPTED || resp.status() == StatusCode::NOT_FOUND { + batched_result.succeeded.push((path, OpDelete::default())); + } else { + batched_result + .failed + .push((path, OpDelete::default(), parse_error(resp))); + } + } + + // If no object is deleted, return directly. + if batched_result.succeeded.is_empty() { + let err = batched_result.failed.remove(0).2; + return Err(err); + } + + Ok(batched_result) + } +} diff --git a/core/src/services/azblob/mod.rs b/core/src/services/azblob/mod.rs index ba65d343939e..4b571a40f565 100644 --- a/core/src/services/azblob/mod.rs +++ b/core/src/services/azblob/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-azblob")] mod core; #[cfg(feature = "services-azblob")] +mod delete; +#[cfg(feature = "services-azblob")] mod error; #[cfg(feature = "services-azblob")] mod lister; diff --git a/core/src/services/azdls/backend.rs b/core/src/services/azdls/backend.rs index 8710077e394c..b2a6b493d318 100644 --- a/core/src/services/azdls/backend.rs +++ b/core/src/services/azdls/backend.rs @@ -27,6 +27,7 @@ use reqsign::AzureStorageLoader; use reqsign::AzureStorageSigner; use super::core::AzdlsCore; +use super::delete::AzdlsDeleter; use super::error::parse_error; use super::lister::AzdlsLister; use super::writer::AzdlsWriter; @@ -217,9 +218,11 @@ impl Access for AzdlsBackend { type Reader = HttpBody; type Writer = AzdlsWriters; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -340,15 +343,11 @@ impl Access for AzdlsBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.azdls_delete(path).await?; - - let status = resp.status(); - - match status { - StatusCode::OK | StatusCode::NOT_FOUND => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(AzdlsDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/azdls/delete.rs b/core/src/services/azdls/delete.rs new file mode 100644 index 000000000000..3fe7a557c854 --- /dev/null +++ b/core/src/services/azdls/delete.rs @@ -0,0 +1,46 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct AzdlsDeleter { + core: Arc, +} + +impl AzdlsDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for AzdlsDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.azdls_delete(&path).await?; + + let status = resp.status(); + + match status { + StatusCode::OK | StatusCode::NOT_FOUND => Ok(()), + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/azdls/mod.rs b/core/src/services/azdls/mod.rs index 321899ccc386..7b981f5ee7ba 100644 --- a/core/src/services/azdls/mod.rs +++ b/core/src/services/azdls/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-azdls")] mod core; #[cfg(feature = "services-azdls")] +mod delete; +#[cfg(feature = "services-azdls")] mod error; #[cfg(feature = "services-azdls")] mod lister; diff --git a/core/src/services/azfile/backend.rs b/core/src/services/azfile/backend.rs index 1f68439b5511..46ebb283bd2f 100644 --- a/core/src/services/azfile/backend.rs +++ b/core/src/services/azfile/backend.rs @@ -27,11 +27,12 @@ use reqsign::AzureStorageLoader; use reqsign::AzureStorageSigner; use super::core::AzfileCore; +use super::delete::AzfileDeleter; use super::error::parse_error; +use super::lister::AzfileLister; use super::writer::AzfileWriter; use super::writer::AzfileWriters; use crate::raw::*; -use crate::services::azfile::lister::AzfileLister; use crate::services::AzfileConfig; use crate::*; @@ -233,9 +234,11 @@ impl Access for AzfileBackend { type Reader = HttpBody; type Writer = AzfileWriters; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -331,18 +334,11 @@ impl Access for AzfileBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = if path.ends_with('/') { - self.core.azfile_delete_dir(path).await? - } else { - self.core.azfile_delete_file(path).await? 
- }; - - let status = resp.status(); - match status { - StatusCode::ACCEPTED | StatusCode::NOT_FOUND => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(AzfileDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/azfile/core.rs b/core/src/services/azfile/core.rs index eaa7eb103021..dcebfb3b247d 100644 --- a/core/src/services/azfile/core.rs +++ b/core/src/services/azfile/core.rs @@ -33,8 +33,8 @@ use reqsign::AzureStorageCredential; use reqsign::AzureStorageLoader; use reqsign::AzureStorageSigner; +use super::error::parse_error; use crate::raw::*; -use crate::services::azfile::error::parse_error; use crate::*; const X_MS_VERSION: &str = "x-ms-version"; diff --git a/core/src/services/azfile/delete.rs b/core/src/services/azfile/delete.rs new file mode 100644 index 000000000000..562b7d2da449 --- /dev/null +++ b/core/src/services/azfile/delete.rs @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct AzfileDeleter { + core: Arc, +} + +impl AzfileDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for AzfileDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = if path.ends_with('/') { + self.core.azfile_delete_dir(&path).await? + } else { + self.core.azfile_delete_file(&path).await? 
+ }; + + let status = resp.status(); + match status { + StatusCode::ACCEPTED | StatusCode::NOT_FOUND => Ok(()), + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/azfile/mod.rs b/core/src/services/azfile/mod.rs index 493f8cfb7d3b..38c9ba0419d3 100644 --- a/core/src/services/azfile/mod.rs +++ b/core/src/services/azfile/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-azfile")] mod core; #[cfg(feature = "services-azfile")] +mod delete; +#[cfg(feature = "services-azfile")] mod error; #[cfg(feature = "services-azfile")] mod lister; @@ -30,4 +32,5 @@ mod backend; pub use backend::AzfileBuilder as Azfile; mod config; + pub use config::AzfileConfig; diff --git a/core/src/services/b2/backend.rs b/core/src/services/b2/backend.rs index b785af8bd0ba..be3f61435bd2 100644 --- a/core/src/services/b2/backend.rs +++ b/core/src/services/b2/backend.rs @@ -29,13 +29,14 @@ use tokio::sync::RwLock; use super::core::constants; use super::core::parse_file_info; use super::core::B2Core; +use super::core::B2Signer; +use super::core::ListFileNamesResponse; +use super::delete::B2Deleter; use super::error::parse_error; use super::lister::B2Lister; use super::writer::B2Writer; use super::writer::B2Writers; use crate::raw::*; -use crate::services::b2::core::B2Signer; -use crate::services::b2::core::ListFileNamesResponse; use crate::services::B2Config; use crate::*; @@ -216,9 +217,11 @@ impl Access for B2Backend { type Reader = HttpBody; type Writer = B2Writers; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -328,23 +331,11 @@ impl Access for B2Backend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.hide_file(path).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => Ok(RpDelete::default()), - _ => { - let err = parse_error(resp); - match err.kind() { - ErrorKind::NotFound => Ok(RpDelete::default()), - // Representative deleted - ErrorKind::AlreadyExists => Ok(RpDelete::default()), - _ => Err(err), - } - } - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(B2Deleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/b2/core.rs b/core/src/services/b2/core.rs index 2c0256eda268..08ad14388546 100644 --- a/core/src/services/b2/core.rs +++ b/core/src/services/b2/core.rs @@ -33,9 +33,9 @@ use tokio::sync::RwLock; use self::constants::X_BZ_CONTENT_SHA1; use self::constants::X_BZ_FILE_NAME; +use super::core::constants::X_BZ_PART_NUMBER; +use super::error::parse_error; use crate::raw::*; -use crate::services::b2::core::constants::X_BZ_PART_NUMBER; -use crate::services::b2::error::parse_error; use crate::*; pub(super) mod constants { diff --git a/core/src/services/b2/delete.rs b/core/src/services/b2/delete.rs new file mode 100644 index 000000000000..a6db1b4225ee --- /dev/null +++ b/core/src/services/b2/delete.rs @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct B2Deleter { + core: Arc, +} + +impl B2Deleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for B2Deleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.hide_file(&path).await?; + + let status = resp.status(); + + match status { + StatusCode::OK => Ok(()), + _ => { + let err = parse_error(resp); + match err.kind() { + ErrorKind::NotFound => Ok(()), + // Representative deleted + ErrorKind::AlreadyExists => Ok(()), + _ => Err(err), + } + } + } + } +} diff --git a/core/src/services/b2/lister.rs b/core/src/services/b2/lister.rs index 1a35c77938b1..efc44b78683f 100644 --- a/core/src/services/b2/lister.rs +++ b/core/src/services/b2/lister.rs @@ -22,8 +22,8 @@ use bytes::Buf; use super::core::parse_file_info; use super::core::B2Core; use super::core::ListFileNamesResponse; +use super::error::parse_error; use crate::raw::*; -use crate::services::b2::error::parse_error; use crate::*; pub struct B2Lister { diff --git a/core/src/services/b2/mod.rs b/core/src/services/b2/mod.rs index ec294579e4d6..5193f960c72e 100644 --- a/core/src/services/b2/mod.rs +++ b/core/src/services/b2/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-b2")] mod core; #[cfg(feature = "services-b2")] +mod delete; +#[cfg(feature = "services-b2")] mod error; #[cfg(feature = "services-b2")] mod lister; @@ -30,4 +32,5 @@ mod backend; pub use backend::B2Builder as B2; mod config; + pub use config::B2Config; diff --git a/core/src/services/chainsafe/backend.rs b/core/src/services/chainsafe/backend.rs index e5373f4c3471..80a403a89c9d 100644 --- a/core/src/services/chainsafe/backend.rs +++ b/core/src/services/chainsafe/backend.rs @@ -27,6 +27,7 @@ use log::debug; use super::core::parse_info; use super::core::ChainsafeCore; use super::core::ObjectInfoResponse; +use super::delete::ChainsafeDeleter; use super::error::parse_error; use super::lister::ChainsafeLister; use super::writer::ChainsafeWriter; @@ -166,9 +167,11 @@ impl Access for ChainsafeBackend { type Reader = HttpBody; type Writer = ChainsafeWriters; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -247,17 +250,11 @@ impl Access for ChainsafeBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.delete_object(path).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => Ok(RpDelete::default()), - // Allow 404 when deleting a non-existing object - StatusCode::NOT_FOUND => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, 
Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(ChainsafeDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, _args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/chainsafe/delete.rs b/core/src/services/chainsafe/delete.rs new file mode 100644 index 000000000000..436d2b3485c8 --- /dev/null +++ b/core/src/services/chainsafe/delete.rs @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct ChainsafeDeleter { + core: Arc, +} + +impl ChainsafeDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for ChainsafeDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.delete_object(&path).await?; + + let status = resp.status(); + + match status { + StatusCode::OK => Ok(()), + // Allow 404 when deleting a non-existing object + StatusCode::NOT_FOUND => Ok(()), + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/chainsafe/mod.rs b/core/src/services/chainsafe/mod.rs index 86a7ff7bb554..fa455cb38344 100644 --- a/core/src/services/chainsafe/mod.rs +++ b/core/src/services/chainsafe/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-chainsafe")] mod core; #[cfg(feature = "services-chainsafe")] +mod delete; +#[cfg(feature = "services-chainsafe")] mod error; #[cfg(feature = "services-chainsafe")] mod lister; diff --git a/core/src/services/compfs/backend.rs b/core/src/services/compfs/backend.rs index 9c43a82d4c80..ba323b50f84f 100644 --- a/core/src/services/compfs/backend.rs +++ b/core/src/services/compfs/backend.rs @@ -22,9 +22,11 @@ use compio::dispatcher::Dispatcher; use compio::fs::OpenOptions; use super::core::CompfsCore; +use super::delete::CompfsDeleter; use super::lister::CompfsLister; use super::reader::CompfsReader; use super::writer::CompfsWriter; +use crate::raw::oio::OneShotDeleter; use crate::raw::*; use crate::services::CompfsConfig; use crate::*; @@ -106,9 +108,11 @@ impl Access for CompfsBackend { type Reader = CompfsReader; type Writer = CompfsWriter; type Lister = Option; + type Deleter = OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -169,20 +173,11 @@ impl Access for CompfsBackend { Ok(RpStat::new(ret)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - if path.ends_with('/') { - let path = self.core.prepare_path(path); - self.core - .exec(move || async move { compio::fs::remove_dir(path).await }) - .await?; - } else { - let path = 
self.core.prepare_path(path); - self.core - .exec(move || async move { compio::fs::remove_file(path).await }) - .await?; - } - - Ok(RpDelete::default()) + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + OneShotDeleter::new(CompfsDeleter::new(self.core.clone())), + )) } async fn copy(&self, from: &str, to: &str, _: OpCopy) -> Result { diff --git a/core/src/services/compfs/delete.rs b/core/src/services/compfs/delete.rs new file mode 100644 index 000000000000..b62abee9b4f9 --- /dev/null +++ b/core/src/services/compfs/delete.rs @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use crate::raw::*; +use crate::*; +use std::sync::Arc; + +pub struct CompfsDeleter { + core: Arc, +} + +impl CompfsDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for CompfsDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + if path.ends_with('/') { + let path = self.core.prepare_path(&path); + self.core + .exec(move || async move { compio::fs::remove_dir(path).await }) + .await?; + } else { + let path = self.core.prepare_path(&path); + self.core + .exec(move || async move { compio::fs::remove_file(path).await }) + .await?; + } + + Ok(()) + } +} diff --git a/core/src/services/compfs/mod.rs b/core/src/services/compfs/mod.rs index fe99a6d00562..53b7f746f1e9 100644 --- a/core/src/services/compfs/mod.rs +++ b/core/src/services/compfs/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-compfs")] mod core; #[cfg(feature = "services-compfs")] +mod delete; +#[cfg(feature = "services-compfs")] mod lister; #[cfg(feature = "services-compfs")] mod reader; diff --git a/core/src/services/cos/backend.rs b/core/src/services/cos/backend.rs index 1e27d7f6dfa0..e7efbc078e17 100644 --- a/core/src/services/cos/backend.rs +++ b/core/src/services/cos/backend.rs @@ -27,6 +27,7 @@ use reqsign::TencentCosCredentialLoader; use reqsign::TencentCosSigner; use super::core::*; +use super::delete::CosDeleter; use super::error::parse_error; use super::lister::CosLister; use super::writer::CosWriter; @@ -232,9 +233,11 @@ impl Access for CosBackend { type Reader = HttpBody; type Writer = CosWriters; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -336,17 +339,11 @@ impl Access for CosBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.cos_delete_object(path).await?; - - let status = resp.status(); - - match status { - StatusCode::NO_CONTENT | StatusCode::ACCEPTED | StatusCode::NOT_FOUND 
=> { - Ok(RpDelete::default()) - } - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(CosDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/cos/delete.rs b/core/src/services/cos/delete.rs new file mode 100644 index 000000000000..17319ba91cd1 --- /dev/null +++ b/core/src/services/cos/delete.rs @@ -0,0 +1,46 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct CosDeleter { + core: Arc, +} + +impl CosDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for CosDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.cos_delete_object(&path).await?; + + let status = resp.status(); + + match status { + StatusCode::NO_CONTENT | StatusCode::ACCEPTED | StatusCode::NOT_FOUND => Ok(()), + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/cos/mod.rs b/core/src/services/cos/mod.rs index 9713dc39dc5b..7aff3973824d 100644 --- a/core/src/services/cos/mod.rs +++ b/core/src/services/cos/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-cos")] mod core; #[cfg(feature = "services-cos")] +mod delete; +#[cfg(feature = "services-cos")] mod error; #[cfg(feature = "services-cos")] mod lister; @@ -30,4 +32,5 @@ mod backend; pub use backend::CosBuilder as Cos; mod config; + pub use config::CosConfig; diff --git a/core/src/services/dbfs/backend.rs b/core/src/services/dbfs/backend.rs index f3ae3399363c..93c2ec36f24c 100644 --- a/core/src/services/dbfs/backend.rs +++ b/core/src/services/dbfs/backend.rs @@ -25,6 +25,7 @@ use log::debug; use serde::Deserialize; use super::core::DbfsCore; +use super::delete::DbfsDeleter; use super::error::parse_error; use super::lister::DbfsLister; use super::writer::DbfsWriter; @@ -145,9 +146,11 @@ impl Access for DbfsBackend { type Reader = (); type Writer = oio::OneShotWriter; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -223,16 +226,11 @@ impl Access for DbfsBackend { )) } - /// NOTE: Server will return 200 even if the path doesn't exist. 
- async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.dbfs_delete(path).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(DbfsDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, _args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/dbfs/delete.rs b/core/src/services/dbfs/delete.rs new file mode 100644 index 000000000000..089ec756df6a --- /dev/null +++ b/core/src/services/dbfs/delete.rs @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct DbfsDeleter { + core: Arc, +} + +impl DbfsDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for DbfsDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.dbfs_delete(&path).await?; + + let status = resp.status(); + + match status { + // NOTE: Server will return 200 even if the path doesn't exist. 
+ StatusCode::OK => Ok(()), + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/dbfs/lister.rs b/core/src/services/dbfs/lister.rs index 58f8fc81694a..077c2a6585df 100644 --- a/core/src/services/dbfs/lister.rs +++ b/core/src/services/dbfs/lister.rs @@ -21,9 +21,9 @@ use bytes::Buf; use http::StatusCode; use serde::Deserialize; +use super::core::DbfsCore; use super::error::parse_error; use crate::raw::*; -use crate::services::dbfs::core::DbfsCore; use crate::*; pub struct DbfsLister { diff --git a/core/src/services/dbfs/mod.rs b/core/src/services/dbfs/mod.rs index de4774f7d1a5..b2b1a470bf8e 100644 --- a/core/src/services/dbfs/mod.rs +++ b/core/src/services/dbfs/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-dbfs")] mod core; #[cfg(feature = "services-dbfs")] +mod delete; +#[cfg(feature = "services-dbfs")] mod error; #[cfg(feature = "services-dbfs")] mod lister; diff --git a/core/src/services/dbfs/writer.rs b/core/src/services/dbfs/writer.rs index ff6d64201858..1a3963b8c712 100644 --- a/core/src/services/dbfs/writer.rs +++ b/core/src/services/dbfs/writer.rs @@ -19,9 +19,9 @@ use std::sync::Arc; use http::StatusCode; +use super::core::DbfsCore; use super::error::parse_error; use crate::raw::*; -use crate::services::dbfs::core::DbfsCore; use crate::*; pub struct DbfsWriter { diff --git a/core/src/services/dropbox/backend.rs b/core/src/services/dropbox/backend.rs index 2544ac3ac238..c7cd15572231 100644 --- a/core/src/services/dropbox/backend.rs +++ b/core/src/services/dropbox/backend.rs @@ -18,12 +18,12 @@ use std::fmt::Debug; use std::sync::Arc; -use backon::Retryable; use bytes::Buf; use http::Response; use http::StatusCode; use super::core::*; +use super::delete::DropboxDeleter; use super::error::*; use super::lister::DropboxLister; use super::writer::DropboxWriter; @@ -39,9 +39,11 @@ impl Access for DropboxBackend { type Reader = HttpBody; type Writer = oio::OneShotWriter; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut ma = AccessorInfo::default(); @@ -65,9 +67,6 @@ impl Access for DropboxBackend { rename: true, - batch: true, - batch_delete: true, - shared: true, ..Default::default() @@ -93,16 +92,7 @@ impl Access for DropboxBackend { } } - // Dropbox has very, very, very strong limitation on the create_folder requests. - // - // Let's try our best to make sure it won't failed for rate limited issues. - let res = { || self.core.dropbox_create_folder(path) } - .retry(*BACKOFF) - .when(|e| e.is_temporary()) - .await - // Set this error to permanent to avoid retrying. 
- .map_err(|e| e.set_permanent())?; - + let res = self.core.dropbox_create_folder(path).await?; Ok(res) } @@ -171,21 +161,11 @@ impl Access for DropboxBackend { )) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.dropbox_delete(path).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => Ok(RpDelete::default()), - _ => { - let err = parse_error(resp); - match err.kind() { - ErrorKind::NotFound => Ok(RpDelete::default()), - _ => Err(err), - } - } - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(DropboxDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -233,52 +213,4 @@ impl Access for DropboxBackend { } } } - - async fn batch(&self, args: OpBatch) -> Result { - let ops = args.into_operation(); - if ops.len() > 1000 { - return Err(Error::new( - ErrorKind::Unsupported, - "dropbox services only allow delete up to 1000 keys at once", - ) - .with_context("length", ops.len().to_string())); - } - - let paths = ops.into_iter().map(|(p, _)| p).collect::>(); - - let resp = self.core.dropbox_delete_batch(paths).await?; - if resp.status() != StatusCode::OK { - return Err(parse_error(resp)); - } - - let bs = resp.into_body(); - let decoded_response: DropboxDeleteBatchResponse = - serde_json::from_reader(bs.reader()).map_err(new_json_deserialize_error)?; - - match decoded_response.tag.as_str() { - "complete" => { - let entries = decoded_response.entries.unwrap_or_default(); - let results = self.core.handle_batch_delete_complete_result(entries); - Ok(RpBatch::new(results)) - } - "async_job_id" => { - let job_id = decoded_response - .async_job_id - .expect("async_job_id should be present"); - let res = { || self.core.dropbox_delete_batch_check(job_id.clone()) } - .retry(*BACKOFF) - .when(|e| e.is_temporary()) - .await?; - - Ok(res) - } - _ => Err(Error::new( - ErrorKind::Unexpected, - format!( - "delete batch failed with unexpected tag {}", - decoded_response.tag - ), - )), - } - } } diff --git a/core/src/services/dropbox/core.rs b/core/src/services/dropbox/core.rs index 1162eb5e7620..3f784fc180ba 100644 --- a/core/src/services/dropbox/core.rs +++ b/core/src/services/dropbox/core.rs @@ -15,14 +15,11 @@ // specific language governing permissions and limitations // under the License. -use std::collections::HashMap; use std::default::Default; use std::fmt::Debug; use std::fmt::Formatter; use std::sync::Arc; -use std::time::Duration; -use backon::ExponentialBuilder; use bytes::Buf; use bytes::Bytes; use chrono::DateTime; @@ -33,7 +30,6 @@ use http::header::CONTENT_TYPE; use http::Request; use http::Response; use http::StatusCode; -use once_cell::sync::Lazy; use serde::Deserialize; use serde::Serialize; use tokio::sync::Mutex; @@ -42,14 +38,6 @@ use super::error::parse_error; use crate::raw::*; use crate::*; -/// BACKOFF is the backoff used inside dropbox to make sure dropbox async task succeed. 
-pub static BACKOFF: Lazy = Lazy::new(|| { - ExponentialBuilder::default() - .with_max_delay(Duration::from_secs(10)) - .with_max_times(10) - .with_jitter() -}); - pub struct DropboxCore { pub root: String, @@ -202,73 +190,6 @@ impl DropboxCore { self.client.send(request).await } - pub async fn dropbox_delete_batch(&self, paths: Vec) -> Result> { - let url = "https://api.dropboxapi.com/2/files/delete_batch".to_string(); - let args = DropboxDeleteBatchArgs { - entries: paths - .into_iter() - .map(|path| DropboxDeleteBatchEntry { - path: self.build_path(&path), - }) - .collect(), - }; - - let bs = Bytes::from(serde_json::to_string(&args).map_err(new_json_serialize_error)?); - - let mut request = Request::post(&url) - .header(CONTENT_TYPE, "application/json") - .header(CONTENT_LENGTH, bs.len()) - .body(Buffer::from(bs)) - .map_err(new_request_build_error)?; - - self.sign(&mut request).await?; - self.client.send(request).await - } - - pub async fn dropbox_delete_batch_check(&self, async_job_id: String) -> Result { - let url = "https://api.dropboxapi.com/2/files/delete_batch/check".to_string(); - let args = DropboxDeleteBatchCheckArgs { async_job_id }; - - let bs = Bytes::from(serde_json::to_vec(&args).map_err(new_json_serialize_error)?); - - let mut request = Request::post(&url) - .header(CONTENT_TYPE, "application/json") - .header(CONTENT_LENGTH, bs.len()) - .body(Buffer::from(bs)) - .map_err(new_request_build_error)?; - - self.sign(&mut request).await?; - - let resp = self.client.send(request).await?; - if resp.status() != StatusCode::OK { - return Err(parse_error(resp)); - } - - let bs = resp.into_body(); - - let decoded_response: DropboxDeleteBatchResponse = - serde_json::from_reader(bs.reader()).map_err(new_json_deserialize_error)?; - match decoded_response.tag.as_str() { - "in_progress" => Err(Error::new( - ErrorKind::Unexpected, - "delete batch job still in progress", - ) - .set_temporary()), - "complete" => { - let entries = decoded_response.entries.unwrap_or_default(); - let results = self.handle_batch_delete_complete_result(entries); - Ok(RpBatch::new(results)) - } - _ => Err(Error::new( - ErrorKind::Unexpected, - format!( - "delete batch check failed with unexpected tag {}", - decoded_response.tag - ), - )), - } - } - pub async fn dropbox_create_folder(&self, path: &str) -> Result { let url = "https://api.dropboxapi.com/2/files/create_folder_v2".to_string(); let args = DropboxCreateFolderArgs { @@ -404,53 +325,6 @@ impl DropboxCore { self.client.send(request).await } - - pub fn handle_batch_delete_complete_result( - &self, - entries: Vec, - ) -> Vec<(String, Result)> { - let mut results = Vec::with_capacity(entries.len()); - for entry in entries { - let result = match entry.tag.as_str() { - // Only success response has metadata and then path, - // so we cannot tell which path failed. - "success" => { - let path = entry - .metadata - .expect("metadata should be present") - .path_display; - (path, Ok(RpDelete::default().into())) - } - "failure" => { - let error = entry.failure.expect("error should be present"); - let error_cause = &error - .failure_cause_map - .get(&error.tag) - .expect("error should be present") - .tag; - // Ignore errors about path lookup not found and report others. 
- if error.tag == "path_lookup" && error_cause == "not_found" { - ("".to_string(), Ok(RpDelete::default().into())) - } else { - let err = Error::new( - ErrorKind::Unexpected, - format!("delete failed with error {} {}", error.tag, error_cause), - ); - ("".to_string(), Err(err)) - } - } - _ => ( - "".to_string(), - Err(Error::new( - ErrorKind::Unexpected, - format!("delete failed with unexpected tag {}", entry.tag), - )), - ), - }; - results.push(result); - } - results - } } #[derive(Clone)] @@ -642,21 +516,6 @@ pub struct DropboxDeleteBatchResponseEntry { #[serde(rename(deserialize = ".tag"))] pub tag: String, pub metadata: Option, - pub failure: Option, -} - -#[derive(Default, Debug, Deserialize)] -#[serde(default)] -pub struct DropboxDeleteBatchFailureResponse { - #[serde(rename(deserialize = ".tag"))] - pub tag: String, - // During the batch deletion process, Dropbox returns - // part of the error information in the form of a JSON key. - // Since it is impossible to determine the JSON key in advance, - // the error information is parsed into a HashMap here. - // The key of the HashMap is equal to the value of the tag above. - #[serde(flatten)] - pub failure_cause_map: HashMap, } #[derive(Default, Debug, Deserialize)] diff --git a/core/src/services/dropbox/delete.rs b/core/src/services/dropbox/delete.rs new file mode 100644 index 000000000000..a084a7675db2 --- /dev/null +++ b/core/src/services/dropbox/delete.rs @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +use super::core::*; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct DropboxDeleter { + core: Arc, +} + +impl DropboxDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for DropboxDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.dropbox_delete(&path).await?; + + let status = resp.status(); + + match status { + StatusCode::OK => Ok(()), + _ => { + let err = parse_error(resp); + match err.kind() { + ErrorKind::NotFound => Ok(()), + _ => Err(err), + } + } + } + } +} diff --git a/core/src/services/dropbox/mod.rs b/core/src/services/dropbox/mod.rs index 21397e855408..682a93074442 100644 --- a/core/src/services/dropbox/mod.rs +++ b/core/src/services/dropbox/mod.rs @@ -20,6 +20,8 @@ mod backend; #[cfg(feature = "services-dropbox")] mod core; #[cfg(feature = "services-dropbox")] +mod delete; +#[cfg(feature = "services-dropbox")] mod error; #[cfg(feature = "services-dropbox")] mod lister; diff --git a/core/src/services/fs/backend.rs b/core/src/services/fs/backend.rs index ae3ad891d1e7..d2ca09608b1b 100644 --- a/core/src/services/fs/backend.rs +++ b/core/src/services/fs/backend.rs @@ -24,6 +24,7 @@ use chrono::DateTime; use log::debug; use super::core::*; +use super::delete::FsDeleter; use super::lister::FsLister; use super::reader::FsReader; use super::writer::FsWriter; @@ -165,9 +166,11 @@ impl Access for FsBackend { type Reader = FsReader; type Writer = FsWriters; type Lister = Option>; + type Deleter = oio::OneShotDeleter; type BlockingReader = FsReader; type BlockingWriter = FsWriter; type BlockingLister = Option>; + type BlockingDeleter = oio::OneShotDeleter; fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -324,24 +327,11 @@ impl Access for FsBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let p = self.core.root.join(path.trim_end_matches('/')); - - let meta = tokio::fs::metadata(&p).await; - - match meta { - Ok(meta) => { - if meta.is_dir() { - tokio::fs::remove_dir(&p).await.map_err(new_std_io_error)?; - } else { - tokio::fs::remove_file(&p).await.map_err(new_std_io_error)?; - } - - Ok(RpDelete::default()) - } - Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(RpDelete::default()), - Err(err) => Err(new_std_io_error(err)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(FsDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, _: OpList) -> Result<(RpList, Self::Lister)> { @@ -493,24 +483,11 @@ impl Access for FsBackend { Ok((RpWrite::new(), FsWriter::new(target_path, tmp_path, f))) } - fn blocking_delete(&self, path: &str, _: OpDelete) -> Result { - let p = self.core.root.join(path.trim_end_matches('/')); - - let meta = std::fs::metadata(&p); - - match meta { - Ok(meta) => { - if meta.is_dir() { - std::fs::remove_dir(&p).map_err(new_std_io_error)?; - } else { - std::fs::remove_file(&p).map_err(new_std_io_error)?; - } - - Ok(RpDelete::default()) - } - Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(RpDelete::default()), - Err(err) => Err(new_std_io_error(err)), - } + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(FsDeleter::new(self.core.clone())), + )) } fn blocking_list(&self, path: &str, _: OpList) -> Result<(RpList, Self::BlockingLister)> { 
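
Every service touched by this patch follows the same two-step shape: the backend's `delete`/`blocking_delete` only construct a deleter, and the actual removal logic moves into a small `delete.rs` type implementing `oio::OneShotDelete` (or `oio::BatchDelete` for services with batch endpoints such as azblob and gcs). Below is a minimal sketch of that pattern; `MyCore` and its `remove` helper are hypothetical stand-ins, not types from this patch:

```rust
use std::sync::Arc;

use crate::raw::*;
use crate::*;

// Hypothetical core handle, standing in for FsCore, AzdlsCore, etc.
pub struct MyCore;

impl MyCore {
    // Illustrative only: pretend this issues the service's delete call.
    async fn remove(&self, _path: &str) -> Result<()> {
        Ok(())
    }
}

pub struct MyDeleter {
    core: Arc<MyCore>,
}

impl MyDeleter {
    pub fn new(core: Arc<MyCore>) -> Self {
        Self { core }
    }
}

impl oio::OneShotDelete for MyDeleter {
    async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> {
        // Deleting a missing object is treated as success, mirroring the
        // services in this patch.
        match self.core.remove(&path).await {
            Ok(()) => Ok(()),
            Err(err) if err.kind() == ErrorKind::NotFound => Ok(()),
            Err(err) => Err(err),
        }
    }
}
```

The backend wiring is then one line per method, as in the fs hunk above: declare `type Deleter = oio::OneShotDeleter<MyDeleter>;` and have `delete` return `Ok((RpDelete::default(), oio::OneShotDeleter::new(MyDeleter::new(self.core.clone()))))`.
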
diff --git a/core/src/services/fs/delete.rs b/core/src/services/fs/delete.rs new file mode 100644 index 000000000000..81f39d9bf80b --- /dev/null +++ b/core/src/services/fs/delete.rs @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use crate::raw::*; +use crate::*; +use std::sync::Arc; + +pub struct FsDeleter { + core: Arc, +} + +impl FsDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for FsDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let p = self.core.root.join(path.trim_end_matches('/')); + + let meta = tokio::fs::metadata(&p).await; + + match meta { + Ok(meta) => { + if meta.is_dir() { + tokio::fs::remove_dir(&p).await.map_err(new_std_io_error)?; + } else { + tokio::fs::remove_file(&p).await.map_err(new_std_io_error)?; + } + + Ok(()) + } + Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(()), + Err(err) => Err(new_std_io_error(err)), + } + } +} + +impl oio::BlockingOneShotDelete for FsDeleter { + fn blocking_delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let p = self.core.root.join(path.trim_end_matches('/')); + + let meta = std::fs::metadata(&p); + + match meta { + Ok(meta) => { + if meta.is_dir() { + std::fs::remove_dir(&p).map_err(new_std_io_error)?; + } else { + std::fs::remove_file(&p).map_err(new_std_io_error)?; + } + + Ok(()) + } + Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(()), + Err(err) => Err(new_std_io_error(err)), + } + } +} diff --git a/core/src/services/fs/mod.rs b/core/src/services/fs/mod.rs index bf914a75eaf7..caf858386391 100644 --- a/core/src/services/fs/mod.rs +++ b/core/src/services/fs/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-fs")] mod core; #[cfg(feature = "services-fs")] +mod delete; +#[cfg(feature = "services-fs")] mod lister; #[cfg(feature = "services-fs")] mod reader; diff --git a/core/src/services/ftp/backend.rs b/core/src/services/ftp/backend.rs index 85bb394687a8..460cdc815770 100644 --- a/core/src/services/ftp/backend.rs +++ b/core/src/services/ftp/backend.rs @@ -37,6 +37,7 @@ use suppaftp::Status; use tokio::sync::OnceCell; use uuid::Uuid; +use super::delete::FtpDeleter; use super::err::parse_error; use super::lister::FtpLister; use super::reader::FtpReader; @@ -259,9 +260,11 @@ impl Access for FtpBackend { type Reader = FtpReader; type Writer = FtpWriter; type Lister = FtpLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -379,27 +382,11 @@ impl Access for FtpBackend { Ok((RpWrite::new(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let mut 
ftp_stream = self.ftp_connect(Operation::Delete).await?; - - let result = if path.ends_with('/') { - ftp_stream.rmdir(&path).await - } else { - ftp_stream.rm(&path).await - }; - - match result { - Err(FtpError::UnexpectedResponse(Response { - status: Status::FileUnavailable, - .. - })) - | Ok(_) => (), - Err(e) => { - return Err(parse_error(e)); - } - } - - Ok(RpDelete::default()) + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(FtpDeleter::new(Arc::new(self.clone()))), + )) } async fn list(&self, path: &str, _: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/ftp/delete.rs b/core/src/services/ftp/delete.rs new file mode 100644 index 000000000000..12ebed770071 --- /dev/null +++ b/core/src/services/ftp/delete.rs @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::backend::FtpBackend; +use super::err::parse_error; +use crate::raw::*; +use crate::*; +use std::sync::Arc; +use suppaftp::types::Response; +use suppaftp::FtpError; +use suppaftp::Status; + +pub struct FtpDeleter { + core: Arc, +} + +impl FtpDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for FtpDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let mut ftp_stream = self.core.ftp_connect(Operation::Delete).await?; + + let result = if path.ends_with('/') { + ftp_stream.rmdir(&path).await + } else { + ftp_stream.rm(&path).await + }; + + match result { + Err(FtpError::UnexpectedResponse(Response { + status: Status::FileUnavailable, + .. + })) + | Ok(_) => (), + Err(e) => { + return Err(parse_error(e)); + } + } + + Ok(()) + } +} diff --git a/core/src/services/ftp/mod.rs b/core/src/services/ftp/mod.rs index e1b7440fac72..a6776710f492 100644 --- a/core/src/services/ftp/mod.rs +++ b/core/src/services/ftp/mod.rs @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. 
+#[cfg(feature = "services-ftp")] +mod delete; #[cfg(feature = "services-ftp")] mod err; #[cfg(feature = "services-ftp")] diff --git a/core/src/services/ftp/writer.rs b/core/src/services/ftp/writer.rs index 52ea9115f718..fd2549b04655 100644 --- a/core/src/services/ftp/writer.rs +++ b/core/src/services/ftp/writer.rs @@ -21,8 +21,8 @@ use futures::AsyncWrite; use futures::AsyncWriteExt; use super::backend::Manager; +use super::err::parse_error; use crate::raw::*; -use crate::services::ftp::err::parse_error; use crate::*; pub struct FtpWriter { diff --git a/core/src/services/gcs/backend.rs b/core/src/services/gcs/backend.rs index a2c7ed910a68..2b5aac54dcf1 100644 --- a/core/src/services/gcs/backend.rs +++ b/core/src/services/gcs/backend.rs @@ -32,10 +32,12 @@ use serde::Deserialize; use serde_json; use super::core::*; +use super::delete::GcsDeleter; use super::error::parse_error; use super::lister::GcsLister; use super::writer::GcsWriter; use super::writer::GcsWriters; +use crate::raw::oio::BatchDeleter; use crate::raw::*; use crate::services::GcsConfig; use crate::*; @@ -341,9 +343,11 @@ impl Access for GcsBackend { type Reader = HttpBody; type Writer = GcsWriters; type Lister = oio::PageLister; + type Deleter = oio::BatchDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -463,15 +467,11 @@ impl Access for GcsBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.gcs_delete_object(path).await?; - - // deleting not existing objects is ok - if resp.status().is_success() || resp.status() == StatusCode::NOT_FOUND { - Ok(RpDelete::default()) - } else { - Err(parse_error(resp)) - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + BatchDeleter::new(GcsDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -518,65 +518,6 @@ impl Access for GcsBackend { parts.headers, ))) } - - async fn batch(&self, args: OpBatch) -> Result { - let ops = args.into_operation(); - if ops.len() > 100 { - return Err(Error::new( - ErrorKind::Unsupported, - "gcs services only allow delete less than 100 keys at once", - ) - .with_context("length", ops.len().to_string())); - } - - let paths: Vec = ops.into_iter().map(|(p, _)| p).collect(); - let resp = self.core.gcs_delete_objects(paths.clone()).await?; - - let status = resp.status(); - - if let StatusCode::OK = status { - let content_type = parse_content_type(resp.headers())?.ok_or_else(|| { - Error::new( - ErrorKind::Unexpected, - "gcs batch delete response content type is empty", - ) - })?; - let boundary = content_type - .strip_prefix("multipart/mixed; boundary=") - .ok_or_else(|| { - Error::new( - ErrorKind::Unexpected, - "gcs batch delete response content type is not multipart/mixed", - ) - })? - .trim_matches('"'); - let multipart: Multipart = Multipart::new() - .with_boundary(boundary) - .parse(resp.into_body().to_bytes())?; - let parts = multipart.into_parts(); - - let mut batched_result = Vec::with_capacity(parts.len()); - - for (i, part) in parts.into_iter().enumerate() { - let resp = part.into_response(); - // TODO: maybe we can take it directly? 
- let path = paths[i].clone(); - - // deleting not existing objects is ok - if resp.status().is_success() || resp.status() == StatusCode::NOT_FOUND { - batched_result.push((path, Ok(RpDelete::default().into()))); - } else { - batched_result.push((path, Err(parse_error(resp)))); - } - } - - Ok(RpBatch::new(batched_result)) - } else { - // If the overall request isn't formatted correctly and Cloud Storage is unable to parse it into sub-requests, you receive a 400 error. - // Otherwise, Cloud Storage returns a 200 status code, even if some or all of the sub-requests fail. - Err(parse_error(resp)) - } - } } /// The raw json response returned by [`get`](https://cloud.google.com/storage/docs/json_api/v1/objects/get) diff --git a/core/src/services/gcs/delete.rs b/core/src/services/gcs/delete.rs new file mode 100644 index 000000000000..241b6152edc3 --- /dev/null +++ b/core/src/services/gcs/delete.rs @@ -0,0 +1,96 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::oio::BatchDeleteResult; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct GcsDeleter { + core: Arc, +} + +impl GcsDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::BatchDelete for GcsDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.gcs_delete_object(&path).await?; + + // deleting not existing objects is ok + if resp.status().is_success() || resp.status() == StatusCode::NOT_FOUND { + Ok(()) + } else { + Err(parse_error(resp)) + } + } + + async fn delete_batch(&self, batch: Vec<(String, OpDelete)>) -> Result { + let paths: Vec = batch.into_iter().map(|(p, _)| p).collect(); + let resp = self.core.gcs_delete_objects(paths.clone()).await?; + + let status = resp.status(); + + // If the overall request isn't formatted correctly and Cloud Storage is unable to parse it into sub-requests, you receive a 400 error. + // Otherwise, Cloud Storage returns a 200 status code, even if some or all of the sub-requests fail. + if status != StatusCode::OK { + return Err(parse_error(resp)); + } + + let boundary = parse_multipart_boundary(resp.headers())?.ok_or_else(|| { + Error::new( + ErrorKind::Unexpected, + "gcs batch delete response content type is empty", + ) + })?; + let multipart: Multipart = Multipart::new() + .with_boundary(boundary) + .parse(resp.into_body().to_bytes())?; + let parts = multipart.into_parts(); + + let mut batched_result = BatchDeleteResult::default(); + + for (i, part) in parts.into_iter().enumerate() { + let resp = part.into_response(); + // TODO: maybe we can take it directly? 
+ let path = paths[i].clone(); + + // deleting not existing objects is ok + if resp.status().is_success() || resp.status() == StatusCode::NOT_FOUND { + batched_result.succeeded.push((path, OpDelete::default())); + } else { + batched_result + .failed + .push((path, OpDelete::default(), parse_error(resp))); + } + } + + // If no object is deleted, return directly. + if batched_result.succeeded.is_empty() { + let err = batched_result.failed.remove(0).2; + return Err(err); + } + + Ok(batched_result) + } +} diff --git a/core/src/services/gcs/mod.rs b/core/src/services/gcs/mod.rs index 2f260dd081bb..d0372f8d7f6d 100644 --- a/core/src/services/gcs/mod.rs +++ b/core/src/services/gcs/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-gcs")] mod core; #[cfg(feature = "services-gcs")] +mod delete; +#[cfg(feature = "services-gcs")] mod error; #[cfg(feature = "services-gcs")] mod lister; diff --git a/core/src/services/gdrive/backend.rs b/core/src/services/gdrive/backend.rs index c0d118ded346..0fc6f7e24e60 100644 --- a/core/src/services/gdrive/backend.rs +++ b/core/src/services/gdrive/backend.rs @@ -28,6 +28,7 @@ use serde_json::json; use super::core::GdriveCore; use super::core::GdriveFile; +use super::delete::GdriveDeleter; use super::error::parse_error; use super::lister::GdriveLister; use super::writer::GdriveWriter; @@ -43,9 +44,11 @@ impl Access for GdriveBackend { type Reader = HttpBody; type Writer = oio::OneShotWriter; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut ma = AccessorInfo::default(); @@ -141,24 +144,11 @@ impl Access for GdriveBackend { )) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let path = build_abs_path(&self.core.root, path); - let file_id = self.core.path_cache.get(&path).await?; - let file_id = if let Some(id) = file_id { - id - } else { - return Ok(RpDelete::default()); - }; - - let resp = self.core.gdrive_trash(&file_id).await?; - let status = resp.status(); - if status != StatusCode::OK { - return Err(parse_error(resp)); - } - - self.core.path_cache.remove(&path).await; - - Ok(RpDelete::default()) + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(GdriveDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, _args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/gdrive/builder.rs b/core/src/services/gdrive/builder.rs index 5c6c18712345..6ee4a997bf97 100644 --- a/core/src/services/gdrive/builder.rs +++ b/core/src/services/gdrive/builder.rs @@ -25,13 +25,13 @@ use log::debug; use tokio::sync::Mutex; use super::backend::GdriveBackend; +use super::core::GdriveCore; +use super::core::GdrivePathQuery; +use super::core::GdriveSigner; use crate::raw::normalize_root; use crate::raw::Access; use crate::raw::HttpClient; use crate::raw::PathCacher; -use crate::services::gdrive::core::GdriveCore; -use crate::services::gdrive::core::GdrivePathQuery; -use crate::services::gdrive::core::GdriveSigner; use crate::services::GdriveConfig; use crate::Scheme; use crate::*; diff --git a/core/src/services/gdrive/delete.rs b/core/src/services/gdrive/delete.rs new file mode 100644 index 000000000000..129387458cce --- /dev/null +++ b/core/src/services/gdrive/delete.rs @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct GdriveDeleter { + core: Arc, +} + +impl GdriveDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for GdriveDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let path = build_abs_path(&self.core.root, &path); + let file_id = self.core.path_cache.get(&path).await?; + let file_id = if let Some(id) = file_id { + id + } else { + return Ok(()); + }; + + let resp = self.core.gdrive_trash(&file_id).await?; + let status = resp.status(); + if status != StatusCode::OK { + return Err(parse_error(resp)); + } + + self.core.path_cache.remove(&path).await; + + Ok(()) + } +} diff --git a/core/src/services/gdrive/mod.rs b/core/src/services/gdrive/mod.rs index a98995cb6b88..4b6ce70fa24c 100644 --- a/core/src/services/gdrive/mod.rs +++ b/core/src/services/gdrive/mod.rs @@ -20,6 +20,8 @@ mod backend; #[cfg(feature = "services-gdrive")] mod core; #[cfg(feature = "services-gdrive")] +mod delete; +#[cfg(feature = "services-gdrive")] mod error; #[cfg(feature = "services-gdrive")] mod lister; diff --git a/core/src/services/ghac/backend.rs b/core/src/services/ghac/backend.rs index 7035d6835738..adc1ee6c8adc 100644 --- a/core/src/services/ghac/backend.rs +++ b/core/src/services/ghac/backend.rs @@ -34,6 +34,7 @@ use log::debug; use serde::Deserialize; use serde::Serialize; +use super::delete::GhacDeleter; use super::error::parse_error; use super::writer::GhacWriter; use crate::raw::*; @@ -220,7 +221,7 @@ pub struct GhacBackend { version: String, api_url: String, - api_token: String, + pub api_token: String, repo: String, pub client: HttpClient, @@ -230,9 +231,11 @@ impl Access for GhacBackend { type Reader = HttpBody; type Writer = GhacWriter; type Lister = (); + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -345,22 +348,11 @@ impl Access for GhacBackend { Ok((RpWrite::default(), GhacWriter::new(self.clone(), cache_id))) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - if self.api_token.is_empty() { - return Err(Error::new( - ErrorKind::PermissionDenied, - "github token is not configured, delete is permission denied", - )); - } - - let resp = self.ghac_delete(path).await?; - - // deleting not existing objects is ok - if resp.status().is_success() || resp.status() == StatusCode::NOT_FOUND { - Ok(RpDelete::default()) - } else { - Err(parse_error(resp)) - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(GhacDeleter::new(self.clone())), + )) } } @@ -463,7 +455,7 
@@ impl GhacBackend { Ok(req) } - async fn ghac_delete(&self, path: &str) -> Result> { + pub async fn ghac_delete(&self, path: &str) -> Result> { let p = build_abs_path(&self.root, path); let url = format!( diff --git a/core/src/services/ghac/delete.rs b/core/src/services/ghac/delete.rs new file mode 100644 index 000000000000..3b62c28ce357 --- /dev/null +++ b/core/src/services/ghac/delete.rs @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::backend::GhacBackend; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; + +pub struct GhacDeleter { + core: GhacBackend, +} + +impl GhacDeleter { + pub fn new(core: GhacBackend) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for GhacDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + if self.core.api_token.is_empty() { + return Err(Error::new( + ErrorKind::PermissionDenied, + "github token is not configured, delete is permission denied", + )); + } + + let resp = self.core.ghac_delete(&path).await?; + + // deleting not existing objects is ok + if resp.status().is_success() || resp.status() == StatusCode::NOT_FOUND { + Ok(()) + } else { + Err(parse_error(resp)) + } + } +} diff --git a/core/src/services/ghac/mod.rs b/core/src/services/ghac/mod.rs index fabc5bcc15a3..6b9712a81f29 100644 --- a/core/src/services/ghac/mod.rs +++ b/core/src/services/ghac/mod.rs @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. 
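Note: services with a native multi-object delete endpoint (gcs above, oss later in this patch) implement `oio::BatchDelete` instead: `delete_once` for single paths plus `delete_batch`, which reports per-path outcomes through `BatchDeleteResult`. A rough sketch of that trait's shape with a hypothetical core; the actual request and response handling is elided:

```rust
use std::sync::Arc;

use crate::raw::oio::BatchDeleteResult;
use crate::raw::*;
use crate::*;

/// Hypothetical batch-capable core, standing in for GcsCore or OssCore.
pub struct ExampleCore;

pub struct ExampleBatchDeleter {
    core: Arc<ExampleCore>,
}

impl oio::BatchDelete for ExampleBatchDeleter {
    async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> {
        // Single-path fallback: issue one delete request for `path`.
        let _ = (&self.core, path);
        Ok(())
    }

    async fn delete_batch(&self, batch: Vec<(String, OpDelete)>) -> Result<BatchDeleteResult> {
        // A real service sends one multi-delete request, then sorts each path
        // into `succeeded` or `failed`, as the gcs deleter above does.
        let mut result = BatchDeleteResult::default();
        for (path, op) in batch {
            result.succeeded.push((path, op));
        }
        Ok(result)
    }
}
```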
+#[cfg(feature = "services-ghac")] +mod delete; #[cfg(feature = "services-ghac")] mod error; #[cfg(feature = "services-ghac")] diff --git a/core/src/services/github/backend.rs b/core/src/services/github/backend.rs index 867879010f66..11b085987d48 100644 --- a/core/src/services/github/backend.rs +++ b/core/src/services/github/backend.rs @@ -26,6 +26,7 @@ use log::debug; use super::core::Entry; use super::core::GithubCore; +use super::delete::GithubDeleter; use super::error::parse_error; use super::lister::GithubLister; use super::writer::GithubWriter; @@ -169,16 +170,13 @@ pub struct GithubBackend { impl Access for GithubBackend { type Reader = HttpBody; - type Writer = GithubWriters; - type Lister = oio::PageLister; - + type Deleter = oio::OneShotDeleter; type BlockingReader = (); - type BlockingWriter = (); - type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -273,11 +271,11 @@ impl Access for GithubBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - match self.core.delete(path).await { - Ok(_) => Ok(RpDelete::default()), - Err(err) => Err(err), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(GithubDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/github/delete.rs b/core/src/services/github/delete.rs new file mode 100644 index 000000000000..8f246d7e6eb4 --- /dev/null +++ b/core/src/services/github/delete.rs @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
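Note: on the accessor side every service in this patch gains the same two associated types; async-only backends point `BlockingDeleter` at `()`, and the new `delete()` takes no path at all, since paths now flow through the deleter. A skeleton of that wiring, reusing the hypothetical `ExampleCore`/`ExampleDeleter` from the earlier sketch:

```rust
use std::sync::Arc;

use crate::raw::*;
use crate::*;

/// Hypothetical async-only backend; mirrors the wiring used by github, koofr, etc.
#[derive(Debug, Clone)]
pub struct ExampleBackend {
    core: Arc<ExampleCore>,
}

impl Access for ExampleBackend {
    // Reader/writer/lister wiring elided; `()` marks them unsupported here.
    type Reader = ();
    type Writer = ();
    type Lister = ();
    type Deleter = oio::OneShotDeleter<ExampleDeleter>;
    type BlockingReader = ();
    type BlockingWriter = ();
    type BlockingLister = ();
    type BlockingDeleter = ();

    fn info(&self) -> Arc<AccessorInfo> {
        // A real backend fills in scheme, root and capabilities here.
        AccessorInfo::default().into()
    }

    async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> {
        Ok((
            RpDelete::default(),
            oio::OneShotDeleter::new(ExampleDeleter::new(self.core.clone())),
        ))
    }
}
```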
+ +use super::core::*; +use crate::raw::*; +use crate::*; +use std::sync::Arc; + +pub struct GithubDeleter { + core: Arc, +} + +impl GithubDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for GithubDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + match self.core.delete(&path).await { + Ok(_) => Ok(()), + Err(err) => Err(err), + } + } +} diff --git a/core/src/services/github/mod.rs b/core/src/services/github/mod.rs index bf1b1c5a4b9b..d0ae3eda94ec 100644 --- a/core/src/services/github/mod.rs +++ b/core/src/services/github/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-github")] mod core; #[cfg(feature = "services-github")] +mod delete; +#[cfg(feature = "services-github")] mod error; #[cfg(feature = "services-github")] mod lister; diff --git a/core/src/services/hdfs/backend.rs b/core/src/services/hdfs/backend.rs index 403c78957c2f..da5039437dda 100644 --- a/core/src/services/hdfs/backend.rs +++ b/core/src/services/hdfs/backend.rs @@ -25,10 +25,11 @@ use std::sync::Arc; use log::debug; use uuid::Uuid; +use super::delete::HdfsDeleter; use super::lister::HdfsLister; +use super::reader::HdfsReader; use super::writer::HdfsWriter; use crate::raw::*; -use crate::services::hdfs::reader::HdfsReader; use crate::services::HdfsConfig; use crate::*; @@ -192,9 +193,9 @@ fn tmp_file_of(path: &str) -> String { /// Backend for hdfs services. #[derive(Debug, Clone)] pub struct HdfsBackend { - root: String, + pub root: String, atomic_write_dir: Option, - client: Arc, + pub client: Arc, enable_append: bool, } @@ -206,9 +207,11 @@ impl Access for HdfsBackend { type Reader = HdfsReader; type Writer = HdfsWriter; type Lister = Option; + type Deleter = oio::OneShotDeleter; type BlockingReader = HdfsReader; type BlockingWriter = HdfsWriter; type BlockingLister = Option; + type BlockingDeleter = oio::OneShotDeleter; fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -342,31 +345,11 @@ impl Access for HdfsBackend { )) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let p = build_rooted_abs_path(&self.root, path); - - let meta = self.client.metadata(&p); - - if let Err(err) = meta { - return if err.kind() == io::ErrorKind::NotFound { - Ok(RpDelete::default()) - } else { - Err(new_std_io_error(err)) - }; - } - - // Safety: Err branch has been checked, it's OK to unwrap. - let meta = meta.ok().unwrap(); - - let result = if meta.is_dir() { - self.client.remove_dir(&p) - } else { - self.client.remove_file(&p) - }; - - result.map_err(new_std_io_error)?; - - Ok(RpDelete::default()) + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(HdfsDeleter::new(Arc::new(self.clone()))), + )) } async fn list(&self, path: &str, _: OpList) -> Result<(RpList, Self::Lister)> { @@ -536,31 +519,11 @@ impl Access for HdfsBackend { )) } - fn blocking_delete(&self, path: &str, _: OpDelete) -> Result { - let p = build_rooted_abs_path(&self.root, path); - - let meta = self.client.metadata(&p); - - if let Err(err) = meta { - return if err.kind() == io::ErrorKind::NotFound { - Ok(RpDelete::default()) - } else { - Err(new_std_io_error(err)) - }; - } - - // Safety: Err branch has been checked, it's OK to unwrap. 
- let meta = meta.ok().unwrap(); - - let result = if meta.is_dir() { - self.client.remove_dir(&p) - } else { - self.client.remove_file(&p) - }; - - result.map_err(new_std_io_error)?; - - Ok(RpDelete::default()) + fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(HdfsDeleter::new(Arc::new(self.clone()))), + )) } fn blocking_list(&self, path: &str, _: OpList) -> Result<(RpList, Self::BlockingLister)> { diff --git a/core/src/services/hdfs/delete.rs b/core/src/services/hdfs/delete.rs new file mode 100644 index 000000000000..007c5ad69cec --- /dev/null +++ b/core/src/services/hdfs/delete.rs @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::backend::HdfsBackend; +use crate::raw::*; +use crate::*; +use std::io; +use std::sync::Arc; + +pub struct HdfsDeleter { + core: Arc, +} + +impl HdfsDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for HdfsDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let p = build_rooted_abs_path(&self.core.root, &path); + + let meta = self.core.client.metadata(&p); + + if let Err(err) = meta { + return if err.kind() == io::ErrorKind::NotFound { + Ok(()) + } else { + Err(new_std_io_error(err)) + }; + } + + // Safety: Err branch has been checked, it's OK to unwrap. + let meta = meta.ok().unwrap(); + + let result = if meta.is_dir() { + self.core.client.remove_dir(&p) + } else { + self.core.client.remove_file(&p) + }; + + result.map_err(new_std_io_error)?; + + Ok(()) + } +} + +impl oio::BlockingOneShotDelete for HdfsDeleter { + fn blocking_delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let p = build_rooted_abs_path(&self.core.root, &path); + + let meta = self.core.client.metadata(&p); + + if let Err(err) = meta { + return if err.kind() == io::ErrorKind::NotFound { + Ok(()) + } else { + Err(new_std_io_error(err)) + }; + } + + // Safety: Err branch has been checked, it's OK to unwrap. + let meta = meta.ok().unwrap(); + + let result = if meta.is_dir() { + self.core.client.remove_dir(&p) + } else { + self.core.client.remove_file(&p) + }; + + result.map_err(new_std_io_error)?; + + Ok(()) + } +} diff --git a/core/src/services/hdfs/mod.rs b/core/src/services/hdfs/mod.rs index 1b4a4eb77d05..e93e2776cbf7 100644 --- a/core/src/services/hdfs/mod.rs +++ b/core/src/services/hdfs/mod.rs @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. 
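Note: services that also serve blocking calls (fs, hdfs) pair the async trait with `oio::BlockingOneShotDelete` on the same deleter type, as the hdfs deleter above does, and the backend exposes it twice. A trimmed sketch of just the blocking half, alongside the async impl from the earlier sketch:

```rust
use std::sync::Arc;

use crate::raw::*;
use crate::*;

/// Hypothetical deleter shared by the async and blocking paths;
/// the async `oio::OneShotDelete` impl is assumed to exist as sketched earlier.
pub struct ExampleDeleter {
    core: Arc<ExampleCore>,
}

impl oio::BlockingOneShotDelete for ExampleDeleter {
    fn blocking_delete_once(&self, path: String, _: OpDelete) -> Result<()> {
        // Same logic as `delete_once`, just using the blocking client calls.
        let _ = (&self.core, path);
        Ok(())
    }
}

// On the backend, the blocking constructor mirrors the async one:
//
// fn blocking_delete(&self) -> Result<(RpDelete, Self::BlockingDeleter)> {
//     Ok((
//         RpDelete::default(),
//         oio::OneShotDeleter::new(ExampleDeleter::new(Arc::new(self.clone()))),
//     ))
// }
```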
+#[cfg(feature = "services-hdfs")] +mod delete; #[cfg(feature = "services-hdfs")] mod lister; #[cfg(feature = "services-hdfs")] diff --git a/core/src/services/hdfs_native/backend.rs b/core/src/services/hdfs_native/backend.rs index 11a43a0378f2..3884a5f16303 100644 --- a/core/src/services/hdfs_native/backend.rs +++ b/core/src/services/hdfs_native/backend.rs @@ -22,6 +22,7 @@ use std::sync::Arc; use hdfs_native::WriteOptions; use log::debug; +use super::delete::HdfsNativeDeleter; use super::error::parse_hdfs_error; use super::lister::HdfsNativeLister; use super::reader::HdfsNativeReader; @@ -132,8 +133,8 @@ impl Builder for HdfsNativeBuilder { /// Backend for hdfs-native services. #[derive(Debug, Clone)] pub struct HdfsNativeBackend { - root: String, - client: Arc, + pub root: String, + pub client: Arc, _enable_append: bool, } @@ -143,11 +144,13 @@ unsafe impl Sync for HdfsNativeBackend {} impl Access for HdfsNativeBackend { type Reader = HdfsNativeReader; - type BlockingReader = (); type Writer = HdfsNativeWriter; - type BlockingWriter = (); type Lister = Option; + type Deleter = oio::OneShotDeleter; + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -158,7 +161,6 @@ impl Access for HdfsNativeBackend { delete: true, rename: true, - blocking: true, shared: true, @@ -178,42 +180,6 @@ impl Access for HdfsNativeBackend { Ok(RpCreateDir::default()) } - async fn read(&self, path: &str, _args: OpRead) -> Result<(RpRead, Self::Reader)> { - let p = build_rooted_abs_path(&self.root, path); - - let f = self.client.read(&p).await.map_err(parse_hdfs_error)?; - - let r = HdfsNativeReader::new(f); - - Ok((RpRead::new(), r)) - } - - async fn write(&self, path: &str, _args: OpWrite) -> Result<(RpWrite, Self::Writer)> { - let p = build_rooted_abs_path(&self.root, path); - - let f = self - .client - .create(&p, WriteOptions::default()) - .await - .map_err(parse_hdfs_error)?; - - let w = HdfsNativeWriter::new(f); - - Ok((RpWrite::new(), w)) - } - - async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result { - let from_path = build_rooted_abs_path(&self.root, from); - let to_path = build_rooted_abs_path(&self.root, to); - - self.client - .rename(&from_path, &to_path, false) - .await - .map_err(parse_hdfs_error)?; - - Ok(RpRename::default()) - } - async fn stat(&self, path: &str, _args: OpStat) -> Result { let p = build_rooted_abs_path(&self.root, path); @@ -239,15 +205,35 @@ impl Access for HdfsNativeBackend { Ok(RpStat::new(metadata)) } - async fn delete(&self, path: &str, _args: OpDelete) -> Result { + async fn read(&self, path: &str, _args: OpRead) -> Result<(RpRead, Self::Reader)> { let p = build_rooted_abs_path(&self.root, path); - self.client - .delete(&p, true) + let f = self.client.read(&p).await.map_err(parse_hdfs_error)?; + + let r = HdfsNativeReader::new(f); + + Ok((RpRead::new(), r)) + } + + async fn write(&self, path: &str, _args: OpWrite) -> Result<(RpWrite, Self::Writer)> { + let p = build_rooted_abs_path(&self.root, path); + + let f = self + .client + .create(&p, WriteOptions::default()) .await .map_err(parse_hdfs_error)?; - Ok(RpDelete::default()) + let w = HdfsNativeWriter::new(f); + + Ok((RpWrite::new(), w)) + } + + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(HdfsNativeDeleter::new(Arc::new(self.clone()))), + )) } async fn list(&self, path: &str, _args: OpList) -> Result<(RpList, 
Self::Lister)> { @@ -255,4 +241,16 @@ impl Access for HdfsNativeBackend { let l = HdfsNativeLister::new(p, self.client.clone()); Ok((RpList::default(), Some(l))) } + + async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result { + let from_path = build_rooted_abs_path(&self.root, from); + let to_path = build_rooted_abs_path(&self.root, to); + + self.client + .rename(&from_path, &to_path, false) + .await + .map_err(parse_hdfs_error)?; + + Ok(RpRename::default()) + } } diff --git a/core/src/services/hdfs_native/delete.rs b/core/src/services/hdfs_native/delete.rs new file mode 100644 index 000000000000..7b438dac37f1 --- /dev/null +++ b/core/src/services/hdfs_native/delete.rs @@ -0,0 +1,46 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::backend::HdfsNativeBackend; +use super::error::parse_hdfs_error; +use crate::raw::*; +use crate::*; +use std::sync::Arc; + +pub struct HdfsNativeDeleter { + core: Arc, +} + +impl HdfsNativeDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for HdfsNativeDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let p = build_rooted_abs_path(&self.core.root, &path); + + self.core + .client + .delete(&p, true) + .await + .map_err(parse_hdfs_error)?; + + Ok(()) + } +} diff --git a/core/src/services/hdfs_native/mod.rs b/core/src/services/hdfs_native/mod.rs index 0e758428591e..071f45dfb949 100644 --- a/core/src/services/hdfs_native/mod.rs +++ b/core/src/services/hdfs_native/mod.rs @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. 
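Note: the local-filesystem deleters (fs above, monoiofs below) share one piece of logic worth calling out: stat first, pick `remove_dir` or `remove_file` depending on the entry type, and treat a missing entry as success so deletes stay idempotent. A self-contained tokio version of that logic, independent of any OpenDAL types:

```rust
use std::io;
use std::path::Path;

/// Delete `path`, whether it is a file or a directory, and treat
/// "already gone" as success.
async fn delete_path(path: &Path) -> io::Result<()> {
    match tokio::fs::metadata(path).await {
        Ok(meta) if meta.is_dir() => tokio::fs::remove_dir(path).await,
        Ok(_) => tokio::fs::remove_file(path).await,
        Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(()),
        Err(err) => Err(err),
    }
}

#[tokio::main]
async fn main() -> io::Result<()> {
    delete_path(Path::new("/tmp/opendal-example-file")).await
}
```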
+#[cfg(feature = "services-hdfs-native")] +mod delete; #[cfg(feature = "services-hdfs-native")] mod error; #[cfg(feature = "services-hdfs-native")] @@ -30,4 +32,5 @@ mod backend; pub use backend::HdfsNativeBuilder as HdfsNative; mod config; + pub use config::HdfsNativeConfig; diff --git a/core/src/services/http/backend.rs b/core/src/services/http/backend.rs index e6b913d93eb6..4fd894d9946e 100644 --- a/core/src/services/http/backend.rs +++ b/core/src/services/http/backend.rs @@ -196,9 +196,11 @@ impl Access for HttpBackend { type Reader = HttpBody; type Writer = (); type Lister = (); + type Deleter = (); type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut ma = AccessorInfo::default(); diff --git a/core/src/services/huggingface/backend.rs b/core/src/services/huggingface/backend.rs index f70bdd6557c0..7be0ce152027 100644 --- a/core/src/services/huggingface/backend.rs +++ b/core/src/services/huggingface/backend.rs @@ -194,9 +194,11 @@ impl Access for HuggingfaceBackend { type Reader = HttpBody; type Writer = (); type Lister = oio::PageLister; + type Deleter = (); type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); diff --git a/core/src/services/icloud/backend.rs b/core/src/services/icloud/backend.rs index 2708703604f4..1ba0f2c3662f 100644 --- a/core/src/services/icloud/backend.rs +++ b/core/src/services/icloud/backend.rs @@ -226,11 +226,13 @@ pub struct IcloudBackend { impl Access for IcloudBackend { type Reader = HttpBody; - type BlockingReader = (); type Writer = (); - type BlockingWriter = (); type Lister = (); + type Deleter = (); + type BlockingReader = (); + type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut ma = AccessorInfo::default(); diff --git a/core/src/services/ipfs/backend.rs b/core/src/services/ipfs/backend.rs index 5ef62a1e470f..026549853386 100644 --- a/core/src/services/ipfs/backend.rs +++ b/core/src/services/ipfs/backend.rs @@ -164,9 +164,11 @@ impl Access for IpfsBackend { type Reader = HttpBody; type Writer = (); type Lister = oio::PageLister; + type Deleter = (); type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut ma = AccessorInfo::default(); diff --git a/core/src/services/ipmfs/backend.rs b/core/src/services/ipmfs/backend.rs index 97f6a0789318..1f1e63584f80 100644 --- a/core/src/services/ipmfs/backend.rs +++ b/core/src/services/ipmfs/backend.rs @@ -26,6 +26,7 @@ use http::Response; use http::StatusCode; use serde::Deserialize; +use super::delete::IpmfsDeleter; use super::error::parse_error; use super::lister::IpmfsLister; use super::writer::IpmfsWriter; @@ -64,9 +65,11 @@ impl Access for IpmfsBackend { type Reader = HttpBody; type Writer = oio::OneShotWriter; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -157,15 +160,11 @@ impl Access for IpmfsBackend { )) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.ipmfs_rm(path).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> 
Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(IpmfsDeleter::new(Arc::new(self.clone()))), + )) } async fn list(&self, path: &str, _: OpList) -> Result<(RpList, Self::Lister)> { @@ -210,7 +209,7 @@ impl IpmfsBackend { self.client.fetch(req).await } - async fn ipmfs_rm(&self, path: &str) -> Result> { + pub async fn ipmfs_rm(&self, path: &str) -> Result> { let p = build_rooted_abs_path(&self.root, path); let url = format!( diff --git a/core/src/services/ipmfs/delete.rs b/core/src/services/ipmfs/delete.rs new file mode 100644 index 000000000000..a4430379da4a --- /dev/null +++ b/core/src/services/ipmfs/delete.rs @@ -0,0 +1,46 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::backend::IpmfsBackend; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct IpmfsDeleter { + core: Arc, +} + +impl IpmfsDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for IpmfsDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.ipmfs_rm(&path).await?; + + let status = resp.status(); + + match status { + StatusCode::OK => Ok(()), + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/ipmfs/mod.rs b/core/src/services/ipmfs/mod.rs index b993c242deb0..4b6e9a5bc828 100644 --- a/core/src/services/ipmfs/mod.rs +++ b/core/src/services/ipmfs/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-ipmfs")] mod backend; #[cfg(feature = "services-ipmfs")] +mod delete; +#[cfg(feature = "services-ipmfs")] mod error; #[cfg(feature = "services-ipmfs")] mod lister; diff --git a/core/src/services/koofr/backend.rs b/core/src/services/koofr/backend.rs index 948226c962b0..e5af50e351af 100644 --- a/core/src/services/koofr/backend.rs +++ b/core/src/services/koofr/backend.rs @@ -29,6 +29,7 @@ use tokio::sync::OnceCell; use super::core::File; use super::core::KoofrCore; use super::core::KoofrSigner; +use super::delete::KoofrDeleter; use super::error::parse_error; use super::lister::KoofrLister; use super::writer::KoofrWriter; @@ -198,9 +199,11 @@ impl Access for KoofrBackend { type Reader = HttpBody; type Writer = KoofrWriters; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -295,17 +298,11 @@ impl Access for KoofrBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.remove(path).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => Ok(RpDelete::default()), - // Allow 404 
when deleting a non-existing object - StatusCode::NOT_FOUND => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(KoofrDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, _args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/koofr/delete.rs b/core/src/services/koofr/delete.rs new file mode 100644 index 000000000000..a91bf879d296 --- /dev/null +++ b/core/src/services/koofr/delete.rs @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct KoofrDeleter { + core: Arc, +} + +impl KoofrDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for KoofrDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.remove(&path).await?; + + let status = resp.status(); + + match status { + StatusCode::OK => Ok(()), + // Allow 404 when deleting a non-existing object + StatusCode::NOT_FOUND => Ok(()), + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/koofr/mod.rs b/core/src/services/koofr/mod.rs index c73c4dcbe7ff..255d6ff72aa9 100644 --- a/core/src/services/koofr/mod.rs +++ b/core/src/services/koofr/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-koofr")] mod core; #[cfg(feature = "services-koofr")] +mod delete; +#[cfg(feature = "services-koofr")] mod error; #[cfg(feature = "services-koofr")] mod lister; diff --git a/core/src/services/lakefs/backend.rs b/core/src/services/lakefs/backend.rs index d3086b3786d0..084ca58343a6 100644 --- a/core/src/services/lakefs/backend.rs +++ b/core/src/services/lakefs/backend.rs @@ -28,6 +28,7 @@ use log::debug; use super::core::LakefsCore; use super::core::LakefsStatus; +use super::delete::LakefsDeleter; use super::error::parse_error; use super::lister::LakefsLister; use super::writer::LakefsWriter; @@ -197,9 +198,11 @@ impl Access for LakefsBackend { type Reader = HttpBody; type Writer = oio::OneShotWriter; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -289,21 +292,11 @@ impl Access for LakefsBackend { )) } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - // This would delete the bucket, do not perform - if self.core.root == "/" && path == "/" { - return Ok(RpDelete::default()); - } - - let resp = self.core.delete_object(path, &args).await?; - - let status = resp.status(); - 
- match status { - StatusCode::NO_CONTENT => Ok(RpDelete::default()), - StatusCode::NOT_FOUND => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(LakefsDeleter::new(self.core.clone())), + )) } async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { diff --git a/core/src/services/lakefs/delete.rs b/core/src/services/lakefs/delete.rs new file mode 100644 index 000000000000..29f16964fcdc --- /dev/null +++ b/core/src/services/lakefs/delete.rs @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct LakefsDeleter { + core: Arc, +} + +impl LakefsDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for LakefsDeleter { + async fn delete_once(&self, path: String, args: OpDelete) -> Result<()> { + // This would delete the bucket, do not perform + if self.core.root == "/" && path == "/" { + return Ok(()); + } + + let resp = self.core.delete_object(&path, &args).await?; + + let status = resp.status(); + + match status { + StatusCode::NO_CONTENT => Ok(()), + StatusCode::NOT_FOUND => Ok(()), + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/lakefs/mod.rs b/core/src/services/lakefs/mod.rs index 6762ebbfda40..01d161d46fd8 100644 --- a/core/src/services/lakefs/mod.rs +++ b/core/src/services/lakefs/mod.rs @@ -18,11 +18,11 @@ #[cfg(feature = "services-lakefs")] mod core; #[cfg(feature = "services-lakefs")] +mod delete; +#[cfg(feature = "services-lakefs")] mod error; - #[cfg(feature = "services-lakefs")] mod lister; - #[cfg(feature = "services-lakefs")] mod writer; @@ -32,5 +32,4 @@ mod backend; pub use backend::LakefsBuilder as Lakefs; mod config; - pub use config::LakefsConfig; diff --git a/core/src/services/lakefs/writer.rs b/core/src/services/lakefs/writer.rs index a4c22b24a46f..2aa087a36b66 100644 --- a/core/src/services/lakefs/writer.rs +++ b/core/src/services/lakefs/writer.rs @@ -19,9 +19,9 @@ use std::sync::Arc; use http::StatusCode; +use super::core::LakefsCore; use super::error::parse_error; use crate::raw::*; -use crate::services::lakefs::core::LakefsCore; use crate::*; pub struct LakefsWriter { diff --git a/core/src/services/memory/backend.rs b/core/src/services/memory/backend.rs index a6475ab8a1e3..cce6c7840f7c 100644 --- a/core/src/services/memory/backend.rs +++ b/core/src/services/memory/backend.rs @@ -145,9 +145,7 @@ impl typed_kv::Adapter for Adapter { #[cfg(test)] mod tests { use super::*; - use crate::raw::adapters::typed_kv::Adapter; - use crate::raw::adapters::typed_kv::Value; - 
use crate::services::memory::backend; + use crate::raw::adapters::typed_kv::{Adapter, Value}; #[test] fn test_accessor_metadata_name() { @@ -160,7 +158,7 @@ mod tests { #[test] fn test_blocking_scan() { - let adapter = backend::Adapter { + let adapter = super::Adapter { inner: Arc::new(Mutex::new(BTreeMap::default())), }; diff --git a/core/src/services/monoiofs/backend.rs b/core/src/services/monoiofs/backend.rs index 8fc139386e9c..863fbe668087 100644 --- a/core/src/services/monoiofs/backend.rs +++ b/core/src/services/monoiofs/backend.rs @@ -25,6 +25,7 @@ use monoio::fs::OpenOptions; use super::core::MonoiofsCore; use super::core::BUFFER_SIZE; +use super::delete::MonoiofsDeleter; use super::reader::MonoiofsReader; use super::writer::MonoiofsWriter; use crate::raw::*; @@ -104,9 +105,11 @@ impl Access for MonoiofsBackend { type Reader = MonoiofsReader; type Writer = MonoiofsWriter; type Lister = (); + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -163,34 +166,11 @@ impl Access for MonoiofsBackend { Ok((RpWrite::default(), writer)) } - async fn delete(&self, path: &str, _args: OpDelete) -> Result { - let path = self.core.prepare_path(path); - let meta = self - .core - .dispatch({ - let path = path.clone(); - move || monoio::fs::metadata(path) - }) - .await; - match meta { - Ok(meta) => { - if meta.is_dir() { - self.core - .dispatch(move || monoio::fs::remove_dir(path)) - .await - .map_err(new_std_io_error)?; - } else { - self.core - .dispatch(move || monoio::fs::remove_file(path)) - .await - .map_err(new_std_io_error)?; - } - - Ok(RpDelete::default()) - } - Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(RpDelete::default()), - Err(err) => Err(new_std_io_error(err)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(MonoiofsDeleter::new(self.core.clone())), + )) } async fn rename(&self, from: &str, to: &str, _args: OpRename) -> Result { diff --git a/core/src/services/monoiofs/delete.rs b/core/src/services/monoiofs/delete.rs new file mode 100644 index 000000000000..69e43eeb9055 --- /dev/null +++ b/core/src/services/monoiofs/delete.rs @@ -0,0 +1,63 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
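Note: the monoiofs deleter below differs only in how it reaches the filesystem: monoio file operations have to run on the monoio runtime, so the core's `dispatch` ships an owned closure to that runtime and awaits the result. A rough, self-contained approximation of that shape using `spawn_blocking` as a stand-in (this is not the actual `MonoiofsCore::dispatch`):

```rust
use std::io;
use std::path::PathBuf;

/// Rough stand-in for a `dispatch`-style helper: run an owned closure
/// off the async thread and await its result.
async fn dispatch<T, F>(f: F) -> T
where
    F: FnOnce() -> T + Send + 'static,
    T: Send + 'static,
{
    tokio::task::spawn_blocking(f)
        .await
        .expect("blocking task panicked")
}

async fn delete_file(path: PathBuf) -> io::Result<()> {
    // The path is moved into the closure, just like the monoiofs deleter
    // clones it before handing it to `dispatch`.
    dispatch(move || std::fs::remove_file(path)).await
}

#[tokio::main]
async fn main() {
    // A missing file simply yields a NotFound error here.
    let res = delete_file(PathBuf::from("/tmp/opendal-example-file")).await;
    println!("delete result: {res:?}");
}
```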
+ +use super::core::MonoiofsCore; +use crate::raw::*; +use crate::*; +use std::sync::Arc; + +pub struct MonoiofsDeleter { + core: Arc, +} + +impl MonoiofsDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for MonoiofsDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let path = self.core.prepare_path(&path); + let meta = self + .core + .dispatch({ + let path = path.clone(); + move || monoio::fs::metadata(path) + }) + .await; + match meta { + Ok(meta) => { + if meta.is_dir() { + self.core + .dispatch(move || monoio::fs::remove_dir(path)) + .await + .map_err(new_std_io_error)?; + } else { + self.core + .dispatch(move || monoio::fs::remove_file(path)) + .await + .map_err(new_std_io_error)?; + } + + Ok(()) + } + Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(()), + Err(err) => Err(new_std_io_error(err)), + } + } +} diff --git a/core/src/services/monoiofs/mod.rs b/core/src/services/monoiofs/mod.rs index e342397581c6..e1695488142d 100644 --- a/core/src/services/monoiofs/mod.rs +++ b/core/src/services/monoiofs/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-monoiofs")] mod core; #[cfg(feature = "services-monoiofs")] +mod delete; +#[cfg(feature = "services-monoiofs")] mod reader; #[cfg(feature = "services-monoiofs")] mod writer; diff --git a/core/src/services/obs/backend.rs b/core/src/services/obs/backend.rs index 82a41a192c15..36f852da8ba6 100644 --- a/core/src/services/obs/backend.rs +++ b/core/src/services/obs/backend.rs @@ -28,11 +28,12 @@ use reqsign::HuaweicloudObsCredentialLoader; use reqsign::HuaweicloudObsSigner; use super::core::ObsCore; +use super::delete::ObsDeleter; use super::error::parse_error; use super::lister::ObsLister; use super::writer::ObsWriter; +use super::writer::ObsWriters; use crate::raw::*; -use crate::services::obs::writer::ObsWriters; use crate::services::ObsConfig; use crate::*; @@ -242,9 +243,11 @@ impl Access for ObsBackend { type Reader = HttpBody; type Writer = ObsWriters; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -347,17 +350,11 @@ impl Access for ObsBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.obs_delete_object(path).await?; - - let status = resp.status(); - - match status { - StatusCode::NO_CONTENT | StatusCode::ACCEPTED | StatusCode::NOT_FOUND => { - Ok(RpDelete::default()) - } - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(ObsDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/obs/delete.rs b/core/src/services/obs/delete.rs new file mode 100644 index 000000000000..e592c00acb74 --- /dev/null +++ b/core/src/services/obs/delete.rs @@ -0,0 +1,46 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct ObsDeleter { + core: Arc, +} + +impl ObsDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for ObsDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.obs_delete_object(&path).await?; + + let status = resp.status(); + + match status { + StatusCode::NO_CONTENT | StatusCode::ACCEPTED | StatusCode::NOT_FOUND => Ok(()), + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/obs/mod.rs b/core/src/services/obs/mod.rs index 612a526889e0..3427da280148 100644 --- a/core/src/services/obs/mod.rs +++ b/core/src/services/obs/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-obs")] mod core; #[cfg(feature = "services-obs")] +mod delete; +#[cfg(feature = "services-obs")] mod error; #[cfg(feature = "services-obs")] mod lister; diff --git a/core/src/services/onedrive/backend.rs b/core/src/services/onedrive/backend.rs index ca2b34c9a84f..35af133a8e5c 100644 --- a/core/src/services/onedrive/backend.rs +++ b/core/src/services/onedrive/backend.rs @@ -25,6 +25,7 @@ use http::Request; use http::Response; use http::StatusCode; +use super::delete::OnedriveDeleter; use super::error::parse_error; use super::graph_model::CreateDirPayload; use super::graph_model::ItemType; @@ -65,9 +66,11 @@ impl Access for OnedriveBackend { type Reader = HttpBody; type Writer = oio::OneShotWriter; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut ma = AccessorInfo::default(); @@ -176,17 +179,11 @@ impl Access for OnedriveBackend { )) } - /// Delete operation - /// Documentation: https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_delete?view=odsp-graph-online - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.onedrive_delete(path).await?; - - let status = resp.status(); - - match status { - StatusCode::NO_CONTENT | StatusCode::NOT_FOUND => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(OnedriveDeleter::new(Arc::new(self.clone()))), + )) } async fn list(&self, path: &str, _op_list: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/onedrive/delete.rs b/core/src/services/onedrive/delete.rs new file mode 100644 index 000000000000..1e4dcdd36ede --- /dev/null +++ b/core/src/services/onedrive/delete.rs @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::backend::OnedriveBackend; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +/// Delete operation +/// Documentation: https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_delete?view=odsp-graph-online +pub struct OnedriveDeleter { + core: Arc, +} + +impl OnedriveDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for OnedriveDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.onedrive_delete(&path).await?; + + let status = resp.status(); + + match status { + StatusCode::NO_CONTENT | StatusCode::NOT_FOUND => Ok(()), + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/onedrive/mod.rs b/core/src/services/onedrive/mod.rs index 9168394bb766..cbe3a7ce0612 100644 --- a/core/src/services/onedrive/mod.rs +++ b/core/src/services/onedrive/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-onedrive")] mod backend; #[cfg(feature = "services-onedrive")] +mod delete; +#[cfg(feature = "services-onedrive")] mod error; #[cfg(feature = "services-onedrive")] mod graph_model; diff --git a/core/src/services/oss/backend.rs b/core/src/services/oss/backend.rs index 47018795fb36..0806c3767245 100644 --- a/core/src/services/oss/backend.rs +++ b/core/src/services/oss/backend.rs @@ -15,12 +15,10 @@ // specific language governing permissions and limitations // under the License. 
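Note: the oss changes that follow drop the hand-rolled `batch()` implementation, but the per-request size limit does not disappear: the backend now advertises it as `delete_max_size: Some(DEFAULT_BATCH_MAX_OPERATIONS)`, and the shared batching machinery is presumably expected to keep each `delete_batch` call within that bound. The splitting itself is plain chunking; a small illustrative helper:

```rust
/// Split pending delete paths into batches of at most `max_batch_size`,
/// e.g. the `delete_max_size` a service advertises.
fn into_batches(paths: Vec<String>, max_batch_size: usize) -> Vec<Vec<String>> {
    let mut batches = Vec::new();
    let mut current = Vec::new();
    for path in paths {
        current.push(path);
        if current.len() == max_batch_size {
            batches.push(std::mem::take(&mut current));
        }
    }
    if !current.is_empty() {
        batches.push(current);
    }
    batches
}

fn main() {
    let paths = (0..5).map(|i| format!("obj-{i}")).collect();
    // With a maximum of 2 per request this yields 3 batches: 2 + 2 + 1.
    for batch in into_batches(paths, 2) {
        println!("{batch:?}");
    }
}
```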
-use std::collections::HashSet; use std::fmt::Debug; use std::fmt::Formatter; use std::sync::Arc; -use bytes::Buf; use http::Response; use http::StatusCode; use http::Uri; @@ -30,11 +28,12 @@ use reqsign::AliyunLoader; use reqsign::AliyunOssSigner; use super::core::*; +use super::delete::OssDeleter; use super::error::parse_error; use super::lister::OssLister; use super::writer::OssWriter; +use super::writer::OssWriters; use crate::raw::*; -use crate::services::oss::writer::OssWriters; use crate::services::OssConfig; use crate::*; @@ -419,9 +418,11 @@ impl Access for OssBackend { type Reader = HttpBody; type Writer = OssWriters; type Lister = oio::PageLister; + type Deleter = oio::BatchDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -463,6 +464,7 @@ impl Access for OssBackend { write_with_user_metadata: true, delete: true, + delete_max_size: Some(DEFAULT_BATCH_MAX_OPERATIONS), copy: true, list: true, @@ -539,13 +541,11 @@ impl Access for OssBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - let resp = self.core.oss_delete_object(path, &args).await?; - let status = resp.status(); - match status { - StatusCode::NO_CONTENT | StatusCode::NOT_FOUND => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::BatchDeleter::new(OssDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -591,60 +591,4 @@ impl Access for OssBackend { parts.headers, ))) } - - async fn batch(&self, args: OpBatch) -> Result { - let ops = args.into_operation(); - // Sadly, OSS will not return failed keys, so we will build - // a set to calculate the failed keys. - let mut keys = HashSet::new(); - - let ops_len = ops.len(); - if ops_len > 1000 { - return Err(Error::new( - ErrorKind::Unsupported, - "oss services only allow delete up to 1000 keys at once", - ) - .with_context("length", ops_len.to_string())); - } - - let paths = ops - .into_iter() - .map(|(p, _)| { - keys.insert(p.clone()); - p - }) - .collect(); - - let resp = self.core.oss_delete_objects(paths).await?; - - let status = resp.status(); - - if let StatusCode::OK = status { - let bs = resp.into_body(); - - let result: DeleteObjectsResult = - quick_xml::de::from_reader(bs.reader()).map_err(new_xml_deserialize_error)?; - - let mut batched_result = Vec::with_capacity(ops_len); - for i in result.deleted { - let path = build_rel_path(&self.core.root, &i.key); - keys.remove(&path); - batched_result.push((path, Ok(RpDelete::default().into()))); - } - // TODO: we should handle those errors with code. - for i in keys { - batched_result.push(( - i, - Err(Error::new( - ErrorKind::Unexpected, - "oss delete this key failed for reason we don't know", - )), - )); - } - - Ok(RpBatch::new(batched_result)) - } else { - Err(parse_error(resp)) - } - } } diff --git a/core/src/services/oss/delete.rs b/core/src/services/oss/delete.rs new file mode 100644 index 000000000000..544cf242c5f1 --- /dev/null +++ b/core/src/services/oss/delete.rs @@ -0,0 +1,107 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::oio::BatchDeleteResult; +use crate::raw::*; +use crate::*; +use bytes::Buf; +use http::StatusCode; +use std::collections::HashSet; +use std::sync::Arc; + +pub struct OssDeleter { + core: Arc, +} + +impl OssDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::BatchDelete for OssDeleter { + async fn delete_once(&self, path: String, args: OpDelete) -> Result<()> { + let resp = self.core.oss_delete_object(&path, &args).await?; + + let status = resp.status(); + + match status { + StatusCode::NO_CONTENT | StatusCode::NOT_FOUND => Ok(()), + _ => Err(parse_error(resp)), + } + } + + async fn delete_batch(&self, batch: Vec<(String, OpDelete)>) -> Result { + // Sadly, OSS will not return failed keys, so we will build + // a set to calculate the failed keys. + let mut keys = HashSet::new(); + + let paths = batch + .into_iter() + .map(|(p, _)| { + keys.insert(p.clone()); + p + }) + .collect(); + + let resp = self.core.oss_delete_objects(paths).await?; + + let status = resp.status(); + + if status != StatusCode::OK { + return Err(parse_error(resp)); + } + + let bs = resp.into_body(); + + let result: DeleteObjectsResult = + quick_xml::de::from_reader(bs.reader()).map_err(new_xml_deserialize_error)?; + + if result.deleted.is_empty() { + return Err(Error::new( + ErrorKind::Unexpected, + "oss delete this key failed for reason we don't know", + )); + } + + let mut batched_result = BatchDeleteResult { + succeeded: Vec::with_capacity(result.deleted.len()), + failed: Vec::with_capacity(keys.len() - result.deleted.len()), + }; + + for i in result.deleted { + let path = build_rel_path(&self.core.root, &i.key); + keys.remove(&path); + batched_result.succeeded.push((path, OpDelete::default())); + } + // TODO: we should handle those errors with code. 
+ for i in keys { + batched_result.failed.push(( + i, + OpDelete::default(), + Error::new( + ErrorKind::Unexpected, + "oss delete this key failed for reason we don't know", + ), + )); + } + + Ok(batched_result) + } +} diff --git a/core/src/services/oss/mod.rs b/core/src/services/oss/mod.rs index b5f172e4a70c..19b280e7b4ec 100644 --- a/core/src/services/oss/mod.rs +++ b/core/src/services/oss/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-oss")] mod core; #[cfg(feature = "services-oss")] +mod delete; +#[cfg(feature = "services-oss")] mod error; #[cfg(feature = "services-oss")] mod lister; diff --git a/core/src/services/pcloud/backend.rs b/core/src/services/pcloud/backend.rs index 07078f8d2046..4702d8778b03 100644 --- a/core/src/services/pcloud/backend.rs +++ b/core/src/services/pcloud/backend.rs @@ -25,6 +25,7 @@ use http::StatusCode; use log::debug; use super::core::*; +use super::delete::PcloudDeleter; use super::error::parse_error; use super::error::PcloudError; use super::lister::PcloudLister; @@ -190,9 +191,11 @@ impl Access for PcloudBackend { type Reader = HttpBody; type Writer = PcloudWriters; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -282,31 +285,11 @@ impl Access for PcloudBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = if path.ends_with('/') { - self.core.delete_folder(path).await? - } else { - self.core.delete_file(path).await? - }; - - let status = resp.status(); - - match status { - StatusCode::OK => { - let bs = resp.into_body(); - let resp: PcloudError = - serde_json::from_reader(bs.reader()).map_err(new_json_deserialize_error)?; - let result = resp.result; - - // pCloud returns 2005 or 2009 if the file or folder is not found - if result != 0 && result != 2005 && result != 2009 { - return Err(Error::new(ErrorKind::Unexpected, format!("{resp:?}"))); - } - - Ok(RpDelete::default()) - } - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(PcloudDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, _args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/pcloud/delete.rs b/core/src/services/pcloud/delete.rs new file mode 100644 index 000000000000..5f0d240e0b20 --- /dev/null +++ b/core/src/services/pcloud/delete.rs @@ -0,0 +1,63 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
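The OSS `delete_batch` above also shows the batch side of the contract: the service only echoes back the keys it actually removed, so the deleter tracks the submitted keys in a `HashSet` and reports the leftovers as failures, ensuring every input ends up in either `succeeded` or `failed` of the returned `BatchDeleteResult`. A small sketch of that bookkeeping in isolation (the `acknowledged` set stands in for the keys parsed out of the service response; the `BatchDeleteResult` field shapes are taken from the code in this diff):

```rust
use std::collections::HashSet;

use crate::raw::oio::BatchDeleteResult;
use crate::raw::*;
use crate::*;

/// Split submitted deletes into succeeded/failed based on which keys the
/// service acknowledged, mirroring the bookkeeping in the OSS deleter above.
fn split_batch_result(
    submitted: Vec<(String, OpDelete)>,
    acknowledged: HashSet<String>,
) -> BatchDeleteResult {
    let mut result = BatchDeleteResult {
        succeeded: Vec::new(),
        failed: Vec::new(),
    };

    for (path, op) in submitted {
        if acknowledged.contains(&path) {
            result.succeeded.push((path, op));
        } else {
            // Keys the service never mentioned are reported as failed so the
            // caller can decide whether to retry them.
            result.failed.push((
                path,
                op,
                Error::new(ErrorKind::Unexpected, "delete was not acknowledged by the service"),
            ));
        }
    }

    result
}
```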
+ +use super::core::*; +use super::error::{parse_error, PcloudError}; +use crate::raw::*; +use crate::*; +use bytes::Buf; +use http::StatusCode; +use std::sync::Arc; + +pub struct PcloudDeleter { + core: Arc, +} + +impl PcloudDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for PcloudDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = if path.ends_with('/') { + self.core.delete_folder(&path).await? + } else { + self.core.delete_file(&path).await? + }; + + let status = resp.status(); + + match status { + StatusCode::OK => { + let bs = resp.into_body(); + let resp: PcloudError = + serde_json::from_reader(bs.reader()).map_err(new_json_deserialize_error)?; + let result = resp.result; + + // pCloud returns 2005 or 2009 if the file or folder is not found + if result != 0 && result != 2005 && result != 2009 { + return Err(Error::new(ErrorKind::Unexpected, format!("{resp:?}"))); + } + + Ok(()) + } + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/pcloud/mod.rs b/core/src/services/pcloud/mod.rs index ae3a01c5204b..f42bdc162f1e 100644 --- a/core/src/services/pcloud/mod.rs +++ b/core/src/services/pcloud/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-pcloud")] mod core; #[cfg(feature = "services-pcloud")] +mod delete; +#[cfg(feature = "services-pcloud")] mod error; #[cfg(feature = "services-pcloud")] mod lister; diff --git a/core/src/services/s3/backend.rs b/core/src/services/s3/backend.rs index 663bb2ff770c..654b28da06ee 100644 --- a/core/src/services/s3/backend.rs +++ b/core/src/services/s3/backend.rs @@ -25,7 +25,6 @@ use std::sync::Arc; use base64::prelude::BASE64_STANDARD; use base64::Engine; -use bytes::Buf; use constants::X_AMZ_META_PREFIX; use http::Response; use http::StatusCode; @@ -42,8 +41,8 @@ use reqsign::AwsV4Signer; use reqwest::Url; use super::core::*; +use super::delete::S3Deleter; use super::error::parse_error; -use super::error::parse_s3_error_code; use super::lister::{S3Lister, S3Listers, S3ObjectVersionsLister}; use super::writer::S3Writer; use super::writer::S3Writers; @@ -900,9 +899,11 @@ impl Access for S3Backend { type Reader = HttpBody; type Writer = S3Writers; type Lister = S3Listers; + type Deleter = oio::BatchDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -1026,24 +1027,11 @@ impl Access for S3Backend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, args: OpDelete) -> Result { - // This would delete the bucket, do not perform - if self.core.root == "/" && path == "/" { - return Ok(RpDelete::default()); - } - - let resp = self.core.s3_delete_object(path, &args).await?; - - let status = resp.status(); - - match status { - StatusCode::NO_CONTENT => Ok(RpDelete::default()), - // Allow 404 when deleting a non-existing object - // This is not a standard behavior, only some s3 alike service like GCS XML API do this. 
- // ref: - StatusCode::NOT_FOUND => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::BatchDeleter::new(S3Deleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { @@ -1106,56 +1094,6 @@ impl Access for S3Backend { parts.headers, ))) } - - async fn batch(&self, args: OpBatch) -> Result { - let ops = args.into_operation(); - if ops.len() > 1000 { - return Err(Error::new( - ErrorKind::Unsupported, - "s3 services only allow delete up to 1000 keys at once", - ) - .with_context("length", ops.len().to_string())); - } - - let paths = ops - .into_iter() - .map(|(p, BatchOperation::Delete(del))| (p, del)) - .collect(); - - let resp = self.core.s3_delete_objects(paths).await?; - - let status = resp.status(); - - if let StatusCode::OK = status { - let bs = resp.into_body(); - - let result: DeleteObjectsResult = - quick_xml::de::from_reader(bs.reader()).map_err(new_xml_deserialize_error)?; - - let mut batched_result = Vec::with_capacity(result.deleted.len() + result.error.len()); - for i in result.deleted { - let path = build_rel_path(&self.core.root, &i.key); - batched_result.push((path, Ok(RpDelete::default().into()))); - } - for i in result.error { - let path = build_rel_path(&self.core.root, &i.key); - - // set the error kind and mark temporary if retryable - let (kind, retryable) = - parse_s3_error_code(i.code.as_str()).unwrap_or((ErrorKind::Unexpected, false)); - let mut err: Error = Error::new(kind, format!("{i:?}")); - if retryable { - err = err.set_temporary(); - } - - batched_result.push((path, Err(err))); - } - - Ok(RpBatch::new(batched_result)) - } else { - Err(parse_error(resp)) - } - } } #[cfg(test)] diff --git a/core/src/services/s3/delete.rs b/core/src/services/s3/delete.rs new file mode 100644 index 000000000000..b2b49c84047b --- /dev/null +++ b/core/src/services/s3/delete.rs @@ -0,0 +1,108 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +use super::core::*; +use super::error::{parse_error, parse_s3_error_code}; +use crate::raw::oio::BatchDeleteResult; +use crate::raw::*; +use crate::*; +use bytes::Buf; +use http::StatusCode; +use std::sync::Arc; + +pub struct S3Deleter { + core: Arc, +} + +impl S3Deleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::BatchDelete for S3Deleter { + async fn delete_once(&self, path: String, args: OpDelete) -> Result<()> { + // This would delete the bucket, do not perform + if self.core.root == "/" && path == "/" { + return Ok(()); + } + + let resp = self.core.s3_delete_object(&path, &args).await?; + + let status = resp.status(); + + match status { + StatusCode::NO_CONTENT => Ok(()), + // Allow 404 when deleting a non-existing object + // This is not a standard behavior, only some s3 alike service like GCS XML API do this. + // ref: + StatusCode::NOT_FOUND => Ok(()), + _ => Err(parse_error(resp)), + } + } + + async fn delete_batch(&self, batch: Vec<(String, OpDelete)>) -> Result { + let resp = self.core.s3_delete_objects(batch).await?; + + let status = resp.status(); + if status != StatusCode::OK { + return Err(parse_error(resp)); + } + + let bs = resp.into_body(); + + let mut result: DeleteObjectsResult = + quick_xml::de::from_reader(bs.reader()).map_err(new_xml_deserialize_error)?; + + // If no object is deleted, return directly. + if result.deleted.is_empty() { + let err = result.error.remove(0); + return Err(parse_delete_objects_result_error(err)); + } + + let mut batched_result = BatchDeleteResult { + succeeded: Vec::with_capacity(result.deleted.len()), + failed: Vec::with_capacity(result.error.len()), + }; + for i in result.deleted { + let path = build_rel_path(&self.core.root, &i.key); + // TODO: fix https://github.com/apache/opendal/issues/5329 + batched_result.succeeded.push((path, OpDelete::new())); + } + for i in result.error { + let path = build_rel_path(&self.core.root, &i.key); + + batched_result.failed.push(( + path, + OpDelete::new(), + parse_delete_objects_result_error(i), + )); + } + + Ok(batched_result) + } +} + +fn parse_delete_objects_result_error(err: DeleteObjectsResultError) -> Error { + let (kind, retryable) = + parse_s3_error_code(err.code.as_str()).unwrap_or((ErrorKind::Unexpected, false)); + let mut err: Error = Error::new(kind, format!("{err:?}")); + if retryable { + err = err.set_temporary(); + } + err +} diff --git a/core/src/services/s3/mod.rs b/core/src/services/s3/mod.rs index b4161a9d66b2..587a171e6d32 100644 --- a/core/src/services/s3/mod.rs +++ b/core/src/services/s3/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-s3")] mod core; #[cfg(feature = "services-s3")] +mod delete; +#[cfg(feature = "services-s3")] mod error; #[cfg(feature = "services-s3")] mod lister; diff --git a/core/src/services/seafile/backend.rs b/core/src/services/seafile/backend.rs index 77581a4d772d..a8a1f98e3efa 100644 --- a/core/src/services/seafile/backend.rs +++ b/core/src/services/seafile/backend.rs @@ -28,6 +28,7 @@ use super::core::parse_dir_detail; use super::core::parse_file_detail; use super::core::SeafileCore; use super::core::SeafileSigner; +use super::delete::SeafileDeleter; use super::error::parse_error; use super::lister::SeafileLister; use super::writer::SeafileWriter; @@ -212,9 +213,11 @@ impl Access for SeafileBackend { type Reader = HttpBody; type Writer = SeafileWriters; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = 
(); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -281,10 +284,11 @@ impl Access for SeafileBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _args: OpDelete) -> Result { - self.core.delete(path).await?; - - Ok(RpDelete::default()) + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(SeafileDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, _args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/seafile/delete.rs b/core/src/services/seafile/delete.rs new file mode 100644 index 000000000000..21d23cd01fb6 --- /dev/null +++ b/core/src/services/seafile/delete.rs @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use crate::raw::*; +use crate::*; +use std::sync::Arc; + +pub struct SeafileDeleter { + core: Arc, +} + +impl SeafileDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for SeafileDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + self.core.delete(&path).await?; + + Ok(()) + } +} diff --git a/core/src/services/seafile/mod.rs b/core/src/services/seafile/mod.rs index 4ba76b05b7d5..56c44acdd438 100644 --- a/core/src/services/seafile/mod.rs +++ b/core/src/services/seafile/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-seafile")] mod core; #[cfg(feature = "services-seafile")] +mod delete; +#[cfg(feature = "services-seafile")] mod error; #[cfg(feature = "services-seafile")] mod lister; diff --git a/core/src/services/sftp/backend.rs b/core/src/services/sftp/backend.rs index 552845ba319b..5adf389ea9dd 100644 --- a/core/src/services/sftp/backend.rs +++ b/core/src/services/sftp/backend.rs @@ -32,6 +32,7 @@ use openssh_sftp_client::SftpOptions; use tokio::io::AsyncSeekExt; use tokio::sync::OnceCell; +use super::delete::SftpDeleter; use super::error::is_not_found; use super::error::is_sftp_protocol_error; use super::error::parse_sftp_error; @@ -203,12 +204,12 @@ impl Builder for SftpBuilder { pub struct SftpBackend { copyable: bool, endpoint: String, - root: String, + pub root: String, user: Option, key: Option, known_hosts_strategy: KnownHosts, - client: OnceCell>, + pub client: OnceCell>, } pub struct Manager { @@ -319,9 +320,11 @@ impl Access for SftpBackend { type Reader = SftpReader; type Writer = SftpWriter; type Lister = Option; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -434,23 +437,11 @@ impl Access for SftpBackend { Ok((RpWrite::new(), SftpWriter::new(file))) } - async fn delete(&self, path: &str, _: 
OpDelete) -> Result { - let client = self.connect().await?; - - let mut fs = client.fs(); - fs.set_cwd(&self.root); - - let res = if path.ends_with('/') { - fs.remove_dir(path).await - } else { - fs.remove_file(path).await - }; - - match res { - Ok(()) => Ok(RpDelete::default()), - Err(e) if is_not_found(&e) => Ok(RpDelete::default()), - Err(e) => Err(parse_sftp_error(e)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(SftpDeleter::new(Arc::new(self.clone()))), + )) } async fn list(&self, path: &str, _: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/sftp/delete.rs b/core/src/services/sftp/delete.rs new file mode 100644 index 000000000000..1976016ef37d --- /dev/null +++ b/core/src/services/sftp/delete.rs @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::backend::SftpBackend; +use super::error::{is_not_found, parse_sftp_error}; +use crate::raw::*; +use crate::*; +use std::sync::Arc; + +pub struct SftpDeleter { + core: Arc, +} + +impl SftpDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for SftpDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let client = self.core.connect().await?; + + let mut fs = client.fs(); + fs.set_cwd(&self.core.root); + + let res = if path.ends_with('/') { + fs.remove_dir(path).await + } else { + fs.remove_file(path).await + }; + + match res { + Ok(()) => Ok(()), + Err(e) if is_not_found(&e) => Ok(()), + Err(e) => Err(parse_sftp_error(e)), + } + } +} diff --git a/core/src/services/sftp/mod.rs b/core/src/services/sftp/mod.rs index b17c0e0007bb..52451fd205b7 100644 --- a/core/src/services/sftp/mod.rs +++ b/core/src/services/sftp/mod.rs @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. 
+#[cfg(feature = "services-sftp")] +mod delete; #[cfg(feature = "services-sftp")] mod error; #[cfg(feature = "services-sftp")] diff --git a/core/src/services/supabase/backend.rs b/core/src/services/supabase/backend.rs index 623ec87b430b..56ddaabdfc67 100644 --- a/core/src/services/supabase/backend.rs +++ b/core/src/services/supabase/backend.rs @@ -149,9 +149,11 @@ impl Access for SupabaseBackend { type Writer = oio::OneShotWriter; // todo: implement Lister to support list type Lister = (); + type Deleter = (); type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -164,7 +166,6 @@ impl Access for SupabaseBackend { read: true, write: true, - delete: true, shared: true, @@ -215,20 +216,4 @@ impl Access for SupabaseBackend { oio::OneShotWriter::new(SupabaseWriter::new(self.core.clone(), path, args)), )) } - - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.supabase_delete_object(path).await?; - - if resp.status().is_success() { - Ok(RpDelete::default()) - } else { - // deleting not existing objects is ok - let e = parse_error(resp); - if e.kind() == ErrorKind::NotFound { - Ok(RpDelete::default()) - } else { - Err(e) - } - } - } } diff --git a/core/src/services/supabase/core.rs b/core/src/services/supabase/core.rs index 41e95f7a29ba..578a8b5f3605 100644 --- a/core/src/services/supabase/core.rs +++ b/core/src/services/supabase/core.rs @@ -109,20 +109,6 @@ impl SupabaseCore { Ok(req) } - pub fn supabase_delete_object_request(&self, path: &str) -> Result> { - let p = build_abs_path(&self.root, path); - let url = format!( - "{}/storage/v1/object/{}/{}", - self.endpoint, - self.bucket, - percent_encode_path(&p) - ); - - Request::delete(&url) - .body(Buffer::new()) - .map_err(new_request_build_error) - } - pub fn supabase_get_object_public_request( &self, path: &str, @@ -255,10 +241,4 @@ impl SupabaseCore { self.sign(&mut req)?; self.send(req).await } - - pub async fn supabase_delete_object(&self, path: &str) -> Result> { - let mut req = self.supabase_delete_object_request(path)?; - self.sign(&mut req)?; - self.send(req).await - } } diff --git a/core/src/services/swift/backend.rs b/core/src/services/swift/backend.rs index adadbf46c64f..12d2cb123311 100644 --- a/core/src/services/swift/backend.rs +++ b/core/src/services/swift/backend.rs @@ -24,6 +24,7 @@ use http::StatusCode; use log::debug; use super::core::*; +use super::delete::SwfitDeleter; use super::error::parse_error; use super::lister::SwiftLister; use super::writer::SwiftWriter; @@ -176,9 +177,11 @@ impl Access for SwiftBackend { type Reader = HttpBody; type Writer = oio::OneShotWriter; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -240,16 +243,11 @@ impl Access for SwiftBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _args: OpDelete) -> Result { - let resp = self.core.swift_delete(path).await?; - - let status = resp.status(); - - match status { - StatusCode::NO_CONTENT | StatusCode::OK => Ok(RpDelete::default()), - StatusCode::NOT_FOUND => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(SwfitDeleter::new(self.core.clone())), + )) } async fn 
list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/swift/delete.rs b/core/src/services/swift/delete.rs new file mode 100644 index 000000000000..1c5ce4f0010a --- /dev/null +++ b/core/src/services/swift/delete.rs @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct SwfitDeleter { + core: Arc, +} + +impl SwfitDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for SwfitDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.swift_delete(&path).await?; + + let status = resp.status(); + + match status { + StatusCode::NO_CONTENT | StatusCode::OK => Ok(()), + StatusCode::NOT_FOUND => Ok(()), + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/swift/mod.rs b/core/src/services/swift/mod.rs index 451ca66231e6..244d8f915c68 100644 --- a/core/src/services/swift/mod.rs +++ b/core/src/services/swift/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-swift")] mod core; #[cfg(feature = "services-swift")] +mod delete; +#[cfg(feature = "services-swift")] mod error; #[cfg(feature = "services-swift")] mod lister; diff --git a/core/src/services/upyun/backend.rs b/core/src/services/upyun/backend.rs index 36598d53fa64..4373d357c21b 100644 --- a/core/src/services/upyun/backend.rs +++ b/core/src/services/upyun/backend.rs @@ -24,6 +24,7 @@ use http::StatusCode; use log::debug; use super::core::*; +use super::delete::UpyunDeleter; use super::error::parse_error; use super::lister::UpyunLister; use super::writer::UpyunWriter; @@ -191,9 +192,11 @@ impl Access for UpyunBackend { type Reader = HttpBody; type Writer = UpyunWriters; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -280,17 +283,11 @@ impl Access for UpyunBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.delete(path).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => Ok(RpDelete::default()), - // Allow 404 when deleting a non-existing object - StatusCode::NOT_FOUND => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(UpyunDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { diff --git 
a/core/src/services/upyun/delete.rs b/core/src/services/upyun/delete.rs new file mode 100644 index 000000000000..ca7dc895086a --- /dev/null +++ b/core/src/services/upyun/delete.rs @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct UpyunDeleter { + core: Arc, +} + +impl UpyunDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for UpyunDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.delete(&path).await?; + + let status = resp.status(); + + match status { + StatusCode::OK => Ok(()), + // Allow 404 when deleting a non-existing object + StatusCode::NOT_FOUND => Ok(()), + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/upyun/mod.rs b/core/src/services/upyun/mod.rs index ca78feea823d..694faf301ca0 100644 --- a/core/src/services/upyun/mod.rs +++ b/core/src/services/upyun/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-upyun")] mod core; #[cfg(feature = "services-upyun")] +mod delete; +#[cfg(feature = "services-upyun")] mod error; #[cfg(feature = "services-upyun")] mod lister; diff --git a/core/src/services/vercel_artifacts/backend.rs b/core/src/services/vercel_artifacts/backend.rs index 923f9d3ba9ce..638afed26b21 100644 --- a/core/src/services/vercel_artifacts/backend.rs +++ b/core/src/services/vercel_artifacts/backend.rs @@ -47,9 +47,11 @@ impl Access for VercelArtifactsBackend { type Reader = HttpBody; type Writer = oio::OneShotWriter; type Lister = (); + type Deleter = (); type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut ma = AccessorInfo::default(); diff --git a/core/src/services/vercel_blob/backend.rs b/core/src/services/vercel_blob/backend.rs index 635e7533b4d9..53ea1f5ef500 100644 --- a/core/src/services/vercel_blob/backend.rs +++ b/core/src/services/vercel_blob/backend.rs @@ -147,9 +147,11 @@ impl Access for VercelBlobBackend { type Reader = HttpBody; type Writer = VercelBlobWriters; type Lister = oio::PageLister; + type Deleter = (); type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -165,7 +167,6 @@ impl Access for VercelBlobBackend { write_can_multi: true, write_multi_min_size: Some(5 * 1024 * 1024), - delete: true, copy: true, list: true, @@ -224,10 +225,6 @@ impl Access for VercelBlobBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - self.core.delete(path).await.map(|_| RpDelete::default()) - } 
- async fn copy(&self, from: &str, to: &str, _args: OpCopy) -> Result { let resp = self.core.copy(from, to).await?; diff --git a/core/src/services/vercel_blob/core.rs b/core/src/services/vercel_blob/core.rs index d21d57bb4f40..319764beefe3 100644 --- a/core/src/services/vercel_blob/core.rs +++ b/core/src/services/vercel_blob/core.rs @@ -148,40 +148,6 @@ impl VercelBlobCore { Ok(req) } - pub async fn delete(&self, path: &str) -> Result<()> { - let p = build_abs_path(&self.root, path); - - let resp = self.list(&p, Some(1)).await?; - - let url = resolve_blob(resp.blobs, p); - - if url.is_empty() { - return Ok(()); - } - - let req = Request::post("https://blob.vercel-storage.com/delete"); - - let req = self.sign(req); - - let req_body = &json!({ - "urls": vec![url] - }); - - let req = req - .header(header::CONTENT_TYPE, "application/json") - .body(Buffer::from(Bytes::from(req_body.to_string()))) - .map_err(new_request_build_error)?; - - let resp = self.send(req).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => Ok(()), - _ => Err(parse_error(resp)), - } - } - pub async fn head(&self, path: &str) -> Result> { let p = build_abs_path(&self.root, path); diff --git a/core/src/services/webdav/backend.rs b/core/src/services/webdav/backend.rs index 6fd4b8567955..81f2bfb0ad2d 100644 --- a/core/src/services/webdav/backend.rs +++ b/core/src/services/webdav/backend.rs @@ -25,6 +25,7 @@ use http::StatusCode; use log::debug; use super::core::*; +use super::delete::WebdavDeleter; use super::error::parse_error; use super::lister::WebdavLister; use super::writer::WebdavWriter; @@ -205,9 +206,11 @@ impl Access for WebdavBackend { type Reader = HttpBody; type Writer = oio::OneShotWriter; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut ma = AccessorInfo::default(); @@ -276,14 +279,11 @@ impl Access for WebdavBackend { )) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.webdav_delete(path).await?; - - let status = resp.status(); - match status { - StatusCode::NO_CONTENT | StatusCode::NOT_FOUND => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(WebdavDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/webdav/delete.rs b/core/src/services/webdav/delete.rs new file mode 100644 index 000000000000..b6935507cd83 --- /dev/null +++ b/core/src/services/webdav/delete.rs @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +use super::core::*; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct WebdavDeleter { + core: Arc, +} + +impl WebdavDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for WebdavDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.webdav_delete(&path).await?; + + let status = resp.status(); + match status { + StatusCode::NO_CONTENT | StatusCode::NOT_FOUND => Ok(()), + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/webdav/mod.rs b/core/src/services/webdav/mod.rs index 721f192613ee..ae95bc94fd9e 100644 --- a/core/src/services/webdav/mod.rs +++ b/core/src/services/webdav/mod.rs @@ -18,6 +18,8 @@ #[cfg(feature = "services-webdav")] mod core; #[cfg(feature = "services-webdav")] +mod delete; +#[cfg(feature = "services-webdav")] mod error; #[cfg(feature = "services-webdav")] mod lister; diff --git a/core/src/services/webhdfs/backend.rs b/core/src/services/webhdfs/backend.rs index 661e52c8b24f..4b6757d3bae9 100644 --- a/core/src/services/webhdfs/backend.rs +++ b/core/src/services/webhdfs/backend.rs @@ -29,6 +29,7 @@ use log::debug; use serde::Deserialize; use tokio::sync::OnceCell; +use super::delete::WebhdfsDeleter; use super::error::parse_error; use super::lister::WebhdfsLister; use super::message::BooleanResp; @@ -514,9 +515,11 @@ impl Access for WebhdfsBackend { type Reader = HttpBody; type Writer = WebhdfsWriters; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -640,13 +643,11 @@ impl Access for WebhdfsBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.webhdfs_delete(path).await?; - - match resp.status() { - StatusCode::OK => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(WebhdfsDeleter::new(Arc::new(self.clone()))), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/webhdfs/delete.rs b/core/src/services/webhdfs/delete.rs new file mode 100644 index 000000000000..c08b36e7ad34 --- /dev/null +++ b/core/src/services/webhdfs/delete.rs @@ -0,0 +1,44 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +use super::backend::WebhdfsBackend; +use super::error::parse_error; +use crate::raw::*; +use crate::*; +use http::StatusCode; +use std::sync::Arc; + +pub struct WebhdfsDeleter { + core: Arc, +} + +impl WebhdfsDeleter { + pub fn new(core: Arc) -> Self { + Self { core } + } +} + +impl oio::OneShotDelete for WebhdfsDeleter { + async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> { + let resp = self.core.webhdfs_delete(&path).await?; + + match resp.status() { + StatusCode::OK => Ok(()), + _ => Err(parse_error(resp)), + } + } +} diff --git a/core/src/services/webhdfs/mod.rs b/core/src/services/webhdfs/mod.rs index dc0e3faf851f..87585cce7c83 100644 --- a/core/src/services/webhdfs/mod.rs +++ b/core/src/services/webhdfs/mod.rs @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +#[cfg(feature = "services-webhdfs")] +mod delete; #[cfg(feature = "services-webhdfs")] mod error; #[cfg(feature = "services-webhdfs")] diff --git a/core/src/services/yandex_disk/backend.rs b/core/src/services/yandex_disk/backend.rs index 7dbd42b46913..491efd9f8104 100644 --- a/core/src/services/yandex_disk/backend.rs +++ b/core/src/services/yandex_disk/backend.rs @@ -27,6 +27,7 @@ use http::StatusCode; use log::debug; use super::core::*; +use super::delete::YandexDiskDeleter; use super::error::parse_error; use super::lister::YandexDiskLister; use super::writer::YandexDiskWriter; @@ -148,9 +149,11 @@ impl Access for YandexDiskBackend { type Reader = HttpBody; type Writer = YandexDiskWriters; type Lister = oio::PageLister; + type Deleter = oio::OneShotDeleter; type BlockingReader = (); type BlockingWriter = (); type BlockingLister = (); + type BlockingDeleter = (); fn info(&self) -> Arc { let mut am = AccessorInfo::default(); @@ -260,21 +263,11 @@ impl Access for YandexDiskBackend { Ok((RpWrite::default(), w)) } - async fn delete(&self, path: &str, _: OpDelete) -> Result { - let resp = self.core.delete(path).await?; - - let status = resp.status(); - - match status { - StatusCode::OK => Ok(RpDelete::default()), - StatusCode::NO_CONTENT => Ok(RpDelete::default()), - // Yandex Disk deleting a non-empty folder can take an unknown amount of time, - // So the API responds with the code 202 Accepted (the deletion process has started). - StatusCode::ACCEPTED => Ok(RpDelete::default()), - // Allow 404 when deleting a non-existing object - StatusCode::NOT_FOUND => Ok(RpDelete::default()), - _ => Err(parse_error(resp)), - } + async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> { + Ok(( + RpDelete::default(), + oio::OneShotDeleter::new(YandexDiskDeleter::new(self.core.clone())), + )) } async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> { diff --git a/core/src/services/yandex_disk/delete.rs b/core/src/services/yandex_disk/delete.rs new file mode 100644 index 000000000000..01a95d6a5b4c --- /dev/null +++ b/core/src/services/yandex_disk/delete.rs @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use super::core::*;
+use super::error::parse_error;
+use crate::raw::*;
+use crate::*;
+use http::StatusCode;
+use std::sync::Arc;
+
+pub struct YandexDiskDeleter {
+    core: Arc<YandexDiskCore>,
+}
+
+impl YandexDiskDeleter {
+    pub fn new(core: Arc<YandexDiskCore>) -> Self {
+        Self { core }
+    }
+}
+
+impl oio::OneShotDelete for YandexDiskDeleter {
+    async fn delete_once(&self, path: String, _: OpDelete) -> Result<()> {
+        let resp = self.core.delete(&path).await?;
+
+        let status = resp.status();
+
+        match status {
+            StatusCode::OK => Ok(()),
+            StatusCode::NO_CONTENT => Ok(()),
+            // Yandex Disk deleting a non-empty folder can take an unknown amount of time,
+            // so the API responds with the code 202 Accepted (the deletion process has started).
+            StatusCode::ACCEPTED => Ok(()),
+            // Allow 404 when deleting a non-existing object
+            StatusCode::NOT_FOUND => Ok(()),
+            _ => Err(parse_error(resp)),
+        }
+    }
+}
diff --git a/core/src/services/yandex_disk/mod.rs b/core/src/services/yandex_disk/mod.rs
index bae1f563650c..f643bb7223e0 100644
--- a/core/src/services/yandex_disk/mod.rs
+++ b/core/src/services/yandex_disk/mod.rs
@@ -18,6 +18,8 @@
 #[cfg(feature = "services-yandex-disk")]
 mod core;
 #[cfg(feature = "services-yandex-disk")]
+mod delete;
+#[cfg(feature = "services-yandex-disk")]
 mod error;
 #[cfg(feature = "services-yandex-disk")]
 mod lister;
diff --git a/core/src/types/capability.rs b/core/src/types/capability.rs
index 405ff66e0712..f8b87d28a393 100644
--- a/core/src/types/capability.rs
+++ b/core/src/types/capability.rs
@@ -153,6 +153,8 @@ pub struct Capability {
     pub delete: bool,
     /// Indicates if versioned delete operations are supported.
     pub delete_with_version: bool,
+    /// Maximum number of paths that can be deleted in a single batch operation.
+    pub delete_max_size: Option<usize>,
 
     /// Indicates if copy operations are supported.
     pub copy: bool,
diff --git a/core/src/types/delete/blocking_deleter.rs b/core/src/types/delete/blocking_deleter.rs
new file mode 100644
index 000000000000..53a5f26197a7
--- /dev/null
+++ b/core/src/types/delete/blocking_deleter.rs
@@ -0,0 +1,112 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::raw::*;
+use crate::*;
+
+/// BlockingDeleter is designed to continuously remove content from storage.
+///
+/// It leverages batch deletion capabilities provided by storage services for efficient removal.
+pub struct BlockingDeleter { + deleter: oio::BlockingDeleter, + + max_size: usize, + cur_size: usize, +} + +impl BlockingDeleter { + pub(crate) fn create(acc: Accessor) -> Result { + let max_size = acc.info().full_capability().delete_max_size.unwrap_or(1); + let (_, deleter) = acc.blocking_delete()?; + + Ok(Self { + deleter, + max_size, + cur_size: 0, + }) + } + + /// Delete a path. + pub fn delete(&mut self, input: impl IntoDeleteInput) -> Result<()> { + if self.cur_size >= self.max_size { + let deleted = self.deleter.flush()?; + self.cur_size -= deleted; + } + + let input = input.into_delete_input(); + let mut op = OpDelete::default(); + if let Some(version) = &input.version { + op = op.with_version(version); + } + + self.deleter.delete(&input.path, op)?; + self.cur_size += 1; + Ok(()) + } + + /// Delete an infallible iterator of paths. + /// + /// Also see: + /// + /// - [`BlockingDeleter::delete_try_iter`]: delete an fallible iterator of paths. + pub fn delete_iter(&mut self, iter: I) -> Result<()> + where + I: IntoIterator, + D: IntoDeleteInput, + { + for entry in iter { + self.delete(entry)?; + } + + Ok(()) + } + + /// Delete an fallible iterator of paths. + /// + /// Also see: + /// + /// - [`BlockingDeleter::delete_iter`]: delete an infallible iterator of paths. + pub fn delete_try_iter(&mut self, try_iter: I) -> Result<()> + where + I: IntoIterator>, + D: IntoDeleteInput, + { + for entry in try_iter { + self.delete(entry?)?; + } + + Ok(()) + } + + /// Flush the deleter, returns the number of deleted paths. + pub fn flush(&mut self) -> Result { + let deleted = self.deleter.flush()?; + self.cur_size -= deleted; + Ok(deleted) + } + + /// Close the deleter, this will flush the deleter and wait until all paths are deleted. + pub fn close(&mut self) -> Result<()> { + loop { + self.flush()?; + if self.cur_size == 0 { + break; + } + } + Ok(()) + } +} diff --git a/core/src/types/delete/deleter.rs b/core/src/types/delete/deleter.rs new file mode 100644 index 000000000000..74163809ee53 --- /dev/null +++ b/core/src/types/delete/deleter.rs @@ -0,0 +1,216 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +use crate::raw::oio::DeleteDyn; +use crate::raw::*; +use crate::*; +use futures::{Stream, StreamExt}; +use std::pin::pin; + +/// Deleter is designed to continuously remove content from storage. +/// +/// It leverages batch deletion capabilities provided by storage services for efficient removal. 
+/// +/// # Usage +/// +/// [`Deleter`] provides several ways to delete files: +/// +/// ## Direct Deletion +/// +/// Use the `delete` method to remove a single file: +/// +/// ```rust +/// use opendal::Operator; +/// use opendal::Result; +/// +/// async fn example(op: Operator) -> Result<()> { +/// let mut d = op.deleter().await?; +/// d.delete("path/to/file").await?; +/// d.close().await?; +/// Ok(()) +/// } +/// ``` +/// +/// Delete multiple files via a stream: +/// +/// ```rust +/// use opendal::Operator; +/// use opendal::Result; +/// use futures::stream; +/// +/// async fn example(op: Operator) -> Result<()> { +/// let mut d = op.deleter().await?; +/// d.delete_stream(stream::iter(vec!["path/to/file"])).await?; +/// d.close().await?; +/// Ok(()) +/// } +/// ``` +/// +/// ## Using as a Sink +/// +/// Deleter can be used as a Sink for file deletion: +/// +/// ```rust +/// use opendal::Operator; +/// use opendal::Result; +/// use futures::{stream, Sink}; +/// use futures::SinkExt; +/// +/// async fn example(op: Operator) -> Result<()> { +/// let mut sink = op.deleter().await?.into_sink(); +/// sink.send("path/to/file").await?; +/// sink.close().await?; +/// Ok(()) +/// } +/// ``` +pub struct Deleter { + deleter: oio::Deleter, + + max_size: usize, + cur_size: usize, +} + +impl Deleter { + pub(crate) async fn create(acc: Accessor) -> Result { + let max_size = acc.info().full_capability().delete_max_size.unwrap_or(1); + let (_, deleter) = acc.delete().await?; + + Ok(Self { + deleter, + max_size, + cur_size: 0, + }) + } + + /// Delete a path. + pub async fn delete(&mut self, input: impl IntoDeleteInput) -> Result<()> { + if self.cur_size >= self.max_size { + let deleted = self.deleter.flush_dyn().await?; + self.cur_size -= deleted; + } + + let input = input.into_delete_input(); + let mut op = OpDelete::default(); + if let Some(version) = &input.version { + op = op.with_version(version); + } + + self.deleter.delete_dyn(&input.path, op)?; + self.cur_size += 1; + Ok(()) + } + + /// Delete an infallible iterator of paths. + /// + /// Also see: + /// + /// - [`Deleter::delete_try_iter`]: delete an fallible iterator of paths. + /// - [`Deleter::delete_stream`]: delete an infallible stream of paths. + /// - [`Deleter::delete_try_stream`]: delete an fallible stream of paths. + pub async fn delete_iter(&mut self, iter: I) -> Result<()> + where + I: IntoIterator, + D: IntoDeleteInput, + { + for entry in iter { + self.delete(entry).await?; + } + Ok(()) + } + + /// Delete an fallible iterator of paths. + /// + /// Also see: + /// + /// - [`Deleter::delete_iter`]: delete an infallible iterator of paths. + /// - [`Deleter::delete_stream`]: delete an infallible stream of paths. + /// - [`Deleter::delete_try_stream`]: delete an fallible stream of paths. + pub async fn delete_try_iter(&mut self, try_iter: I) -> Result<()> + where + I: IntoIterator>, + D: IntoDeleteInput, + { + for entry in try_iter { + self.delete(entry?).await?; + } + + Ok(()) + } + + /// Delete an infallible stream of paths. + /// + /// Also see: + /// + /// - [`Deleter::delete_iter`]: delete an infallible iterator of paths. + /// - [`Deleter::delete_try_iter`]: delete an fallible iterator of paths. + /// - [`Deleter::delete_try_stream`]: delete an fallible stream of paths. 
+ pub async fn delete_stream(&mut self, mut stream: S) -> Result<()> + where + S: Stream, + D: IntoDeleteInput, + { + let mut stream = pin!(stream); + while let Some(entry) = stream.next().await { + self.delete(entry).await?; + } + + Ok(()) + } + + /// Delete an fallible stream of paths. + /// + /// Also see: + /// + /// - [`Deleter::delete_iter`]: delete an infallible iterator of paths. + /// - [`Deleter::delete_try_iter`]: delete an fallible iterator of paths. + /// - [`Deleter::delete_stream`]: delete an infallible stream of paths. + pub async fn delete_try_stream(&mut self, mut try_stream: S) -> Result<()> + where + S: Stream>, + D: IntoDeleteInput, + { + let mut stream = pin!(try_stream); + while let Some(entry) = stream.next().await.transpose()? { + self.delete(entry).await?; + } + + Ok(()) + } + + /// Flush the deleter, returns the number of deleted paths. + pub async fn flush(&mut self) -> Result { + let deleted = self.deleter.flush_dyn().await?; + self.cur_size -= deleted; + Ok(deleted) + } + + /// Close the deleter, this will flush the deleter and wait until all paths are deleted. + pub async fn close(&mut self) -> Result<()> { + loop { + self.flush().await?; + if self.cur_size == 0 { + break; + } + } + Ok(()) + } + + /// Convert the deleter into a sink. + pub fn into_sink(self) -> FuturesDeleteSink { + FuturesDeleteSink::new(self) + } +} diff --git a/core/src/types/delete/futures_delete_sink.rs b/core/src/types/delete/futures_delete_sink.rs new file mode 100644 index 000000000000..458481b87c74 --- /dev/null +++ b/core/src/types/delete/futures_delete_sink.rs @@ -0,0 +1,172 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
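Together with the new `delete_max_size` capability, the `Deleter` above makes batching transparent: `delete` only queues a path, the next call flushes automatically once `max_size` paths are pending, and `close` keeps flushing until the queue drains. In practice bulk removal becomes a matter of piping a listing into the deleter; a short usage sketch in the style of the doc examples above (`Operator::lister` is the existing listing API, and `Entry` implements `IntoDeleteInput` in `input.rs` further below, so listed entries can be fed straight into `delete_try_stream`):

```rust
use opendal::Operator;
use opendal::Result;

/// Remove everything under `dir` by streaming list entries into a deleter.
async fn remove_all(op: Operator, dir: &str) -> Result<()> {
    // `Lister` yields `Result<Entry>`, and `Entry` converts into a
    // `DeleteInput` (carrying its version when present).
    let entries = op.lister(dir).await?;

    let mut deleter = op.deleter().await?;
    deleter.delete_try_stream(entries).await?;

    // `close` flushes until every queued path has been deleted.
    deleter.close().await?;

    Ok(())
}
```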
diff --git a/core/src/types/delete/futures_delete_sink.rs b/core/src/types/delete/futures_delete_sink.rs
new file mode 100644
index 000000000000..458481b87c74
--- /dev/null
+++ b/core/src/types/delete/futures_delete_sink.rs
@@ -0,0 +1,172 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::raw::*;
+use crate::*;
+use futures::Sink;
+use std::marker::PhantomData;
+use std::pin::Pin;
+use std::task::{ready, Context, Poll};
+
+/// FuturesDeleteSink is a sink generated by [`Deleter`].
+pub struct FuturesDeleteSink<T: IntoDeleteInput> {
+    state: State,
+    _phantom: PhantomData<T>,
+}
+
+enum State {
+    Idle(Option<Deleter>),
+    Delete(BoxedStaticFuture<(Deleter, Result<()>)>),
+    Flush(BoxedStaticFuture<(Deleter, Result<usize>)>),
+    Close(BoxedStaticFuture<(Deleter, Result<()>)>),
+}
+
+impl<T: IntoDeleteInput> FuturesDeleteSink<T> {
+    #[inline]
+    pub(super) fn new(deleter: Deleter) -> Self {
+        Self {
+            state: State::Idle(Some(deleter)),
+            _phantom: PhantomData,
+        }
+    }
+}
+
+impl<T: IntoDeleteInput> Sink<T> for FuturesDeleteSink<T> {
+    type Error = Error;
+
+    fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
+        match &mut self.state {
+            State::Idle(_) => Poll::Ready(Ok(())),
+            State::Delete(fut) => {
+                let (deleter, res) = ready!(fut.as_mut().poll(cx));
+                self.state = State::Idle(Some(deleter));
+                Poll::Ready(res.map(|_| ()))
+            }
+            State::Flush(fut) => {
+                let (deleter, res) = ready!(fut.as_mut().poll(cx));
+                self.state = State::Idle(Some(deleter));
+                Poll::Ready(res.map(|_| ()))
+            }
+            State::Close(fut) => {
+                let (deleter, res) = ready!(fut.as_mut().poll(cx));
+                self.state = State::Idle(Some(deleter));
+                Poll::Ready(res.map(|_| ()))
+            }
+        }
+    }
+
+    fn start_send(mut self: Pin<&mut Self>, item: T) -> Result<()> {
+        match &mut self.state {
+            State::Idle(deleter) => {
+                let mut deleter = deleter.take().ok_or_else(|| {
+                    Error::new(
+                        ErrorKind::Unexpected,
+                        "FuturesDeleteSink has been closed or errored",
+                    )
+                })?;
+                let input = item.into_delete_input();
+                let fut = async move {
+                    let res = deleter.delete(input).await;
+                    (deleter, res)
+                };
+                self.state = State::Delete(Box::pin(fut));
+                Ok(())
+            }
+            _ => Err(Error::new(
+                ErrorKind::Unexpected,
+                "FuturesDeleteSink is not ready to send, please poll_ready first",
+            )),
+        }
+    }
+
+    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
+        loop {
+            match &mut self.state {
+                State::Idle(deleter) => {
+                    let mut deleter = deleter.take().ok_or_else(|| {
+                        Error::new(
+                            ErrorKind::Unexpected,
+                            "FuturesDeleteSink has been closed or errored",
+                        )
+                    })?;
+                    let fut = async move {
+                        let res = deleter.flush().await;
+                        (deleter, res)
+                    };
+                    self.state = State::Flush(Box::pin(fut));
+                    return Poll::Ready(Ok(()));
+                }
+                State::Delete(fut) => {
+                    let (deleter, res) = ready!(fut.as_mut().poll(cx));
+                    self.state = State::Idle(Some(deleter));
+                    res?;
+                    continue;
+                }
+                State::Flush(fut) => {
+                    let (deleter, res) = ready!(fut.as_mut().poll(cx));
+                    self.state = State::Idle(Some(deleter));
+                    let _ = res?;
+                    return Poll::Ready(Ok(()));
+                }
+                State::Close(fut) => {
+                    let (deleter, res) = ready!(fut.as_mut().poll(cx));
+                    self.state = State::Idle(Some(deleter));
+                    res?;
+                    continue;
+                }
+            };
+        }
+    }
+
+    fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
+        loop {
+            match &mut self.state {
+                State::Idle(deleter) => {
+                    let mut deleter = deleter.take().ok_or_else(|| {
+                        Error::new(
+                            ErrorKind::Unexpected,
+                            "FuturesDeleteSink has been closed or errored",
+                        )
+                    })?;
+                    let fut = async move {
+                        let res = deleter.close().await;
+                        (deleter, res)
+                    };
+                    self.state = State::Close(Box::pin(fut));
+                    return Poll::Ready(Ok(()));
+                }
+                State::Delete(fut) => {
+                    let (deleter, res) = ready!(fut.as_mut().poll(cx));
+                    self.state = State::Idle(Some(deleter));
+                    res?;
+                    continue;
+                }
+                State::Flush(fut) => {
+                    let (deleter, res) = ready!(fut.as_mut().poll(cx));
+                    self.state = State::Idle(Some(deleter));
+                    res?;
+                    continue;
+                }
+                State::Close(fut) => {
+                    let (deleter, res) = ready!(fut.as_mut().poll(cx));
+                    self.state = State::Idle(Some(deleter));
+                    return Poll::Ready(res);
+                }
+            };
+        }
+    }
+}
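(Editor's note, not part of the patch: a sketch of driving the sink adapter above with `futures::SinkExt`. Each `send` runs one buffered `Deleter::delete`, and closing the sink runs `Deleter::close` so every queued deletion is flushed. Paths are illustrative only.)

```rust
use futures::SinkExt;
use opendal::Operator;
use opendal::Result;

/// Feed a handful of paths through the sink adapter, then close it.
async fn sink_example(op: Operator) -> Result<()> {
    // The item type is anything implementing `IntoDeleteInput`; `String` here.
    let mut sink = op.deleter().await?.into_sink();

    for path in ["tmp/a", "tmp/b", "tmp/c"] {
        sink.send(path.to_string()).await?;
    }

    // `poll_close` keeps polling `Deleter::close`, so all queued deletions
    // are flushed before this returns.
    sink.close().await?;
    Ok(())
}
```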
diff --git a/core/src/types/delete/input.rs b/core/src/types/delete/input.rs
new file mode 100644
index 000000000000..ae136a6ade09
--- /dev/null
+++ b/core/src/types/delete/input.rs
@@ -0,0 +1,97 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::raw::OpDelete;
+use crate::Entry;
+
+/// DeleteInput is the input for delete operations.
+#[non_exhaustive]
+#[derive(Default, Debug)]
+pub struct DeleteInput {
+    /// The path to delete.
+    pub path: String,
+    /// The version of the path to delete.
+    pub version: Option<String>,
+}
+
+/// IntoDeleteInput is a helper trait that makes it easier for users to play with `Deleter`.
+pub trait IntoDeleteInput: Send + Sync + Unpin {
+    /// Convert `self` into a `DeleteInput`.
+    fn into_delete_input(self) -> DeleteInput;
+}
+
+/// Implement `IntoDeleteInput` for `DeleteInput` itself.
+impl IntoDeleteInput for DeleteInput {
+    fn into_delete_input(self) -> DeleteInput {
+        self
+    }
+}
+
+/// Implement `IntoDeleteInput` for `&str` so we can use `&str` as a DeleteInput.
+impl IntoDeleteInput for &str {
+    fn into_delete_input(self) -> DeleteInput {
+        DeleteInput {
+            path: self.to_string(),
+            ..Default::default()
+        }
+    }
+}
+
+/// Implement `IntoDeleteInput` for `String` so we can use `Vec<String>` as a DeleteInput stream.
+impl IntoDeleteInput for String {
+    fn into_delete_input(self) -> DeleteInput {
+        DeleteInput {
+            path: self,
+            ..Default::default()
+        }
+    }
+}
+
+/// Implement `IntoDeleteInput` for `(String, OpDelete)` so we can use `(String, OpDelete)`
+/// as a DeleteInput stream.
+impl IntoDeleteInput for (String, OpDelete) {
+    fn into_delete_input(self) -> DeleteInput {
+        let (path, args) = self;
+
+        let mut input = DeleteInput {
+            path,
+            ..Default::default()
+        };
+
+        if let Some(version) = args.version() {
+            input.version = Some(version.to_string());
+        }
+        input
+    }
+}
+
+/// Implement `IntoDeleteInput` for `Entry` so we can use `Lister` as a DeleteInput stream.
+impl IntoDeleteInput for Entry {
+    fn into_delete_input(self) -> DeleteInput {
+        let (path, meta) = self.into_parts();
+
+        let mut input = DeleteInput {
+            path,
+            ..Default::default()
+        };
+
+        if let Some(version) = meta.version() {
+            input.version = Some(version.to_string());
+        }
+        input
+    }
+}
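(Editor's note, not part of the patch: because `IntoDeleteInput` is a public trait, application types can feed `Deleter` directly. The sketch below assumes `DeleteInput`, `IntoDeleteInput`, and `Operator::delete_iter` are re-exported at the crate root as the `mod.rs` changes below suggest; `VersionedObject` is a hypothetical caller-side type.)

```rust
use opendal::{DeleteInput, IntoDeleteInput, Operator, Result};

/// A caller-defined record that knows which object version to delete.
struct VersionedObject {
    key: String,
    version: String,
}

impl IntoDeleteInput for VersionedObject {
    fn into_delete_input(self) -> DeleteInput {
        // `DeleteInput` is `#[non_exhaustive]`, so build it from `Default`
        // and fill in the public fields.
        let mut input = DeleteInput::default();
        input.path = self.key;
        input.version = Some(self.version);
        input
    }
}

async fn delete_versioned(op: Operator) -> Result<()> {
    let obj = VersionedObject {
        key: "data/report.csv".to_string(),
        version: "v1".to_string(),
    };
    op.delete_iter([obj]).await?;
    Ok(())
}
```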
diff --git a/core/src/types/delete/mod.rs b/core/src/types/delete/mod.rs
new file mode 100644
index 000000000000..c9f7bb50f1ce
--- /dev/null
+++ b/core/src/types/delete/mod.rs
@@ -0,0 +1,29 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+mod input;
+pub use input::DeleteInput;
+pub use input::IntoDeleteInput;
+
+mod deleter;
+pub use deleter::Deleter;
+
+mod futures_delete_sink;
+pub use futures_delete_sink::FuturesDeleteSink;
+
+mod blocking_deleter;
+pub use blocking_deleter::BlockingDeleter;
diff --git a/core/src/types/mod.rs b/core/src/types/mod.rs
index 448a5f430245..3f7afc58f206 100644
--- a/core/src/types/mod.rs
+++ b/core/src/types/mod.rs
@@ -43,6 +43,9 @@ mod list;
 pub use list::BlockingLister;
 pub use list::Lister;
 
+mod delete;
+pub use delete::*;
+
 mod execute;
 pub use execute::*;
 
diff --git a/core/src/types/operator/blocking_operator.rs b/core/src/types/operator/blocking_operator.rs
index 0119e10485be..9d7620b15563 100644
--- a/core/src/types/operator/blocking_operator.rs
+++ b/core/src/types/operator/blocking_operator.rs
@@ -16,6 +16,7 @@
 // under the License.
 
 use super::operator_functions::*;
+use crate::raw::oio::BlockingDelete;
 use crate::raw::*;
 use crate::*;
 
@@ -765,13 +766,56 @@ impl BlockingOperator {
             path,
             OpDelete::new(),
             |inner, path, args| {
-                let _ = inner.blocking_delete(&path, args)?;
+                let (_, mut deleter) = inner.blocking_delete()?;
+                deleter.delete(&path, args)?;
+                deleter.flush()?;
                 Ok(())
             },
         ))
    }
 
+    /// Delete an infallible iterator of paths.
+    ///
+    /// Also see:
+    ///
+    /// - [`BlockingOperator::delete_try_iter`]: delete a fallible iterator of paths.
+    pub fn delete_iter<I, D>(&self, iter: I) -> Result<()>
+    where
+        I: IntoIterator<Item = D>,
+        D: IntoDeleteInput,
+    {
+        let mut deleter = self.deleter()?;
+        deleter.delete_iter(iter)?;
+        deleter.close()?;
+        Ok(())
+    }
+
+    /// Delete a fallible iterator of paths.
+    ///
+    /// Also see:
+    ///
+    /// - [`BlockingOperator::delete_iter`]: delete an infallible iterator of paths.
+    pub fn delete_try_iter<I, D>(&self, try_iter: I) -> Result<()>
+    where
+        I: IntoIterator<Item = Result<D>>,
+        D: IntoDeleteInput,
+    {
+        let mut deleter = self.deleter()?;
+        deleter.delete_try_iter(try_iter)?;
+        deleter.close()?;
+        Ok(())
+    }
+
+    /// Create a [`BlockingDeleter`] to continuously remove content from storage.
+    ///
+    /// It leverages batch deletion capabilities provided by storage services for efficient removal.
+    ///
+    /// Users can have more control over the deletion process by using [`BlockingDeleter`] directly.
+    pub fn deleter(&self) -> Result<BlockingDeleter> {
+        BlockingDeleter::create(self.inner().clone())
+    }
+
     /// remove will remove files via the given paths.
     ///
     /// remove_via will remove files via the given vector iterators.
@@ -792,6 +836,7 @@ impl BlockingOperator {
     /// # Ok(())
     /// # }
     /// ```
+    #[deprecated(note = "use `BlockingOperator::delete_iter` instead", since = "0.52.0")]
     pub fn remove_via(&self, input: impl Iterator<Item = String>) -> Result<()> {
         for path in input {
             self.delete(&path)?;
@@ -814,10 +859,9 @@ impl BlockingOperator {
     /// # Ok(())
     /// # }
     /// ```
+    #[deprecated(note = "use `BlockingOperator::delete_iter` instead", since = "0.52.0")]
     pub fn remove(&self, paths: Vec<String>) -> Result<()> {
-        self.remove_via(paths.into_iter())?;
-
-        Ok(())
+        self.delete_iter(paths)
     }
 
     /// Remove the path and all nested dirs and files recursively.
@@ -853,17 +897,8 @@ impl BlockingOperator {
             Err(e) => return Err(e),
         };
 
-        let obs = self.lister_with(path).recursive(true).call()?;
-
-        for v in obs {
-            match v {
-                Ok(entry) => {
-                    self.inner()
-                        .blocking_delete(entry.path(), OpDelete::new())?;
-                }
-                Err(e) => return Err(e),
-            }
-        }
+        let lister = self.lister_with(path).recursive(true).call()?;
+        self.delete_try_iter(lister)?;
 
         Ok(())
     }
diff --git a/core/src/types/operator/operator.rs b/core/src/types/operator/operator.rs
index 7c4240630049..502d0fa248c0 100644
--- a/core/src/types/operator/operator.rs
+++ b/core/src/types/operator/operator.rs
@@ -18,14 +18,15 @@
 use std::future::Future;
 use std::time::Duration;
 
-use futures::stream;
 use futures::Stream;
 use futures::StreamExt;
 use futures::TryStreamExt;
 
 use super::BlockingOperator;
 use crate::operator_futures::*;
+use crate::raw::oio::DeleteDyn;
 use crate::raw::*;
+use crate::types::delete::Deleter;
 use crate::*;
 
 /// Operator is the entry for all public async APIs.
@@ -1605,13 +1606,95 @@ impl Operator {
             path,
             OpDelete::default(),
             |inner, path, args| async move {
-                let _ = inner.delete(&path, args).await?;
+                let (_, mut deleter) = inner.delete_dyn().await?;
+                deleter.delete_dyn(&path, args)?;
+                deleter.flush_dyn().await?;
                 Ok(())
             },
         )
     }
 
+    /// Delete an infallible iterator of paths.
+    ///
+    /// Also see:
+    ///
+    /// - [`Operator::delete_try_iter`]: delete a fallible iterator of paths.
+    /// - [`Operator::delete_stream`]: delete an infallible stream of paths.
+    /// - [`Operator::delete_try_stream`]: delete a fallible stream of paths.
+    pub async fn delete_iter<I, D>(&self, iter: I) -> Result<()>
+    where
+        I: IntoIterator<Item = D>,
+        D: IntoDeleteInput,
+    {
+        let mut deleter = self.deleter().await?;
+        deleter.delete_iter(iter).await?;
+        deleter.close().await?;
+        Ok(())
+    }
+
+    /// Delete a fallible iterator of paths.
+    ///
+    /// Also see:
+    ///
+    /// - [`Operator::delete_iter`]: delete an infallible iterator of paths.
+    /// - [`Operator::delete_stream`]: delete an infallible stream of paths.
+    /// - [`Operator::delete_try_stream`]: delete a fallible stream of paths.
+    pub async fn delete_try_iter<I, D>(&self, try_iter: I) -> Result<()>
+    where
+        I: IntoIterator<Item = Result<D>>,
+        D: IntoDeleteInput,
+    {
+        let mut deleter = self.deleter().await?;
+        deleter.delete_try_iter(try_iter).await?;
+        deleter.close().await?;
+        Ok(())
+    }
+
+    /// Delete an infallible stream of paths.
+    ///
+    /// Also see:
+    ///
+    /// - [`Operator::delete_iter`]: delete an infallible iterator of paths.
+    /// - [`Operator::delete_try_iter`]: delete a fallible iterator of paths.
+    /// - [`Operator::delete_try_stream`]: delete a fallible stream of paths.
+    pub async fn delete_stream<S, D>(&self, stream: S) -> Result<()>
+    where
+        S: Stream<Item = D>,
+        D: IntoDeleteInput,
+    {
+        let mut deleter = self.deleter().await?;
+        deleter.delete_stream(stream).await?;
+        deleter.close().await?;
+        Ok(())
+    }
+
+    /// Delete a fallible stream of paths.
+    ///
+    /// Also see:
+    ///
+    /// - [`Operator::delete_iter`]: delete an infallible iterator of paths.
+    /// - [`Operator::delete_try_iter`]: delete a fallible iterator of paths.
+    /// - [`Operator::delete_stream`]: delete an infallible stream of paths.
+    pub async fn delete_try_stream<S, D>(&self, try_stream: S) -> Result<()>
+    where
+        S: Stream<Item = Result<D>>,
+        D: IntoDeleteInput,
+    {
+        let mut deleter = self.deleter().await?;
+        deleter.delete_try_stream(try_stream).await?;
+        deleter.close().await?;
+        Ok(())
+    }
+
+    /// Create a [`Deleter`] to continuously remove content from storage.
+    ///
+    /// It leverages batch deletion capabilities provided by storage services for efficient removal.
     ///
+    /// Users can have more control over the deletion process by using [`Deleter`] directly.
+    pub async fn deleter(&self) -> Result<Deleter> {
+        Deleter::create(self.inner().clone()).await
+    }
+
     /// # Notes
     ///
     /// If underlying services support delete in batch, we will use batch
@@ -1630,8 +1713,12 @@ impl Operator {
     /// # Ok(())
     /// # }
     /// ```
+    #[deprecated(note = "use `Operator::delete_iter` instead", since = "0.52.0")]
     pub async fn remove(&self, paths: Vec<String>) -> Result<()> {
-        self.remove_via(stream::iter(paths)).await
+        let mut deleter = self.deleter().await?;
+        deleter.delete_iter(paths).await?;
+        deleter.close().await?;
+        Ok(())
     }
 
     /// remove will remove files via the given paths.
@@ -1659,35 +1746,13 @@ impl Operator {
     /// # Ok(())
     /// # }
     /// ```
+    #[deprecated(note = "use `Operator::delete_stream` instead", since = "0.52.0")]
     pub async fn remove_via(&self, input: impl Stream<Item = String> + Unpin) -> Result<()> {
-        let input = input.map(|v| normalize_path(&v));
-
-        if self.info().full_capability().batch {
-            let mut input = input
-                .map(|v| (v, OpDelete::default().into()))
-                .chunks(self.limit());
-
-            while let Some(batches) = input.next().await {
-                let results = self
-                    .inner()
-                    .batch(OpBatch::new(batches))
-                    .await?
-                    .into_results();
-
-                // TODO: return error here directly seems not a good idea?
-                for (_, result) in results {
-                    let _ = result?;
-                }
-            }
-        } else {
-            input
-                .map(Ok)
-                .try_for_each_concurrent(self.limit, |path| async move {
-                    let _ = self.inner().delete(&path, OpDelete::default()).await?;
-                    Ok::<(), Error>(())
-                })
-                .await?;
-        }
+        let mut deleter = self.deleter().await?;
+        deleter
+            .delete_stream(input.map(|v| normalize_path(&v)))
+            .await?;
+        deleter.close().await?;
         Ok(())
     }
 
@@ -1730,34 +1795,8 @@ impl Operator {
             Err(e) => return Err(e),
         };
 
-        let obs = self.lister_with(path).recursive(true).await?;
-
-        if self.info().full_capability().batch {
-            let mut obs = obs.try_chunks(self.limit());
-
-            while let Some(batches) = obs.next().await {
-                let batches = batches
-                    .map_err(|err| err.1)?
-                    .into_iter()
-                    .map(|v| (v.path().to_string(), OpDelete::default().into()))
-                    .collect();
-
-                let results = self
-                    .inner()
-                    .batch(OpBatch::new(batches))
-                    .await?
-                    .into_results();
-
-                // TODO: return error here directly seems not a good idea?
-                for (_, result) in results {
-                    let _ = result?;
-                }
-            }
-        } else {
-            obs.try_for_each(|v| async move { self.delete(v.path()).await })
-                .await?;
-        }
-
+        let lister = self.lister_with(path).recursive(true).await?;
+        self.delete_try_stream(lister).await?;
 
         Ok(())
     }
diff --git a/core/src/types/operator/operator_futures.rs b/core/src/types/operator/operator_futures.rs
index 4ce296f748ff..de8f277ff284 100644
--- a/core/src/types/operator/operator_futures.rs
+++ b/core/src/types/operator/operator_futures.rs
@@ -439,6 +439,11 @@ impl>> FutureDelete {
     }
 }
 
+/// Future that generated by [`Operator::deleter_with`].
+///
+/// Users can add more options by public functions provided by this struct.
+pub type FutureDeleter = OperatorFuture;
+
 /// Future that generated by [`Operator::list_with`] or [`Operator::lister_with`].
 ///
 /// Users can add more options by public functions provided by this struct.
diff --git a/core/src/types/read/reader.rs b/core/src/types/read/reader.rs
index 3ec284e5bb89..ba7b6ea601f5 100644
--- a/core/src/types/read/reader.rs
+++ b/core/src/types/read/reader.rs
@@ -31,8 +31,7 @@ use crate::*;
 ///
 /// # Usage
 ///
-/// [`Reader`] provides multiple ways to read data from given reader. Please note that it's
-/// undefined behavior to use `Reader` in different ways.
+/// [`Reader`] provides multiple ways to read data from given reader.
 ///
 /// `Reader` implements `Clone` so you can clone it and store in place where ever you want.
 ///
diff --git a/core/tests/behavior/async_delete.rs b/core/tests/behavior/async_delete.rs
index 9f055b67a481..199d8965b253 100644
--- a/core/tests/behavior/async_delete.rs
+++ b/core/tests/behavior/async_delete.rs
@@ -16,7 +16,6 @@
 // under the License.
 
 use anyhow::Result;
-use futures::StreamExt;
 use futures::TryStreamExt;
 use log::warn;
 
@@ -118,7 +117,7 @@ pub async fn test_remove_one_file(op: Operator) -> Result<()> {
         .await
         .expect("write must succeed");
 
-    op.remove(vec![path.clone()]).await?;
+    op.delete_iter(vec![path.clone()]).await?;
 
     // Stat it again to check.
     assert!(!op.exists(&path).await?);
@@ -127,7 +126,7 @@ pub async fn test_remove_one_file(op: Operator) -> Result<()> {
         .await
         .expect("write must succeed");
 
-    op.remove(vec![path.clone()]).await?;
+    op.delete_iter(vec![path.clone()]).await?;
 
     // Stat it again to check.
    assert!(!op.exists(&path).await?);
@@ -156,9 +155,11 @@ pub async fn test_delete_stream(op: Operator) -> Result<()> {
         op.write(&format!("{dir}/{path}"), "delete_stream").await?;
     }
 
-    op.with_limit(30)
-        .remove_via(futures::stream::iter(expected.clone()).map(|v| format!("{dir}/{v}")))
+    let mut deleter = op.deleter().await?;
+    deleter
+        .delete_iter(expected.iter().map(|v| format!("{dir}/{v}")))
         .await?;
+    deleter.close().await?;
 
     // Stat it again to check.
     for path in expected.iter() {
diff --git a/core/tests/behavior/blocking_delete.rs b/core/tests/behavior/blocking_delete.rs
index 79e0ef24db10..7b66e520dda4 100644
--- a/core/tests/behavior/blocking_delete.rs
+++ b/core/tests/behavior/blocking_delete.rs
@@ -64,7 +64,7 @@ pub fn test_blocking_remove_one_file(op: BlockingOperator) -> Result<()> {
 
     op.write(&path, content).expect("write must succeed");
 
-    op.remove(vec![path.clone()])?;
+    op.delete_iter(vec![path.clone()])?;
 
     // Stat it again to check.
     assert!(!op.exists(&path)?);
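(Editor's note, not part of the patch: the rewritten `remove_all` above is just the pattern below, usable directly by callers migrating off the now-deprecated `remove`/`remove_via`. The `Entry` items produced by the lister satisfy `IntoDeleteInput`, so the listing can be piped straight into `delete_try_stream`. The prefix is illustrative only.)

```rust
use opendal::{Operator, Result};

/// Recursively list a prefix and delete everything it yields.
async fn purge_prefix(op: Operator, prefix: &str) -> Result<()> {
    // `lister_with(..).recursive(true)` yields `Result<Entry>` items,
    // which `delete_try_stream` accepts because `Entry: IntoDeleteInput`.
    let lister = op.lister_with(prefix).recursive(true).await?;
    op.delete_try_stream(lister).await?;
    Ok(())
}
```

Callers that previously used `op.remove(paths)` can switch to `op.delete_iter(paths)`, and `op.remove_via(stream)` maps to `op.delete_stream(stream)`, as the deprecation notes in the hunks above indicate.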