From 73bdb7f66c773017a8845f83afdb7d9e2877e068 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 26 Jun 2024 15:47:25 +0200 Subject: [PATCH 01/29] Fix and Improve liveliness doc (#1195) --- zenoh/src/api/liveliness.rs | 59 ++++++++++++++++++++++++++++--------- zenoh/src/lib.rs | 52 ++++++++++++++++++++++++++++++++ 2 files changed, 97 insertions(+), 14 deletions(-) diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 11cfc78918..04b69183a3 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -40,14 +40,12 @@ use super::{ /// A [`LivelinessToken`](LivelinessToken) is a token which liveliness is tied /// to the Zenoh [`Session`](Session) and can be monitored by remote applications. /// -/// A [`LivelinessToken`](LivelinessToken) with key `key/expression` can be -/// queried or subscribed to on key `@/liveliness/key/expression`. -/// /// The `Liveliness` structure can be obtained with the /// [`Session::liveliness()`](Session::liveliness) function /// of the [`Session`] struct. 
/// /// # Examples +/// ### Declaring a token /// ``` /// # #[tokio::main] /// # async fn main() { @@ -61,6 +59,39 @@ use super::{ /// .unwrap(); /// # } /// ``` +/// +/// ### Querying tokens +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::*; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let replies = session.liveliness().get("key/**").await.unwrap(); +/// while let Ok(reply) = replies.recv_async().await { +/// if let Ok(sample) = reply.result() { +/// println!(">> Liveliness token {}", sample.key_expr()); +/// } +/// } +/// # } +/// ``` +/// +/// ### Subscribing to liveliness changes +/// ```no_run +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::{prelude::*, sample::SampleKind}; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let subscriber = session.liveliness().declare_subscriber("key/**").await.unwrap(); +/// while let Ok(sample) = subscriber.recv_async().await { +/// match sample.kind() { +/// SampleKind::Put => println!("New liveliness: {}", sample.key_expr()), +/// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr()), +/// } +/// } +/// # } +/// ``` #[zenoh_macros::unstable] pub struct Liveliness<'a> { pub(crate) session: SessionRef<'a>, @@ -250,9 +281,6 @@ pub(crate) struct LivelinessTokenState { /// A token whose liveliness is tied to the Zenoh [`Session`](Session) /// and can be monitored by remote applications. /// -/// A `LivelinessToken` with key `key/expression` can be queried or subscribed -/// to on key `@/liveliness/key/expression`. -/// /// A declared liveliness token will be seen as alive by any other Zenoh /// application in the system that monitors it while the liveliness token /// is not undeclared or dropped, while the Zenoh application that declared @@ -388,7 +416,7 @@ impl Drop for LivelinessToken<'_> { } } -/// A builder for initializing a [`FlumeSubscriber`](FlumeSubscriber). 
+/// A builder for initializing a liveliness [`FlumeSubscriber`](FlumeSubscriber). /// /// # Examples /// ``` @@ -398,8 +426,8 @@ impl Drop for LivelinessToken<'_> { /// /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session +/// .liveliness() /// .declare_subscriber("key/expression") -/// .best_effort() /// .await /// .unwrap(); /// # } @@ -415,7 +443,7 @@ pub struct LivelinessSubscriberBuilder<'a, 'b, Handler> { #[zenoh_macros::unstable] impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { - /// Receive the samples for this subscription with a callback. + /// Receive the samples for this liveliness subscription with a callback. /// /// # Examples /// ``` @@ -425,6 +453,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session + /// .liveliness() /// .declare_subscriber("key/expression") /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) /// .await @@ -452,10 +481,10 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { } } - /// Receive the samples for this subscription with a mutable callback. + /// Receive the samples for this liveliness subscription with a mutable callback. /// /// Using this guarantees that your callback will never be called concurrently. 
- /// If your callback is also accepted by the [`callback`](SubscriberBuilder::callback) method, we suggest you use it instead of `callback_mut` + /// If your callback is also accepted by the [`callback`](LivelinessSubscriberBuilder::callback) method, we suggest you use it instead of `callback_mut` /// /// # Examples /// ``` @@ -466,6 +495,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let mut n = 0; /// let subscriber = session + /// .liveliness() /// .declare_subscriber("key/expression") /// .callback_mut(move |_sample| { n += 1; }) /// .await @@ -484,7 +514,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the samples for this liveliness subscription with a [`Handler`](crate::prelude::IntoHandler). /// /// # Examples /// ```no_run @@ -494,6 +524,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session + /// .liveliness() /// .declare_subscriber("key/expression") /// .with(flume::bounded(32)) /// .await @@ -642,7 +673,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { } } - /// Receive the replies for this query with a mutable callback. + /// Receive the replies for this liveliness query with a mutable callback. /// /// Using this guarantees that your callback will never be called concurrently. /// If your callback is also accepted by the [`callback`](LivelinessGetBuilder::callback) method, we suggest you use it instead of `callback_mut` @@ -674,7 +705,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoHandler). 
+ /// Receive the replies for this liveliness query with a [`Handler`](crate::prelude::IntoHandler). /// /// # Examples /// ``` diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c75e31aa3a..7299453f54 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -326,6 +326,58 @@ pub mod scouting { } /// Liveliness primitives +/// +/// A [`LivelinessToken`](liveliness::LivelinessToken) is a token which liveliness is tied +/// to the Zenoh [`Session`](Session) and can be monitored by remote applications. +/// +/// # Examples +/// ### Declaring a token +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::*; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let liveliness = session +/// .liveliness() +/// .declare_token("key/expression") +/// .await +/// .unwrap(); +/// # } +/// ``` +/// +/// ### Querying tokens +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::*; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let replies = session.liveliness().get("key/**").await.unwrap(); +/// while let Ok(reply) = replies.recv_async().await { +/// if let Ok(sample) = reply.result() { +/// println!(">> Liveliness token {}", sample.key_expr()); +/// } +/// } +/// # } +/// ``` +/// +/// ### Subscribing to liveliness changes +/// ```no_run +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::{prelude::*, sample::SampleKind}; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let subscriber = session.liveliness().declare_subscriber("key/**").await.unwrap(); +/// while let Ok(sample) = subscriber.recv_async().await { +/// match sample.kind() { +/// SampleKind::Put => println!("New liveliness: {}", sample.key_expr()), +/// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr()), +/// } +/// } +/// # } +/// ``` #[zenoh_macros::unstable] pub mod liveliness { pub use crate::api::liveliness::{ From 
ebc684cd54d8ba31fe2b938fda673cf507cff320 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 26 Jun 2024 21:10:01 +0200 Subject: [PATCH 02/29] Allow to enable/disable batching from config (#1196) --- DEFAULT_CONFIG.json5 | 2 ++ commons/zenoh-config/src/defaults.rs | 1 + commons/zenoh-config/src/lib.rs | 4 +++- io/zenoh-transport/src/common/pipeline.rs | 23 ++++++++++++++----- io/zenoh-transport/src/manager.rs | 10 ++++++++ io/zenoh-transport/src/multicast/link.rs | 1 + .../src/unicast/universal/link.rs | 1 + 7 files changed, 35 insertions(+), 7 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 0e180a0e07..6906d15cf5 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -284,6 +284,8 @@ /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). /// The default batch size value is the maximum batch size: 65535. batch_size: 65535, + /// Perform batching of messages if they are smaller than the batch_size + batching: true, /// Each zenoh link has a transmission queue that can be configured queue: { /// The size of each priority queue indicates the number of batches a given queue can contain. diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index 9d593fabb1..a6be460bcb 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -163,6 +163,7 @@ impl Default for LinkTxConf { batch_size: BatchSize::MAX, queue: QueueConf::default(), threads: num, + batching: true, } } } diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index b7530e91a6..51dce4ffb4 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -384,8 +384,10 @@ validated_struct::validator!
{ lease: u64, /// Number of keep-alive messages in a link lease duration (default: 4) keep_alive: usize, - /// Zenoh's MTU equivalent (default: 2^16-1) + /// Zenoh's MTU equivalent (default: 2^16-1) (max: 2^16-1) batch_size: BatchSize, + /// Perform batching of messages if they are smaller than the batch_size + batching: bool, pub queue: QueueConf { /// The size of each priority queue indicates the number of batches a given queue can contain. /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index e497199010..68a4b87d24 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -124,6 +124,7 @@ struct StageIn { s_out: StageInOut, mutex: StageInMutex, fragbuf: ZBuf, + batching: bool, } impl StageIn { @@ -179,7 +180,7 @@ impl StageIn { macro_rules! zretok { ($batch:expr, $msg:expr) => {{ - if $msg.is_express() { + if !self.batching || $msg.is_express() { // Move out existing batch self.s_out.move_batch($batch); return true; @@ -315,11 +316,17 @@ impl StageIn { macro_rules!
zretok { ($batch:expr) => {{ - let bytes = $batch.len(); - *c_guard = Some($batch); - drop(c_guard); - self.s_out.notify(bytes); - return true; + if !self.batching { + // Move out existing batch + self.s_out.move_batch($batch); + return true; + } else { + let bytes = $batch.len(); + *c_guard = Some($batch); + drop(c_guard); + self.s_out.notify(bytes); + return true; + } }}; } @@ -494,6 +501,7 @@ pub(crate) struct TransmissionPipelineConf { pub(crate) batch: BatchConfig, pub(crate) queue_size: [usize; Priority::NUM], pub(crate) wait_before_drop: Duration, + pub(crate) batching: bool, pub(crate) backoff: Duration, } @@ -554,6 +562,7 @@ impl TransmissionPipeline { priority: priority[prio].clone(), }, fragbuf: ZBuf::empty(), + batching: config.batching, })); // The stage out for this priority @@ -765,6 +774,7 @@ mod tests { is_compression: true, }, queue_size: [1; Priority::NUM], + batching: true, wait_before_drop: Duration::from_millis(1), backoff: Duration::from_micros(1), }; @@ -777,6 +787,7 @@ mod tests { is_compression: false, }, queue_size: [1; Priority::NUM], + batching: true, wait_before_drop: Duration::from_millis(1), backoff: Duration::from_micros(1), }; diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index 0d8c29ea9d..9df02dfc67 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -100,6 +100,7 @@ pub struct TransportManagerConfig { pub whatami: WhatAmI, pub resolution: Resolution, pub batch_size: BatchSize, + pub batching: bool, pub wait_before_drop: Duration, pub queue_size: [usize; Priority::NUM], pub queue_backoff: Duration, @@ -129,6 +130,7 @@ pub struct TransportManagerBuilder { whatami: WhatAmI, resolution: Resolution, batch_size: BatchSize, + batching: bool, wait_before_drop: Duration, queue_size: QueueSizeConf, queue_backoff: Duration, @@ -170,6 +172,11 @@ impl TransportManagerBuilder { self } + pub fn batching(mut self, batching: bool) -> Self { + self.batching = batching; + 
self + } + pub fn wait_before_drop(mut self, wait_before_drop: Duration) -> Self { self.wait_before_drop = wait_before_drop; self @@ -231,6 +238,7 @@ impl TransportManagerBuilder { resolution.set(Field::FrameSN, *link.tx().sequence_number_resolution()); self = self.resolution(resolution); self = self.batch_size(*link.tx().batch_size()); + self = self.batching(*link.tx().batching()); self = self.defrag_buff_size(*link.rx().max_message_size()); self = self.link_rx_buffer_size(*link.rx().buffer_size()); self = self.wait_before_drop(Duration::from_micros( @@ -293,6 +301,7 @@ impl TransportManagerBuilder { whatami: self.whatami, resolution: self.resolution, batch_size: self.batch_size, + batching: self.batching, wait_before_drop: self.wait_before_drop, queue_size, queue_backoff: self.queue_backoff, @@ -339,6 +348,7 @@ impl Default for TransportManagerBuilder { whatami: zenoh_config::defaults::mode, resolution: Resolution::default(), batch_size: BatchSize::MAX, + batching: true, wait_before_drop: Duration::from_micros(wait_before_drop), queue_size: queue.size, queue_backoff: Duration::from_nanos(backoff), diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 794d36d9e7..9c2bdbe1f1 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -323,6 +323,7 @@ impl TransportLinkMulticastUniversal { batch: self.link.config.batch, queue_size: self.transport.manager.config.queue_size, wait_before_drop: self.transport.manager.config.wait_before_drop, + batching: self.transport.manager.config.batching, backoff: self.transport.manager.config.queue_backoff, }; // The pipeline diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index 07de4fb744..9655d0964d 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -62,6 +62,7 @@ impl TransportLinkUnicastUniversal { }, 
queue_size: transport.manager.config.queue_size, wait_before_drop: transport.manager.config.wait_before_drop, + batching: transport.manager.config.batching, backoff: transport.manager.config.queue_backoff, }; From 1790d59d1a9aa4995b3997f6fa96e90b24d28a25 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 26 Jun 2024 21:20:21 +0200 Subject: [PATCH 03/29] Fix doc warnings (#1197) * Fix and Improve liveliness doc * Fix doc warnings * Add doc warnings check in CI --- .github/workflows/ci.yml | 5 ++++ .../zenoh-keyexpr/src/key_expr/format/mod.rs | 10 +++---- commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs | 2 +- commons/zenoh-keyexpr/src/lib.rs | 8 ++--- commons/zenoh-protocol/src/core/mod.rs | 2 +- commons/zenoh-protocol/src/core/parameters.rs | 6 ++-- commons/zenoh-protocol/src/network/declare.rs | 8 +++-- commons/zenoh-protocol/src/network/mod.rs | 2 ++ commons/zenoh-protocol/src/network/request.rs | 3 +- commons/zenoh-protocol/src/scouting/hello.rs | 10 +++---- commons/zenoh-protocol/src/scouting/scout.rs | 2 +- .../zenoh-protocol/src/transport/fragment.rs | 2 +- commons/zenoh-protocol/src/transport/frame.rs | 6 ++-- commons/zenoh-protocol/src/transport/mod.rs | 2 ++ commons/zenoh-protocol/src/zenoh/mod.rs | 2 ++ commons/zenoh-util/src/log.rs | 4 +-- io/zenoh-transport/src/common/batch.rs | 30 +++++++++---------- .../unicast/establishment/ext/auth/pubkey.rs | 8 +++++ .../unicast/establishment/ext/auth/usrpwd.rs | 8 +++++ .../src/unicast/establishment/ext/shm.rs | 8 +++++ plugins/zenoh-backend-traits/src/lib.rs | 2 +- plugins/zenoh-plugin-trait/src/lib.rs | 6 ++-- plugins/zenoh-plugin-trait/src/manager.rs | 2 +- plugins/zenoh-plugin-trait/src/plugin.rs | 4 +-- zenoh/src/api/info.rs | 2 +- zenoh/src/api/liveliness.rs | 2 +- zenoh/src/api/query.rs | 4 +-- zenoh/src/api/queryable.rs | 8 ++--- zenoh/src/api/scouting.rs | 8 ++--- zenoh/src/api/selector.rs | 2 +- zenoh/src/api/session.rs | 16 +++++----- zenoh/src/api/subscriber.rs | 6 ++-- zenoh/src/lib.rs | 24 
+++++++-------- zenohd/src/main.rs | 2 +- 34 files changed, 128 insertions(+), 88 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a62257446e..123085319e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -86,6 +86,11 @@ jobs: - name: Run doctests run: cargo test --doc + - name: Build doc + run: cargo doc --no-deps --features unstable + env: + RUSTDOCFLAGS: -Dwarnings + - name: Check licenses run: cargo deny check licenses diff --git a/commons/zenoh-keyexpr/src/key_expr/format/mod.rs b/commons/zenoh-keyexpr/src/key_expr/format/mod.rs index bf5536ec63..d4eccd6d41 100644 --- a/commons/zenoh-keyexpr/src/key_expr/format/mod.rs +++ b/commons/zenoh-keyexpr/src/key_expr/format/mod.rs @@ -17,8 +17,8 @@ //! The same issue arises naturally when designing a KE space, and [`KeFormat`] was designed to help you with this, //! both in constructing and in parsing KEs that fit the formats you've defined. //! -//! [`kedefine`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, -//! as the [`keformat`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.keformat.html) and [`kewrite`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist. +//! [`kedefine`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, +//! as the [`keformat`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.keformat.htmll) and [`kewrite`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist. //! //! ## The format syntax //! 
KE formats are defined following a syntax that extends the [`keyexpr`] syntax. In addition to existing chunk types, KE formmats support "specification" chunks. @@ -67,8 +67,8 @@ use support::{IterativeConstructor, Spec}; /// The same issue arises naturally when designing a KE space, and [`KeFormat`] was designed to help you with this, /// both in constructing and in parsing KEs that fit the formats you've defined. /// -/// [`zenoh::kedefine`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, -/// as the [`zenoh::keformat`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.keformat.html) and [`zenoh::kewrite`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist. +/// [`kedefine`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, +/// as the [`keformat`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.keformat.html) and [`kewrite`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist. /// /// ## The format syntax /// KE formats are defined following a syntax that extends the [`keyexpr`] syntax. In addition to existing chunk types, KE formmats support "specification" chunks. @@ -120,7 +120,7 @@ impl<'s> KeFormat<'s, Vec>> { /// /// `N` is simply the number of specifications in `value`. If this number of specs isn't known at compile-time, use [`KeFormat::new`] instead.
/// - /// If you know `value` at compile time, using [`zenoh::kedefine`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kedefine.html) instead is advised, + /// If you know `value` at compile time, using [`kedefine`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kedefine.html) instead is advised, /// as it will provide more features and construct higher performance formats than this constructor. pub fn noalloc_new(value: &'s str) -> ZResult; N]>> { value.try_into() diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs index 5d7991289e..5bd0f7dae3 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs @@ -47,7 +47,7 @@ //! # Iterators //! KeTrees provide iterators for the following operations: //! - Iterating on all nodes ([`IKeyExprTree::tree_iter`]/[`IKeyExprTreeMut::tree_iter_mut`]) -//! - Iterating on key-value pairs in the KeTree ([`IKeyExprTreeExt::key_value_pairs`]) +//! - Iterating on key-value pairs in the KeTree ([`IKeyExprTree::key_value_pairs`]) //! - Iterating on nodes whose KE intersects with a queried KE ([`IKeyExprTree::intersecting_nodes`], [`IKeyExprTreeMut::intersecting_nodes_mut`]) //! - Iterating on nodes whose KE are included by a queried KE ([`IKeyExprTree::included_nodes`], [`IKeyExprTreeMut::included_nodes_mut`]) //! - Iterating on nodes whose KE includes a queried KE ([`IKeyExprTree::nodes_including`], [`IKeyExprTreeMut::nodes_including_mut`]) diff --git a/commons/zenoh-keyexpr/src/lib.rs b/commons/zenoh-keyexpr/src/lib.rs index f80a9c177c..5142076b6d 100644 --- a/commons/zenoh-keyexpr/src/lib.rs +++ b/commons/zenoh-keyexpr/src/lib.rs @@ -22,8 +22,8 @@ //! # Storing Key Expressions //! This module provides 2 flavours to store strings that have been validated to respect the KE syntax, and a third is provided by [`zenoh`](https://docs.rs/zenoh): //! - [`keyexpr`] is the equivalent of a [`str`], -//! 
- [`OwnedKeyExpr`] works like an [`Arc`], -//! - [`KeyExpr`](https://docs.rs/zenoh/latest/zenoh/key_expr/struct.KeyExpr.html) works like a [`Cow`], but also stores some additional context internal to Zenoh to optimize +//! - [`OwnedKeyExpr`] works like an [`Arc`](std::sync::Arc), +//! - [`KeyExpr`](https://docs.rs/zenoh/latest/zenoh/key_expr/struct.KeyExpr.html) works like a [`Cow`](std::borrow::Cow), but also stores some additional context internal to Zenoh to optimize //! routing and network usage. //! //! All of these types [`Deref`](core::ops::Deref) to [`keyexpr`], which notably has methods to check whether a given [`keyexpr::intersects`] with another, @@ -40,8 +40,8 @@ //! The same issue arises naturally when designing a KE space, and [`KeFormat`](format::KeFormat) was designed to help you with this, //! both in constructing and in parsing KEs that fit the formats you've defined. //! -//! [`kedefine`] also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, -//! as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist. +//! [`kedefine`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, +//! as the [`keformat`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.keformat.html) and [`kewrite`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist. 
#![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index e9bc700318..ebf1bb7f85 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -273,7 +273,7 @@ impl<'de> serde::Deserialize<'de> for ZenohIdProto { } } -/// The unique id of a zenoh entity inside it's parent [`Session`]. +/// The unique id of a zenoh entity inside it's parent `Session`. pub type EntityId = u32; /// The global unique id of a zenoh entity. diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs index e44f2f6284..38cb368b5b 100644 --- a/commons/zenoh-protocol/src/core/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -50,7 +50,7 @@ pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { .map(|p| split_once(p, FIELD_SEPARATOR)) } -/// Same as [`Self::from_iter_into`] but keys are sorted in alphabetical order. +/// Same as [`from_iter_into`] but keys are sorted in alphabetical order. pub fn sort<'s, I>(iter: I) -> impl Iterator where I: Iterator, @@ -84,7 +84,7 @@ where into } -/// Same as [`Self::from_iter`] but it writes into a user-provided string instead of allocating a new one. +/// Same as [`from_iter`] but it writes into a user-provided string instead of allocating a new one. pub fn from_iter_into<'s, I>(iter: I, into: &mut String) where I: Iterator, @@ -131,7 +131,7 @@ pub fn insert<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str (from_iter(iter), item) } -/// Same as [`Self::insert`] but keys are sorted in alphabetical order. +/// Same as [`insert`] but keys are sorted in alphabetical order. 
pub fn insert_sort<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { let (iter, item) = _insert(iter(s), k, v); (from_iter(sort(iter)), item) diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index d8c66559ce..8f31e0ff2a 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -32,6 +32,7 @@ pub mod flag { pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } +/// ```text /// Flags: /// - I: Interest If I==1 then interest_id is present /// - X: Reserved @@ -47,7 +48,7 @@ pub mod flag { /// +---------------+ /// ~ declaration ~ /// +---------------+ -/// +/// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct Declare { pub interest_id: Option, @@ -178,6 +179,7 @@ pub mod common { pub mod ext { use super::*; + /// ```text /// Flags: /// - N: Named If N==1 then the key expr has name/suffix /// - M: Mapping if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver @@ -190,7 +192,7 @@ pub mod common { /// +---------------+ /// ~ key_suffix ~ if N==1 -- /// +---------------+ - /// + /// ``` pub type WireExprExt = zextzbuf!(0x0f, true); #[derive(Debug, Clone, PartialEq, Eq)] pub struct WireExprType { @@ -513,6 +515,7 @@ pub mod queryable { pub const C: u8 = 1; // 0x01 Complete if C==1 then the queryable is complete } /// + /// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// |Z|0_1| ID | @@ -521,6 +524,7 @@ pub mod queryable { /// +---------------+ /// ~ distance ~ /// +---------------+ + /// ``` #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct QueryableInfoType { pub complete: bool, // Default false: incomplete diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index b9f3076581..407df6dd52 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ 
-418,6 +418,7 @@ pub mod ext { } } + /// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// |zid_len|X|X|X|X| @@ -426,6 +427,7 @@ pub mod ext { /// +---------------+ /// % eid % /// +---------------+ + /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct EntityGlobalIdType { pub zid: ZenohIdProto, diff --git a/commons/zenoh-protocol/src/network/request.rs b/commons/zenoh-protocol/src/network/request.rs index 09e8e6b2b6..ceeec85043 100644 --- a/commons/zenoh-protocol/src/network/request.rs +++ b/commons/zenoh-protocol/src/network/request.rs @@ -82,12 +82,13 @@ pub mod ext { pub type NodeIdType = crate::network::ext::NodeIdType<{ NodeId::ID }>; pub type Target = zextz64!(0x4, true); + /// ```text /// - Target (0x03) /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// % target % /// +---------------+ - /// + /// ``` /// The `zenoh::queryable::Queryable`s that should be target of a `zenoh::Session::get()`. #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] pub enum TargetType { diff --git a/commons/zenoh-protocol/src/scouting/hello.rs b/commons/zenoh-protocol/src/scouting/hello.rs index 61c7db4ce6..69109ed611 100644 --- a/commons/zenoh-protocol/src/scouting/hello.rs +++ b/commons/zenoh-protocol/src/scouting/hello.rs @@ -17,8 +17,8 @@ use crate::core::{Locator, WhatAmI, ZenohIdProto}; /// # Hello message /// -/// The [`Hello`] message is used to advertise the locators a zenoh node is reachable at. -/// The [`Hello`] message SHOULD be sent in a unicast fashion in response to a [`super::Scout`] +/// The `Hello` message is used to advertise the locators a zenoh node is reachable at. 
+/// The `Hello` message SHOULD be sent in a unicast fashion in response to a [`super::Scout`] /// message as shown below: /// /// ```text @@ -34,7 +34,7 @@ use crate::core::{Locator, WhatAmI, ZenohIdProto}; /// | | | /// ``` /// -/// Moreover, a [`Hello`] message MAY be sent in the network in a multicast +/// Moreover, a `Hello` message MAY be sent in the network in a multicast /// fashion to advertise the presence of zenoh node. The advertisement operation MAY be performed /// periodically as shown below: /// @@ -54,7 +54,7 @@ use crate::core::{Locator, WhatAmI, ZenohIdProto}; /// | | | /// ``` /// -/// Examples of locators included in the [`Hello`] message are: +/// Examples of locators included in the `Hello` message are: /// /// ```text /// udp/192.168.1.1:7447 @@ -63,7 +63,7 @@ use crate::core::{Locator, WhatAmI, ZenohIdProto}; /// tcp/localhost:7447 /// ``` /// -/// The [`Hello`] message structure is defined as follows: +/// The `Hello` message structure is defined as follows: /// /// ```text /// Header flags: diff --git a/commons/zenoh-protocol/src/scouting/scout.rs b/commons/zenoh-protocol/src/scouting/scout.rs index 6d2b49f335..a65c10a4f5 100644 --- a/commons/zenoh-protocol/src/scouting/scout.rs +++ b/commons/zenoh-protocol/src/scouting/scout.rs @@ -18,7 +18,7 @@ use crate::core::{whatami::WhatAmIMatcher, ZenohIdProto}; /// The [`Scout`] message MAY be sent at any point in time to discover the available zenoh nodes in the /// network. The [`Scout`] message SHOULD be sent in a multicast or broadcast fashion. Upon receiving a /// [`Scout`] message, a zenoh node MUST first verify whether the matching criteria are satisfied, then -/// it SHOULD reply with a [`super::Hello`] message in a unicast fashion including all the requested +/// it SHOULD reply with a [`super::HelloProto`] message in a unicast fashion including all the requested /// information. 
/// /// The scouting message flow is the following: diff --git a/commons/zenoh-protocol/src/transport/fragment.rs b/commons/zenoh-protocol/src/transport/fragment.rs index d60df23227..eccc7b80c0 100644 --- a/commons/zenoh-protocol/src/transport/fragment.rs +++ b/commons/zenoh-protocol/src/transport/fragment.rs @@ -18,7 +18,7 @@ pub use crate::transport::TransportSn; /// # Fragment message /// -/// The [`Fragment`] message is used to transmit on the wire large [`crate::zenoh::ZenohMessage`] +/// The [`Fragment`] message is used to transmit on the wire large [`crate::network::NetworkMessage`] /// that require fragmentation because they are larger than the maximum batch size /// (i.e. 2^16-1) and/or the link MTU. /// diff --git a/commons/zenoh-protocol/src/transport/frame.rs b/commons/zenoh-protocol/src/transport/frame.rs index 480bebe08e..b3ef1d819f 100644 --- a/commons/zenoh-protocol/src/transport/frame.rs +++ b/commons/zenoh-protocol/src/transport/frame.rs @@ -18,11 +18,11 @@ use crate::{core::Reliability, network::NetworkMessage, transport::TransportSn}; /// # Frame message /// /// The [`Frame`] message is used to transmit one ore more complete serialized -/// [`crate::net::protocol::message::ZenohMessage`]. I.e., the total length of the -/// serialized [`crate::net::protocol::message::ZenohMessage`] (s) MUST be smaller +/// [`crate::network::NetworkMessage`]. I.e., the total length of the +/// serialized [`crate::network::NetworkMessage`] (s) MUST be smaller /// than the maximum batch size (i.e. 2^16-1) and the link MTU. /// The [`Frame`] message is used as means to aggregate multiple -/// [`crate::net::protocol::message::ZenohMessage`] in a single atomic message that +/// [`crate::network::NetworkMessage`] in a single atomic message that /// goes on the wire. By doing so, many small messages can be batched together and /// share common information like the sequence number. 
/// diff --git a/commons/zenoh-protocol/src/transport/mod.rs b/commons/zenoh-protocol/src/transport/mod.rs index e92860f441..ba2ac32c4a 100644 --- a/commons/zenoh-protocol/src/transport/mod.rs +++ b/commons/zenoh-protocol/src/transport/mod.rs @@ -255,11 +255,13 @@ impl fmt::Display for TransportMessage { pub mod ext { use crate::{common::ZExtZ64, core::Priority}; + /// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// %0| rsv |prio % /// +---------------+ /// - prio: Priority class + /// ``` #[repr(transparent)] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct QoSType { diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index eeb1a63c1d..320db6884d 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -138,6 +138,7 @@ pub mod ext { use crate::core::{Encoding, EntityGlobalIdProto}; + /// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// |zid_len|X|X|X|X| @@ -148,6 +149,7 @@ pub mod ext { /// +---------------+ /// % sn % /// +---------------+ + /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct SourceInfoType { pub id: EntityGlobalIdProto, diff --git a/commons/zenoh-util/src/log.rs b/commons/zenoh-util/src/log.rs index 67f1a45df7..023c77121f 100644 --- a/commons/zenoh-util/src/log.rs +++ b/commons/zenoh-util/src/log.rs @@ -27,7 +27,7 @@ use tracing_subscriber::{ /// Calling this function initializes a `lazy_static` in the `tracing` crate /// such static is not deallocated prior to process existing, thus tools such as `valgrind` /// will report a memory leak. 
-/// Refer to this issue: https://github.com/tokio-rs/tracing/issues/2069 +/// Refer to this issue: <https://github.com/tokio-rs/tracing/issues/2069> pub fn try_init_log_from_env() { if let Ok(env_filter) = EnvFilter::try_from_default_env() { init_env_filter(env_filter); @@ -41,7 +41,7 @@ pub fn try_init_log_from_env() { /// Calling this function initializes a `lazy_static` in the `tracing` crate /// such static is not deallocated prior to process existing, thus tools such as `valgrind` /// will report a memory leak. -/// Refer to this issue: https://github.com/tokio-rs/tracing/issues/2069 +/// Refer to this issue: <https://github.com/tokio-rs/tracing/issues/2069> pub fn init_log_from_env_or(fallback: S) where S: AsRef, diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 9a58aafd5d..1b065191c0 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -149,7 +149,7 @@ impl BatchHeader { self.0 } - /// Verify that the [`WBatch`][WBatch] is for a stream-based protocol, i.e., the first + /// Verify that the [`WBatch`] is for a stream-based protocol, i.e., the first /// 2 bytes are reserved to encode the total amount of serialized bytes as 16-bits little endian. #[cfg(feature = "transport_compression")] #[inline(always)] @@ -181,22 +181,22 @@ pub enum Finalize { /// Write Batch /// -/// A [`WBatch`][WBatch] is a non-expandable and contiguous region of memory -/// that is used to serialize [`TransportMessage`][TransportMessage] and [`ZenohMessage`][ZenohMessage]. +/// A [`WBatch`] is a non-expandable and contiguous region of memory +/// that is used to serialize [`TransportMessage`] and [`NetworkMessage`]. 
/// -/// [`TransportMessage`][TransportMessage] are always serialized on the batch as they are, while -/// [`ZenohMessage`][ZenohMessage] are always serializaed on the batch as part of a [`TransportMessage`] +/// [`TransportMessage`] are always serialized on the batch as they are, while +/// [`NetworkMessage`] are always serialized on the batch as part of a [`TransportMessage`] /// [TransportMessage] Frame. Reliable and Best Effort Frames can be interleaved on the same -/// [`WBatch`][WBatch] as long as they fit in the remaining buffer capacity. +/// [`WBatch`] as long as they fit in the remaining buffer capacity. /// -/// In the serialized form, the [`WBatch`][WBatch] always contains one or more -/// [`TransportMessage`][TransportMessage]. In the particular case of [`TransportMessage`][TransportMessage] Frame, -/// its payload is either (i) one or more complete [`ZenohMessage`][ZenohMessage] or (ii) a fragment of a -/// a [`ZenohMessage`][ZenohMessage]. +/// In the serialized form, the [`WBatch`] always contains one or more +/// [`TransportMessage`]. In the particular case of [`TransportMessage`] Frame, +/// its payload is either (i) one or more complete [`NetworkMessage`] or (ii) a fragment of +/// a [`NetworkMessage`]. +/// -/// As an example, the content of the [`WBatch`][WBatch] in memory could be: +/// As an example, the content of the [`WBatch`] in memory could be: /// -/// | Keep Alive | Frame Reliable | Frame Best Effort | +/// | Keep Alive | Frame Reliable\ | Frame Best Effort\ | /// #[derive(Clone, Debug)] pub struct WBatch { @@ -227,20 +227,20 @@ impl WBatch { batch } - /// Verify that the [`WBatch`][WBatch] has no serialized bytes. + /// Verify that the [`WBatch`] has no serialized bytes. #[inline(always)] pub fn is_empty(&self) -> bool { self.len() == 0 } - /// Get the total number of bytes that have been serialized on the [`WBatch`][WBatch]. + /// Get the total number of bytes that have been serialized on the [`WBatch`]. 
#[inline(always)] pub fn len(&self) -> BatchSize { let (_l, _h, p) = Self::split(self.buffer.as_slice(), &self.config); p.len() as BatchSize } - /// Clear the [`WBatch`][WBatch] memory buffer and related internal state. + /// Clear the [`WBatch`] memory buffer and related internal state. #[inline(always)] pub fn clear(&mut self) { self.buffer.clear(); diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs index 69b4707bf0..5638a9ee33 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs @@ -213,12 +213,14 @@ where /*************************************/ /* InitSyn */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ public key ~ /// +---------------+ /// /// ZExtZBuf +/// ``` pub(crate) struct InitSyn { pub(crate) alice_pubkey: ZPublicKey, } @@ -250,6 +252,7 @@ where /*************************************/ /* InitAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ public key ~ @@ -258,6 +261,7 @@ where /// +---------------+ /// /// ZExtZBuf +/// ``` pub(crate) struct InitAck { pub(crate) bob_pubkey: ZPublicKey, pub(crate) nonce_encrypted_with_alice_pubkey: Vec, @@ -295,12 +299,14 @@ where /*************************************/ /* OpenSyn */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ ciphered nonce~ /// +---------------+ /// /// ZExtZBuf +/// ``` pub(crate) struct OpenSyn { pub(crate) nonce_encrypted_with_bob_pubkey: Vec, } @@ -334,11 +340,13 @@ where /*************************************/ /* OpenAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// +---------------+ /// /// ZExtUnit +/// ``` pub(crate) struct AuthPubKeyFsm<'a> { inner: &'a RwLock, diff --git 
a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs index 22d7a86817..46d3f349b4 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs @@ -217,25 +217,30 @@ impl<'a> AuthUsrPwdFsm<'a> { /*************************************/ /* InitSyn */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// +---------------+ /// /// ZExtUnit +/// ``` /*************************************/ /* InitAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ nonce ~ /// +---------------+ /// /// ZExtZ64 +/// ``` /*************************************/ /* OpenSyn */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ user ~ @@ -244,6 +249,7 @@ impl<'a> AuthUsrPwdFsm<'a> { /// +---------------+ /// /// ZExtZBuf +/// ``` struct OpenSyn { user: Vec, hmac: Vec, @@ -278,11 +284,13 @@ where /*************************************/ /* OpenAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// +---------------+ /// /// ZExtUnit +/// ``` #[async_trait] impl<'a> OpenFsm for &'a AuthUsrPwdFsm<'a> { diff --git a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs index bc96d2e34a..1a6f272d42 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs @@ -109,10 +109,12 @@ impl AuthUnicast { /*************************************/ /* InitSyn */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ Segment id ~ /// +---------------+ +/// ``` pub(crate) struct InitSyn { pub(crate) alice_segment: AuthSegmentID, } @@ -145,12 +147,14 @@ where 
/*************************************/ /* InitAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ challenge ~ /// +---------------+ /// ~ Segment id ~ /// +---------------+ +/// ``` struct InitAck { alice_challenge: u64, bob_segment: AuthSegmentID, @@ -188,18 +192,22 @@ where /*************************************/ /* OpenSyn */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ challenge ~ /// +---------------+ +/// ``` /*************************************/ /* OpenAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ ack ~ /// +---------------+ +/// ``` // Extension Fsm pub(crate) struct ShmFsm<'a> { diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index d59d764004..851e9cfbb0 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -225,7 +225,7 @@ pub trait Storage: Send + Sync { /// on the administration space for this storage. fn get_admin_status(&self) -> serde_json::Value; - /// Function called for each incoming data ([`Sample`]) to be stored in this storage. + /// Function called for each incoming data ([`Sample`](zenoh::sample::Sample)) to be stored in this storage. /// A key can be `None` if it matches the `strip_prefix` exactly. /// In order to avoid data loss, the storage must store the `value` and `timestamp` associated with the `None` key /// in a manner suitable for the given backend technology diff --git a/plugins/zenoh-plugin-trait/src/lib.rs b/plugins/zenoh-plugin-trait/src/lib.rs index b9dbb455ab..36c5097795 100644 --- a/plugins/zenoh-plugin-trait/src/lib.rs +++ b/plugins/zenoh-plugin-trait/src/lib.rs @@ -25,13 +25,13 @@ //! //! The actual work of the plugin is performed by the instance, which is created by the [`start`](Plugin::start) function. //! -//! 
Plugins are loaded, started and stopped by [`PluginsManager`](crate::manager::PluginsManager). Stopping plugin is just dropping it's instance. +//! Plugins are loaded, started and stopped by [`PluginsManager`]. Stopping plugin is just dropping its instance. //! //! Plugins can be static and dynamic. //! -//! Static plugin is just a type which implements [`Plugin`] trait. It can be added to [`PluginsManager`](crate::manager::PluginsManager) by [`PluginsManager::add_static_plugin`](crate::manager::PluginsManager::add_static_plugin) method. +//! Static plugin is just a type which implements [`Plugin`] trait. It can be added to [`PluginsManager`] by [`PluginsManager::declare_static_plugin`](crate::manager::PluginsManager::declare_static_plugin) method. //! -//! Dynamic plugin is a shared library which exports set of C-repr (unmangled) functions which allows to check plugin compatibility and create plugin instance. These functiuons are defined automatically by [`declare_plugin`](crate::declare_plugin) macro. +//! Dynamic plugin is a shared library which exports set of C-repr (unmangled) functions which allows to check plugin compatibility and create plugin instance. These functions are defined automatically by [`declare_plugin`] macro. //! mod compatibility; mod manager; diff --git a/plugins/zenoh-plugin-trait/src/manager.rs b/plugins/zenoh-plugin-trait/src/manager.rs index 5c9c9e8bd2..90651532ec 100644 --- a/plugins/zenoh-plugin-trait/src/manager.rs +++ b/plugins/zenoh-plugin-trait/src/manager.rs @@ -100,7 +100,7 @@ impl DeclaredPlugin { default_lib_prefix: String, loader: Option, diff --git a/plugins/zenoh-plugin-trait/src/plugin.rs b/plugins/zenoh-plugin-trait/src/plugin.rs index 373da64634..b0651d9842 100644 --- a/plugins/zenoh-plugin-trait/src/plugin.rs +++ b/plugins/zenoh-plugin-trait/src/plugin.rs @@ -175,9 +175,9 @@ pub trait Plugin: Sized + 'static { type Instance: PluginInstance; /// Plugins' default name when statically linked. 
const DEFAULT_NAME: &'static str; - /// Plugin's version. Used only for information purposes. It's recommended to use [plugin_version!] macro to generate this string. + /// Plugin's version. Used only for information purposes. It's recommended to use [plugin_version!](crate::plugin_version!) macro to generate this string. const PLUGIN_VERSION: &'static str; - /// Plugin's long version (with git commit hash). Used only for information purposes. It's recommended to use [plugin_long_version!] macro to generate this string. + /// Plugin's long version (with git commit hash). Used only for information purposes. It's recommended to use [plugin_long_version!](crate::plugin_long_version!) macro to generate this string. const PLUGIN_LONG_VERSION: &'static str; /// Starts your plugin. Use `Ok` to return your plugin's control structure fn start(name: &str, args: &Self::StartArgs) -> ZResult<Self::Instance>; diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index 4a53a60851..32bed0eb53 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -159,7 +159,7 @@ impl<'a> IntoFuture for PeersZenohIdBuilder<'a> { } } -/// Struct returned by [`Session::info()`](crate::SessionDeclarations::info) which allows +/// Struct returned by [`Session::info()`](crate::session::SessionDeclarations::info) which allows /// to access information about the current zenoh [`Session`](crate::Session). /// /// # Examples diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 04b69183a3..038a4b8eab 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -705,7 +705,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the replies for this liveliness query with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the replies for this liveliness query with a [`Handler`](crate::handlers::IntoHandler). 
/// /// # Examples /// ``` diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 408be5514b..e9598a0064 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -308,7 +308,7 @@ impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { /// Receive the replies for this query with a mutable callback. /// /// Using this guarantees that your callback will never be called concurrently. - /// If your callback is also accepted by the [`callback`](GetBuilder::callback) method, we suggest you use it instead of `callback_mut` + /// If your callback is also accepted by the [`callback`](crate::session::SessionGetBuilder::callback) method, we suggest you use it instead of `callback_mut` /// /// # Examples /// ``` @@ -336,7 +336,7 @@ impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the replies for this query with a [`Handler`](crate::handlers::IntoHandler). /// /// # Examples /// ``` diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 7c610bf2b4..566a903bd1 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -713,7 +713,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the queries for this Queryable with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the queries for this Queryable with a [`Handler`](crate::handlers::IntoHandler). /// /// # Examples /// ```no_run @@ -771,10 +771,10 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { } } -/// A queryable that provides data through a [`Handler`](crate::prelude::IntoHandler). +/// A queryable that provides data through a [`Handler`](crate::handlers::IntoHandler). 
/// -/// Queryables can be created from a zenoh [`Session`] -/// with the [`declare_queryable`](crate::Session::declare_queryable) function +/// Queryables can be created from a zenoh [`Session`](crate::Session) +/// with the [`declare_queryable`](crate::session::SessionDeclarations::declare_queryable) function /// and the [`with`](QueryableBuilder::with) function /// of the resulting builder. /// diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 59b3d0dfcb..4f08530533 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -116,7 +116,7 @@ impl ScoutBuilder { self.callback(locked(callback)) } - /// Receive the [`Hello`] messages from this scout with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the [`Hello`] messages from this scout with a [`Handler`](crate::handlers::IntoHandler). /// /// # Examples /// ```no_run @@ -238,7 +238,7 @@ impl fmt::Debug for ScoutInner { } } -/// A scout that returns [`Hello`] messages through a [`Handler`](crate::prelude::IntoHandler). +/// A scout that returns [`Hello`] messages through a [`Handler`](crate::handlers::IntoHandler). /// /// # Examples /// ```no_run @@ -348,12 +348,12 @@ fn _scout( /// /// [`scout`] spawns a task that periodically sends scout messages and waits for [`Hello`](crate::scouting::Hello) replies. /// -/// Drop the returned [`Scout`](crate::scouting::Scout) to stop the scouting task. +/// Drop the returned [`Scout`] to stop the scouting task. 
/// /// # Arguments /// /// * `what` - The kind of zenoh process to scout for -/// * `config` - The configuration [`Config`] to use for scouting +/// * `config` - The configuration [`crate::Config`] to use for scouting /// /// # Examples /// ```no_run diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index e328761cb5..813ae0528d 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -24,7 +24,7 @@ use ::{zenoh_result::ZResult, zenoh_util::time_range::TimeRange}; use super::{key_expr::KeyExpr, queryable::Query}; -/// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the +/// A selector is the combination of a [Key Expression](crate::key_expr::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters /// with a few intendend uses: /// - specifying arguments to a queryable, allowing the passing of Remote Procedure Call parameters diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 0d50fb9a38..893f4725d5 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -514,8 +514,8 @@ impl Session { /// pointer to it (`Arc`). This is equivalent to `Arc::new(session)`. /// /// This is useful to share ownership of the `Session` between several threads - /// and tasks. It also allows to create [`Subscriber`](Subscriber) and - /// [`Queryable`](Queryable) with static lifetime that can be moved to several + /// and tasks. It also allows to create [`Subscriber`](crate::subscriber::Subscriber) and + /// [`Queryable`](crate::queryable::Queryable) with static lifetime that can be moved to several /// threads and tasks /// /// Note: the given zenoh `Session` will be closed when the last reference to @@ -547,7 +547,7 @@ impl Session { /// the program's life. Dropping the returned reference will cause a memory /// leak. 
/// - /// This is useful to move entities (like [`Subscriber`](Subscriber)) which + /// This is useful to move entities (like [`Subscriber`](crate::subscriber::Subscriber)) which /// lifetimes are bound to the session lifetime in several threads or tasks. /// /// Note: the given zenoh `Session` cannot be closed any more. At process @@ -862,7 +862,7 @@ impl Session { } /// Query data from the matching queryables in the system. /// - /// Unless explicitly requested via [`GetBuilder::accept_replies`], replies are guaranteed to have + /// Unless explicitly requested via [`accept_replies`](crate::session::SessionGetBuilder::accept_replies), replies are guaranteed to have /// key expressions that match the requested `selector`. /// /// # Arguments @@ -1978,7 +1978,7 @@ impl Session { } impl<'s> SessionDeclarations<'s, 'static> for Arc { - /// Create a [`Subscriber`](Subscriber) for the given key expression. + /// Create a [`Subscriber`](crate::subscriber::Subscriber) for the given key expression. /// /// # Arguments /// @@ -2018,12 +2018,12 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { } } - /// Create a [`Queryable`](Queryable) for the given key expression. + /// Create a [`Queryable`](crate::queryable::Queryable) for the given key expression. 
/// /// # Arguments /// /// * `key_expr` - The key expression matching the queries the - /// [`Queryable`](Queryable) will reply to + /// [`Queryable`](crate::queryable::Queryable) will reply to /// /// # Examples /// ```no_run @@ -2622,7 +2622,7 @@ impl fmt::Debug for Session { /// [`Queryable`](crate::queryable::Queryable) /// /// This trait is implemented by [`Session`](crate::session::Session) itself and -/// by wrappers [`SessionRef`](crate::session::SessionRef) and [`Arc`](crate::session::Arc) +/// by wrappers [`SessionRef`](crate::session::SessionRef) and [`Arc`](std::sync::Arc) /// /// # Examples /// ```no_run diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 79b4429876..f3c1fa14e7 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -285,7 +285,7 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the samples for this subscription with a [`Handler`](crate::handlers::IntoHandler). /// /// # Examples /// ```no_run @@ -410,10 +410,10 @@ where } } -/// A subscriber that provides data through a [`Handler`](crate::prelude::IntoHandler). +/// A subscriber that provides data through a [`Handler`](crate::handlers::IntoHandler). /// /// Subscribers can be created from a zenoh [`Session`](crate::Session) -/// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function +/// with the [`declare_subscriber`](crate::session::SessionDeclarations::declare_subscriber) function /// and the [`with`](SubscriberBuilder::with) function /// of the resulting builder. 
/// diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 7299453f54..1a01ff922d 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -148,27 +148,27 @@ pub mod core { /// /// # Storing Key Expressions /// This module provides 3 flavours to store strings that have been validated to respect the KE syntax: -/// - [`keyexpr`] is the equivalent of a [`str`], -/// - [`OwnedKeyExpr`] works like an [`std::sync::Arc`], -/// - [`KeyExpr`] works like a [`std::borrow::Cow`], but also stores some additional context internal to Zenoh to optimize +/// - [`keyexpr`](crate::key_expr::keyexpr) is the equivalent of a [`str`], +/// - [`OwnedKeyExpr`](crate::key_expr::OwnedKeyExpr) works like an [`std::sync::Arc`], +/// - [`KeyExpr`](crate::key_expr::KeyExpr) works like a [`std::borrow::Cow`], but also stores some additional context internal to Zenoh to optimize /// routing and network usage. /// -/// All of these types [`Deref`](core::ops::Deref) to [`keyexpr`], which notably has methods to check whether a given [`keyexpr::intersects`] with another, -/// or even if a [`keyexpr::includes`] another. +/// All of these types [`Deref`](std::ops::Deref) to [`keyexpr`](crate::key_expr::keyexpr), which notably has methods to check whether a given [`intersects`](crate::key_expr::keyexpr::includes) with another, +/// or even if a [`includes`](crate::key_expr::keyexpr::includes) another. /// /// # Tying values to Key Expressions /// When storing values tied to Key Expressions, you might want something more specialized than a [`HashMap`](std::collections::HashMap) if you want to respect /// the Key Expression semantics with high performance. /// -/// Enter [KeTrees](keyexpr_tree). These are data-structures specially built to store KE-value pairs in a manner that supports the set-semantics of KEs. +/// Enter [KeTrees](crate::key_expr::keyexpr_tree). These are data-structures specially built to store KE-value pairs in a manner that supports the set-semantics of KEs. 
/// /// # Building and parsing Key Expressions /// A common issue in REST API is the association of meaning to sections of the URL, and respecting that API in a convenient manner. -/// The same issue arises naturally when designing a KE space, and [`KeFormat`](format::KeFormat) was designed to help you with this, +/// The same issue arises naturally when designing a KE space, and [`KeFormat`](crate::key_expr::format::KeFormat) was designed to help you with this, /// both in constructing and in parsing KEs that fit the formats you've defined. /// -/// [`kedefine`] also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, -/// as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist. +/// [`kedefine`](crate::key_expr::format::kedefine) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, +/// as the [`keformat`](crate::key_expr::format::keformat) and [`kewrite`](crate::key_expr::format::kewrite) macros will be able to tell you if you're attempting to set fields of the format that do not exist. pub mod key_expr { #[zenoh_macros::unstable] pub mod keyexpr_tree { @@ -194,7 +194,7 @@ pub mod key_expr { } } -/// Zenoh [`Session`](crate::session::Session) and associated types +/// Zenoh [`Session`] and associated types pub mod session { #[zenoh_macros::internal] pub use crate::api::session::{init, InitBuilder}; @@ -205,7 +205,7 @@ pub mod session { }; } -/// Tools to access information about the current zenoh [`Session`](crate::Session). +/// Tools to access information about the current zenoh [`Session`]. 
pub mod info { pub use zenoh_config::wrappers::{EntityGlobalId, ZenohId}; pub use zenoh_protocol::core::EntityId; @@ -393,7 +393,7 @@ pub mod time { pub use crate::api::time::new_timestamp; } -/// Configuration to pass to [`open`](crate::session::open) and [`scout`](crate::scouting::scout) functions and associated constants +/// Configuration to pass to [`open`] and [`scout`] functions and associated constants pub mod config { // pub use zenoh_config::{ // client, default, peer, Config, EndPoint, Locator, ModeDependentValue, PermissionsConf, diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index bf7f4841a1..81ca715f44 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -56,7 +56,7 @@ struct Args { /// WARNING: this identifier must be unique in the system and must be 16 bytes maximum (32 chars)! #[arg(short, long)] id: Option, - /// A plugin that MUST be loaded. You can give just the name of the plugin, zenohd will search for a library named 'libzenoh_plugin_.so' (exact name depending the OS). Or you can give such a string: ": + /// A plugin that MUST be loaded. You can give just the name of the plugin, zenohd will search for a library named 'libzenoh_plugin_\.so' (exact name depending the OS). Or you can give such a string: "\:\" /// Repeat this option to load several plugins. If loading failed, zenohd will exit. #[arg(short = 'P', long)] plugin: Vec, From 655922d85124c59281f5a5b4c798156e5268fc91 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 27 Jun 2024 11:21:44 +0200 Subject: [PATCH 04/29] Fix #1185. 
Add encoding option to publisher creation (#1194) * Add encoding option to publisher builder * Cargo fmt * Fix PublisherBuilder Clone --- examples/examples/z_pub.rs | 11 +++++++---- zenoh/src/api/builders/publisher.rs | 13 +++++++++++++ zenoh/src/api/publisher.rs | 9 ++++++++- zenoh/src/api/session.rs | 2 ++ zenoh/tests/qos.rs | 16 ++++++++++++---- 5 files changed, 42 insertions(+), 9 deletions(-) diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 2130832fb4..56584f53c4 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -14,7 +14,7 @@ use std::time::Duration; use clap::Parser; -use zenoh::{key_expr::KeyExpr, prelude::*, Config}; +use zenoh::{encoding::Encoding, key_expr::KeyExpr, prelude::*, Config}; use zenoh_examples::CommonArgs; #[tokio::main] @@ -35,7 +35,12 @@ async fn main() { tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] {payload}"); println!("Putting Data ('{}': '{}')...", &key_expr, buf); - publisher.put(buf).attachment(&attachment).await.unwrap(); + publisher + .put(buf) + .encoding(Encoding::TEXT_PLAIN) // Optionally set the encoding metadata + .attachment(&attachment) // Optionally add an attachment + .await + .unwrap(); } } @@ -49,8 +54,6 @@ struct Args { payload: String, #[arg(short, long)] /// The attachments to add to each put. - /// - /// The key-value pairs are &-separated, and = serves as the separator between key and value. attach: Option, #[command(flatten)] common: CommonArgs, diff --git a/zenoh/src/api/builders/publisher.rs b/zenoh/src/api/builders/publisher.rs index 923689d0bc..380a9251d5 100644 --- a/zenoh/src/api/builders/publisher.rs +++ b/zenoh/src/api/builders/publisher.rs @@ -113,6 +113,15 @@ impl PublicationBuilder, T> { } } +impl EncodingBuilderTrait for PublisherBuilder<'_, '_> { + fn encoding>(self, encoding: T) -> Self { + Self { + encoding: encoding.into(), + ..self + } + } +} + impl

EncodingBuilderTrait for PublicationBuilder { fn encoding>(self, encoding: T) -> Self { Self { @@ -226,6 +235,7 @@ impl IntoFuture for PublicationBuilder, PublicationBuil pub struct PublisherBuilder<'a, 'b: 'a> { pub(crate) session: SessionRef<'a>, pub(crate) key_expr: ZResult>, + pub(crate) encoding: Encoding, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, pub(crate) is_express: bool, @@ -240,6 +250,7 @@ impl<'a, 'b> Clone for PublisherBuilder<'a, 'b> { Ok(k) => Ok(k.clone()), Err(e) => Err(zerror!("Cloned KE Error: {}", e).into()), }, + encoding: self.encoding.clone(), congestion_control: self.congestion_control, priority: self.priority, is_express: self.is_express, @@ -289,6 +300,7 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { session: self.session, id: 0, // This is a one shot Publisher key_expr: self.key_expr?, + encoding: self.encoding, congestion_control: self.congestion_control, priority: self.priority, is_express: self.is_express, @@ -343,6 +355,7 @@ impl<'a, 'b> Wait for PublisherBuilder<'a, 'b> { session: self.session, id, key_expr, + encoding: self.encoding, congestion_control: self.congestion_control, priority: self.priority, is_express: self.is_express, diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index 6b581ccfad..f4b969b18f 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -139,6 +139,7 @@ pub struct Publisher<'a> { pub(crate) session: SessionRef<'a>, pub(crate) id: Id, pub(crate) key_expr: KeyExpr<'a>, + pub(crate) encoding: Encoding, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, pub(crate) is_express: bool, @@ -178,8 +179,14 @@ impl<'a> Publisher<'a> { &self.key_expr } + /// Get the [`Encoding`] used when publishing data. #[inline] + pub fn encoding(&self) -> &Encoding { + &self.encoding + } + /// Get the `congestion_control` applied when routing the data. 
+ #[inline] pub fn congestion_control(&self) -> CongestionControl { self.congestion_control } @@ -248,7 +255,7 @@ impl<'a> Publisher<'a> { publisher: self, kind: PublicationBuilderPut { payload: payload.into(), - encoding: Encoding::ZENOH_BYTES, + encoding: self.encoding.clone(), }, timestamp: None, #[cfg(feature = "unstable")] diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 893f4725d5..3125e90225 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -409,6 +409,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { PublisherBuilder { session: self.clone(), key_expr: key_expr.try_into().map_err(Into::into), + encoding: Encoding::default(), congestion_control: CongestionControl::DEFAULT, priority: Priority::DEFAULT, is_express: false, @@ -2092,6 +2093,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { PublisherBuilder { session: SessionRef::Shared(self.clone()), key_expr: key_expr.try_into().map_err(Into::into), + encoding: Encoding::default(), congestion_control: CongestionControl::DEFAULT, priority: Priority::DEFAULT, is_express: false, diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 7ba694d80c..77e7e43a10 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -13,27 +13,31 @@ // use std::time::Duration; -use zenoh::{core::Priority, prelude::*, publisher::CongestionControl}; +use zenoh::{core::Priority, encoding::Encoding, prelude::*, publisher::CongestionControl}; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn pubsub() { +async fn qos_pubsub() { let session1 = ztimeout!(zenoh::open(zenoh_config::peer())).unwrap(); let session2 = ztimeout!(zenoh::open(zenoh_config::peer())).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("test/qos") + .encoding("text/plain") .priority(Priority::DataHigh) - 
.congestion_control(CongestionControl::Drop)) + .congestion_control(CongestionControl::Drop) + .express(true)) .unwrap(); let publisher2 = ztimeout!(session1 .declare_publisher("test/qos") + .encoding(Encoding::ZENOH_STRING) .priority(Priority::DataLow) - .congestion_control(CongestionControl::Block)) + .congestion_control(CongestionControl::Block) + .express(false)) .unwrap(); let subscriber = ztimeout!(session2.declare_subscriber("test/qos")).unwrap(); @@ -42,12 +46,16 @@ async fn pubsub() { ztimeout!(publisher1.put("qos")).unwrap(); let sample = ztimeout!(subscriber.recv_async()).unwrap(); + assert_eq!(sample.encoding(), &Encoding::TEXT_PLAIN); assert_eq!(sample.priority(), Priority::DataHigh); assert_eq!(sample.congestion_control(), CongestionControl::Drop); + assert!(sample.express()); ztimeout!(publisher2.put("qos")).unwrap(); let sample = ztimeout!(subscriber.recv_async()).unwrap(); + assert_eq!(sample.encoding(), &Encoding::ZENOH_STRING); assert_eq!(sample.priority(), Priority::DataLow); assert_eq!(sample.congestion_control(), CongestionControl::Block); + assert!(!sample.express()); } From 271b7c7910262f2ab424f9b8cd00be980900f6b0 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 27 Jun 2024 16:13:31 +0200 Subject: [PATCH 05/29] fix: replace some unsafe code with safe version (#1184) * fix: replace some unsafe code with safe version Compiler is able to optimize bound checks based on previous checks See https://godbolt.org/z/oGesnb6a4 or https://godbolt.org/z/c6c41bvE5 `Writer::with_slot` has been made unsafe, because its implementations rely on a precondition on the write callback * fix: fix documentation * fix: fix missing annotation * fix: fix test * fix: address PR review --- commons/zenoh-buffers/src/bbuf.rs | 5 +- commons/zenoh-buffers/src/lib.rs | 11 ++- commons/zenoh-buffers/src/slice.rs | 91 +++++++----------------- commons/zenoh-buffers/src/vec.rs | 4 +- commons/zenoh-buffers/src/zbuf.rs | 11 ++- commons/zenoh-buffers/src/zslice.rs | 2 +- 
commons/zenoh-buffers/tests/readwrite.rs | 8 ++- commons/zenoh-codec/src/core/zint.rs | 7 +- io/zenoh-transport/src/common/batch.rs | 12 ++-- 9 files changed, 62 insertions(+), 89 deletions(-) diff --git a/commons/zenoh-buffers/src/bbuf.rs b/commons/zenoh-buffers/src/bbuf.rs index 72491ae704..7af2a1a464 100644 --- a/commons/zenoh-buffers/src/bbuf.rs +++ b/commons/zenoh-buffers/src/bbuf.rs @@ -127,7 +127,7 @@ impl Writer for &mut BBuf { self.capacity() - self.len() } - fn with_slot(&mut self, len: usize, f: F) -> Result + unsafe fn with_slot(&mut self, len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize, { @@ -135,7 +135,8 @@ impl Writer for &mut BBuf { return Err(DidntWrite); } - let written = f(self.as_writable_slice()); + // SAFETY: self.remaining() >= len + let written = write(unsafe { self.as_writable_slice().get_unchecked_mut(..len) }); self.len += written; NonZeroUsize::new(written).ok_or(DidntWrite) diff --git a/commons/zenoh-buffers/src/lib.rs b/commons/zenoh-buffers/src/lib.rs index a527dfbc19..ee630b4201 100644 --- a/commons/zenoh-buffers/src/lib.rs +++ b/commons/zenoh-buffers/src/lib.rs @@ -137,9 +137,14 @@ pub mod writer { fn can_write(&self) -> bool { self.remaining() != 0 } - /// Provides a buffer of exactly `len` uninitialized bytes to `f` to allow in-place writing. - /// `f` must return the number of bytes it actually wrote. - fn with_slot(&mut self, len: usize, f: F) -> Result + /// Provides a buffer of exactly `len` uninitialized bytes to `write` to allow in-place writing. + /// `write` must return the number of bytes it actually wrote. 
+ /// + /// # Safety + /// + /// Caller must ensure that `write` return an integer lesser than or equal to the length of + /// the slice passed in argument + unsafe fn with_slot(&mut self, len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize; } diff --git a/commons/zenoh-buffers/src/slice.rs b/commons/zenoh-buffers/src/slice.rs index 658827b6c4..1f3771c2eb 100644 --- a/commons/zenoh-buffers/src/slice.rs +++ b/commons/zenoh-buffers/src/slice.rs @@ -61,26 +61,13 @@ impl HasWriter for &mut [u8] { impl Writer for &mut [u8] { fn write(&mut self, bytes: &[u8]) -> Result { - let len = bytes.len().min(self.len()); - if len == 0 { + let Some(len) = NonZeroUsize::new(bytes.len().min(self.len())) else { return Err(DidntWrite); - } - - // SAFETY: len is guaranteed to be the minimum between lhs and rhs length. - // We early return if length is 0. - let lhs = crate::unsafe_slice_mut!(self, ..len); - let rhs = crate::unsafe_slice!(bytes, ..len); - lhs.copy_from_slice(rhs); - - // SAFETY: len is guaranteed to be the minimum between lhs and rhs length. - let lhs = crate::unsafe_slice_mut!(self, len..); - // SAFETY: this doesn't compile with simple assignment because the compiler - // doesn't believe that the subslice has the same lifetime as the original slice, - // so we transmute to assure it that it does. - *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(lhs) }; - - // SAFETY: this operation is safe since we check if len is non-zero. - Ok(unsafe { NonZeroUsize::new_unchecked(len) }) + }; + let (to_write, remain) = mem::take(self).split_at_mut(len.get()); + to_write.copy_from_slice(&bytes[..len.get()]); + *self = remain; + Ok(len) } fn write_exact(&mut self, bytes: &[u8]) -> Result<(), DidntWrite> { @@ -88,19 +75,7 @@ impl Writer for &mut [u8] { if self.len() < len { return Err(DidntWrite); } - - // SAFETY: len is guaranteed to be the smaller than lhs length. 
- let lhs = crate::unsafe_slice_mut!(self, ..len); - let rhs = crate::unsafe_slice!(bytes, ..len); - lhs.copy_from_slice(rhs); - - // SAFETY: len is guaranteed to be the minimum between lhs and rhs length. - let lhs = crate::unsafe_slice_mut!(self, len..); - // SAFETY: this doesn't compile with simple assignment because the compiler - // doesn't believe that the subslice has the same lifetime as the original slice, - // so we transmute to assure it that it does. - *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(lhs) }; - + let _ = self.write(bytes); Ok(()) } @@ -108,24 +83,17 @@ impl Writer for &mut [u8] { self.len() } - fn with_slot(&mut self, mut len: usize, f: F) -> Result + unsafe fn with_slot(&mut self, len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize, { if len > self.len() { return Err(DidntWrite); } - // SAFETY: we early return in case len is greater than slice.len(). - let s = crate::unsafe_slice_mut!(self, ..len); - len = f(s); - // SAFETY: we early return in case len is greater than slice.len(). - let s = crate::unsafe_slice_mut!(self, len..); - // SAFETY: this doesn't compile with simple assignment because the compiler - // doesn't believe that the subslice has the same lifetime as the original slice, - // so we transmute to assure it that it does. - *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(s) }; - - NonZeroUsize::new(len).ok_or(DidntWrite) + let written = write(&mut self[..len]); + // SAFETY: `written` < `len` is guaranteed by function contract + *self = unsafe { mem::take(self).get_unchecked_mut(written..) }; + NonZeroUsize::new(written).ok_or(DidntWrite) } } @@ -165,14 +133,13 @@ impl<'a> HasReader for &'a [u8] { impl Reader for &[u8] { fn read(&mut self, into: &mut [u8]) -> Result { - let len = self.len().min(into.len()); - // SAFETY: len is guaranteed to be the smaller than lhs length. 
- let lhs = crate::unsafe_slice_mut!(into, ..len); - let rhs = crate::unsafe_slice!(self, ..len); - lhs.copy_from_slice(rhs); - // SAFETY: len is guaranteed to be smaller than slice.len(). - *self = crate::unsafe_slice!(self, len..); - NonZeroUsize::new(len).ok_or(DidntRead) + let Some(len) = NonZeroUsize::new(self.len().min(into.len())) else { + return Err(DidntRead); + }; + let (to_write, remain) = self.split_at(len.get()); + into[..len.get()].copy_from_slice(to_write); + *self = remain; + Ok(len) } fn read_exact(&mut self, into: &mut [u8]) -> Result<(), DidntRead> { @@ -180,24 +147,16 @@ impl Reader for &[u8] { if self.len() < len { return Err(DidntRead); } - // SAFETY: len is guaranteed to be the smaller than lhs length. - let lhs = crate::unsafe_slice_mut!(into, ..len); - let rhs = crate::unsafe_slice!(self, ..len); - lhs.copy_from_slice(rhs); - // SAFETY: len is guaranteed to be smaller than slice.len(). - *self = crate::unsafe_slice!(self, len..); + let (to_write, remain) = self.split_at(len); + into[..len].copy_from_slice(to_write); + *self = remain; Ok(()) } fn read_u8(&mut self) -> Result { - if !self.can_read() { - return Err(DidntRead); - } - // SAFETY: we early return in case the slice is empty. - // Therefore, there is at least one element in the slice. 
- let ret = *crate::unsafe_slice!(self, 0); - *self = crate::unsafe_slice!(self, 1..); - Ok(ret) + let mut buf = [0; 1]; + self.read(&mut buf)?; + Ok(buf[0]) } fn read_zslices(&mut self, len: usize, mut f: F) -> Result<(), DidntRead> { diff --git a/commons/zenoh-buffers/src/vec.rs b/commons/zenoh-buffers/src/vec.rs index 9d63880aea..fc81fa6687 100644 --- a/commons/zenoh-buffers/src/vec.rs +++ b/commons/zenoh-buffers/src/vec.rs @@ -93,7 +93,7 @@ impl Writer for &mut Vec { usize::MAX } - fn with_slot(&mut self, mut len: usize, f: F) -> Result + unsafe fn with_slot(&mut self, mut len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize, { @@ -103,7 +103,7 @@ impl Writer for &mut Vec { let s = crate::unsafe_slice_mut!(self.spare_capacity_mut(), ..len); // SAFETY: converting MaybeUninit into [u8] is safe because we are going to write on it. // The returned len tells us how many bytes have been written so as to update the len accordingly. - len = unsafe { f(&mut *(s as *mut [mem::MaybeUninit] as *mut [u8])) }; + len = unsafe { write(&mut *(s as *mut [mem::MaybeUninit] as *mut [u8])) }; // SAFETY: we already reserved len elements on the vector. 
unsafe { self.set_len(self.len() + len) }; diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index a198c654d2..f846280b91 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -590,12 +590,11 @@ impl<'a> HasWriter for &'a mut ZBuf { impl Writer for ZBufWriter<'_> { fn write(&mut self, bytes: &[u8]) -> Result { - if bytes.is_empty() { + let Some(len) = NonZeroUsize::new(bytes.len()) else { return Err(DidntWrite); - } + }; self.write_exact(bytes)?; - // SAFETY: this operation is safe since we check if bytes is empty - Ok(unsafe { NonZeroUsize::new_unchecked(bytes.len()) }) + Ok(len) } fn write_exact(&mut self, bytes: &[u8]) -> Result<(), DidntWrite> { @@ -646,7 +645,7 @@ impl Writer for ZBufWriter<'_> { Ok(()) } - fn with_slot(&mut self, mut len: usize, f: F) -> Result + unsafe fn with_slot(&mut self, mut len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize, { @@ -658,7 +657,7 @@ impl Writer for ZBufWriter<'_> { let s = crate::unsafe_slice_mut!(cache.spare_capacity_mut(), ..len); // SAFETY: converting MaybeUninit into [u8] is safe because we are going to write on it. // The returned len tells us how many bytes have been written so as to update the len accordingly. - len = unsafe { f(&mut *(s as *mut [mem::MaybeUninit] as *mut [u8])) }; + len = unsafe { write(&mut *(s as *mut [mem::MaybeUninit] as *mut [u8])) }; // SAFETY: we already reserved len elements on the vector. 
unsafe { cache.set_len(prev_cache_len + len) }; diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index 42babb8b88..6ed404eb78 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -122,7 +122,7 @@ impl ZSlice { } pub fn empty() -> Self { - unsafe { ZSlice::new_unchecked(Arc::new([]), 0, 0) } + Self::new(Arc::new([]), 0, 0).unwrap() } /// # Safety diff --git a/commons/zenoh-buffers/tests/readwrite.rs b/commons/zenoh-buffers/tests/readwrite.rs index cdfc8fea05..dd5481c958 100644 --- a/commons/zenoh-buffers/tests/readwrite.rs +++ b/commons/zenoh-buffers/tests/readwrite.rs @@ -46,13 +46,15 @@ macro_rules! run_write { writer.write_exact(&WBS4).unwrap(); - writer - .with_slot(4, |mut buffer| { + // SAFETY: callback returns the length of the buffer + unsafe { + writer.with_slot(4, |mut buffer| { let w = buffer.write(&WBS5).unwrap(); assert_eq!(4, w.get()); w.get() }) - .unwrap(); + } + .unwrap(); }; } diff --git a/commons/zenoh-codec/src/core/zint.rs b/commons/zenoh-codec/src/core/zint.rs index 20c0a0a4f6..a42395b781 100644 --- a/commons/zenoh-codec/src/core/zint.rs +++ b/commons/zenoh-codec/src/core/zint.rs @@ -112,7 +112,7 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, mut x: u64) -> Self::Output { - writer.with_slot(VLE_LEN_MAX, move |buffer| { + let write = move |buffer: &mut [u8]| { let mut len = 0; while (x & !0x7f_u64) != 0 { // SAFETY: buffer is guaranteed to be VLE_LEN long where VLE_LEN is @@ -139,7 +139,10 @@ where } // The number of written bytes len - })?; + }; + // SAFETY: write algorithm guarantees than returned length is lesser than or equal to + // `VLE_LEN_MAX`. + unsafe { writer.with_slot(VLE_LEN_MAX, write)? 
}; Ok(()) } } diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 1b065191c0..c36993ddf7 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -322,11 +322,15 @@ impl WBatch { // Compress the actual content let (_length, _header, payload) = Self::split(self.buffer.as_slice(), &self.config); let mut writer = support.writer(); - writer - .with_slot(writer.remaining(), |b| { - lz4_flex::block::compress_into(payload, b).unwrap_or(0) + // SAFETY: assertion ensures `with_slot` precondition + unsafe { + writer.with_slot(writer.remaining(), |b| { + let len = lz4_flex::block::compress_into(payload, b).unwrap_or(0); + assert!(len <= b.len()); + len }) - .map_err(|_| zerror!("Compression error"))?; + } + .map_err(|_| zerror!("Compression error"))?; // Verify whether the resulting compressed data is smaller than the initial input if support.len() < self.buffer.len() { From c96f6b010e8c647ed715dec0c11c0a1afbd739e7 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 27 Jun 2024 17:06:23 +0200 Subject: [PATCH 06/29] Fix Interest codec test (#1198) --- commons/zenoh-codec/tests/codec.rs | 5 +++++ commons/zenoh-protocol/src/network/interest.rs | 8 ++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index 1e1bbe18a3..46fabe5c51 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -532,6 +532,11 @@ fn codec_declare_body() { run!(DeclareBody, DeclareBody::rand()); } +#[test] +fn codec_interest() { + run!(Interest, Interest::rand()); +} + #[test] fn codec_declare_keyexpr() { run!(DeclareKeyExpr, DeclareKeyExpr::rand()); diff --git a/commons/zenoh-protocol/src/network/interest.rs b/commons/zenoh-protocol/src/network/interest.rs index 29ed7e4c29..9f329b6ff5 100644 --- a/commons/zenoh-protocol/src/network/interest.rs +++ 
b/commons/zenoh-protocol/src/network/interest.rs @@ -195,8 +195,12 @@ impl Interest { let id = rng.gen::(); let mode = InterestMode::rand(); - let options = InterestOptions::rand(); - let wire_expr = rng.gen_bool(0.5).then_some(WireExpr::rand()); + let options = if mode == InterestMode::Final { + InterestOptions::empty() + } else { + InterestOptions::rand() + }; + let wire_expr = options.restricted().then_some(WireExpr::rand()); let ext_qos = ext::QoSType::rand(); let ext_tstamp = rng.gen_bool(0.5).then(ext::TimestampType::rand); let ext_nodeid = ext::NodeIdType::rand(); From 0c7faa950adce5688ab6e84d6796dcb61516ed58 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 28 Jun 2024 09:35:33 +0200 Subject: [PATCH 07/29] Fix CLI args for z_pub_shm (#1199) --- examples/examples/z_pub_shm.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index fd3c7ce1b6..457027ba75 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -95,7 +95,7 @@ async fn main() -> Result<(), ZError> { struct Args { #[arg(short, long, default_value = "demo/example/zenoh-rs-pub")] /// The key expression to publish onto. - path: KeyExpr<'static>, + key: KeyExpr<'static>, #[arg(short, long, default_value = "Pub from SHM Rust!")] /// The payload of to publish. 
payload: String, @@ -105,5 +105,5 @@ struct Args { fn parse_args() -> (Config, KeyExpr<'static>, String) { let args = Args::parse(); - (args.common.into(), args.path, args.payload) + (args.common.into(), args.key, args.payload) } From 90054a615e2acddfbcfa1fd283aa8c866aa85682 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 1 Jul 2024 17:07:50 +0200 Subject: [PATCH 08/29] Fix bug leading to call get_unchecked on empty array UB (#1207) --- commons/zenoh-keyexpr/src/key_expr/include.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-keyexpr/src/key_expr/include.rs b/commons/zenoh-keyexpr/src/key_expr/include.rs index ca9efaee2d..15e4f50f40 100644 --- a/commons/zenoh-keyexpr/src/key_expr/include.rs +++ b/commons/zenoh-keyexpr/src/key_expr/include.rs @@ -41,7 +41,7 @@ impl Includer<&[u8], &[u8]> for LTRIncluder { if (lempty && !right.has_verbatim()) || (!lempty && self.includes(lrest, right)) { return true; } - if unsafe { right.has_direct_verbatim_non_empty() } { + if right.has_direct_verbatim() { return false; } right = Split::split_once(right, &DELIMITER).1; From 9fcb61e9c4e101e9070cf74868b1bcb3876c2cec Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Wed, 3 Jul 2024 10:10:55 +0200 Subject: [PATCH 09/29] feat: unify pub/sub and query/reply modules (#1193) * feat: unify pub/sub and query/reply modules * fix: fix shm error * feat: move encoding and remove core * feat: rename `Canonizable` into `Canonize` and reexport it * fix: fix examples * fix: fix doc links --- .../src/queryable_get/bin/z_queryable_get.rs | 5 +- .../zenoh-keyexpr/src/key_expr/borrowed.rs | 4 +- commons/zenoh-keyexpr/src/key_expr/canon.rs | 6 +- commons/zenoh-keyexpr/src/key_expr/owned.rs | 4 +- commons/zenoh-macros/src/lib.rs | 2 +- examples/examples/z_get.rs | 5 +- examples/examples/z_get_shm.rs | 3 +- examples/examples/z_info.rs | 2 +- examples/examples/z_ping.rs | 2 +- examples/examples/z_ping_shm.rs | 2 +- examples/examples/z_pong.rs | 2 +- 
examples/examples/z_pub.rs | 2 +- examples/examples/z_pub_shm_thr.rs | 2 +- examples/examples/z_pub_thr.rs | 6 +- plugins/zenoh-backend-traits/src/config.rs | 2 +- plugins/zenoh-backend-traits/src/lib.rs | 2 +- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 4 +- plugins/zenoh-plugin-rest/src/lib.rs | 10 +- .../src/backends_mgt.rs | 2 +- .../zenoh-plugin-storage-manager/src/lib.rs | 2 +- .../src/memory_backend/mod.rs | 2 +- .../src/replica/align_queryable.rs | 2 +- .../src/replica/aligner.rs | 2 +- .../src/replica/snapshotter.rs | 2 +- .../src/replica/storage.rs | 5 +- .../src/storages_mgt.rs | 2 +- zenoh-ext/src/group.rs | 4 +- zenoh-ext/src/lib.rs | 2 +- zenoh-ext/src/publication_cache.rs | 8 +- zenoh-ext/src/querying_subscriber.rs | 7 +- zenoh-ext/src/session_ext.rs | 2 +- zenoh-ext/src/subscriber_ext.rs | 6 +- zenoh/src/api/builders/publisher.rs | 6 +- zenoh/src/api/bytes.rs | 1 + zenoh/src/api/encoding.rs | 6 +- zenoh/src/api/key_expr.rs | 4 +- zenoh/src/api/query.rs | 2 +- zenoh/src/api/session.rs | 30 ++-- zenoh/src/lib.rs | 146 +++++++----------- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/prelude.rs | 12 +- zenoh/tests/qos.rs | 6 +- zenoh/tests/routing.rs | 2 +- zenoh/tests/session.rs | 4 +- zenoh/tests/shm.rs | 4 +- zenoh/tests/unicity.rs | 2 +- zenohd/src/main.rs | 2 +- 47 files changed, 160 insertions(+), 182 deletions(-) diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 70945a4926..8ea7be201b 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -14,7 +14,10 @@ use std::{convert::TryFrom, time::Duration}; use zenoh::{ - config::Config, key_expr::KeyExpr, prelude::*, query::QueryTarget, selector::Selector, + config::Config, + key_expr::KeyExpr, + prelude::*, + query::{QueryTarget, Selector}, }; #[tokio::main] diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs 
b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index 6d1774bcd8..a98337b987 100644 --- a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -27,7 +27,7 @@ use core::{ use zenoh_result::{bail, Error as ZError, ZResult}; -use super::{canon::Canonizable, OwnedKeyExpr, FORBIDDEN_CHARS}; +use super::{canon::Canonize, OwnedKeyExpr, FORBIDDEN_CHARS}; /// A [`str`] newtype that is statically known to be a valid key expression. /// @@ -72,7 +72,7 @@ impl keyexpr { pub fn autocanonize<'a, T, E>(t: &'a mut T) -> Result<&'a Self, E> where &'a Self: TryFrom<&'a T, Error = E>, - T: Canonizable + ?Sized, + T: Canonize + ?Sized, { t.canonize(); Self::new(t) diff --git a/commons/zenoh-keyexpr/src/key_expr/canon.rs b/commons/zenoh-keyexpr/src/key_expr/canon.rs index cccccdfba3..7080dbde1a 100644 --- a/commons/zenoh-keyexpr/src/key_expr/canon.rs +++ b/commons/zenoh-keyexpr/src/key_expr/canon.rs @@ -19,13 +19,13 @@ use crate::key_expr::{ DELIMITER, DOUBLE_WILD, SINGLE_WILD, }; -pub trait Canonizable { +pub trait Canonize { fn canonize(&mut self); } const DOLLAR_STAR: &[u8; 2] = b"$*"; -impl Canonizable for &mut str { +impl Canonize for &mut str { fn canonize(&mut self) { let mut writer = Writer { ptr: self.as_mut_ptr(), @@ -114,7 +114,7 @@ impl Canonizable for &mut str { } } -impl Canonizable for String { +impl Canonize for String { fn canonize(&mut self) { let mut s = self.as_mut(); s.canonize(); diff --git a/commons/zenoh-keyexpr/src/key_expr/owned.rs b/commons/zenoh-keyexpr/src/key_expr/owned.rs index a53fdec2f0..6089df2a1e 100644 --- a/commons/zenoh-keyexpr/src/key_expr/owned.rs +++ b/commons/zenoh-keyexpr/src/key_expr/owned.rs @@ -22,7 +22,7 @@ use core::{ str::FromStr, }; -use super::{canon::Canonizable, keyexpr}; +use super::{canon::Canonize, keyexpr}; /// A [`Arc`] newtype that is statically known to be a valid key expression. 
/// @@ -60,7 +60,7 @@ impl OwnedKeyExpr { pub fn autocanonize(mut t: T) -> Result where Self: TryFrom, - T: Canonizable, + T: Canonize, { t.canonize(); Self::new(t) diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index c1c58d725e..003525daa9 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -287,7 +287,7 @@ fn keformat_support(source: &str) -> proc_macro2::TokenStream { let formatter_doc = format!("And instance of a formatter for `{source}`."); quote! { - use ::zenoh::core::Result as ZResult; + use ::zenoh::Result as ZResult; const FORMAT_INNER: ::zenoh::key_expr::format::KeFormat<'static, [::zenoh::key_expr::format::Segment<'static>; #len]> = unsafe { ::zenoh::key_expr::format::macro_support::const_new(#source, [#(#segments)*]) }; diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 2069e20b31..eebe582f98 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -14,7 +14,10 @@ use std::time::Duration; use clap::Parser; -use zenoh::{query::QueryTarget, selector::Selector, Config}; +use zenoh::{ + query::{QueryTarget, Selector}, + Config, +}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 71a3e3aa65..d8ea97da33 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -15,8 +15,7 @@ use std::time::Duration; use clap::Parser; use zenoh::{ - query::QueryTarget, - selector::Selector, + query::{QueryTarget, Selector}, shm::{ zshm, BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID, diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index d2e4bfdbc0..aa40ef62d4 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::{info::ZenohId, prelude::*}; +use zenoh::{prelude::*, session::ZenohId}; use 
zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 96454da614..eec9324173 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -14,7 +14,7 @@ use std::time::{Duration, Instant}; use clap::Parser; -use zenoh::{bytes::ZBytes, key_expr::keyexpr, prelude::*, publisher::CongestionControl, Config}; +use zenoh::{bytes::ZBytes, key_expr::keyexpr, prelude::*, qos::CongestionControl, Config}; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 5e809c9341..4c6f8fed01 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -18,7 +18,7 @@ use zenoh::{ bytes::ZBytes, key_expr::keyexpr, prelude::*, - publisher::CongestionControl, + qos::CongestionControl, shm::{PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID}, Config, }; diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index 6a1b8580c7..ef022d234c 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::{key_expr::keyexpr, prelude::*, publisher::CongestionControl, Config}; +use zenoh::{key_expr::keyexpr, prelude::*, qos::CongestionControl, Config}; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 56584f53c4..9f84ba118f 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -14,7 +14,7 @@ use std::time::Duration; use clap::Parser; -use zenoh::{encoding::Encoding, key_expr::KeyExpr, prelude::*, Config}; +use zenoh::{bytes::Encoding, key_expr::KeyExpr, prelude::*, Config}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 3093a0962d..4641c51c95 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ 
b/examples/examples/z_pub_shm_thr.rs @@ -15,7 +15,7 @@ use clap::Parser; use zenoh::{ bytes::ZBytes, prelude::*, - publisher::CongestionControl, + qos::CongestionControl, shm::{PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID}, Config, }; diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 359e375203..e6c063318e 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -15,7 +15,11 @@ use std::convert::TryInto; use clap::Parser; -use zenoh::{bytes::ZBytes, core::Priority, prelude::*, publisher::CongestionControl}; +use zenoh::{ + bytes::ZBytes, + prelude::*, + qos::{CongestionControl, Priority}, +}; use zenoh_examples::CommonArgs; fn main() { diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index 0d710d9942..98167680c8 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -18,8 +18,8 @@ use derive_more::{AsMut, AsRef}; use schemars::JsonSchema; use serde_json::{Map, Value}; use zenoh::{ - core::Result as ZResult, key_expr::{keyexpr, OwnedKeyExpr}, + Result as ZResult, }; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_result::{bail, zerror, Error}; diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 851e9cfbb0..a75d934050 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -124,10 +124,10 @@ use async_trait::async_trait; use const_format::concatcp; use zenoh::{ - core::Result as ZResult, internal::Value, key_expr::{keyexpr, OwnedKeyExpr}, time::Timestamp, + Result as ZResult, }; use zenoh_plugin_trait::{PluginControl, PluginInstance, PluginStatusRec, StructVersion}; use zenoh_util::concat_enabled_features; diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index e39d7c28b2..5f7d466f13 100644 --- 
a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -15,7 +15,9 @@ use std::time::Duration; use clap::{arg, Command}; use zenoh::{ - config::Config, key_expr::keyexpr, publisher::CongestionControl, sample::QoSBuilderTrait, + config::Config, + key_expr::keyexpr, + qos::{CongestionControl, QoSBuilderTrait}, session::SessionDeclarations, }; diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index a35025e26e..e3dcc0130e 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -26,8 +26,7 @@ use http_types::Method; use serde::{Deserialize, Serialize}; use tide::{http::Mime, sse::Sender, Request, Response, Server, StatusCode}; use zenoh::{ - bytes::ZBytes, - encoding::Encoding, + bytes::{Encoding, ZBytes}, internal::{ bail, plugins::{RunningPluginTrait, ZenohPlugin}, @@ -36,16 +35,15 @@ use zenoh::{ }, key_expr::{keyexpr, KeyExpr}, prelude::*, - query::{QueryConsolidation, Reply}, - sample::{EncodingBuilderTrait, Sample, SampleKind}, - selector::{Parameters, Selector, ZenohParameters}, + query::{Parameters, QueryConsolidation, Reply, Selector, ZenohParameters}, + sample::{Sample, SampleKind}, session::{Session, SessionDeclarations}, }; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; mod config; pub use config::Config; -use zenoh::query::ReplyError; +use zenoh::{bytes::EncodingBuilderTrait, query::ReplyError}; const GIT_VERSION: &str = git_version::git_version!(prefix = "v", cargo_prefix = "v"); lazy_static::lazy_static! 
{ diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index 1bb8af4330..b789b563d2 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -14,7 +14,7 @@ use std::sync::Arc; use flume::Sender; -use zenoh::{core::Result as ZResult, session::Session}; +use zenoh::{session::Session, Result as ZResult}; use zenoh_backend_traits::{config::StorageConfig, Capability, VolumeInstance}; use super::storages_mgt::*; diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 6ea19ce25c..c916b649d9 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -30,7 +30,6 @@ use flume::Sender; use memory_backend::MemoryBackend; use storages_mgt::StorageMessage; use zenoh::{ - core::Result as ZResult, internal::{ plugins::{Response, RunningPlugin, RunningPluginTrait, ZenohPlugin}, runtime::Runtime, @@ -39,6 +38,7 @@ use zenoh::{ key_expr::{keyexpr, KeyExpr}, prelude::Wait, session::Session, + Result as ZResult, }; use zenoh_backend_traits::{ config::{ConfigDiff, PluginConfig, StorageConfig, VolumeConfig}, diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index e3a9cd9196..7c74d9f7f9 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -15,7 +15,7 @@ use std::{collections::HashMap, sync::Arc}; use async_std::sync::RwLock; use async_trait::async_trait; -use zenoh::{core::Result as ZResult, internal::Value, key_expr::OwnedKeyExpr, time::Timestamp}; +use zenoh::{internal::Value, key_expr::OwnedKeyExpr, time::Timestamp, Result as ZResult}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, *, diff --git 
a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 802e420636..c11a632e41 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -21,7 +21,7 @@ use std::{ use async_std::sync::Arc; use zenoh::{ - internal::Value, key_expr::OwnedKeyExpr, prelude::*, sample::Sample, selector::Parameters, + internal::Value, key_expr::OwnedKeyExpr, prelude::*, query::Parameters, sample::Sample, time::Timestamp, Session, }; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 8ffeddd71f..7992053a67 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -24,8 +24,8 @@ use zenoh::{ internal::Value, key_expr::{KeyExpr, OwnedKeyExpr}, prelude::*, + query::Selector, sample::{Sample, SampleBuilder}, - selector::Selector, time::Timestamp, Session, }; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs index c5b2573335..6bb2cf113b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs @@ -24,7 +24,7 @@ use async_std::{ }; use flume::Receiver; use futures::join; -use zenoh::{info::ZenohId, key_expr::OwnedKeyExpr, time::Timestamp}; +use zenoh::{key_expr::OwnedKeyExpr, session::ZenohId, time::Timestamp}; use zenoh_backend_traits::config::ReplicaConfig; use super::{Digest, DigestConfig, LogEntry}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 9d12dbd599..f926417743 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ 
b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -22,7 +22,7 @@ use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; use zenoh::{ - core::Result as ZResult, + bytes::EncodingBuilderTrait, internal::{ bail, buffers::{SplitBuffer, ZBuf}, @@ -35,9 +35,10 @@ use zenoh::{ KeyExpr, OwnedKeyExpr, }, query::{ConsolidationMode, QueryTarget}, - sample::{EncodingBuilderTrait, Sample, SampleBuilder, SampleKind, TimestampBuilderTrait}, + sample::{Sample, SampleBuilder, SampleKind, TimestampBuilderTrait}, session::{Session, SessionDeclarations}, time::{new_timestamp, Timestamp, NTP64}, + Result as ZResult, }; use zenoh_backend_traits::{ config::{GarbageCollectionConfig, StorageConfig}, diff --git a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs index 1670310fcf..27dbaf58f6 100644 --- a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use async_std::sync::Arc; -use zenoh::{core::Result as ZResult, session::Session}; +use zenoh::{session::Session, Result as ZResult}; use zenoh_backend_traits::config::StorageConfig; pub use super::replica::{Replica, StorageService}; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 9120a323ae..4078db08dc 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -28,11 +28,11 @@ use serde::{Deserialize, Serialize}; use tokio::sync::Mutex; use zenoh::{ bytes::ZBytesReader, - core::Priority, internal::{bail, Condition, TaskController}, key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}, prelude::*, - publisher::Publisher, + pubsub::Publisher, + qos::Priority, Session, }; diff --git a/zenoh-ext/src/lib.rs b/zenoh-ext/src/lib.rs index 9802d04e3a..659afa006d 100644 --- a/zenoh-ext/src/lib.rs +++ b/zenoh-ext/src/lib.rs @@ -22,7 +22,7 @@ pub use querying_subscriber::{ }; pub use session_ext::SessionExt; pub 
use subscriber_ext::{SubscriberBuilderExt, SubscriberForward}; -use zenoh::{core::Result as ZResult, internal::zerror, query::Reply, sample::Sample}; +use zenoh::{internal::zerror, query::Reply, sample::Sample, Result as ZResult}; /// The space of keys to use in a [`FetchingSubscriber`]. pub enum KeySpace { diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index b50f5affb4..09a21f2e16 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -19,16 +19,14 @@ use std::{ }; use zenoh::{ - core::{Error, Resolvable, Resolve, Result as ZResult}, internal::{bail, runtime::ZRuntime, ResolveFuture, TerminatableTask}, key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}, prelude::Wait, - query::Query, - queryable::Queryable, + pubsub::FlumeSubscriber, + query::{Query, Queryable, ZenohParameters}, sample::{Locality, Sample}, - selector::ZenohParameters, session::{SessionDeclarations, SessionRef}, - subscriber::FlumeSubscriber, + Error, Resolvable, Resolve, Result as ZResult, }; /// The builder of PublicationCache, allowing to configure it. 
diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index e26de62ae0..baf486601d 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -21,17 +21,16 @@ use std::{ }; use zenoh::{ - core::{Error, Resolvable, Resolve, Result as ZResult}, handlers::{locked, DefaultHandler, IntoHandler}, internal::zlock, key_expr::KeyExpr, prelude::Wait, - query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}, + pubsub::{Reliability, Subscriber}, + query::{QueryConsolidation, QueryTarget, ReplyKeyExpr, Selector}, sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}, - selector::Selector, session::{SessionDeclarations, SessionRef}, - subscriber::{Reliability, Subscriber}, time::{new_timestamp, Timestamp}, + Error, Resolvable, Resolve, Result as ZResult, }; use crate::ExtractSample; diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index 2b9cda7cb0..606f00743b 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -14,9 +14,9 @@ use std::{convert::TryInto, sync::Arc}; use zenoh::{ - core::Error, key_expr::KeyExpr, session::{Session, SessionRef}, + Error, }; use super::PublicationCacheBuilder; diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index bac334035d..a7356f86dc 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -16,11 +16,11 @@ use std::time::Duration; use flume::r#async::RecvStream; use futures::stream::{Forward, Map}; use zenoh::{ - core::Result as ZResult, liveliness::LivelinessSubscriberBuilder, + pubsub::{Reliability, Subscriber, SubscriberBuilder}, query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}, sample::{Locality, Sample}, - subscriber::{Reliability, Subscriber, SubscriberBuilder}, + Result as ZResult, }; use crate::{ @@ -42,7 +42,7 @@ where } } -/// Some extensions to the [`zenoh::subscriber::SubscriberBuilder`](zenoh::subscriber::SubscriberBuilder) +/// Some 
extensions to the [`zenoh::subscriber::SubscriberBuilder`](zenoh::pubsub::SubscriberBuilder) pub trait SubscriberBuilderExt<'a, 'b, Handler> { type KeySpace; diff --git a/zenoh/src/api/builders/publisher.rs b/zenoh/src/api/builders/publisher.rs index 380a9251d5..666b4378e0 100644 --- a/zenoh/src/api/builders/publisher.rs +++ b/zenoh/src/api/builders/publisher.rs @@ -50,13 +50,13 @@ pub struct PublicationBuilderPut { pub struct PublicationBuilderDelete; /// A builder for initializing [`Session::put`](crate::session::Session::put), [`Session::delete`](crate::session::Session::delete), -/// [`Publisher::put`](crate::publisher::Publisher::put), and [`Publisher::delete`](crate::publisher::Publisher::delete) operations. +/// [`Publisher::put`](crate::pubsub::Publisher::put), and [`Publisher::delete`](crate::pubsub::Publisher::delete) operations. /// /// # Examples /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::{encoding::Encoding, prelude::*, publisher::CongestionControl}; +/// use zenoh::{bytes::Encoding, prelude::*, qos::CongestionControl}; /// /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// session @@ -220,7 +220,7 @@ impl IntoFuture for PublicationBuilder, PublicationBuil /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::{prelude::*, publisher::CongestionControl}; +/// use zenoh::{prelude::*, qos::CongestionControl}; /// /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let publisher = session diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index b1d1ff079f..572ac16cab 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -2959,6 +2959,7 @@ impl From> for ZBytes { } mod tests { + #[test] fn serializer() { use std::borrow::Cow; diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index 2b6cee2b23..7e86e9091a 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -37,7 +37,7 @@ use super::bytes::ZBytes; /// /// Create an 
[`Encoding`] from a string and viceversa. /// ``` -/// use zenoh::encoding::Encoding; +/// use zenoh::bytes::Encoding; /// /// let encoding: Encoding = "text/plain".into(); /// let text: String = encoding.clone().into(); @@ -49,7 +49,7 @@ use super::bytes::ZBytes; /// Since some encoding values are internally optimized by Zenoh, it's generally more efficient to use /// the defined constants and [`Cow`][std::borrow::Cow] conversion to obtain its string representation. /// ``` -/// use zenoh::encoding::Encoding; +/// use zenoh::bytes::Encoding; /// use std::borrow::Cow; /// /// // This allocates @@ -64,7 +64,7 @@ use super::bytes::ZBytes; /// The conventions is to use the `;` separator if an encoding is created from a string. /// Alternatively, [`with_schema()`](Encoding::with_schema) can be used to add a scheme to one of the associated constants. /// ``` -/// use zenoh::encoding::Encoding; +/// use zenoh::bytes::Encoding; /// /// let encoding1 = Encoding::from("text/plain;utf-8"); /// let encoding2 = Encoding::TEXT_PLAIN.with_schema("utf-8"); diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index c6ece3f129..50ce79180b 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -21,7 +21,7 @@ use std::{ use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; use zenoh_protocol::{ - core::{key_expr::canon::Canonizable, ExprId, WireExpr}, + core::{key_expr::canon::Canonize, ExprId, WireExpr}, network::{declare, DeclareBody, Mapping, UndeclareKeyExpr}, }; use zenoh_result::ZResult; @@ -145,7 +145,7 @@ impl<'a> KeyExpr<'a> { pub fn autocanonize(mut t: T) -> Result where Self: TryFrom, - T: Canonizable, + T: Canonize, { t.canonize(); Self::new(t) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index e9598a0064..8cf62344f2 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -42,7 +42,7 @@ use super::{ value::Value, }; -/// The [`Queryable`](crate::queryable::Queryable)s that should be 
target of a [`get`](Session::get). +/// The [`Queryable`](crate::query::Queryable)s that should be target of a [`get`](Session::get). pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; /// The kind of consolidation. diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 3125e90225..f97e5d7541 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -515,8 +515,8 @@ impl Session { /// pointer to it (`Arc`). This is equivalent to `Arc::new(session)`. /// /// This is useful to share ownership of the `Session` between several threads - /// and tasks. It also allows to create [`Subscriber`](crate::subscriber::Subscriber) and - /// [`Queryable`](crate::queryable::Queryable) with static lifetime that can be moved to several + /// and tasks. It also allows to create [`Subscriber`](crate::pubsub::Subscriber) and + /// [`Queryable`](crate::query::Queryable) with static lifetime that can be moved to several /// threads and tasks /// /// Note: the given zenoh `Session` will be closed when the last reference to @@ -548,7 +548,7 @@ impl Session { /// the program's life. Dropping the returned reference will cause a memory /// leak. /// - /// This is useful to move entities (like [`Subscriber`](crate::subscriber::Subscriber)) which + /// This is useful to move entities (like [`Subscriber`](crate::pubsub::Subscriber)) which /// lifetimes are bound to the session lifetime in several threads or tasks. /// /// Note: the given zenoh `Session` cannot be closed any more. At process @@ -793,7 +793,7 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::{encoding::Encoding, prelude::*}; + /// use zenoh::{bytes::Encoding, prelude::*}; /// /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// session @@ -1979,7 +1979,7 @@ impl Session { } impl<'s> SessionDeclarations<'s, 'static> for Arc { - /// Create a [`Subscriber`](crate::subscriber::Subscriber) for the given key expression. 
+ /// Create a [`Subscriber`](crate::pubsub::Subscriber) for the given key expression. /// /// # Arguments /// @@ -2019,12 +2019,12 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { } } - /// Create a [`Queryable`](crate::queryable::Queryable) for the given key expression. + /// Create a [`Queryable`](crate::query::Queryable) for the given key expression. /// /// # Arguments /// /// * `key_expr` - The key expression matching the queries the - /// [`Queryable`](crate::queryable::Queryable) will reply to + /// [`Queryable`](crate::query::Queryable) will reply to /// /// # Examples /// ```no_run @@ -2063,7 +2063,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { } } - /// Create a [`Publisher`](crate::publisher::Publisher) for the given key expression. + /// Create a [`Publisher`](crate::pubsub::Publisher) for the given key expression. /// /// # Arguments /// @@ -2620,8 +2620,8 @@ impl fmt::Debug for Session { /// Functions to create zenoh entities /// /// This trait contains functions to create zenoh entities like -/// [`Subscriber`](crate::subscriber::Subscriber), and -/// [`Queryable`](crate::queryable::Queryable) +/// [`Subscriber`](crate::pubsub::Subscriber), and +/// [`Queryable`](crate::query::Queryable) /// /// This trait is implemented by [`Session`](crate::session::Session) itself and /// by wrappers [`SessionRef`](crate::session::SessionRef) and [`Arc`](std::sync::Arc) @@ -2644,7 +2644,7 @@ impl fmt::Debug for Session { /// # } /// ``` pub trait SessionDeclarations<'s, 'a> { - /// Create a [`Subscriber`](crate::subscriber::Subscriber) for the given key expression. + /// Create a [`Subscriber`](crate::pubsub::Subscriber) for the given key expression. /// /// # Arguments /// @@ -2675,12 +2675,12 @@ pub trait SessionDeclarations<'s, 'a> { TryIntoKeyExpr: TryInto>, >>::Error: Into; - /// Create a [`Queryable`](crate::queryable::Queryable) for the given key expression. + /// Create a [`Queryable`](crate::query::Queryable) for the given key expression. 
/// /// # Arguments /// /// * `key_expr` - The key expression matching the queries the - /// [`Queryable`](crate::queryable::Queryable) will reply to + /// [`Queryable`](crate::query::Queryable) will reply to /// /// # Examples /// ```no_run @@ -2710,7 +2710,7 @@ pub trait SessionDeclarations<'s, 'a> { TryIntoKeyExpr: TryInto>, >>::Error: Into; - /// Create a [`Publisher`](crate::publisher::Publisher) for the given key expression. + /// Create a [`Publisher`](crate::pubsub::Publisher) for the given key expression. /// /// # Arguments /// @@ -2826,7 +2826,7 @@ impl crate::net::primitives::EPrimitives for Session { /// # #[tokio::main] /// # async fn main() { /// use std::str::FromStr; -/// use zenoh::{info::ZenohId, prelude::*}; +/// use zenoh::{session::ZenohId, prelude::*}; /// /// let mut config = zenoh::config::peer(); /// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 1a01ff922d..77db49f525 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -111,34 +111,25 @@ pub const FEATURES: &str = zenoh_util::concat_enabled_features!( ] ); +#[allow(deprecated)] +pub use zenoh_core::{AsyncResolve, SyncResolve}; +pub use zenoh_core::{Resolvable, Resolve, Wait}; +/// A zenoh error. +pub use zenoh_result::Error; +/// A zenoh result. +pub use zenoh_result::ZResult as Result; #[doc(inline)] -pub use { - crate::{ - config::Config, - core::{Error, Result}, - scouting::scout, - session::{open, Session}, - }, - zenoh_util::{init_log_from_env_or, try_init_log_from_env}, +pub use zenoh_util::{init_log_from_env_or, try_init_log_from_env}; + +#[doc(inline)] +pub use crate::{ + config::Config, + scouting::scout, + session::{open, Session}, }; pub mod prelude; -/// Zenoh core types -pub mod core { - #[allow(deprecated)] - pub use zenoh_core::{AsyncResolve, SyncResolve}; - pub use zenoh_core::{Resolvable, Resolve, Wait}; - pub use zenoh_result::ErrNo; - /// A zenoh error. 
- pub use zenoh_result::Error; - /// A zenoh result. - pub use zenoh_result::ZResult as Result; - - /// Zenoh message priority - pub use crate::api::publisher::Priority; -} - /// [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. /// /// In Zenoh, operations are performed on keys. To allow addressing multiple keys with a single operation, we use Key Expressions (KE). @@ -180,7 +171,7 @@ pub mod key_expr { } #[zenoh_macros::unstable] pub use zenoh_keyexpr::SetIntersectionLevel; - pub use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; + pub use zenoh_keyexpr::{canon::Canonize, keyexpr, OwnedKeyExpr}; pub use crate::api::key_expr::{KeyExpr, KeyExprUndeclaration}; // keyexpr format macro support @@ -196,25 +187,20 @@ pub mod key_expr { /// Zenoh [`Session`] and associated types pub mod session { + #[zenoh_macros::unstable] + pub use zenoh_config::wrappers::{EntityGlobalId, ZenohId}; + pub use zenoh_protocol::core::EntityId; + #[zenoh_macros::internal] pub use crate::api::session::{init, InitBuilder}; pub use crate::api::{ builders::publisher::{SessionDeleteBuilder, SessionPutBuilder}, + info::{PeersZenohIdBuilder, RoutersZenohIdBuilder, SessionInfo, ZenohIdBuilder}, query::SessionGetBuilder, session::{open, OpenBuilder, Session, SessionDeclarations, SessionRef, Undeclarable}, }; } -/// Tools to access information about the current zenoh [`Session`]. 
-pub mod info { - pub use zenoh_config::wrappers::{EntityGlobalId, ZenohId}; - pub use zenoh_protocol::core::EntityId; - - pub use crate::api::info::{ - PeersZenohIdBuilder, RoutersZenohIdBuilder, SessionInfo, ZenohIdBuilder, - }; -} - /// Sample primitives pub mod sample { #[zenoh_macros::unstable] @@ -223,93 +209,64 @@ pub mod sample { pub use crate::api::sample::SourceInfo; pub use crate::api::{ builders::sample::{ - EncodingBuilderTrait, QoSBuilderTrait, SampleBuilder, SampleBuilderAny, - SampleBuilderDelete, SampleBuilderPut, SampleBuilderTrait, TimestampBuilderTrait, + SampleBuilder, SampleBuilderAny, SampleBuilderDelete, SampleBuilderPut, + SampleBuilderTrait, TimestampBuilderTrait, }, sample::{Sample, SampleFields, SampleKind, SourceSn}, }; } -/// Encoding support -pub mod encoding { - pub use crate::api::encoding::Encoding; -} - /// Payload primitives pub mod bytes { - pub use crate::api::bytes::{ - Deserialize, OptionZBytes, Serialize, ZBytes, ZBytesIterator, ZBytesReader, ZBytesWriter, - ZDeserializeError, ZSerde, + pub use crate::api::{ + builders::sample::EncodingBuilderTrait, + bytes::{ + Deserialize, OptionZBytes, Serialize, ZBytes, ZBytesIterator, ZBytesReader, + ZBytesWriter, ZDeserializeError, ZSerde, + }, + encoding::Encoding, }; } -/// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries -pub mod selector { - pub use zenoh_protocol::core::Parameters; - #[zenoh_macros::unstable] - pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; - - pub use crate::api::selector::Selector; - #[zenoh_macros::unstable] - pub use crate::api::selector::ZenohParameters; -} - -/// Subscribing primitives -pub mod subscriber { - /// The kind of reliability. 
+/// Pub/sub primitives +pub mod pubsub { pub use zenoh_protocol::core::Reliability; - pub use crate::api::subscriber::{FlumeSubscriber, Subscriber, SubscriberBuilder}; -} - -/// Publishing primitives -pub mod publisher { - pub use zenoh_protocol::core::CongestionControl; - - #[zenoh_macros::unstable] - pub use crate::api::publisher::MatchingListener; - #[zenoh_macros::unstable] - pub use crate::api::publisher::MatchingListenerBuilder; #[zenoh_macros::unstable] - pub use crate::api::publisher::MatchingListenerUndeclaration; - #[zenoh_macros::unstable] - pub use crate::api::publisher::MatchingStatus; - #[zenoh_macros::unstable] - pub use crate::api::publisher::PublisherDeclarations; - #[zenoh_macros::unstable] - pub use crate::api::publisher::PublisherRef; + pub use crate::api::publisher::{ + MatchingListener, MatchingListenerBuilder, MatchingListenerUndeclaration, MatchingStatus, + PublisherDeclarations, PublisherRef, + }; pub use crate::api::{ builders::publisher::{ PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, PublisherDeleteBuilder, PublisherPutBuilder, }, publisher::{Publisher, PublisherUndeclaration}, + subscriber::{FlumeSubscriber, Subscriber, SubscriberBuilder}, }; } -/// Get operation primitives -pub mod querier { - // Later the `Querier` with `get`` operation will be added here, in addition to `Session::get`, - // similarly to the `Publisher` with `put` operation and `Session::put` -} - -/// Query and Reply primitives +/// Query/reply primitives pub mod query { + pub use zenoh_protocol::core::Parameters; #[zenoh_macros::unstable] - pub use crate::api::query::ReplyKeyExpr; + pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; + #[zenoh_macros::internal] pub use crate::api::queryable::ReplySample; + #[zenoh_macros::unstable] + pub use crate::api::{query::ReplyKeyExpr, selector::ZenohParameters}; pub use crate::api::{ query::{ConsolidationMode, QueryConsolidation, QueryTarget, Reply, ReplyError}, - 
queryable::{Query, ReplyBuilder, ReplyBuilderDelete, ReplyBuilderPut, ReplyErrBuilder}, + queryable::{ + Query, Queryable, QueryableBuilder, QueryableUndeclaration, ReplyBuilder, + ReplyBuilderDelete, ReplyBuilderPut, ReplyErrBuilder, + }, + selector::Selector, }; } -/// Queryable primitives -pub mod queryable { - pub use crate::api::queryable::{Queryable, QueryableBuilder, QueryableUndeclaration}; -} - /// Callback handler trait pub mod handlers { pub use crate::api::handlers::{ @@ -318,6 +275,13 @@ pub mod handlers { }; } +/// Quality of service primitives +pub mod qos { + pub use zenoh_protocol::core::CongestionControl; + + pub use crate::api::{builders::sample::QoSBuilderTrait, publisher::Priority}; +} + /// Scouting primitives pub mod scouting { pub use zenoh_config::wrappers::Hello; @@ -455,6 +419,8 @@ pub mod internal { }; } + pub use zenoh_result::ErrNo; + pub use crate::api::value::Value; } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index eb010f9037..26807e8907 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -50,7 +50,7 @@ use crate::{ queryable::{Query, QueryInner}, value::Value, }, - encoding::Encoding, + bytes::Encoding, net::primitives::Primitives, }; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 63cb397e38..373d56c65a 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -37,29 +37,29 @@ mod _prelude { session::{SessionDeclarations, Undeclarable}, }, config::ValidatedMap, - core::{Error as ZError, Resolvable, Resolve, Result as ZResult}, + Error as ZError, Resolvable, Resolve, Result as ZResult, }; } pub use _prelude::*; #[allow(deprecated)] -pub use crate::core::AsyncResolve; +pub use crate::AsyncResolve; #[allow(deprecated)] -pub use crate::core::SyncResolve; -pub use crate::core::Wait; +pub use crate::SyncResolve; +pub use crate::Wait; /// Prelude to import when using Zenoh's sync API. 
#[deprecated(since = "1.0.0", note = "use `zenoh::prelude` instead")] pub mod sync { pub use super::_prelude::*; #[allow(deprecated)] - pub use crate::core::SyncResolve; + pub use crate::SyncResolve; } /// Prelude to import when using Zenoh's async API. #[deprecated(since = "1.0.0", note = "use `zenoh::prelude` instead")] pub mod r#async { pub use super::_prelude::*; #[allow(deprecated)] - pub use crate::core::AsyncResolve; + pub use crate::AsyncResolve; } diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 77e7e43a10..8d7d7e7322 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -13,7 +13,11 @@ // use std::time::Duration; -use zenoh::{core::Priority, encoding::Encoding, prelude::*, publisher::CongestionControl}; +use zenoh::{ + bytes::Encoding, + prelude::*, + qos::{CongestionControl, Priority}, +}; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 7f61f459d6..2256455be5 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -24,7 +24,7 @@ use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::{ config::{ModeDependentValue, WhatAmI, WhatAmIMatcher}, prelude::*, - publisher::CongestionControl, + qos::CongestionControl, Config, Result, Session, }; use zenoh_core::ztimeout; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 25adaf42e0..859ff43f7d 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -22,8 +22,8 @@ use std::{ #[cfg(feature = "internal")] use zenoh::internal::runtime::{Runtime, RuntimeBuilder}; use zenoh::{ - config, key_expr::KeyExpr, prelude::*, publisher::CongestionControl, sample::SampleKind, - subscriber::Reliability, Session, + config, key_expr::KeyExpr, prelude::*, pubsub::Reliability, qos::CongestionControl, + sample::SampleKind, Session, }; use zenoh_core::ztimeout; diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 43205e8e47..33665913ed 100644 --- 
a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -23,11 +23,11 @@ use std::{ use zenoh::{ config, prelude::*, - publisher::CongestionControl, + pubsub::Reliability, + qos::CongestionControl, shm::{ BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID, }, - subscriber::Reliability, Session, }; use zenoh_core::ztimeout; diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 6ce01ff2bf..a89ddb4b04 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -25,7 +25,7 @@ use zenoh::{ config::{EndPoint, WhatAmI}, key_expr::KeyExpr, prelude::*, - publisher::CongestionControl, + qos::CongestionControl, Session, }; use zenoh_core::ztimeout; diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 81ca715f44..e69dd3d263 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -19,7 +19,7 @@ use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilte use url::Url; use zenoh::{ config::{Config, EndPoint, ModeDependentValue, PermissionsConf, ValidatedMap, WhatAmI}, - core::Result, + Result, }; #[cfg(feature = "loki")] From e021d0de69e589e06c4ec8f328097610ce52a44d Mon Sep 17 00:00:00 2001 From: "ChenYing Kuo (CY)" Date: Wed, 3 Jul 2024 16:15:06 +0800 Subject: [PATCH 10/29] Add z_bytes in the examples. (#1180) * Add z_bytes in the examples. Signed-off-by: ChenYing Kuo * Add encoding information in examples. Signed-off-by: ChenYing Kuo * Update the format in examples README. Signed-off-by: ChenYing Kuo * Add z_bytes description in README. Signed-off-by: ChenYing Kuo * Update comments in other examples to point to z_bytes.rs. Signed-off-by: ChenYing Kuo * Support JSON, YAML, Protobuf in z_bytes.rs. Signed-off-by: ChenYing Kuo * Fix lint issues. Signed-off-by: ChenYing Kuo * Use Cow instead of Vec. 
Signed-off-by: ChenYing Kuo --------- Signed-off-by: ChenYing Kuo --- Cargo.lock | 44 +++++++++- examples/Cargo.toml | 4 + examples/README.md | 49 ++++++++++- examples/examples/z_bytes.rs | 143 +++++++++++++++++++++++++++++++ examples/examples/z_get.rs | 2 + examples/examples/z_pub.rs | 1 + examples/examples/z_put.rs | 1 + examples/examples/z_queryable.rs | 2 + examples/examples/z_sub.rs | 1 + 9 files changed, 240 insertions(+), 7 deletions(-) create mode 100644 examples/examples/z_bytes.rs diff --git a/Cargo.lock b/Cargo.lock index b8a03280c2..5344e0135c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2200,8 +2200,8 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f56d36f573486ba7f462b62cbae597fef7d5d93665e7047956b457531b8a1ced" dependencies = [ - "prost", - "prost-types", + "prost 0.11.9", + "prost-types 0.11.9", ] [[package]] @@ -2972,7 +2972,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.11.9", +] + +[[package]] +name = "prost" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +dependencies = [ + "bytes", + "prost-derive 0.12.6", ] [[package]] @@ -2988,13 +2998,35 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "prost-derive" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 2.0.52", +] + [[package]] name = "prost-types" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ - "prost", + "prost 0.11.9", +] + 
+[[package]] +name = "prost-types" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" +dependencies = [ + "prost 0.12.6", ] [[package]] @@ -5497,8 +5529,12 @@ dependencies = [ "futures", "git-version", "json5", + "prost 0.12.6", + "prost-types 0.12.6", "rand 0.8.5", "rustc_version 0.4.0", + "serde_json", + "serde_yaml", "tokio", "tracing", "zenoh", diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 90281ae558..e8cda2ae27 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -43,6 +43,10 @@ zenoh-collections = { workspace = true } tracing = { workspace = true } zenoh = { workspace = true, default-features = true } zenoh-ext = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +prost = "0.12.6" +prost-types = "0.12.6" [dev-dependencies] rand = { workspace = true, features = ["default"] } diff --git a/examples/README.md b/examples/README.md index 7776561ef8..d187244c51 100644 --- a/examples/README.md +++ b/examples/README.md @@ -3,6 +3,7 @@ ## Start instructions When Zenoh is built in release mode: + ```bash ./target/release/example/ ``` @@ -20,6 +21,7 @@ Scouts for Zenoh peers and routers available on the network. Typical usage: + ```bash z_scout ``` @@ -29,11 +31,11 @@ Gets information about the Zenoh session. Typical usage: + ```bash z_info ``` - ### z_put Puts a path/value into Zenoh. @@ -41,10 +43,13 @@ and [z_storage](#z_storage) examples. Typical usage: + ```bash z_put ``` + or + ```bash z_put -k demo/example/test -v 'Hello World' ``` @@ -55,10 +60,13 @@ The published value will be received by all matching subscribers, for instance the [z_sub](#z_sub) and [z_storage](#z_storage) examples. 
Typical usage: + ```bash z_pub ``` + or + ```bash z_pub -k demo/example/test -v 'Hello World' ``` @@ -69,10 +77,13 @@ The subscriber will be notified of each `put` or `delete` made on any key expression matching the subscriber key expression, and will print this notification. Typical usage: + ```bash z_sub ``` + or + ```bash z_sub -k 'demo/**' ``` @@ -82,14 +93,16 @@ Declares a key expression and a pull subscriber. On each pull, the pull subscriber will be notified of the last N `put` or `delete` made on each key expression matching the subscriber key expression, and will print this notification. - Typical usage: + ```bash z_pull ``` + or + ```bash - z_pull -k demo/** --size 3 + z_pull -k demo/** --size 3 ``` ### z_get @@ -99,10 +112,13 @@ will receive this query and reply with paths/values that will be received by the receiver stream. Typical usage: + ```bash z_get ``` + or + ```bash z_get -s 'demo/**' ``` @@ -114,10 +130,13 @@ with a selector that matches the path, and will return a value to the querier. Typical usage: + ```bash z_queryable ``` + or + ```bash z_queryable -k demo/example/queryable -v 'This is the result' ``` @@ -131,10 +150,13 @@ and that match the queried selector. Typical usage: + ```bash z_storage ``` + or + ```bash z_storage -k 'demo/**' ``` @@ -145,11 +167,13 @@ Note that on subscriber side, the same `z_sub` example than for non-shared-memory example is used. Typical Subscriber usage: + ```bash z_sub ``` Typical Publisher usage: + ```bash z_pub_shm ``` @@ -161,11 +185,13 @@ put operations and a subscriber receiving notifications of those puts. Typical Subscriber usage: + ```bash z_sub_thr ``` Typical Publisher usage: + ```bash z_pub_thr 1024 ``` @@ -182,11 +208,13 @@ :warning: z_pong needs to start first to avoid missing the kickoff from z_ping. 
Typical Pong usage: + ```bash z_pong ``` Typical Ping usage: + ```bash z_ping 1024 ``` @@ -200,11 +228,13 @@ Note that on subscriber side, the same `z_sub_thr` example than for non-shared-memory example is used. Typical Subscriber usage: + ```bash z_sub_thr ``` Typical Publisher usage: + ```bash z_pub_shm_thr ``` @@ -217,10 +247,13 @@ or killing the `z_liveliness` example. Typical usage: + ```bash z_liveliness ``` + or + ```bash z_liveliness -k 'group1/member1' ``` @@ -231,10 +264,13 @@ (`group1/**` by default). Those tokens could be declared by the `z_liveliness` example. Typical usage: + ```bash z_get_liveliness ``` + or + ```bash z_get_liveliness -k 'group1/**' ``` @@ -249,10 +285,17 @@ matching liveliness tokens that were alive before it's start. Typical usage: + ```bash z_sub_liveliness ``` + or + ```bash z_sub_liveliness -k 'group1/**' ``` + +### z_bytes + + Show how to serialize different message types into ZBytes, and then deserialize from ZBytes to the original message types. diff --git a/examples/examples/z_bytes.rs b/examples/examples/z_bytes.rs new file mode 100644 index 0000000000..ac4a2cc94a --- /dev/null +++ b/examples/examples/z_bytes.rs @@ -0,0 +1,143 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{borrow::Cow, collections::HashMap, io::Cursor}; + +use zenoh::bytes::ZBytes; + +fn main() { + // Numeric: u8, u16, u32, u128, usize, i8, i16, i32, i128, isize, f32, f64 + let input = 1234_u32; + let payload = ZBytes::from(input); + let output: u32 = payload.deserialize().unwrap(); + assert_eq!(input, output); + // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc. + // let encoding = Encoding::ZENOH_UINT32; + + // String + let input = String::from("test"); + let payload = ZBytes::from(&input); + let output: String = payload.deserialize().unwrap(); + assert_eq!(input, output); + // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc. + // let encoding = Encoding::ZENOH_STRING; + + // Cow + let input = Cow::from("test"); + let payload = ZBytes::from(&input); + let output: Cow = payload.deserialize().unwrap(); + assert_eq!(input, output); + // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc. + // let encoding = Encoding::ZENOH_STRING; + + // Vec: The deserialization should be infallible + let input: Vec = vec![1, 2, 3, 4]; + let payload = ZBytes::from(&input); + let output: Vec = payload.into(); + assert_eq!(input, output); + // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc. 
+ // let encoding = Encoding::ZENOH_BYTES; + + // Writer & Reader + // serialization + let mut bytes = ZBytes::empty(); + let mut writer = bytes.writer(); + let i1 = 1234_u32; + let i2 = String::from("test"); + let i3 = vec![1, 2, 3, 4]; + writer.serialize(i1); + writer.serialize(&i2); + writer.serialize(&i3); + // deserialization + let mut reader = bytes.reader(); + let o1: u32 = reader.deserialize().unwrap(); + let o2: String = reader.deserialize().unwrap(); + let o3: Vec = reader.deserialize().unwrap(); + assert_eq!(i1, o1); + assert_eq!(i2, o2); + assert_eq!(i3, o3); + + // Tuple + let input = (1234_u32, String::from("test")); + let payload = ZBytes::serialize(input.clone()); + let output: (u32, String) = payload.deserialize().unwrap(); + assert_eq!(input, output); + + // Iterator + let input: [i32; 4] = [1, 2, 3, 4]; + let payload = ZBytes::from_iter(input.iter()); + for (idx, value) in payload.iter::().enumerate() { + assert_eq!(input[idx], value.unwrap()); + } + + // HashMap + let mut input: HashMap = HashMap::new(); + input.insert(0, String::from("abc")); + input.insert(1, String::from("def")); + let payload = ZBytes::from(input.clone()); + let output = payload.deserialize::>().unwrap(); + assert_eq!(input, output); + + // JSON + let data = r#" + { + "name": "John Doe", + "age": 43, + "phones": [ + "+44 1234567", + "+44 2345678" + ] + }"#; + let input: serde_json::Value = serde_json::from_str(data).unwrap(); + let payload = ZBytes::try_serialize(input.clone()).unwrap(); + let output: serde_json::Value = payload.deserialize().unwrap(); + assert_eq!(input, output); + // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc. 
+ // let encoding = Encoding::APPLICATION_JSON; + + // YAML + let data = r#" + name: "John Doe" + age: 43 + phones: + - "+44 1234567" + - "+44 2345678" + "#; + let input: serde_yaml::Value = serde_yaml::from_str(data).unwrap(); + let payload = ZBytes::try_serialize(input.clone()).unwrap(); + let output: serde_yaml::Value = payload.deserialize().unwrap(); + assert_eq!(input, output); + // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc. + // let encoding = Encoding::APPLICATION_YAML; + + // Protobuf + use prost::Message; + #[derive(Message, Eq, PartialEq)] + struct EntityInfo { + #[prost(uint32)] + id: u32, + #[prost(string)] + name: String, + } + let input = EntityInfo { + id: 1234, + name: String::from("John Doe"), + }; + let payload = ZBytes::from(input.encode_to_vec()); + let output = + EntityInfo::decode(Cursor::new(payload.deserialize::>().unwrap())).unwrap(); + assert_eq!(input, output); + // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc. + // let encoding = Encoding::APPLICATION_PROTOBUF; +} diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index eebe582f98..a83eeb5034 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -37,6 +37,7 @@ async fn main() { // // Uncomment this line to use a ring channel instead. // // More information on the ring channel are available in the z_pull example. 
// .with(zenoh::handlers::RingChannel::default()) + // Refer to z_bytes.rs to see how to serialize different types of message .payload(payload.unwrap_or_default()) .target(target) .timeout(timeout) @@ -45,6 +46,7 @@ async fn main() { while let Ok(reply) = replies.recv_async().await { match reply.result() { Ok(sample) => { + // Refer to z_bytes.rs to see how to deserialize different types of message let payload = sample .payload() .deserialize::() diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 9f84ba118f..4ff177c32a 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -35,6 +35,7 @@ async fn main() { tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] {payload}"); println!("Putting Data ('{}': '{}')...", &key_expr, buf); + // Refer to z_bytes.rs to see how to serialize different types of message publisher .put(buf) .encoding(Encoding::TEXT_PLAIN) // Optionally set the encoding metadata diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index bc4dd88eed..0097f99139 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -26,6 +26,7 @@ async fn main() { let session = zenoh::open(config).await.unwrap(); println!("Putting Data ('{key_expr}': '{payload}')..."); + // Refer to z_bytes.rs to see how to serialize different types of message session.put(&key_expr, payload).await.unwrap(); } diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 7857c8caff..d6c5c7ea46 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -46,6 +46,7 @@ async fn main() { match query.payload() { None => println!(">> [Queryable ] Received Query '{}'", query.selector()), Some(query_payload) => { + // Refer to z_bytes.rs to see how to deserialize different types of message let deserialized_payload = query_payload .deserialize::() .unwrap_or_else(|e| format!("{}", e)); @@ -61,6 +62,7 @@ async fn main() { 
key_expr.as_str(), payload, ); + // Refer to z_bytes.rs to see how to serialize different types of message query .reply(key_expr.clone(), payload.clone()) .await diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index 8ecc4b9818..690a211119 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -35,6 +35,7 @@ async fn main() { println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { + // Refer to z_bytes.rs to see how to deserialize different types of message let payload = sample .payload() .deserialize::() From 76a1d18791606475acd617917becd8237a706477 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 3 Jul 2024 16:39:08 +0200 Subject: [PATCH 11/29] Use TCP MSS as TCP link MTU --- Cargo.lock | 2 + io/zenoh-links/zenoh-link-tcp/Cargo.toml | 1 + io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 20 ++++++++- io/zenoh-links/zenoh-link-tls/Cargo.toml | 1 + io/zenoh-links/zenoh-link-tls/src/unicast.rs | 41 ++++++++++++++----- io/zenoh-transport/src/common/batch.rs | 10 +---- io/zenoh-transport/src/multicast/link.rs | 6 +-- .../src/unicast/establishment/accept.rs | 2 +- io/zenoh-transport/src/unicast/link.rs | 4 +- .../src/unicast/lowlatency/link.rs | 6 +-- .../src/unicast/universal/link.rs | 2 +- .../tests/unicast_compression.rs | 2 +- io/zenoh-transport/tests/unicast_transport.rs | 2 +- 13 files changed, 62 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5344e0135c..1f8b8bac9e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5695,6 +5695,7 @@ name = "zenoh-link-tcp" version = "0.11.0-dev" dependencies = [ "async-trait", + "socket2 0.5.6", "tokio", "tokio-util", "tracing", @@ -5719,6 +5720,7 @@ dependencies = [ "rustls-pki-types", "rustls-webpki", "secrecy", + "socket2 0.5.6", "tokio", "tokio-rustls", "tokio-util", diff --git a/io/zenoh-links/zenoh-link-tcp/Cargo.toml b/io/zenoh-links/zenoh-link-tcp/Cargo.toml index ca94412382..4a501f61ed 100644 --- 
a/io/zenoh-links/zenoh-link-tcp/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tcp/Cargo.toml @@ -26,6 +26,7 @@ description = "Internal crate for zenoh." [dependencies] async-trait = { workspace = true } +socket2 = { workspace = true } tokio = { workspace = true, features = ["net", "io-util", "rt", "time"] } tokio-util = { workspace = true, features = ["rt"] } tracing = {workspace = true} diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 5c4d086c5b..df5a3bdae4 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -80,6 +80,12 @@ impl LinkUnicastTcp { dst_locator: Locator::new(TCP_LOCATOR_PREFIX, dst_addr.to_string(), "").unwrap(), } } + + #[allow(clippy::mut_from_ref)] + fn get_socket(&self) -> &TcpStream { + unsafe { &*self.socket.get() } + } + #[allow(clippy::mut_from_ref)] fn get_mut_socket(&self) -> &mut TcpStream { unsafe { &mut *self.socket.get() } @@ -147,7 +153,18 @@ impl LinkUnicastTrait for LinkUnicastTcp { #[inline(always)] fn get_mtu(&self) -> BatchSize { - *TCP_DEFAULT_MTU + // target_os limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 + #[cfg(not(target_os = "redox"))] + { + let socket = socket2::SockRef::from(self.get_socket()); + let mss = socket.mss().unwrap_or(*TCP_DEFAULT_MTU as u32); + mss.min(*TCP_DEFAULT_MTU as u32) as BatchSize + } + + #[cfg(target_os = "redox")] + { + *TCP_DEFAULT_MTU + } } #[inline(always)] @@ -195,6 +212,7 @@ impl fmt::Debug for LinkUnicastTcp { f.debug_struct("Tcp") .field("src", &self.src_addr) .field("dst", &self.dst_addr) + .field("mtu", &self.get_mtu()) .finish() } } diff --git a/io/zenoh-links/zenoh-link-tls/Cargo.toml b/io/zenoh-links/zenoh-link-tls/Cargo.toml index e0f1c6b03d..a716c72c99 100644 --- a/io/zenoh-links/zenoh-link-tls/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tls/Cargo.toml @@ -33,6 +33,7 @@ rustls-pemfile = { workspace = true } rustls-pki-types = { 
workspace = true } rustls-webpki = { workspace = true } secrecy = { workspace = true } +socket2 = { workspace = true } tokio = { workspace = true, features = ["fs", "io-util", "net", "sync"] } tokio-rustls = { workspace = true } tokio-util = { workspace = true, features = ["rt"] } diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 41847a1577..2f2d2431a1 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -109,11 +109,15 @@ impl LinkUnicastTls { } } + fn get_socket(&self) -> &TlsStream { + unsafe { &*self.inner.get() } + } + // NOTE: It is safe to suppress Clippy warning since no concurrent reads // or concurrent writes will ever happen. The read_mtx and write_mtx // are respectively acquired in any read and write operation. #[allow(clippy::mut_from_ref)] - fn get_sock_mut(&self) -> &mut TlsStream { + fn get_mut_socket(&self) -> &mut TlsStream { unsafe { &mut *self.inner.get() } } } @@ -124,7 +128,7 @@ impl LinkUnicastTrait for LinkUnicastTls { tracing::trace!("Closing TLS link: {}", self); // Flush the TLS stream let _guard = zasynclock!(self.write_mtx); - let tls_stream = self.get_sock_mut(); + let tls_stream = self.get_mut_socket(); let res = tls_stream.flush().await; tracing::trace!("TLS link flush {}: {:?}", self, res); // Close the underlying TCP stream @@ -136,7 +140,7 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn write(&self, buffer: &[u8]) -> ZResult { let _guard = zasynclock!(self.write_mtx); - self.get_sock_mut().write(buffer).await.map_err(|e| { + self.get_mut_socket().write(buffer).await.map_err(|e| { tracing::trace!("Write error on TLS link {}: {}", self, e); zerror!(e).into() }) @@ -144,7 +148,7 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn write_all(&self, buffer: &[u8]) -> ZResult<()> { let _guard = zasynclock!(self.write_mtx); - self.get_sock_mut().write_all(buffer).await.map_err(|e| { + 
self.get_mut_socket().write_all(buffer).await.map_err(|e| { tracing::trace!("Write error on TLS link {}: {}", self, e); zerror!(e).into() }) @@ -152,7 +156,7 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn read(&self, buffer: &mut [u8]) -> ZResult { let _guard = zasynclock!(self.read_mtx); - self.get_sock_mut().read(buffer).await.map_err(|e| { + self.get_mut_socket().read(buffer).await.map_err(|e| { tracing::trace!("Read error on TLS link {}: {}", self, e); zerror!(e).into() }) @@ -160,10 +164,14 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn read_exact(&self, buffer: &mut [u8]) -> ZResult<()> { let _guard = zasynclock!(self.read_mtx); - let _ = self.get_sock_mut().read_exact(buffer).await.map_err(|e| { - tracing::trace!("Read error on TLS link {}: {}", self, e); - zerror!(e) - })?; + let _ = self + .get_mut_socket() + .read_exact(buffer) + .await + .map_err(|e| { + tracing::trace!("Read error on TLS link {}: {}", self, e); + zerror!(e) + })?; Ok(()) } @@ -179,7 +187,18 @@ impl LinkUnicastTrait for LinkUnicastTls { #[inline(always)] fn get_mtu(&self) -> BatchSize { - *TLS_DEFAULT_MTU + // target_os limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 + #[cfg(not(target_os = "redox"))] + { + let socket = socket2::SockRef::from(self.get_socket().get_ref().0); + let mss = socket.mss().unwrap_or(*TLS_DEFAULT_MTU as u32); + mss.min(*TLS_DEFAULT_MTU as u32) as BatchSize + } + + #[cfg(target_os = "redox")] + { + *TLS_DEFAULT_MTU + } } #[inline(always)] @@ -206,7 +225,7 @@ impl LinkUnicastTrait for LinkUnicastTls { impl Drop for LinkUnicastTls { fn drop(&mut self) { // Close the underlying TCP stream - let (tcp_stream, _) = self.get_sock_mut().get_mut(); + let (tcp_stream, _) = self.get_mut_socket().get_mut(); let _ = zenoh_runtime::ZRuntime::Acceptor .block_in_place(async move { tcp_stream.shutdown().await }); } diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 
c36993ddf7..94b03b0514 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -120,14 +120,6 @@ impl BatchConfig { .then_some(BatchHeader::new(BatchHeader::COMPRESSION)) } } - - pub fn max_buffer_size(&self) -> usize { - let mut len = self.mtu as usize; - if self.is_streamed { - len += BatchSize::BITS as usize / 8; - } - len - } } // Batch header @@ -214,7 +206,7 @@ pub struct WBatch { impl WBatch { pub fn new(config: BatchConfig) -> Self { let mut batch = Self { - buffer: BBuf::with_capacity(config.max_buffer_size()), + buffer: BBuf::with_capacity(config.mtu as usize), codec: Zenoh080Batch::new(), config, #[cfg(feature = "stats")] diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 9c2bdbe1f1..90999d32ce 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -73,9 +73,7 @@ impl TransportLinkMulticast { .batch .is_compression .then_some(BBuf::with_capacity( - lz4_flex::block::get_maximum_output_size( - self.config.batch.max_buffer_size() - ), + lz4_flex::block::get_maximum_output_size(self.config.batch.mtu as usize), )), None ), @@ -551,7 +549,7 @@ async fn rx_task( } // The pool of buffers - let mtu = link.inner.config.batch.max_buffer_size(); + let mtu = link.inner.config.batch.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index f3a053aa63..3f71d7b6da 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -769,7 +769,7 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - .await?; tracing::debug!( - "New transport link accepted from {} to {}: {}.", + "New transport link accepted from {} to {}: {}", osyn_out.other_zid, manager.config.zid, s_link, diff 
--git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index e43f4d3813..736360db63 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -67,9 +67,7 @@ impl TransportLinkUnicast { .batch .is_compression .then_some(BBuf::with_capacity( - lz4_flex::block::get_maximum_output_size( - self.config.batch.max_buffer_size() - ), + lz4_flex::block::get_maximum_output_size(self.config.batch.mtu as usize), )), None ), diff --git a/io/zenoh-transport/src/unicast/lowlatency/link.rs b/io/zenoh-transport/src/unicast/lowlatency/link.rs index 250850726f..3ba1cd724f 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/link.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/link.rs @@ -152,11 +152,7 @@ impl TransportUnicastLowlatency { // The pool of buffers let pool = { - let mtu = if is_streamed { - link_rx.batch.mtu as usize - } else { - link_rx.batch.max_buffer_size() - }; + let mtu = link_rx.batch.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index 9655d0964d..cc3afc06e5 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -248,7 +248,7 @@ async fn rx_task( } // The pool of buffers - let mtu = link.batch.max_buffer_size(); + let mtu = link.batch.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index 7c2443c5d9..e5015c3d25 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -51,8 +51,8 @@ mod tests { const MSG_COUNT: usize = 1_000; const MSG_SIZE_ALL: [usize; 2] = [1_024, 131_072]; - const MSG_SIZE_LOWLATENCY: [usize; 2] = [1_024, 65000]; const MSG_SIZE_NOFRAG: [usize; 1] 
= [1_024]; + const MSG_SIZE_LOWLATENCY: [usize; 1] = MSG_SIZE_NOFRAG; // Transport Handler for the router struct SHRouter { diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index a0fabe1ffd..1c5d749b59 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -232,13 +232,13 @@ const SLEEP_COUNT: Duration = Duration::from_millis(10); const MSG_COUNT: usize = 1_000; const MSG_SIZE_ALL: [usize; 2] = [1_024, 131_072]; -const MSG_SIZE_LOWLATENCY: [usize; 2] = [1_024, 65000]; #[cfg(any( feature = "transport_tcp", feature = "transport_udp", feature = "transport_unixsock-stream", ))] const MSG_SIZE_NOFRAG: [usize; 1] = [1_024]; +const MSG_SIZE_LOWLATENCY: [usize; 1] = MSG_SIZE_NOFRAG; // Transport Handler for the router struct SHRouter { From feb25ec0f5de0a596b3502008b692071c6eb01e0 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 3 Jul 2024 16:55:58 +0200 Subject: [PATCH 12/29] Compute default TCP MSS if not supported --- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 13 +++++++++---- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 13 +++++++++---- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index df5a3bdae4..e0690dff16 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -153,17 +153,22 @@ impl LinkUnicastTrait for LinkUnicastTcp { #[inline(always)] fn get_mtu(&self) -> BatchSize { - // target_os limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 - #[cfg(not(target_os = "redox"))] + // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 + #[cfg(target_family = "unix")] { let socket = socket2::SockRef::from(self.get_socket()); let mss = socket.mss().unwrap_or(*TCP_DEFAULT_MTU as u32); 
mss.min(*TCP_DEFAULT_MTU as u32) as BatchSize } - #[cfg(target_os = "redox")] + #[cfg(not(target_family = "unix"))] { - *TCP_DEFAULT_MTU + // See IETF RFC6691 https://datatracker.ietf.org/doc/rfc6691/ + let header = match self.src_addr.ip() { + std::net::IpAddr::V4(_) => 40, + std::net::IpAddr::V6(_) => 60, + }; + *TCP_DEFAULT_MTU - header } } diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 2f2d2431a1..933461b47e 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -187,17 +187,22 @@ impl LinkUnicastTrait for LinkUnicastTls { #[inline(always)] fn get_mtu(&self) -> BatchSize { - // target_os limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 - #[cfg(not(target_os = "redox"))] + // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 + #[cfg(target_family = "unix")] { let socket = socket2::SockRef::from(self.get_socket().get_ref().0); let mss = socket.mss().unwrap_or(*TLS_DEFAULT_MTU as u32); mss.min(*TLS_DEFAULT_MTU as u32) as BatchSize } - #[cfg(target_os = "redox")] + #[cfg(not(target_family = "unix"))] { - *TLS_DEFAULT_MTU + // See IETF RFC6691 https://datatracker.ietf.org/doc/rfc6691/ + let header = match self.src_addr.ip() { + std::net::IpAddr::V4(_) => 40, + std::net::IpAddr::V6(_) => 60, + }; + *TLS_DEFAULT_MTU - header } } From 770b707925ed70e381ba9d8e40858e0bb3c8cd6c Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 3 Jul 2024 17:04:00 +0200 Subject: [PATCH 13/29] Fix clippy --- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 2 +- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index e0690dff16..e1682604df 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ 
b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -81,7 +81,7 @@ impl LinkUnicastTcp { } } - #[allow(clippy::mut_from_ref)] + #[cfg(target_family = "unix")] fn get_socket(&self) -> &TcpStream { unsafe { &*self.socket.get() } } diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 933461b47e..bc90ba0983 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -109,6 +109,7 @@ impl LinkUnicastTls { } } + #[cfg(target_family = "unix")] fn get_socket(&self) -> &TlsStream { unsafe { &*self.inner.get() } } From 78ffa1085c7e08395f462d2f9e59214de154883b Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 4 Jul 2024 14:23:56 +0200 Subject: [PATCH 14/29] Compute larget tcp mss lesser than max batch size --- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 27 ++++++++++++-------- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 27 ++++++++++++-------- 2 files changed, 32 insertions(+), 22 deletions(-) diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index e1682604df..6f5bf96ee9 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -153,23 +153,28 @@ impl LinkUnicastTrait for LinkUnicastTcp { #[inline(always)] fn get_mtu(&self) -> BatchSize { + // See IETF RFC6691: https://datatracker.ietf.org/doc/rfc6691/ + let header = match self.src_addr.ip() { + std::net::IpAddr::V4(_) => 40, + std::net::IpAddr::V6(_) => 60, + }; + #[allow(unused_mut)] // mut is not needed when target_family != unix + let mut mtu = *TCP_DEFAULT_MTU - header; + // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 #[cfg(target_family = "unix")] { let socket = socket2::SockRef::from(self.get_socket()); - let mss = socket.mss().unwrap_or(*TCP_DEFAULT_MTU as u32); - mss.min(*TCP_DEFAULT_MTU as u32) as BatchSize + let mss = 
socket.mss().unwrap_or(mtu as u32); + // Compute largest multiple of TCP MSS that is smaller of default MTU + let mut tgt = mss; + while (tgt + mss) < mtu as u32 { + tgt += mss; + } + mtu = (mtu as u32).min(tgt) as BatchSize; } - #[cfg(not(target_family = "unix"))] - { - // See IETF RFC6691 https://datatracker.ietf.org/doc/rfc6691/ - let header = match self.src_addr.ip() { - std::net::IpAddr::V4(_) => 40, - std::net::IpAddr::V6(_) => 60, - }; - *TCP_DEFAULT_MTU - header - } + mtu } #[inline(always)] diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index bc90ba0983..62bf49e611 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -188,23 +188,28 @@ impl LinkUnicastTrait for LinkUnicastTls { #[inline(always)] fn get_mtu(&self) -> BatchSize { + // See IETF RFC6691: https://datatracker.ietf.org/doc/rfc6691/ + let header = match self.src_addr.ip() { + std::net::IpAddr::V4(_) => 40, + std::net::IpAddr::V6(_) => 60, + }; + #[allow(unused_mut, assign)] // mut is not needed when target_family != unix + let mut mtu = *TLS_DEFAULT_MTU - header; + // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 #[cfg(target_family = "unix")] { let socket = socket2::SockRef::from(self.get_socket().get_ref().0); - let mss = socket.mss().unwrap_or(*TLS_DEFAULT_MTU as u32); - mss.min(*TLS_DEFAULT_MTU as u32) as BatchSize + let mss = socket.mss().unwrap_or(mtu as u32); + // Compute largest multiple of TCP MSS that is smaller of default MTU + let mut tgt = mss; + while (tgt + mss) < mtu as u32 { + tgt += mss; + } + mtu = (mtu as u32).min(tgt) as BatchSize; } - #[cfg(not(target_family = "unix"))] - { - // See IETF RFC6691 https://datatracker.ietf.org/doc/rfc6691/ - let header = match self.src_addr.ip() { - std::net::IpAddr::V4(_) => 40, - std::net::IpAddr::V6(_) => 60, - }; - *TLS_DEFAULT_MTU - header - } + mtu } #[inline(always)] From 
a5195af7edc12bed5e876bbac3e2f55503dee1db Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 4 Jul 2024 14:26:36 +0200 Subject: [PATCH 15/29] Fix clippy --- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 62bf49e611..74e3ae8341 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -193,7 +193,7 @@ impl LinkUnicastTrait for LinkUnicastTls { std::net::IpAddr::V4(_) => 40, std::net::IpAddr::V6(_) => 60, }; - #[allow(unused_mut, assign)] // mut is not needed when target_family != unix + #[allow(unused_mut)] // mut is not needed when target_family != unix let mut mtu = *TLS_DEFAULT_MTU - header; // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 From 5850c946fbf351928f76b24a25566724c8ff70ad Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 4 Jul 2024 17:48:00 +0200 Subject: [PATCH 16/29] Consider MSS/2 in TCP MTU computation --- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 3 ++- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 6f5bf96ee9..99d2d44c36 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -165,7 +165,8 @@ impl LinkUnicastTrait for LinkUnicastTcp { #[cfg(target_family = "unix")] { let socket = socket2::SockRef::from(self.get_socket()); - let mss = socket.mss().unwrap_or(mtu as u32); + // Get the MSS and divide it by 2 to ensure we can at least fill half the MSS + let mss = socket.mss().unwrap_or(mtu as u32) / 2; // Compute largest multiple of TCP MSS that is smaller of default MTU let mut tgt = mss; while (tgt + mss) < mtu as u32 { diff --git 
a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 74e3ae8341..4ab21d9993 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -200,7 +200,8 @@ impl LinkUnicastTrait for LinkUnicastTls { #[cfg(target_family = "unix")] { let socket = socket2::SockRef::from(self.get_socket().get_ref().0); - let mss = socket.mss().unwrap_or(mtu as u32); + // Get the MSS and divide it by 2 to ensure we can at least fill half the MSS + let mss = socket.mss().unwrap_or(mtu as u32) / 2; // Compute largest multiple of TCP MSS that is smaller of default MTU let mut tgt = mss; while (tgt + mss) < mtu as u32 { From bef94d19a901f0f85c42947058271a6a59858582 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 4 Jul 2024 17:52:50 +0200 Subject: [PATCH 17/29] Make listen and connect endpoints ModeDependentValues (#1216) * Make listen and connect endpoints ModeDependentValues * Improve DEFAULT_CONFIG doc * Fix doctests --- DEFAULT_CONFIG.json5 | 24 +++-- commons/zenoh-config/src/defaults.rs | 28 ++++++ commons/zenoh-config/src/lib.rs | 18 ++-- commons/zenoh-config/src/mode_dependent.rs | 90 +++++++++++++++---- commons/zenoh-protocol/src/core/whatami.rs | 3 +- examples/src/lib.rs | 12 ++- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 6 +- zenoh-ext/examples/src/lib.rs | 12 ++- zenoh-ext/tests/liveliness.rs | 36 +++++--- zenoh/src/api/session.rs | 3 +- zenoh/src/net/runtime/orchestrator.rs | 81 ++++++++--------- zenoh/tests/acl.rs | 6 +- zenoh/tests/authentication.rs | 18 +++- zenoh/tests/connection_retry.rs | 10 ++- zenoh/tests/events.rs | 28 ++++-- zenoh/tests/interceptors.rs | 12 ++- zenoh/tests/liveliness.rs | 30 ++++--- zenoh/tests/matching.rs | 3 +- zenoh/tests/open_time.rs | 6 +- zenoh/tests/routing.rs | 24 ++--- zenoh/tests/session.rs | 68 ++++++++++---- zenoh/tests/shm.rs | 40 ++++++--- zenoh/tests/unicity.rs | 36 ++++++-- zenohd/src/main.rs | 14 +-- 24 files changed, 427 
insertions(+), 181 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 6906d15cf5..1e9921bbe3 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -23,9 +23,13 @@ /// E.g. tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 connect: { /// timeout waiting for all endpoints connected (0: no retry, -1: infinite timeout) - /// Accepts a single value or different values for router, peer and client. + /// Accepts a single value (e.g. timeout_ms: 0) + /// or different values for router, peer and client (e.g. timeout_ms: { router: -1, peer: -1, client: 0 }). timeout_ms: { router: -1, peer: -1, client: 0 }, + /// The list of endpoints to connect to. + /// Accepts a single list (e.g. endpoints: ["tcp/10.10.10.10:7447", "tcp/11.11.11.11:7447"]) + /// or different lists for router, peer and client (e.g. endpoints: { router: ["tcp/10.10.10.10:7447"], peer: ["tcp/11.11.11.11:7447"] }). endpoints: [ // "/

" ], @@ -49,19 +53,21 @@ }, }, - /// Which endpoints to listen on. E.g. tcp/localhost:7447. + /// Which endpoints to listen on. E.g. tcp/0.0.0.0:7447. /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, /// peers, or client can use to establish a zenoh session. /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 listen: { /// timeout waiting for all listen endpoints (0: no retry, -1: infinite timeout) - /// Accepts a single value or different values for router, peer and client. + /// Accepts a single value (e.g. timeout_ms: 0) + /// or different values for router, peer and client (e.g. timeout_ms: { router: -1, peer: -1, client: 0 }). timeout_ms: 0, - endpoints: [ - // "/
" - ], + /// The list of endpoints to listen on. + /// Accepts a single list (e.g. endpoints: ["tcp/[::]:7447", "udp/[::]:7447"]) + /// or different lists for router, peer and client (e.g. endpoints: { router: ["tcp/[::]:7447"], peer: ["tcp/[::]:0"] }). + endpoints: { router: ["tcp/[::]:7447"], peer: ["tcp/[::]:0"] }, /// Global listen configuration, /// Accepts a single value or different values for router, peer and client. @@ -98,7 +104,8 @@ /// The time-to-live on multicast scouting packets ttl: 1, /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. - /// Accepts a single value or different values for router, peer and client. + /// Accepts a single value (e.g. autoconnect: "router|peer") + /// or different values for router, peer and client (e.g. autoconnect: { router: "", peer: "router|peer" }). /// Each value is bit-or-like combinations of "peer", "router" and "client". autoconnect: { router: "", peer: "router|peer" }, /// Whether or not to listen for scout messages on UDP multicast and reply to them. @@ -115,7 +122,8 @@ /// direct connectivity with each other. multihop: false, /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. - /// Accepts a single value or different values for router, peer and client. + /// Accepts a single value (e.g. autoconnect: "router|peer") + /// or different values for router, peer and client (e.g. autoconnect: { router: "", peer: "router|peer" }). /// Each value is bit-or-like combinations of "peer", "router" and "client". 
autoconnect: { router: "", peer: "router|peer" }, }, diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index a6be460bcb..bbb03a7eff 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -100,6 +100,34 @@ pub mod routing { } } +impl Default for ListenConfig { + #[allow(clippy::unnecessary_cast)] + fn default() -> Self { + Self { + timeout_ms: None, + endpoints: ModeDependentValue::Dependent(ModeValues { + router: Some(vec!["tcp/[::]:7447".parse().unwrap()]), + peer: Some(vec!["tcp/[::]:0".parse().unwrap()]), + client: None, + }), + exit_on_failure: None, + retry: None, + } + } +} + +impl Default for ConnectConfig { + #[allow(clippy::unnecessary_cast)] + fn default() -> Self { + Self { + timeout_ms: None, + endpoints: ModeDependentValue::Unique(vec![]), + exit_on_failure: None, + retry: None, + } + } +} + impl Default for TransportUnicastConf { fn default() -> Self { Self { diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 51dce4ffb4..e239ac8b7a 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -185,10 +185,8 @@ pub fn peer() -> Config { pub fn client, T: Into>(peers: I) -> Config { let mut config = Config::default(); config.set_mode(Some(WhatAmI::Client)).unwrap(); - config - .connect - .endpoints - .extend(peers.into_iter().map(|t| t.into())); + config.connect.endpoints = + ModeDependentValue::Unique(peers.into_iter().map(|t| t.into()).collect()); config } @@ -227,21 +225,23 @@ validated_struct::validator! { /// The node's mode ("router" (default value in `zenohd`), "peer" or "client"). mode: Option, /// Which zenoh nodes to connect to. 
- pub connect: #[derive(Default)] + pub connect: ConnectConfig { /// global timeout for full connect cycle pub timeout_ms: Option>, - pub endpoints: Vec, + /// The list of endpoints to connect to + pub endpoints: ModeDependentValue>, /// if connection timeout exceed, exit from application pub exit_on_failure: Option>, pub retry: Option, }, - /// Which endpoints to listen on. `zenohd` will add `tcp/[::]:7447` to these locators if left empty. - pub listen: #[derive(Default)] + /// Which endpoints to listen on. + pub listen: ListenConfig { /// global timeout for full listen cycle pub timeout_ms: Option>, - pub endpoints: Vec, + /// The list of endpoints to listen on + pub endpoints: ModeDependentValue>, /// if connection timeout exceed, exit from application pub exit_on_failure: Option>, pub retry: Option, diff --git a/commons/zenoh-config/src/mode_dependent.rs b/commons/zenoh-config/src/mode_dependent.rs index 7c331c8318..6a06f967ba 100644 --- a/commons/zenoh-config/src/mode_dependent.rs +++ b/commons/zenoh-config/src/mode_dependent.rs @@ -18,7 +18,7 @@ use serde::{ de::{self, MapAccess, Visitor}, Deserialize, Serialize, }; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor}; +use zenoh_protocol::core::{EndPoint, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor}; pub trait ModeDependent { fn router(&self) -> Option<&T>; @@ -32,6 +32,7 @@ pub trait ModeDependent { WhatAmI::Client => self.client(), } } + fn get_mut(&mut self, whatami: WhatAmI) -> Option<&mut T>; } #[derive(Clone, Debug, Serialize, Deserialize)] @@ -59,6 +60,15 @@ impl ModeDependent for ModeValues { fn client(&self) -> Option<&T> { self.client.as_ref() } + + #[inline] + fn get_mut(&mut self, whatami: WhatAmI) -> Option<&mut T> { + match whatami { + WhatAmI::Router => self.router.as_mut(), + WhatAmI::Peer => self.peer.as_mut(), + WhatAmI::Client => self.client.as_mut(), + } + } } #[derive(Clone, Debug)] @@ -67,6 +77,15 @@ pub enum ModeDependentValue { Dependent(ModeValues), } +impl 
ModeDependentValue { + #[inline] + pub fn set(&mut self, value: T) -> Result, ModeDependentValue> { + let mut value = ModeDependentValue::Unique(value); + std::mem::swap(self, &mut value); + Ok(value) + } +} + impl ModeDependent for ModeDependentValue { #[inline] fn router(&self) -> Option<&T> { @@ -91,6 +110,14 @@ impl ModeDependent for ModeDependentValue { Self::Dependent(o) => o.client(), } } + + #[inline] + fn get_mut(&mut self, whatami: WhatAmI) -> Option<&mut T> { + match self { + Self::Unique(v) => Some(v), + Self::Dependent(o) => o.get_mut(whatami), + } + } } impl serde::Serialize for ModeDependentValue @@ -249,31 +276,62 @@ impl<'a> serde::Deserialize<'a> for ModeDependentValue { } } +impl<'a> serde::Deserialize<'a> for ModeDependentValue> { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'a>, + { + struct UniqueOrDependent(PhantomData U>); + + impl<'de> Visitor<'de> for UniqueOrDependent>> { + type Value = ModeDependentValue>; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("list of endpoints or mode dependent list of endpoints") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: de::SeqAccess<'de>, + { + let mut v = seq.size_hint().map_or_else(Vec::new, Vec::with_capacity); + + while let Some(s) = seq.next_element()? 
{ + v.push(s); + } + Ok(ModeDependentValue::Unique(v)) + } + + fn visit_map(self, map: M) -> Result + where + M: MapAccess<'de>, + { + ModeValues::deserialize(de::value::MapAccessDeserializer::new(map)) + .map(ModeDependentValue::Dependent) + } + } + deserializer.deserialize_any(UniqueOrDependent(PhantomData)) + } +} + impl ModeDependent for Option> { #[inline] fn router(&self) -> Option<&T> { - match self { - Some(ModeDependentValue::Unique(v)) => Some(v), - Some(ModeDependentValue::Dependent(o)) => o.router(), - None => None, - } + self.as_ref().and_then(|m| m.router()) } #[inline] fn peer(&self) -> Option<&T> { - match self { - Some(ModeDependentValue::Unique(v)) => Some(v), - Some(ModeDependentValue::Dependent(o)) => o.peer(), - None => None, - } + self.as_ref().and_then(|m| m.peer()) } #[inline] fn client(&self) -> Option<&T> { - match self { - Some(ModeDependentValue::Unique(v)) => Some(v), - Some(ModeDependentValue::Dependent(o)) => o.client(), - None => None, - } + self.as_ref().and_then(|m| m.client()) + } + + #[inline] + fn get_mut(&mut self, whatami: WhatAmI) -> Option<&mut T> { + self.as_mut().and_then(|m| m.get_mut(whatami)) } } diff --git a/commons/zenoh-protocol/src/core/whatami.rs b/commons/zenoh-protocol/src/core/whatami.rs index 10c5b42c78..9eb9628e3f 100644 --- a/commons/zenoh-protocol/src/core/whatami.rs +++ b/commons/zenoh-protocol/src/core/whatami.rs @@ -18,9 +18,10 @@ use const_format::formatcp; use zenoh_result::{bail, ZError}; #[repr(u8)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] pub enum WhatAmI { Router = 0b001, + #[default] Peer = 0b010, Client = 0b100, } diff --git a/examples/src/lib.rs b/examples/src/lib.rs index 1ab27dfc8f..e863e1457c 100644 --- a/examples/src/lib.rs +++ b/examples/src/lib.rs @@ -56,10 +56,18 @@ impl From<&CommonArgs> for Config { } .unwrap(); if !value.connect.is_empty() { - config.connect.endpoints = value.connect.iter().map(|v| 
v.parse().unwrap()).collect(); + config + .connect + .endpoints + .set(value.connect.iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } if !value.listen.is_empty() { - config.listen.endpoints = value.listen.iter().map(|v| v.parse().unwrap()).collect(); + config + .listen + .endpoints + .set(value.listen.iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } if value.no_multicast_scouting { config.scouting.multicast.set_enabled(Some(false)).unwrap(); diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 5f7d466f13..e3fae4d285 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -110,13 +110,15 @@ fn parse_args() -> Config { config .connect .endpoints - .extend(values.into_iter().map(|v| v.parse().unwrap())) + .set(values.into_iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } if let Some(values) = args.get_many::<&String>("listen") { config .listen .endpoints - .extend(values.into_iter().map(|v| v.parse().unwrap())) + .set(values.into_iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } if args.get_flag("no-multicast-scouting") { config.scouting.multicast.set_enabled(Some(false)).unwrap(); diff --git a/zenoh-ext/examples/src/lib.rs b/zenoh-ext/examples/src/lib.rs index b3e675b046..881d60c138 100644 --- a/zenoh-ext/examples/src/lib.rs +++ b/zenoh-ext/examples/src/lib.rs @@ -50,10 +50,18 @@ impl From<&CommonArgs> for Config { } .unwrap(); if !value.connect.is_empty() { - config.connect.endpoints = value.connect.iter().map(|v| v.parse().unwrap()).collect(); + config + .connect + .endpoints + .set(value.connect.iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } if !value.listen.is_empty() { - config.listen.endpoints = value.listen.iter().map(|v| v.parse().unwrap()).collect(); + config + .listen + .endpoints + .set(value.listen.iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } config 
} diff --git a/zenoh-ext/tests/liveliness.rs b/zenoh-ext/tests/liveliness.rs index 23e901d458..97dc817394 100644 --- a/zenoh-ext/tests/liveliness.rs +++ b/zenoh-ext/tests/liveliness.rs @@ -37,7 +37,8 @@ async fn test_liveliness_querying_subscriber_clique() { let peer1 = { let mut c = config::default(); c.listen - .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -49,7 +50,8 @@ async fn test_liveliness_querying_subscriber_clique() { let peer2 = { let mut c = config::default(); c.connect - .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -107,7 +109,8 @@ async fn test_liveliness_querying_subscriber_brokered() { let _router = { let mut c = config::default(); c.listen - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Router)); @@ -119,7 +122,8 @@ async fn test_liveliness_querying_subscriber_brokered() { let client1 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ -131,7 +135,8 @@ async fn test_liveliness_querying_subscriber_brokered() { let client2 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ 
-143,7 +148,8 @@ async fn test_liveliness_querying_subscriber_brokered() { let client3 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ -201,7 +207,8 @@ async fn test_liveliness_fetching_subscriber_clique() { let peer1 = { let mut c = config::default(); c.listen - .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -213,7 +220,8 @@ async fn test_liveliness_fetching_subscriber_clique() { let peer2 = { let mut c = config::default(); c.connect - .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -275,7 +283,8 @@ async fn test_liveliness_fetching_subscriber_brokered() { let _router = { let mut c = config::default(); c.listen - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Router)); @@ -287,7 +296,8 @@ async fn test_liveliness_fetching_subscriber_brokered() { let client1 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ -299,7 +309,8 @@ async fn test_liveliness_fetching_subscriber_brokered() { let client2 = { let mut c = config::default(); c.connect - 
.set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ -311,7 +322,8 @@ async fn test_liveliness_fetching_subscriber_brokered() { let client3 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index f97e5d7541..2c50560d77 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -2830,7 +2830,8 @@ impl crate::net::primitives::EPrimitives for Session { /// /// let mut config = zenoh::config::peer(); /// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); -/// config.connect.endpoints.extend("tcp/10.10.10.10:7447,tcp/11.11.11.11:7447".split(',').map(|s|s.parse().unwrap())); +/// config.connect.endpoints.set( +/// ["tcp/10.10.10.10:7447", "tcp/11.11.11.11:7447"].iter().map(|s|s.parse().unwrap()).collect()); /// /// let session = zenoh::open(config).await.unwrap(); /// # } diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index da7739e3be..0bd2b8ef33 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -43,8 +43,6 @@ const RCV_BUF_SIZE: usize = u16::MAX as usize; const SCOUT_INITIAL_PERIOD: Duration = Duration::from_millis(1_000); const SCOUT_MAX_PERIOD: Duration = Duration::from_millis(8_000); const SCOUT_PERIOD_INCREASE_FACTOR: u32 = 2; -const ROUTER_DEFAULT_LISTENER: &str = "tcp/[::]:7447"; -const PEER_DEFAULT_LISTENER: &str = "tcp/[::]:0"; pub enum Loop { Continue, @@ -130,7 +128,12 @@ impl Runtime { let (peers, scouting, addr, ifaces, timeout, multicast_ttl) = { let guard = 
self.state.config.lock(); ( - guard.connect().endpoints().clone(), + guard + .connect() + .endpoints() + .client() + .unwrap_or(&vec![]) + .clone(), unwrap_or_default!(guard.scouting().multicast().enabled()), unwrap_or_default!(guard.scouting().multicast().address()), unwrap_or_default!(guard.scouting().multicast().interface()), @@ -168,27 +171,14 @@ impl Runtime { async fn start_peer(&self) -> ZResult<()> { let (listeners, peers, scouting, listen, autoconnect, addr, ifaces, delay, linkstate) = { let guard = &self.state.config.lock(); - let listeners = if guard.listen().endpoints().is_empty() { - let endpoint: EndPoint = PEER_DEFAULT_LISTENER.parse().unwrap(); - let protocol = endpoint.protocol(); - let mut listeners = vec![]; - if self - .state - .manager - .config - .protocols - .iter() - .any(|p| p.as_str() == protocol.as_str()) - { - listeners.push(endpoint) - } - listeners - } else { - guard.listen().endpoints().clone() - }; ( - listeners, - guard.connect().endpoints().clone(), + guard.listen().endpoints().peer().unwrap_or(&vec![]).clone(), + guard + .connect() + .endpoints() + .peer() + .unwrap_or(&vec![]) + .clone(), unwrap_or_default!(guard.scouting().multicast().enabled()), *unwrap_or_default!(guard.scouting().multicast().listen().peer()), *unwrap_or_default!(guard.scouting().multicast().autoconnect().peer()), @@ -223,27 +213,19 @@ impl Runtime { async fn start_router(&self) -> ZResult<()> { let (listeners, peers, scouting, listen, autoconnect, addr, ifaces, delay) = { let guard = self.state.config.lock(); - let listeners = if guard.listen().endpoints().is_empty() { - let endpoint: EndPoint = ROUTER_DEFAULT_LISTENER.parse().unwrap(); - let protocol = endpoint.protocol(); - let mut listeners = vec![]; - if self - .state - .manager - .config - .protocols - .iter() - .any(|p| p.as_str() == protocol.as_str()) - { - listeners.push(endpoint) - } - listeners - } else { - guard.listen().endpoints().clone() - }; ( - listeners, - guard.connect().endpoints().clone(), 
+ guard + .listen() + .endpoints() + .router() + .unwrap_or(&vec![]) + .clone(), + guard + .connect() + .endpoints() + .router() + .unwrap_or(&vec![]) + .clone(), unwrap_or_default!(guard.scouting().multicast().enabled()), *unwrap_or_default!(guard.scouting().multicast().listen().router()), *unwrap_or_default!(guard.scouting().multicast().autoconnect().router()), @@ -422,7 +404,16 @@ impl Runtime { } pub(crate) async fn update_peers(&self) -> ZResult<()> { - let peers = { self.state.config.lock().connect().endpoints().clone() }; + let peers = { + self.state + .config + .lock() + .connect() + .endpoints() + .get(self.state.whatami) + .unwrap_or(&vec![]) + .clone() + }; let transports = self.manager().get_transports_unicast().await; if self.state.whatami == WhatAmI::Client { @@ -1163,6 +1154,8 @@ impl Runtime { .lock() .connect() .endpoints() + .get(session.runtime.state.whatami) + .unwrap_or(&vec![]) .clone() }; if peers.contains(endpoint) { diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index bbadd0dcf3..d1790dc009 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -47,7 +47,11 @@ mod test { async fn get_basic_router_config() -> Config { let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()]; + config + .listen + .endpoints + .set(vec!["tcp/127.0.0.1:27447".parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config } diff --git a/zenoh/tests/authentication.rs b/zenoh/tests/authentication.rs index f8dcf74bc4..39daff0199 100644 --- a/zenoh/tests/authentication.rs +++ b/zenoh/tests/authentication.rs @@ -236,7 +236,11 @@ client2name:client2passwd"; let cert_path = TESTFILES_PATH.to_string_lossy(); let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec![format!("tls/127.0.0.1:{}", port).parse().unwrap()]; + config + .listen + .endpoints + 
.set(vec![format!("tls/127.0.0.1:{}", port).parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config .insert_json5( @@ -278,7 +282,11 @@ client2name:client2passwd"; let cert_path = TESTFILES_PATH.to_string_lossy(); let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec![format!("quic/127.0.0.1:{}", port).parse().unwrap()]; + config + .listen + .endpoints + .set(vec![format!("quic/127.0.0.1:{}", port).parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config .insert_json5( @@ -327,7 +335,11 @@ client2name:client2passwd"; async fn get_basic_router_config_usrpswd() -> Config { let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec!["tcp/127.0.0.1:37447".parse().unwrap()]; + config + .listen + .endpoints + .set(vec!["tcp/127.0.0.1:37447".parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config .insert_json5( diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index 9bee87f199..78814556f7 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -16,6 +16,7 @@ use zenoh::{ prelude::*, Config, }; +use zenoh_config::ModeDependent; #[test] fn retry_config_overriding() { @@ -74,7 +75,14 @@ fn retry_config_overriding() { }, ]; - for (i, endpoint) in config.listen().endpoints().iter().enumerate() { + for (i, endpoint) in config + .listen() + .endpoints() + .get(config.mode().unwrap_or_default()) + .unwrap_or(&vec![]) + .iter() + .enumerate() + { let retry_config = zenoh_config::get_retry_config(&config, Some(endpoint), true); assert_eq!(retry_config, expected[i]); } diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 267b30442f..c6931f1c2c 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -20,14 +20,26 @@ const TIMEOUT: Duration = 
Duration::from_secs(10); async fn open_session(listen: &[&str], connect: &[&str]) -> Session { let mut config = config::peer(); - config.listen.endpoints = listen - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); - config.connect.endpoints = connect - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); + config + .listen + .endpoints + .set( + listen + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) + .unwrap(); + config + .connect + .endpoints + .set( + connect + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Opening session"); ztimeout!(zenoh::open(config)).unwrap() diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 7b82b23814..1e5ef13799 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -56,8 +56,16 @@ fn build_config( .set_enabled(Some(false)) .unwrap(); - sub_config.listen.endpoints = vec![locator.parse().unwrap()]; - pub_config.connect.endpoints = vec![locator.parse().unwrap()]; + sub_config + .listen + .endpoints + .set(vec![locator.parse().unwrap()]) + .unwrap(); + pub_config + .connect + .endpoints + .set(vec![locator.parse().unwrap()]) + .unwrap(); match flow { InterceptorFlow::Egress => pub_config.set_downsampling(ds_config).unwrap(), diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index dbd850da24..72dab9bd29 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -32,7 +32,8 @@ async fn test_liveliness_subscriber_clique() { let peer1 = { let mut c = config::default(); c.listen - .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -44,7 +45,8 @@ async fn test_liveliness_subscriber_clique() { let peer2 = { let mut c = config::default(); c.connect - 
.set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -89,7 +91,8 @@ async fn test_liveliness_query_clique() { let peer1 = { let mut c = config::default(); c.listen - .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -101,7 +104,8 @@ async fn test_liveliness_query_clique() { let peer2 = { let mut c = config::default(); c.connect - .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -140,7 +144,8 @@ async fn test_liveliness_subscriber_brokered() { let _router = { let mut c = config::default(); c.listen - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Router)); @@ -152,7 +157,8 @@ async fn test_liveliness_subscriber_brokered() { let client1 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ -164,7 +170,8 @@ async fn test_liveliness_subscriber_brokered() { let client2 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ -209,7 +216,8 @@ 
async fn test_liveliness_query_brokered() { let _router = { let mut c = config::default(); c.listen - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Router)); @@ -221,7 +229,8 @@ async fn test_liveliness_query_brokered() { let client1 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ -233,7 +242,8 @@ async fn test_liveliness_query_brokered() { let client2 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index 13a05a268e..da0ba0a6d1 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -27,7 +27,8 @@ async fn create_session_pair(locator: &str) -> (Session, Session) { config.scouting.multicast.set_enabled(Some(false)).unwrap(); config .listen - .set_endpoints(vec![locator.parse().unwrap()]) + .endpoints + .set(vec![locator.parse().unwrap()]) .unwrap(); config }; diff --git a/zenoh/tests/open_time.rs b/zenoh/tests/open_time.rs index a6336e863a..7f1c2b2972 100644 --- a/zenoh/tests/open_time.rs +++ b/zenoh/tests/open_time.rs @@ -41,7 +41,8 @@ async fn time_open( router_config.set_mode(Some(WhatAmI::Router)).unwrap(); router_config .listen - .set_endpoints(vec![listen_endpoint.clone()]) + .endpoints + .set(vec![listen_endpoint.clone()]) .unwrap(); router_config .transport @@ -70,7 +71,8 @@ async fn time_open( app_config.set_mode(Some(connect_mode)).unwrap(); app_config .connect 
- .set_endpoints(vec![connect_endpoint.clone()]) + .endpoints + .set(vec![connect_endpoint.clone()]) .unwrap(); app_config .transport diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 2256455be5..fd680ae545 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -268,16 +268,20 @@ impl Recipe { let mut config = node.config.unwrap_or_default(); config.set_mode(Some(node.mode)).unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); - config - .listen - .set_endpoints(node.listen.iter().map(|x| x.parse().unwrap()).collect()) - .unwrap(); - config - .connect - .set_endpoints( - node.connect.iter().map(|x| x.parse().unwrap()).collect(), - ) - .unwrap(); + if !node.listen.is_empty() { + config + .listen + .endpoints + .set(node.listen.iter().map(|x| x.parse().unwrap()).collect()) + .unwrap(); + } + if !node.connect.is_empty() { + config + .connect + .endpoints + .set(node.connect.iter().map(|x| x.parse().unwrap()).collect()) + .unwrap(); + } config }; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 859ff43f7d..916b0c4fb3 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -36,19 +36,31 @@ const MSG_SIZE: [usize; 2] = [1_024, 100_000]; async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); + config + .listen + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Opening peer01 session: {:?}", endpoints); let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.connect.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); + config + .connect + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) 
+ .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][02a] Opening peer02 session: {:?}", endpoints); let peer02 = ztimeout!(zenoh::open(config)).unwrap(); @@ -59,13 +71,21 @@ async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, Session) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = vec![endpoint01.parse().unwrap()]; + config + .listen + .endpoints + .set(vec![endpoint01.parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(true)).unwrap(); println!("[ ][01a] Opening peer01 session: {}", endpoint01); let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.listen.endpoints = vec![endpoint02.parse().unwrap()]; + config + .listen + .endpoints + .set(vec![endpoint02.parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(true)).unwrap(); println!("[ ][02a] Opening peer02 session: {}", endpoint02); let peer02 = ztimeout!(zenoh::open(config)).unwrap(); @@ -266,20 +286,32 @@ async fn zenoh_session_multicast() { async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); + config + .listen + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Creating r1 session runtime: {:?}", endpoints); let mut r1 = RuntimeBuilder::new(config).build().await.unwrap(); r1.start().await.unwrap(); let mut config = config::peer(); - config.connect.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); + config + .connect + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) + 
.unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][02a] Creating r2 session runtime: {:?}", endpoints); let mut r2 = RuntimeBuilder::new(config).build().await.unwrap(); diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 33665913ed..e47de65812 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -41,20 +41,32 @@ const MSG_SIZE: [usize; 2] = [1_024, 100_000]; async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); + config + .listen + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][01a] Opening peer01 session: {:?}", endpoints); let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.connect.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); + config + .connect + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][02a] Opening peer02 session: {:?}", endpoints); @@ -66,14 +78,22 @@ async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, Session) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = vec![endpoint01.parse().unwrap()]; + config + .listen + .endpoints + .set(vec![endpoint01.parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(true)).unwrap(); config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][01a] Opening peer01 session: {}", 
endpoint01); let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.listen.endpoints = vec![endpoint02.parse().unwrap()]; + config + .listen + .endpoints + .set(vec![endpoint02.parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(true)).unwrap(); config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][02a] Opening peer02 session: {}", endpoint02); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index a89ddb4b04..49663249ad 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -38,23 +38,39 @@ const MSG_SIZE: [usize; 2] = [1_024, 100_000]; async fn open_p2p_sessions() -> (Session, Session, Session) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()]; + config + .listen + .endpoints + .set(vec!["tcp/127.0.0.1:27447".parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Opening s01 session"); let s01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.listen.endpoints = vec!["tcp/127.0.0.1:27448".parse().unwrap()]; - config.connect.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()]; + config + .listen + .endpoints + .set(vec!["tcp/127.0.0.1:27448".parse().unwrap()]) + .unwrap(); + config + .connect + .endpoints + .set(vec!["tcp/127.0.0.1:27447".parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][02a] Opening s02 session"); let s02 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.connect.endpoints = vec![ - "tcp/127.0.0.1:27447".parse().unwrap(), - "tcp/127.0.0.1:27448".parse().unwrap(), - ]; + config + .connect + .endpoints + .set(vec![ + "tcp/127.0.0.1:27447".parse().unwrap(), + "tcp/127.0.0.1:27448".parse().unwrap(), + ]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); 
println!("[ ][03a] Opening s03 session"); let s03 = ztimeout!(zenoh::open(config)).unwrap(); @@ -66,7 +82,11 @@ async fn open_router_session() -> Session { // Open the sessions let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec!["tcp/127.0.0.1:37447".parse().unwrap()]; + config + .listen + .endpoints + .set(vec!["tcp/127.0.0.1:37447".parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][00a] Opening router session"); ztimeout!(zenoh::open(config)).unwrap() diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index e69dd3d263..71fa0bce34 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -37,8 +37,6 @@ lazy_static::lazy_static!( static ref LONG_VERSION: String = format!("{} built with {}", GIT_VERSION, env!("RUSTC_VERSION")); ); -const DEFAULT_LISTENER: &str = "tcp/[::]:7447"; - #[derive(Debug, Parser)] #[command(version=GIT_VERSION, long_version=LONG_VERSION.as_str(), about="The zenoh router")] struct Args { @@ -168,7 +166,8 @@ fn config_from_args(args: &Args) -> Config { if !args.connect.is_empty() { config .connect - .set_endpoints( + .endpoints + .set( args.connect .iter() .map(|v| match v.parse::() { @@ -184,7 +183,8 @@ fn config_from_args(args: &Args) -> Config { if !args.listen.is_empty() { config .listen - .set_endpoints( + .endpoints + .set( args.listen .iter() .map(|v| match v.parse::() { @@ -197,12 +197,6 @@ fn config_from_args(args: &Args) -> Config { ) .unwrap(); } - if config.listen.endpoints.is_empty() { - config - .listen - .endpoints - .push(DEFAULT_LISTENER.parse().unwrap()) - } if args.no_timestamp { config .timestamping From 54d12e1b57b1aee2f65c92d43408e930f76e4690 Mon Sep 17 00:00:00 2001 From: Julien Enoch Date: Fri, 5 Jul 2024 09:49:04 +0200 Subject: [PATCH 18/29] Bump uhlc to 0.8.0 - changing formatting of Timestamps as Strings (#1218) * Bump uhlc to 0.8.0 * Remove irrelevant tests in storages replication * Convert 
Timestamps in some unit tests * Make cargo fmt happy --- Cargo.lock | 4 +- Cargo.toml | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 6 +- .../src/replica/digest.rs | 381 ------------------ .../tests/operations.rs | 9 +- .../tests/wildcard.rs | 15 +- 6 files changed, 14 insertions(+), 403 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5344e0135c..3438a2d2c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4715,9 +4715,9 @@ checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" [[package]] name = "uhlc" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99b6df3f3e948b40e20c38a6d1fd6d8f91b3573922fc164e068ad3331560487e" +checksum = "79ac3c37bd9506595768f0387bd39d644525728b4a1d783218acabfb56356db7" dependencies = [ "humantime", "lazy_static", diff --git a/Cargo.toml b/Cargo.toml index a1820cb495..ce8ae1643d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -171,7 +171,7 @@ tokio-rustls = { version = "0.26.0", default-features = false } # tokio-vsock = see: io/zenoh-links/zenoh-link-vsock/Cargo.toml (workspaces does not support platform dependent dependencies) thread-priority = "1.1.0" typenum = "1.16.0" -uhlc = { version = "0.7.0", default-features = false } # Default features are disabled due to usage in no_std crates +uhlc = { version = "0.8.0", default-features = false } # Default features are disabled due to usage in no_std crates unwrap-infallible = "0.1.5" unzip-n = "0.1.2" url = "2.3.1" diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index e3dcc0130e..d6db7c74cb 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -56,7 +56,7 @@ struct JSONSample { key: String, value: serde_json::Value, encoding: String, - time: Option, + timestamp: Option, } pub fn base64_encode(data: &[u8]) -> String { @@ -100,7 +100,7 @@ fn sample_to_json(sample: &Sample) -> JSONSample { key: 
sample.key_expr().as_str().to_string(), value: payload_to_json(sample.payload(), sample.encoding()), encoding: sample.encoding().to_string(), - time: sample.timestamp().map(|ts| ts.to_string()), + timestamp: sample.timestamp().map(|ts| ts.to_string()), } } @@ -111,7 +111,7 @@ fn result_to_json(sample: Result<&Sample, &ReplyError>) -> JSONSample { key: "ERROR".into(), value: payload_to_json(err.payload(), err.encoding()), encoding: err.encoding().to_string(), - time: None, + timestamp: None, }, } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs b/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs index bf06c61f25..07ba7e9ea3 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs @@ -833,384 +833,3 @@ impl Digest { } } } - -#[test] -fn test_create_digest_empty_initial() { - async_std::task::block_on(async { - zenoh::internal::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - Vec::new(), - 1671612730, - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 0, - eras: HashMap::new(), - intervals: HashMap::new(), - subintervals: HashMap::new(), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_create_digest_with_initial_hot() { - async_std::task::block_on(async { - zenoh::internal::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - vec![LogEntry { - timestamp: 
Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }], - 1671634800, - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 6001159706341373391, - eras: HashMap::from([( - EraType::Hot, - Interval { - checksum: 4598971083408074426, - content: BTreeSet::from([1671634800]), - }, - )]), - intervals: HashMap::from([( - 1671634800, - Interval { - checksum: 8436018757196527319, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10827088509365589085, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_create_digest_with_initial_warm() { - async_std::task::block_on(async { - zenoh::internal::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - vec![LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }], - 1671634810, - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 6001159706341373391, - eras: HashMap::from([( - EraType::Warm, - Interval { - checksum: 4598971083408074426, - content: BTreeSet::from([1671634800]), - }, - )]), - intervals: HashMap::from([( - 1671634800, - Interval { - 
checksum: 8436018757196527319, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10827088509365589085, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_create_digest_with_initial_cold() { - async_std::task::block_on(async { - zenoh::internal::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - vec![LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }], - 1671634910, - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 6001159706341373391, - eras: HashMap::from([( - EraType::Cold, - Interval { - checksum: 4598971083408074426, - content: BTreeSet::from([1671634800]), - }, - )]), - intervals: HashMap::from([( - 1671634800, - Interval { - checksum: 8436018757196527319, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10827088509365589085, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_update_digest_add_content() { - async_std::task::block_on(async { - zenoh::internal::zasync_executor_init!(); - }); - let created = Digest::update_digest( - 
Digest { - timestamp: Timestamp::from_str("2022-12-21T13:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 0, - eras: HashMap::new(), - intervals: HashMap::new(), - subintervals: HashMap::new(), - }, - 1671634910, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - HashSet::new(), - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 6001159706341373391, - eras: HashMap::from([( - EraType::Cold, - Interval { - checksum: 4598971083408074426, - content: BTreeSet::from([1671634800]), - }, - )]), - intervals: HashMap::from([( - 1671634800, - Interval { - checksum: 8436018757196527319, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10827088509365589085, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_update_digest_remove_content() { - async_std::task::block_on(async { - zenoh::internal::zasync_executor_init!(); - }); - let created = Digest::update_digest( - Digest { - timestamp: Timestamp::from_str("2022-12-21T13:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 3304302629246049840, - eras: HashMap::from([( - EraType::Cold, - Interval { - checksum: 8238986480495191270, - content: BTreeSet::from([1671634800]), - }, - 
)]), - intervals: HashMap::from([( - 1671634800, - Interval { - checksum: 12344398372324783476, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10007212639402189432, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }, - 1671634910, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::new(), - HashSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 0, - eras: HashMap::new(), - intervals: HashMap::new(), - subintervals: HashMap::new(), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_update_remove_digest() { - async_std::task::block_on(async { - zenoh::internal::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - Vec::new(), - 1671612730, - ); - let added = Digest::update_digest( - created.clone(), - 1671612730, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T12:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("a/b/c").unwrap(), - }]), - HashSet::new(), - ); - assert_ne!(created, added); - - let removed = Digest::update_digest( - added.clone(), - 1671612730, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::new(), - HashSet::from([LogEntry { - 
timestamp: Timestamp::from_str("2022-12-21T12:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("a/b/c").unwrap(), - }]), - ); - assert_eq!(created, removed); - - let added_again = Digest::update_digest( - removed, - 1671612730, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T12:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("a/b/c").unwrap(), - }]), - HashSet::new(), - ); - assert_eq!(added, added_again); -} diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 505634e6fb..c1ed09b1a7 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -86,8 +86,7 @@ async fn test_updates_in_order() { &session, "operation/test/a", "1", - Timestamp::from_str("2022-01-17T10:42:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123566570568799/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -102,8 +101,7 @@ async fn test_updates_in_order() { &session, "operation/test/b", "2", - Timestamp::from_str("2022-01-17T10:43:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123824268606559/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -117,8 +115,7 @@ async fn test_updates_in_order() { delete_data( &session, "operation/test/a", - Timestamp::from_str("2022-01-17T10:43:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123824268606559/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 04e4549508..d6e94ecb1f 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -87,8 +87,7 @@ 
async fn test_wild_card_in_order() { &session, "wild/test/*", "1", - Timestamp::from_str("2022-01-17T10:42:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123566570568799/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -102,8 +101,7 @@ async fn test_wild_card_in_order() { &session, "wild/test/a", "2", - Timestamp::from_str("2022-01-17T10:42:11.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123570865536095/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -119,8 +117,7 @@ async fn test_wild_card_in_order() { &session, "wild/test/b", "3", - Timestamp::from_str("2022-01-17T10:42:11.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123570865536095/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -150,8 +147,7 @@ async fn test_wild_card_in_order() { &session, "wild/test/*", "4", - Timestamp::from_str("2022-01-17T10:43:12.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123832858541151/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -168,8 +164,7 @@ async fn test_wild_card_in_order() { delete_data( &session, "wild/test/*", - Timestamp::from_str("2022-01-17T13:43:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054170209915403359/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; From bf94b9bb137557c722382cb7ff329ed89111018f Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 5 Jul 2024 10:50:19 +0200 Subject: [PATCH 19/29] Add establishment trace logs --- .../src/unicast/establishment/accept.rs | 31 ++++++++++++++++--- .../src/unicast/establishment/open.rs | 12 +++++-- 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index 3f71d7b6da..64949357c6 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ 
b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -163,6 +163,12 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { .await .map_err(|e| (e, Some(close::reason::INVALID)))?; + tracing::trace!( + "Establishment Accept InitSyn: {}. Received: {:?}", + self.link, + msg + ); + let init_syn = match msg.body { TransportBody::InitSyn(init_syn) => init_syn, _ => { @@ -362,7 +368,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { let cookie: ZSlice = encrypted.into(); // Send the message on the link - let message: TransportMessage = InitAck { + let msg: TransportMessage = InitAck { version: input.mine_version, whatami: input.mine_whatami, zid: input.mine_zid, @@ -381,10 +387,16 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { let _ = self .link - .send(&message) + .send(&msg) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; + tracing::trace!( + "Establishment Accept InitAck: {}. Sent: {:?}", + self.link, + msg + ); + let output = SendInitAckOut { cookie_nonce, #[cfg(feature = "shared-memory")] @@ -405,6 +417,12 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { .await .map_err(|e| (e, Some(close::reason::INVALID)))?; + tracing::trace!( + "Establishment Accept OpenSyn: {}. Received: {:?}", + self.link, + msg + ); + let open_syn = match msg.body { TransportBody::OpenSyn(open_syn) => open_syn, TransportBody::Close(Close { reason, .. }) => { @@ -594,7 +612,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Build OpenAck message let mine_initial_sn = compute_sn(input.mine_zid, input.other_zid, state.transport.resolution); - let open_ack = OpenAck { + let msg = OpenAck { lease: input.mine_lease, initial_sn: mine_initial_sn, ext_qos, @@ -607,8 +625,13 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { }; // Do not send the OpenAck right now since we might still incur in MAX_LINKS error + tracing::trace!( + "Establishment Accept OpenAck: {}. 
Sent: {:?}", + self.link, + msg + ); - let output = SendOpenAckOut { open_ack }; + let output = SendOpenAckOut { open_ack: msg }; Ok(output) } } diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index 9f6f2e61a7..a9e797228e 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -208,6 +208,8 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { } .into(); + tracing::trace!("Establishment Open InitSyn: {}. Sent: {:?}", link, msg); + let _ = link .send(&msg) .await @@ -229,6 +231,8 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .await .map_err(|e| (e, Some(close::reason::INVALID)))?; + tracing::trace!("Establishment Open InitAck: {}. Received: {:?}", link, msg); + let init_ack = match msg.body { TransportBody::InitAck(init_ack) => init_ack, TransportBody::Close(Close { reason, .. }) => { @@ -414,7 +418,7 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { // Build and send an OpenSyn message let mine_initial_sn = compute_sn(input.mine_zid, input.other_zid, state.transport.resolution); - let message: TransportMessage = OpenSyn { + let msg: TransportMessage = OpenSyn { lease: input.mine_lease, initial_sn: mine_initial_sn, cookie: input.other_cookie, @@ -429,10 +433,12 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .into(); let _ = link - .send(&message) + .send(&msg) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; + tracing::trace!("Establishment Open OpenSyn: {}. Sent: {:?}", link, msg); + let output = SendOpenSynOut { mine_initial_sn, #[cfg(feature = "shared-memory")] @@ -454,6 +460,8 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .await .map_err(|e| (e, Some(close::reason::INVALID)))?; + tracing::trace!("Establishment Open OpenAck: {}. Received: {:?}", link, msg); + let open_ack = match msg.body { TransportBody::OpenAck(open_ack) => open_ack, TransportBody::Close(Close { reason, .. 
}) => { From c7e418f526277c3aa8f7810a006264208659772e Mon Sep 17 00:00:00 2001 From: J-Loudet Date: Fri, 5 Jul 2024 11:22:26 +0200 Subject: [PATCH 20/29] fix: typos (#1220) * zenoh/src/api/handlers/ring.rs: synchrounous -> synchronous * zenoh/src/api/selector.rs: intendend -> intended Signed-off-by: Julien Loudet --- zenoh/src/api/handlers/ring.rs | 2 +- zenoh/src/api/selector.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/handlers/ring.rs b/zenoh/src/api/handlers/ring.rs index 18ca6f495c..7b058d1905 100644 --- a/zenoh/src/api/handlers/ring.rs +++ b/zenoh/src/api/handlers/ring.rs @@ -24,7 +24,7 @@ use zenoh_result::ZResult; use super::{callback::Callback, Dyn, IntoHandler}; use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; -/// A synchrounous ring channel with a limited size that allows users to keep the last N data. +/// A synchronous ring channel with a limited size that allows users to keep the last N data. pub struct RingChannel { capacity: usize, } diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 813ae0528d..d7b7466be2 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -26,7 +26,7 @@ use super::{key_expr::KeyExpr, queryable::Query}; /// A selector is the combination of a [Key Expression](crate::key_expr::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters -/// with a few intendend uses: +/// with a few intended uses: /// - specifying arguments to a queryable, allowing the passing of Remote Procedure Call parameters /// - filtering by value, /// - filtering by metadata, such as the timestamp of a value, From 47f5347c0aff93e61bdaa28b1c89425f04260ec3 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 5 Jul 2024 12:31:56 +0200 Subject: [PATCH 21/29] Compute TCP/TLS MTU upon link creation --- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 55 ++++++++++---------- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 54 
+++++++++---------- 2 files changed, 53 insertions(+), 56 deletions(-) diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 99d2d44c36..7532055f8e 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -43,6 +43,8 @@ pub struct LinkUnicastTcp { // The destination socket address of this link (address used on the remote host) dst_addr: SocketAddr, dst_locator: Locator, + // The computed mtu + mtu: BatchSize, } unsafe impl Sync for LinkUnicastTcp {} @@ -71,6 +73,29 @@ impl LinkUnicastTcp { ); } + // Compute the MTU + // See IETF RFC6691: https://datatracker.ietf.org/doc/rfc6691/ + let header = match src_addr.ip() { + std::net::IpAddr::V4(_) => 40, + std::net::IpAddr::V6(_) => 60, + }; + #[allow(unused_mut)] // mut is not needed when target_family != unix + let mut mtu = *TCP_DEFAULT_MTU - header; + + // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 + #[cfg(target_family = "unix")] + { + let socket = socket2::SockRef::from(&socket); + // Get the MSS and divide it by 2 to ensure we can at least fill half the MSS + let mss = socket.mss().unwrap_or(mtu as u32) / 2; + // Compute largest multiple of TCP MSS that is smaller of default MTU + let mut tgt = mss; + while (tgt + mss) < mtu as u32 { + tgt += mss; + } + mtu = (mtu as u32).min(tgt) as BatchSize; + } + // Build the Tcp object LinkUnicastTcp { socket: UnsafeCell::new(socket), @@ -78,14 +103,10 @@ impl LinkUnicastTcp { src_locator: Locator::new(TCP_LOCATOR_PREFIX, src_addr.to_string(), "").unwrap(), dst_addr, dst_locator: Locator::new(TCP_LOCATOR_PREFIX, dst_addr.to_string(), "").unwrap(), + mtu, } } - #[cfg(target_family = "unix")] - fn get_socket(&self) -> &TcpStream { - unsafe { &*self.socket.get() } - } - #[allow(clippy::mut_from_ref)] fn get_mut_socket(&self) -> &mut TcpStream { unsafe { &mut *self.socket.get() } @@ -153,29 +174,7 @@ impl 
LinkUnicastTrait for LinkUnicastTcp { #[inline(always)] fn get_mtu(&self) -> BatchSize { - // See IETF RFC6691: https://datatracker.ietf.org/doc/rfc6691/ - let header = match self.src_addr.ip() { - std::net::IpAddr::V4(_) => 40, - std::net::IpAddr::V6(_) => 60, - }; - #[allow(unused_mut)] // mut is not needed when target_family != unix - let mut mtu = *TCP_DEFAULT_MTU - header; - - // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 - #[cfg(target_family = "unix")] - { - let socket = socket2::SockRef::from(self.get_socket()); - // Get the MSS and divide it by 2 to ensure we can at least fill half the MSS - let mss = socket.mss().unwrap_or(mtu as u32) / 2; - // Compute largest multiple of TCP MSS that is smaller of default MTU - let mut tgt = mss; - while (tgt + mss) < mtu as u32 { - tgt += mss; - } - mtu = (mtu as u32).min(tgt) as BatchSize; - } - - mtu + self.mtu } #[inline(always)] diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 4ab21d9993..716eac2121 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -61,6 +61,7 @@ pub struct LinkUnicastTls { write_mtx: AsyncMutex<()>, read_mtx: AsyncMutex<()>, auth_identifier: LinkAuthId, + mtu: BatchSize, } unsafe impl Send for LinkUnicastTls {} @@ -96,6 +97,29 @@ impl LinkUnicastTls { ); } + // Compute the MTU + // See IETF RFC6691: https://datatracker.ietf.org/doc/rfc6691/ + let header = match src_addr.ip() { + std::net::IpAddr::V4(_) => 40, + std::net::IpAddr::V6(_) => 60, + }; + #[allow(unused_mut)] // mut is not needed when target_family != unix + let mut mtu = *TLS_DEFAULT_MTU - header; + + // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 + #[cfg(target_family = "unix")] + { + let socket = socket2::SockRef::from(&tcp_stream); + // Get the MSS and divide it by 2 to ensure we can at least fill half the MSS + 
let mss = socket.mss().unwrap_or(mtu as u32) / 2; + // Compute largest multiple of TCP MSS that is smaller of default MTU + let mut tgt = mss; + while (tgt + mss) < mtu as u32 { + tgt += mss; + } + mtu = (mtu as u32).min(tgt) as BatchSize; + } + // Build the Tls object LinkUnicastTls { inner: UnsafeCell::new(socket), @@ -106,14 +130,10 @@ impl LinkUnicastTls { write_mtx: AsyncMutex::new(()), read_mtx: AsyncMutex::new(()), auth_identifier, + mtu, } } - #[cfg(target_family = "unix")] - fn get_socket(&self) -> &TlsStream { - unsafe { &*self.inner.get() } - } - // NOTE: It is safe to suppress Clippy warning since no concurrent reads // or concurrent writes will ever happen. The read_mtx and write_mtx // are respectively acquired in any read and write operation. @@ -188,29 +208,7 @@ impl LinkUnicastTrait for LinkUnicastTls { #[inline(always)] fn get_mtu(&self) -> BatchSize { - // See IETF RFC6691: https://datatracker.ietf.org/doc/rfc6691/ - let header = match self.src_addr.ip() { - std::net::IpAddr::V4(_) => 40, - std::net::IpAddr::V6(_) => 60, - }; - #[allow(unused_mut)] // mut is not needed when target_family != unix - let mut mtu = *TLS_DEFAULT_MTU - header; - - // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 - #[cfg(target_family = "unix")] - { - let socket = socket2::SockRef::from(self.get_socket().get_ref().0); - // Get the MSS and divide it by 2 to ensure we can at least fill half the MSS - let mss = socket.mss().unwrap_or(mtu as u32) / 2; - // Compute largest multiple of TCP MSS that is smaller of default MTU - let mut tgt = mss; - while (tgt + mss) < mtu as u32 { - tgt += mss; - } - mtu = (mtu as u32).min(tgt) as BatchSize; - } - - mtu + self.mtu } #[inline(always)] From 2b2064cf180c86d8869a55fa49ce073ae880588c Mon Sep 17 00:00:00 2001 From: J-Loudet Date: Fri, 5 Jul 2024 11:22:26 +0200 Subject: [PATCH 22/29] fix: typos (#1220) * zenoh/src/api/handlers/ring.rs: synchrounous -> synchronous * 
zenoh/src/api/selector.rs: intendend -> intended Signed-off-by: Julien Loudet --- zenoh/src/api/handlers/ring.rs | 2 +- zenoh/src/api/selector.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/handlers/ring.rs b/zenoh/src/api/handlers/ring.rs index 18ca6f495c..7b058d1905 100644 --- a/zenoh/src/api/handlers/ring.rs +++ b/zenoh/src/api/handlers/ring.rs @@ -24,7 +24,7 @@ use zenoh_result::ZResult; use super::{callback::Callback, Dyn, IntoHandler}; use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; -/// A synchrounous ring channel with a limited size that allows users to keep the last N data. +/// A synchronous ring channel with a limited size that allows users to keep the last N data. pub struct RingChannel { capacity: usize, } diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 813ae0528d..d7b7466be2 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -26,7 +26,7 @@ use super::{key_expr::KeyExpr, queryable::Query}; /// A selector is the combination of a [Key Expression](crate::key_expr::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters -/// with a few intendend uses: +/// with a few intended uses: /// - specifying arguments to a queryable, allowing the passing of Remote Procedure Call parameters /// - filtering by value, /// - filtering by metadata, such as the timestamp of a value, From 55557f943d97dc3db8b428cae930e98b163f988d Mon Sep 17 00:00:00 2001 From: Charles Schleich Date: Fri, 5 Jul 2024 13:59:59 +0000 Subject: [PATCH 23/29] =?UTF-8?q?remove=20`new=5Ftimestamp`=20fn=20,=20`ti?= =?UTF-8?q?me`=20module,=20reworked=20plugin=20storage=20=E2=80=A6=20(#118?= =?UTF-8?q?8)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: ChenYing Kuo Co-authored-by: Luca Cominardi Co-authored-by: ChenYing Kuo (CY) --- .../src/replica/mod.rs | 6 ++--- .../src/replica/snapshotter.rs | 22 
+++++++++-------- .../src/replica/storage.rs | 7 ++---- zenoh-ext/src/querying_subscriber.rs | 14 +++++++---- zenoh/src/api/mod.rs | 1 - zenoh/src/api/time.rs | 24 ------------------- zenoh/src/lib.rs | 2 -- 7 files changed, 27 insertions(+), 49 deletions(-) delete mode 100644 zenoh/src/api/time.rs diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 114e5c206b..014fdc697e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -112,11 +112,10 @@ impl Replica { }; // Zid of session for generating timestamps - let zid = session.zid(); let replica = Replica { name: name.to_string(), - session, + session: session.clone(), key_expr: storage_config.key_expr.clone(), replica_config: storage_config.replica_config.clone().unwrap(), digests_published: RwLock::new(HashSet::new()), @@ -131,7 +130,8 @@ impl Replica { let config = replica.replica_config.clone(); // snapshotter - let snapshotter = Arc::new(Snapshotter::new(zid, rx_log, &startup_entries, &config).await); + let snapshotter = + Arc::new(Snapshotter::new(session, rx_log, &startup_entries, &config).await); // digest sub let digest_sub = replica.start_digest_sub(tx_digest).fuse(); // queryable for alignment diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs index 6bb2cf113b..190cf6005b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs @@ -24,14 +24,14 @@ use async_std::{ }; use flume::Receiver; use futures::join; -use zenoh::{key_expr::OwnedKeyExpr, session::ZenohId, time::Timestamp}; +use zenoh::{key_expr::OwnedKeyExpr, time::Timestamp, Session}; use zenoh_backend_traits::config::ReplicaConfig; use super::{Digest, DigestConfig, LogEntry}; pub struct Snapshotter { - // session 
id for timestamp generation - id: ZenohId, + // session ref for timestamp generation + session: Arc, // channel to get updates from the storage storage_update: Receiver<(OwnedKeyExpr, Timestamp)>, // configuration parameters of the replica @@ -57,7 +57,7 @@ pub struct ReplicationInfo { impl Snapshotter { // Initialize the snapshot parameters, logs and digest pub async fn new( - id: ZenohId, + session: Arc, rx_sample: Receiver<(OwnedKeyExpr, Timestamp)>, initial_entries: &Vec<(OwnedKeyExpr, Timestamp)>, replica_config: &ReplicaConfig, @@ -66,12 +66,12 @@ impl Snapshotter { // from initial entries, populate the log - stable and volatile // compute digest let (last_snapshot_time, last_interval) = Snapshotter::compute_snapshot_params( - id, + session.clone(), replica_config.propagation_delay, replica_config.delta, ); let snapshotter = Snapshotter { - id, + session, storage_update: rx_sample, replica_config: replica_config.clone(), content: ReplicationInfo { @@ -131,7 +131,7 @@ impl Snapshotter { let mut last_snapshot_time = self.content.last_snapshot_time.write().await; let mut last_interval = self.content.last_interval.write().await; let (time, interval) = Snapshotter::compute_snapshot_params( - self.id, + self.session.clone(), self.replica_config.propagation_delay, self.replica_config.delta, ); @@ -143,13 +143,15 @@ impl Snapshotter { } } + // TODO // Compute latest snapshot time and latest interval with respect to the current time pub fn compute_snapshot_params( - id: ZenohId, + session: Arc, propagation_delay: Duration, delta: Duration, ) -> (Timestamp, u64) { - let now = zenoh::time::new_timestamp(id); + let now = session.new_timestamp(); + let latest_interval = (now .get_time() .to_system_time() @@ -206,7 +208,7 @@ impl Snapshotter { // Create digest from the stable log at startup async fn initialize_digest(&self) { - let now = zenoh::time::new_timestamp(self.id); + let now = self.session.new_timestamp(); let replica_data = &self.content; let log_locked = 
replica_data.stable_log.read().await; let latest_interval = replica_data.last_interval.read().await; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index f926417743..17be005f08 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -37,7 +37,7 @@ use zenoh::{ query::{ConsolidationMode, QueryTarget}, sample::{Sample, SampleBuilder, SampleKind, TimestampBuilderTrait}, session::{Session, SessionDeclarations}, - time::{new_timestamp, Timestamp, NTP64}, + time::{Timestamp, NTP64}, Result as ZResult, }; use zenoh_backend_traits::{ @@ -148,9 +148,6 @@ impl StorageService { ); t.add_async(gc).await; - // get session id for timestamp generation - let zid = self.session.info().zid().await; - // subscribe on key_expr let storage_sub = match self.session.declare_subscriber(&self.key_expr).await { Ok(storage_sub) => storage_sub, @@ -240,7 +237,7 @@ impl StorageService { continue; } }; - let timestamp = sample.timestamp().cloned().unwrap_or(new_timestamp(zid)); + let timestamp = sample.timestamp().cloned().unwrap_or(self.session.new_timestamp()); let sample = SampleBuilder::from(sample).timestamp(timestamp).into(); self.process_sample(sample).await; }, diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index baf486601d..6134e4d2d7 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -17,7 +17,7 @@ use std::{ future::{IntoFuture, Ready}, mem::swap, sync::{Arc, Mutex}, - time::Duration, + time::{Duration, SystemTime, UNIX_EPOCH}, }; use zenoh::{ @@ -29,7 +29,7 @@ use zenoh::{ query::{QueryConsolidation, QueryTarget, ReplyKeyExpr, Selector}, sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}, session::{SessionDeclarations, SessionRef}, - time::{new_timestamp, Timestamp}, + time::Timestamp, Error, Resolvable, Resolve, 
Result as ZResult, }; @@ -654,7 +654,8 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { InputHandler: IntoHandler<'static, Sample, Handler = Handler> + Send, TryIntoSample: ExtractSample + Send + Sync, { - let zid = conf.session.zid(); + let session_id = conf.session.zid(); + let state = Arc::new(Mutex::new(InnerState { pending_fetches: 0, merge_queue: MergeQueue::new(), @@ -672,9 +673,14 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { tracing::trace!( "Sample received while fetch in progress: push it to merge_queue" ); + // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. - let timestamp = s.timestamp().cloned().unwrap_or(new_timestamp(zid)); + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().into(); // UNIX_EPOCH is Returns a Timespec::zero(), Unwrap Should be permissable here + let timestamp = s + .timestamp() + .cloned() + .unwrap_or(Timestamp::new(now, session_id.into())); state .merge_queue .push(SampleBuilder::from(s).timestamp(timestamp).into()); diff --git a/zenoh/src/api/mod.rs b/zenoh/src/api/mod.rs index 91ae6bed67..d3053cb3c9 100644 --- a/zenoh/src/api/mod.rs +++ b/zenoh/src/api/mod.rs @@ -35,5 +35,4 @@ pub(crate) mod scouting; pub(crate) mod selector; pub(crate) mod session; pub(crate) mod subscriber; -pub(crate) mod time; pub(crate) mod value; diff --git a/zenoh/src/api/time.rs b/zenoh/src/api/time.rs deleted file mode 100644 index 1879143389..0000000000 --- a/zenoh/src/api/time.rs +++ /dev/null @@ -1,24 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use std::time::{SystemTime, UNIX_EPOCH}; - -use zenoh_protocol::core::{Timestamp, TimestampId}; - -// TODO: Shall we remove this new_timestamp in favoir of the src/api/session::Session::new_timestamp(); -/// Generates a [`Timestamp`] with [`TimestampId`] and current system time -/// The [`TimestampId`] can be taken from session id returned by [`SessionInfo::zid()`](crate::api::info::SessionInfo::zid). -pub fn new_timestamp>(id: T) -> Timestamp { - let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().into(); - Timestamp::new(now, id.into()) -} diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 77db49f525..024c1303af 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -353,8 +353,6 @@ pub mod liveliness { /// Timestamp support pub mod time { pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; - - pub use crate::api::time::new_timestamp; } /// Configuration to pass to [`open`] and [`scout`] functions and associated constants From b2df2b711e12a20e3d0950bc4af019a4893842d6 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 5 Jul 2024 17:05:33 +0200 Subject: [PATCH 24/29] Make adminspace return current metadata (#1221) --- zenoh/src/net/runtime/adminspace.rs | 5 +---- zenoh/src/net/runtime/mod.rs | 3 --- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 26807e8907..e2dad5c844 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -57,7 +57,6 @@ use crate::{ pub struct AdminContext { runtime: Runtime, version: String, - metadata: serde_json::Value, } type Handler = Arc; @@ -153,7 +152,6 @@ impl AdminSpace { let zid_str = runtime.state.zid.to_string(); let whatami_str = runtime.state.whatami.to_str(); let mut config = runtime.config().lock(); - let metadata = runtime.state.metadata.clone(); let root_key: 
OwnedKeyExpr = format!("@/{whatami_str}/{zid_str}").try_into().unwrap(); let mut handlers: HashMap<_, Handler> = HashMap::new(); @@ -221,7 +219,6 @@ impl AdminSpace { let context = Arc::new(AdminContext { runtime: runtime.clone(), version, - metadata, }); let admin = Arc::new(AdminSpace { zid: runtime.zid(), @@ -601,7 +598,7 @@ fn local_data(context: &AdminContext, query: Query) { let mut json = json!({ "zid": context.runtime.state.zid, "version": context.version, - "metadata": context.metadata, + "metadata": context.runtime.config().lock().metadata(), "locators": locators, "sessions": transports, "plugins": plugins, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 4f3c6974f7..b7ba0d11da 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -70,7 +70,6 @@ pub(crate) struct RuntimeState { zid: ZenohId, whatami: WhatAmI, next_id: AtomicU32, - metadata: serde_json::Value, router: Arc, config: Notifier, manager: TransportManager, @@ -138,7 +137,6 @@ impl RuntimeBuilder { tracing::info!("Using ZID: {}", zid); let whatami = unwrap_or_default!(config.mode()); - let metadata = config.metadata().clone(); let hlc = (*unwrap_or_default!(config.timestamping().enabled().get(whatami))) .then(|| Arc::new(HLCBuilder::new().with_id(uhlc::ID::from(&zid)).build())); @@ -179,7 +177,6 @@ impl RuntimeBuilder { zid: zid.into(), whatami, next_id: AtomicU32::new(1), // 0 is reserved for routing core - metadata, router, config: config.clone(), manager: transport_manager, From cae8697e7da3f5d3ec75d0dd92d6180196b00efc Mon Sep 17 00:00:00 2001 From: Charles Schleich Date: Fri, 5 Jul 2024 15:09:30 +0000 Subject: [PATCH 25/29] simplify timestamp id, remove allocation. 
(#1223) --- zenoh/src/api/session.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 2c50560d77..f5890edc3a 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -675,9 +675,15 @@ impl Session { /// # } /// ``` pub fn new_timestamp(&self) -> Timestamp { - let id = self.runtime.zid(); - let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().into(); // UNIX_EPOCH is Returns a Timespec::zero(), Unwrap Should be permissable here - Timestamp::new(now, id.into()) + match self.hlc() { + Some(hlc) => hlc.new_timestamp(), + None => { + // Called in the case that the runtime is not initialized with an hlc + // UNIX_EPOCH is Returns a Timespec::zero(), Unwrap Should be permissable here + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().into(); + Timestamp::new(now, self.runtime.zid().into()) + } + } } } From 418b5a628c7587815eae593a8df32aa5e9f45983 Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Fri, 5 Jul 2024 22:03:38 +0200 Subject: [PATCH 26/29] Fix typos ("nof" -> "not") (#1227) --- zenoh/src/net/routing/dispatcher/queries.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 6ce9046a4a..445f138d8d 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -722,7 +722,7 @@ pub(crate) fn route_send_response( )); } None => tracing::warn!( - "Route reply {}:{} from {}: Query nof found!", + "Route reply {}:{} from {}: Query not found!", face, qid, face @@ -748,7 +748,7 @@ pub(crate) fn route_send_response_final( finalize_pending_query(query); } None => tracing::warn!( - "Route final reply {}:{} from {}: Query nof found!", + "Route final reply {}:{} from {}: Query not found!", face, qid, face From 6df74c7bebe216ebfe792972052cfc16f98b0e99 Mon Sep 17 00:00:00 2001 From: J-Loudet 
Date: Sun, 7 Jul 2024 10:40:31 +0200 Subject: [PATCH 27/29] fix(storage-manager): do not start when 'timestamping' is disabled (#1219) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All storage must have a timestamp associated with a Sample. As it is possible to publish without adding a timestamp, it means that a Zenoh node must add this timestamp "at some point". Up until now, the default configuration of a router ('timestamping' enabled) combined with the fact that only routers could load plugins (and, thus, storage) made it so that a timestamp was (by default) always added. Recent changes in Zenoh — namely the fact that not only routers can load plugins and that peers and client have, by default, the 'timestamping' configuration disabled — invalidate these assumptions. We should then enforce at runtime, that the 'timestamping' configuration is enabled when attempting to load the storage manager. This commit adds this check by verifying that there is an HLC associated with the Zenoh Session — the HLC is only created if 'timestamping' is enabled (see `zenoh/zenoh/src/net/runtime/mod.rs::142`). * plugins/zenoh-plugin-storage-manager/src/lib.rs: return an error if the storage manager is started while the configuration option 'timestamping' is disabled. * plugins/zenoh-plugin-storage-manager/tests/operations.rs: updated the `config` used in the test to enable 'timestamping'. * plugins/zenoh-plugin-storage-manager/tests/wildcard.rs: updated the `config` used in the test to enable 'timestamping'. 
Signed-off-by: Julien Loudet --- plugins/zenoh-plugin-storage-manager/src/lib.rs | 17 +++++++++++++++++ .../tests/operations.rs | 12 ++++++++++++ .../tests/wildcard.rs | 12 ++++++++++++ 3 files changed, 41 insertions(+) diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index c916b649d9..7399d3e507 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -31,6 +31,7 @@ use memory_backend::MemoryBackend; use storages_mgt::StorageMessage; use zenoh::{ internal::{ + bail, plugins::{Response, RunningPlugin, RunningPluginTrait, ZenohPlugin}, runtime::Runtime, zlock, LibLoader, @@ -120,6 +121,22 @@ impl StorageRuntimeInner { let session = Arc::new(zenoh::session::init(runtime.clone()).wait()?); + // NOTE: All storage **must** have a timestamp associated with a Sample. Considering that it is possible to make + // a publication without associating a timestamp, that means that the node managing the storage (be it a + // Zenoh client / peer / router) has to add it. + // + // If the `timestamping` configuration setting is disabled then there is no HLC associated with the + // Session. That eventually means that no timestamp can be generated which goes against the previous + // requirement. + // + // Hence, in that scenario, we refuse to start the storage manager and any storage. + if session.hlc().is_none() { + tracing::error!( + "Cannot start storage manager (and thus any storage) without the 'timestamping' setting enabled in the Zenoh configuration" + ); + bail!("Cannot start storage manager, 'timestamping' is disabled in the configuration"); + } + // After this moment result should be only Ok. Failure of loading of one voulme or storage should not affect others. 
let mut new_self = StorageRuntimeInner { diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index c1ed09b1a7..d8ada83e4c 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -70,6 +70,18 @@ async fn test_updates_in_order() { }"#, ) .unwrap(); + config + .insert_json5( + "timestamping", + r#"{ + enabled: { + router: true, + peer: true, + client: true + } + }"#, + ) + .unwrap(); let runtime = zenoh::internal::runtime::RuntimeBuilder::new(config) .build() diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index d6e94ecb1f..d1633a28d4 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -71,6 +71,18 @@ async fn test_wild_card_in_order() { }"#, ) .unwrap(); + config + .insert_json5( + "timestamping", + r#"{ + enabled: { + router: true, + peer: true, + client: true + } + }"#, + ) + .unwrap(); let runtime = zenoh::internal::runtime::RuntimeBuilder::new(config) .build() From 12b11ee6f2f1a9390dccfeb3633be2f88b568a50 Mon Sep 17 00:00:00 2001 From: J-Loudet Date: Mon, 8 Jul 2024 15:08:09 +0200 Subject: [PATCH 28/29] fix(storage-manager): validate presence of timestamp (#1229) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit introduces checks before accessing the `timestamp` associated with a Sample — instead of calling `unwrap()`. In theory, a Sample should never arrive to a Storage without a Timestamp. In practice, we cannot guarantee this invariant with certainty (future modifications of the code base?). With these checks, the Storage will simply discard the Sample instead of panicking the entire storage manager. 
* plugins/zenoh-plugin-storage-manager/src/replica/storage.rs: add checks when accessing the timestamp and remove `unwrap`. Signed-off-by: Julien Loudet --- .../src/replica/storage.rs | 41 +++++++++++++------ 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 17be005f08..d12b51042c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -271,6 +271,17 @@ impl StorageService { // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { tracing::trace!("[STORAGE] Processing sample: {:?}", sample); + + // A Sample, in theory, will not arrive to a Storage without a Timestamp. This check (which, again, should + // never enter the `None` branch) ensures that the Storage Manager does not panic even if it ever happens. + let sample_timestamp = match sample.timestamp() { + Some(timestamp) => timestamp, + None => { + tracing::error!("Discarding Sample that has no Timestamp: {:?}", sample); + return; + } + }; + // if wildcard, update wildcard_updates if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; @@ -288,12 +299,10 @@ impl StorageService { ); for k in matching_keys { - if !self - .is_deleted(&k.clone(), sample.timestamp().unwrap()) - .await + if !self.is_deleted(&k.clone(), sample_timestamp).await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) - && self.is_latest(&k, sample.timestamp().unwrap()).await)) + && self.is_latest(&k, sample_timestamp).await)) { tracing::trace!( "Sample `{:?}` identified as needed processing for key {}", @@ -302,9 +311,8 @@ impl StorageService { ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. 
// get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store: Sample = if let Some(update) = self - .ovderriding_wild_update(&k, sample.timestamp().unwrap()) - .await + let sample_to_store: Sample = if let Some(update) = + self.ovderriding_wild_update(&k, sample_timestamp).await { match update.kind { SampleKind::Put => { @@ -323,6 +331,16 @@ impl StorageService { .into() }; + // A Sample that is to be stored **must** have a Timestamp. In theory, the Sample generated should have + // a Timestamp and, in theory, this check is unneeded. + let sample_to_store_timestamp = match sample_to_store.timestamp() { + Some(timestamp) => *timestamp, + None => { + tracing::error!("Discarding `Sample` generated through `SampleBuilder` that has no Timestamp: {:?}", sample_to_store); + continue; + } + }; + let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { Ok(stripped) => stripped, Err(e) => { @@ -340,16 +358,15 @@ impl StorageService { sample_to_store.payload().clone(), sample_to_store.encoding().clone(), ), - *sample_to_store.timestamp().unwrap(), + sample_to_store_timestamp, ) .await } SampleKind::Delete => { // register a tombstone - self.mark_tombstone(&k, *sample_to_store.timestamp().unwrap()) - .await; + self.mark_tombstone(&k, sample_to_store_timestamp).await; storage - .delete(stripped_key, *sample_to_store.timestamp().unwrap()) + .delete(stripped_key, sample_to_store_timestamp) .await } }; @@ -363,7 +380,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), *sample_to_store.timestamp().unwrap())); + .send((k.clone(), sample_to_store_timestamp)); match sending { Ok(_) => (), Err(e) => { From 9e1c4a8680a04b34f09dd2776d07958c8aae6f92 Mon Sep 17 00:00:00 2001 From: Diogo Matsubara Date: Mon, 8 Jul 2024 17:55:56 +0200 Subject: [PATCH 29/29] fix: Update zenoh-macros category (#1232) --- commons/zenoh-macros/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/commons/zenoh-macros/Cargo.toml b/commons/zenoh-macros/Cargo.toml index 7d06482e48..6adfe63deb 100644 --- a/commons/zenoh-macros/Cargo.toml +++ b/commons/zenoh-macros/Cargo.toml @@ -20,7 +20,7 @@ homepage = { workspace = true } authors = { workspace = true } edition = { workspace = true } license = { workspace = true } -categories = ["proc-macros"] +categories = ["development-tools::procedural-macro-helpers"] description = "Internal crate for zenoh." # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html