From de7822157df70cdff2dbfa99a6fb8471e7d336e4 Mon Sep 17 00:00:00 2001 From: Tavo Annus Date: Mon, 10 Jun 2024 13:19:33 +0300 Subject: [PATCH] Add typos check to CI (#1065) * Fix typos * Add typos check to CI --- .github/workflows/ci.yml | 12 +++++- DEFAULT_CONFIG.json5 | 18 ++++----- README.md | 6 +-- _typos.toml | 11 +++++ commons/zenoh-codec/src/core/zint.rs | 4 +- commons/zenoh-collections/src/properties.rs | 2 +- commons/zenoh-config/src/connection_retry.rs | 2 +- commons/zenoh-config/src/include.rs | 2 +- commons/zenoh-config/src/lib.rs | 14 +++---- .../zenoh-keyexpr/src/key_expr/borrowed.rs | 12 +++--- .../zenoh-keyexpr/src/key_expr/format/mod.rs | 4 +- .../src/key_expr/intersect/classical.rs | 2 +- .../src/keyexpr_tree/arc_tree.rs | 4 +- commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs | 2 +- .../src/keyexpr_tree/traits/mod.rs | 2 +- commons/zenoh-macros/src/lib.rs | 2 +- commons/zenoh-protocol/src/core/encoding.rs | 2 +- commons/zenoh-protocol/src/core/resolution.rs | 2 +- commons/zenoh-protocol/src/lib.rs | 2 +- commons/zenoh-protocol/src/network/declare.rs | 4 +- commons/zenoh-protocol/src/transport/close.rs | 2 +- .../zenoh-protocol/src/transport/fragment.rs | 2 +- commons/zenoh-protocol/src/transport/frame.rs | 2 +- .../zenoh-protocol/src/transport/keepalive.rs | 2 +- commons/zenoh-protocol/src/transport/open.rs | 2 +- commons/zenoh-protocol/src/zenoh/put.rs | 2 +- commons/zenoh-protocol/src/zenoh/reply.rs | 2 +- commons/zenoh-shm/src/lib.rs | 4 +- commons/zenoh-sync/src/condition.rs | 4 +- commons/zenoh-util/src/std_only/lib_loader.rs | 4 +- commons/zenoh-util/src/std_only/mod.rs | 2 +- commons/zenoh-util/src/std_only/net/mod.rs | 12 +++--- commons/zenoh-util/src/std_only/time_range.rs | 4 +- commons/zenoh-util/src/std_only/timer.rs | 4 +- examples/README.md | 4 +- examples/examples/z_pub_shm.rs | 6 +-- examples/examples/z_sub_thr.rs | 8 ++-- .../zenoh-link-udp/src/multicast.rs | 2 +- .../zenoh-link-unixpipe/src/unix/unicast.rs | 20 +++++----- 
.../zenoh-link-unixsock_stream/src/unicast.rs | 2 +- io/zenoh-transport/src/common/batch.rs | 2 +- io/zenoh-transport/src/common/pipeline.rs | 2 +- io/zenoh-transport/src/common/seq_num.rs | 4 +- io/zenoh-transport/src/manager.rs | 2 +- io/zenoh-transport/src/multicast/rx.rs | 12 +++--- .../src/unicast/establishment/accept.rs | 2 +- io/zenoh-transport/src/unicast/manager.rs | 2 +- io/zenoh-transport/tests/unicast_transport.rs | 2 +- plugins/zenoh-backend-traits/src/config.rs | 2 +- plugins/zenoh-backend-traits/src/lib.rs | 2 +- .../zenoh-plugin-storage-manager/src/lib.rs | 2 +- .../src/replica/storage.rs | 2 +- plugins/zenoh-plugin-trait/src/lib.rs | 4 +- plugins/zenoh-plugin-trait/src/vtable.rs | 2 +- zenoh-ext/examples/examples/README.md | 2 +- zenoh-ext/examples/examples/z_pub_cache.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh-ext/src/publication_cache.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 8 ++-- zenoh-ext/src/subscriber_ext.rs | 12 +++--- zenoh/src/info.rs | 2 +- zenoh/src/key_expr.rs | 6 +-- zenoh/src/lib.rs | 2 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 2 +- zenoh/src/net/routing/dispatcher/queries.rs | 2 +- zenoh/src/net/routing/dispatcher/resource.rs | 40 +++++++++---------- .../src/net/routing/hat/linkstate_peer/mod.rs | 6 +-- .../net/routing/hat/linkstate_peer/network.rs | 27 +++++++------ .../net/routing/hat/linkstate_peer/pubsub.rs | 32 +++++++-------- .../net/routing/hat/linkstate_peer/queries.rs | 32 +++++++-------- zenoh/src/net/routing/hat/router/mod.rs | 6 +-- zenoh/src/net/routing/hat/router/network.rs | 27 +++++++------ zenoh/src/net/routing/hat/router/pubsub.rs | 32 +++++++-------- zenoh/src/net/routing/hat/router/queries.rs | 32 +++++++-------- .../net/routing/interceptor/downsampling.rs | 4 +- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/net/runtime/mod.rs | 12 +++--- zenoh/src/net/runtime/orchestrator.rs | 8 ++-- zenoh/src/net/tests/tables.rs | 20 +++++----- zenoh/src/plugins/sealed.rs | 6 +-- zenoh/src/publication.rs 
| 2 +- zenoh/src/sample.rs | 4 +- zenoh/src/selector.rs | 4 +- zenoh/src/session.rs | 22 +++++----- zenoh/src/subscriber.rs | 4 +- zenoh/tests/connection_retry.rs | 2 +- zenoh/tests/routing.rs | 2 +- zenohd/src/main.rs | 2 +- 88 files changed, 317 insertions(+), 294 deletions(-) create mode 100644 _typos.toml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2aaf1b076..6320464db 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -133,6 +133,16 @@ jobs: run: ci/valgrind-check/run.sh shell: bash + typos: + name: Typos Check + runs-on: ubuntu-latest + steps: + - name: Clone this repository + uses: actions/checkout@v4 + + - name: Check spelling + uses: crate-ci/typos@master + # NOTE: In GitHub repository settings, the "Require status checks to pass # before merging" branch protection rule ensures that commits are only merged # from branches where specific status checks have passed. These checks are @@ -141,7 +151,7 @@ jobs: ci: name: CI status checks runs-on: ubuntu-latest - needs: [check, test, valgrind] + needs: [check, test, valgrind, typos] if: always() steps: - name: Check whether all jobs pass diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index ab1201a6a..b33dbeb8c 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -40,7 +40,7 @@ exit_on_failure: { router: false, peer: false, client: true }, /// connect establishing retry configuration retry: { - /// intial wait timeout until next connect try + /// initial wait timeout until next connect try period_init_ms: 1000, /// maximum wait timeout until next connect try period_max_ms: 4000, @@ -73,7 +73,7 @@ exit_on_failure: true, /// listen retry configuration retry: { - /// intial wait timeout until next try + /// initial wait timeout until next try period_init_ms: 1000, /// maximum wait timeout until next try period_max_ms: 4000, @@ -108,8 +108,8 @@ gossip: { /// Whether gossip scouting is enabled or not enabled: true, - /// When true, gossip scouting 
informations are propagated multiple hops to all nodes in the local network. - /// When false, gossip scouting informations are only propagated to the next hop. + /// When true, gossip scouting information are propagated multiple hops to all nodes in the local network. + /// When false, gossip scouting information are only propagated to the next hop. /// Activating multihop gossip implies more scouting traffic and a lower scalability. /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have /// direct connectivity with each other. @@ -267,7 +267,7 @@ /// set the actual keep_alive interval to one fourth of the lease time: i.e. send /// 4 keep_alive messages in a lease period. Changing the lease time will have the /// keep_alive messages sent more or less often. - /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity + /// This is in-line with the ITU-T G.8013/Y.1731 specification on continuous connectivity /// check which considers a link as failed when no messages are received in 3.5 times the /// target interval. keep_alive: 4, @@ -293,7 +293,7 @@ background: 4, }, /// Congestion occurs when the queue is empty (no available batch). - /// Using CongestionControl::Block the caller is blocked until a batch is available and re-insterted into the queue. + /// Using CongestionControl::Block the caller is blocked until a batch is available and re-inserted into the queue. /// Using CongestionControl::Drop the message might be dropped, depending on conditions configured here. congestion_control: { /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. @@ -308,7 +308,7 @@ rx: { /// Receiving buffer size in bytes for each link /// The default the rx_buffer_size value is the same as the default batch size: 65335. 
- /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate + /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. buffer_size: 65535, @@ -345,7 +345,7 @@ enabled: false, }, auth: { - /// The configuration of authentification. + /// The configuration of authentication. /// A password implies a username is required. usrpwd: { user: null, @@ -398,7 +398,7 @@ // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively // /// This is used in the 'storage_manager' which supports subplugins, each with it's own config // /// - // /// See below exapmle of plugin configuration using `__config__` property + // /// See below example of plugin configuration using `__config__` property // // /// Configure the REST API plugin // rest: { diff --git a/README.md b/README.md index b09ea73d8..af08db726 100644 --- a/README.md +++ b/README.md @@ -62,9 +62,9 @@ Then you can start run `zenohd`. ## How to build it > [!WARNING] -> Zenoh and its ecosystem are under active development. When you build from git, make sure you also build from git any other Zenoh repository you plan to use (e.g. binding, plugin, backend, etc.). It may happen that some changes in git are not compatible with the most recent packaged Zenoh release (e.g. deb, docker, pip). We put particular effort in mantaining compatibility between the various git repositories in the Zenoh project. +> Zenoh and its ecosystem are under active development. When you build from git, make sure you also build from git any other Zenoh repository you plan to use (e.g. binding, plugin, backend, etc.). It may happen that some changes in git are not compatible with the most recent packaged Zenoh release (e.g. deb, docker, pip). 
We put particular effort in maintaining compatibility between the various git repositories in the Zenoh project. -Install [Cargo and Rust](https://doc.rust-lang.org/cargo/getting-started/installation.html). Zenoh can be succesfully compiled with Rust stable (>= 1.71.0), so no special configuration is required from your side. If you already have the Rust toolchain installed, make sure it is up-to-date with: +Install [Cargo and Rust](https://doc.rust-lang.org/cargo/getting-started/installation.html). Zenoh can be successfully compiled with Rust stable (>= 1.71.0), so no special configuration is required from your side. If you already have the Rust toolchain installed, make sure it is up-to-date with: ```bash $ rustup update @@ -170,7 +170,7 @@ See other examples of Zenoh usage in [examples/](examples) * `--rest-http-port `: Configures the [REST plugin](https://zenoh.io/docs/manual/plugin-http/)'s HTTP port. Accepted values: - a port number - a string with format `:` (to bind the HTTP server to a specific interface) - - `"None"` to desactivate the REST plugin + - `"None"` to deactivate the REST plugin If not specified, the REST plugin will be active on any interface (`[::]`) and port `8000`. diff --git a/_typos.toml b/_typos.toml new file mode 100644 index 000000000..eb9952004 --- /dev/null +++ b/_typos.toml @@ -0,0 +1,11 @@ +[files] +extend-exclude = [ + # Ignore all files in transport tests as they contain + # hashes that are treated as typos. + "io/zenoh-transport/tests/*.rs", +] + + +[default.extend-words] +mis = "mis" # mismatch +thr = "thr" # throughput diff --git a/commons/zenoh-codec/src/core/zint.rs b/commons/zenoh-codec/src/core/zint.rs index 1c2f5a28e..8167d895c 100644 --- a/commons/zenoh-codec/src/core/zint.rs +++ b/commons/zenoh-codec/src/core/zint.rs @@ -274,7 +274,7 @@ zint_impl!(usize); // // guarantees at this point that `x` is never `0`. Since `x` is 64bit, // // then `n` is guaranteed to have a value between 1 and 8, both inclusives. 
// // `into` is guaranteed to be exactly 9 bytes long. Therefore, copying at most -// // 8 bytes with a pointer offest of 1 is actually safe. +// // 8 bytes with a pointer offset of 1 is actually safe. // let n = 8 - (x.leading_zeros() / 8) as usize; // unsafe { // core::ptr::copy_nonoverlapping( @@ -348,7 +348,7 @@ zint_impl!(usize); // macro_rules! non_zero_array { // ($($i: expr,)*) => { -// [$(match NonZeroU8::new($i) {Some(x) => x, None => panic!("Attempted to place 0 in an array of non-zeros litteral")}),*] +// [$(match NonZeroU8::new($i) {Some(x) => x, None => panic!("Attempted to place 0 in an array of non-zeros literal")}),*] // }; // } diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index 281ac8ca6..6a3c96241 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -24,7 +24,7 @@ const COMMENT_PREFIX: char = '#'; /// A map of key/value (String,String) properties. /// It can be parsed from a String, using `;` or `` as separator between each properties -/// and `=` as separator between a key and its value. Keys and values are trimed. +/// and `=` as separator between a key and its value. Keys and values are trimmed. 
#[non_exhaustive] #[derive(Clone, PartialEq, Eq, Default)] pub struct Properties(HashMap); diff --git a/commons/zenoh-config/src/connection_retry.rs b/commons/zenoh-config/src/connection_retry.rs index a845fbfe6..e1d383749 100644 --- a/commons/zenoh-config/src/connection_retry.rs +++ b/commons/zenoh-config/src/connection_retry.rs @@ -27,7 +27,7 @@ use crate::mode_dependent::*; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct ConnectionRetryModeDependentConf { - // intial wait timeout until next try + // initial wait timeout until next try pub period_init_ms: Option>, // maximum wait timeout until next try pub period_max_ms: Option>, diff --git a/commons/zenoh-config/src/include.rs b/commons/zenoh-config/src/include.rs index 709cd7c29..b89d78d1c 100644 --- a/commons/zenoh-config/src/include.rs +++ b/commons/zenoh-config/src/include.rs @@ -65,7 +65,7 @@ pub(crate) fn recursive_include

( where P: AsRef, { - // if include property is present, read the file and remove properites found in file from values + // if include property is present, read the file and remove properties found in file from values let include_object = if let Some(include_path) = values.get(include_property_name) { let Some(include_path) = include_path.as_str() else { bail!( diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index f40a52832..c54d75a82 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -153,7 +153,7 @@ pub trait ConfigValidator: Send + Sync { } } -// Necessary to allow to set default emplty weak referece value to plugin.validator field +// Necessary to allow to set default emplty weak reference value to plugin.validator field // because empty weak value is not allowed for Arc impl ConfigValidator for () {} @@ -267,8 +267,8 @@ validated_struct::validator! { GossipConf { /// Whether gossip scouting is enabled or not. enabled: Option, - /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. - /// When false, gossip scouting informations are only propagated to the next hop. + /// When true, gossip scouting information are propagated multiple hops to all nodes in the local network. + /// When false, gossip scouting information are only propagated to the next hop. /// Activating multihop gossip implies more scouting traffic and a lower scalability. /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have /// direct connectivity with each other. @@ -375,7 +375,7 @@ validated_struct::validator! 
{ sequence_number_resolution: Bits where (sequence_number_resolution_validator), /// Link lease duration in milliseconds (default: 10000) lease: u64, - /// Number fo keep-alive messages in a link lease duration (default: 4) + /// Number of keep-alive messages in a link lease duration (default: 4) keep_alive: usize, /// Zenoh's MTU equivalent (default: 2^16-1) batch_size: BatchSize, @@ -396,7 +396,7 @@ validated_struct::validator! { background: usize, } where (queue_size_validator), /// Congestion occurs when the queue is empty (no available batch). - /// Using CongestionControl::Block the caller is blocked until a batch is available and re-insterted into the queue. + /// Using CongestionControl::Block the caller is blocked until a batch is available and re-inserted into the queue. /// Using CongestionControl::Drop the message might be dropped, depending on conditions configured here. pub congestion_control: CongestionControlConf { /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. @@ -412,7 +412,7 @@ validated_struct::validator! { pub rx: LinkRxConf { /// Receiving buffer size in bytes for each link /// The default the rx_buffer_size value is the same as the default batch size: 65335. - /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate + /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. buffer_size: usize, @@ -455,7 +455,7 @@ validated_struct::validator! { }, pub auth: #[derive(Default)] AuthConf { - /// The configuration of authentification. + /// The configuration of authentication. /// A password implies a username is required. 
pub usrpwd: #[derive(Default)] UsrPwdConf { diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index 85b4ef79e..429188349 100644 --- a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -107,7 +107,7 @@ impl keyexpr { /// Joins both sides, inserting a `/` in between them. /// - /// This should be your prefered method when concatenating path segments. + /// This should be your preferred method when concatenating path segments. /// /// This is notably useful for workspaces: /// ```rust @@ -137,7 +137,7 @@ impl keyexpr { /// /// NOTE: this operation can typically be used in a backend implementation, at creation of a Storage to get the keys prefix, /// and then in `zenoh_backend_traits::Storage::on_sample()` this prefix has to be stripped from all received - /// `Sample::key_expr` to retrieve the corrsponding key. + /// `Sample::key_expr` to retrieve the corresponding key. /// /// # Examples: /// ``` @@ -172,12 +172,12 @@ impl keyexpr { } /// Remove the specified `prefix` from `self`. - /// The result is a list of `keyexpr`, since there might be several ways for the prefix to match the begining of the `self` key expression. + /// The result is a list of `keyexpr`, since there might be several ways for the prefix to match the beginning of the `self` key expression. /// For instance, if `self` is `"a/**/c/*" and `prefix` is `a/b/c` then: /// - the `prefix` matches `"a/**/c"` leading to a result of `"*"` when stripped from `self` /// - the `prefix` matches `"a/**"` leading to a result of `"**/c/*"` when stripped from `self` /// So the result is `["*", "**/c/*"]`. - /// If `prefix` cannot match the begining of `self`, an empty list is reuturned. + /// If `prefix` cannot match the beginning of `self`, an empty list is returned. /// /// See below more examples.
/// @@ -581,7 +581,7 @@ enum KeyExprConstructionError { LoneDollarStar = -1, SingleStarAfterDoubleStar = -2, DoubleStarAfterDoubleStar = -3, - EmpyChunk = -4, + EmptyChunk = -4, StarsInChunk = -5, DollarAfterDollarOrStar = -6, ContainsSharpOrQMark = -7, @@ -595,7 +595,7 @@ impl<'a> TryFrom<&'a str> for &'a keyexpr { let mut in_big_wild = false; for chunk in value.split('/') { if chunk.is_empty() { - bail!((KeyExprConstructionError::EmpyChunk) "Invalid Key Expr `{}`: empty chunks are forbidden, as well as leading and trailing slashes", value) + bail!((KeyExprConstructionError::EmptyChunk) "Invalid Key Expr `{}`: empty chunks are forbidden, as well as leading and trailing slashes", value) } if chunk == "$*" { bail!((KeyExprConstructionError::LoneDollarStar) diff --git a/commons/zenoh-keyexpr/src/key_expr/format/mod.rs b/commons/zenoh-keyexpr/src/key_expr/format/mod.rs index 3a03d8a51..bf5536ec6 100644 --- a/commons/zenoh-keyexpr/src/key_expr/format/mod.rs +++ b/commons/zenoh-keyexpr/src/key_expr/format/mod.rs @@ -23,7 +23,7 @@ //! ## The format syntax //! KE formats are defined following a syntax that extends the [`keyexpr`] syntax. In addition to existing chunk types, KE formmats support "specification" chunks. //! These chunks must follow the one of the following syntaxes: `${id:pattern}`, `${id:pattern#default}`, `$#{id:pattern}#`, or `$#{id:pattern#default}#`, where: -//! - `id` is the chunk identifer: it cannot contain the `:` character, and is used to name the chunk in accessors. +//! - `id` is the chunk identifier: it cannot contain the `:` character, and is used to name the chunk in accessors. //! - `pattern` must be a valid KE (and therefore cannot contain `#`) and defines the range of values that the chunk may adopt. //! - `default` (optional) is used as the chunk value when formatting if the builder wasn't supplied with a value for `id`. //! 
@@ -73,7 +73,7 @@ use support::{IterativeConstructor, Spec}; /// ## The format syntax /// KE formats are defined following a syntax that extends the [`keyexpr`] syntax. In addition to existing chunk types, KE formmats support "specification" chunks. /// These chunks must follow the one of the following syntaxes: `${id:pattern}`, `${id:pattern#default}`, `$#{id:pattern}#`, or `$#{id:pattern#default}#`, where: -/// - `id` is the chunk identifer: it cannot contain the `:` character, and is used to name the chunk in accessors. +/// - `id` is the chunk identifier: it cannot contain the `:` character, and is used to name the chunk in accessors. /// - `pattern` must be a valid KE (and therefore cannot contain `#`) and defines the range of values that the chunk may adopt. /// - `default` (optional) is used as the chunk value when formatting if the builder wasn't supplied with a value for `id`. /// diff --git a/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs b/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs index fa346a2d4..cc28ef2c4 100644 --- a/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs +++ b/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs @@ -110,7 +110,7 @@ fn it_intersect(mut it1: &[u8], mut it2: &[u8]) -> bool { } (it1.is_empty() || it1 == b"**") && (it2.is_empty() || it2 == b"**") } -/// Retruns `true` if the given key expressions intersect. +/// Returns `true` if the given key expressions intersect. /// /// I.e. if it exists a resource key (with no wildcards) that matches /// both given key expressions. 
diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs index a0428ac56..dfb7e6826 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs @@ -88,10 +88,10 @@ impl< /// # Type inference papercut /// Despite some of `KeArcTree`'s generic parameters having default values, those are only taken into /// account by the compiler when a type is named with some parameters omitted, and not when a type is - /// infered with the same parameters unconstrained. + /// inferred with the same parameters unconstrained. /// /// The simplest way to resolve this is to eventually assign to tree part of the return value - /// to a variable or field whose type is named `KeArcTree<_>` (the `Weight` parameter can generally be infered). + /// to a variable or field whose type is named `KeArcTree<_>` (the `Weight` parameter can generally be inferred). pub fn new() -> Result<(Self, DefaultToken), ::ConstructionError> { let token = DefaultToken::new()?; Ok((Self::with_token(&token), token)) diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs index e2833a912..5d7991289 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs @@ -42,7 +42,7 @@ //! KeTrees were designed to maximize code reuse. As such, their core properties are reflected through the [`IKeyExprTree`] and [`IKeyExprTreeMut`] traits. //! //! KeTrees are made up of node, where nodes may or may not have a value (called `weight`) associated with them. To access these weighs, as well as other -//! properties of a node, you can go throught the [`IKeyExprTreeNode`] and [`IKeyExprTreeNodeMut`] traits. +//! properties of a node, you can go through the [`IKeyExprTreeNode`] and [`IKeyExprTreeNodeMut`] traits. //! //! # Iterators //! 
KeTrees provide iterators for the following operations: diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs index dd06cf14b..cee2bd916 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs @@ -192,7 +192,7 @@ pub trait IKeyExprTreeMut<'a, Weight>: IKeyExprTree<'a, Weight> { self.prune_where(|node| node.weight().is_none()) } } -/// The basic operations of a KeTree when a Token is necessary to acess data. +/// The basic operations of a KeTree when a Token is necessary to access data. pub trait ITokenKeyExprTree<'a, Weight, Token> { /// An immutable guard to a node of the tree. type Node: IKeyExprTreeNode; diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index 774bebc80..3118399dc 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -451,7 +451,7 @@ mod zenoh_runtime_derive; use syn::DeriveInput; use zenoh_runtime_derive::{derive_generic_runtime_param, derive_register_param}; -/// Make the underlying struct `Param` be generic over any `T` satifying a generated `trait DefaultParam { fn param() -> Param; }` +/// Make the underlying struct `Param` be generic over any `T` satisfying a generated `trait DefaultParam { fn param() -> Param; }` /// ```rust,ignore /// #[derive(GenericRuntimeParam)] /// struct Param { diff --git a/commons/zenoh-protocol/src/core/encoding.rs b/commons/zenoh-protocol/src/core/encoding.rs index f202b8e79..b0b089d9b 100644 --- a/commons/zenoh-protocol/src/core/encoding.rs +++ b/commons/zenoh-protocol/src/core/encoding.rs @@ -148,7 +148,7 @@ impl Encoding { } /// Returns `true`if the string representation of this encoding starts with - /// the string representation of ther given encoding. + /// the string representation of the given encoding.
pub fn starts_with(&self, with: T) -> bool where T: Into, diff --git a/commons/zenoh-protocol/src/core/resolution.rs b/commons/zenoh-protocol/src/core/resolution.rs index 093fd33bb..bfce6c646 100644 --- a/commons/zenoh-protocol/src/core/resolution.rs +++ b/commons/zenoh-protocol/src/core/resolution.rs @@ -111,7 +111,7 @@ impl fmt::Display for Bits { } #[repr(u8)] -// The value indicates the bit offest +// The value indicates the bit offset #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Field { FrameSN = 0, diff --git a/commons/zenoh-protocol/src/lib.rs b/commons/zenoh-protocol/src/lib.rs index 2e1a2fa7c..074aae49a 100644 --- a/commons/zenoh-protocol/src/lib.rs +++ b/commons/zenoh-protocol/src/lib.rs @@ -73,7 +73,7 @@ pub const VERSION: u8 = 0x08; // # Array field // // An array contains a fixed number of elements whose number is known a priori or indicated by -// another field. Each element can be either a single byte field or a variable legnth field. +// another field. Each element can be either a single byte field or a variable length field. // // ```text // 7 6 5 4 3 2 1 0 diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 76415d52f..396caf187 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -693,8 +693,8 @@ pub mod interest { /// /// The DECLARE INTEREST message is sent to request the transmission of existing and future /// declarations of a given kind matching a target keyexpr. E.g., a declare interest could be sent to - /// request the transmisison of all existing subscriptions matching `a/*`. A FINAL INTEREST is used to - /// mark the end of the transmission of exisiting matching declarations. + /// request the transmission of all existing subscriptions matching `a/*`. A FINAL INTEREST is used to + /// mark the end of the transmission of existing matching declarations. 
/// /// E.g., the [`DeclareInterest`]/[`FinalInterest`]/[`UndeclareInterest`] message flow is the following: /// diff --git a/commons/zenoh-protocol/src/transport/close.rs b/commons/zenoh-protocol/src/transport/close.rs index 4e760400b..b93fe6d6b 100644 --- a/commons/zenoh-protocol/src/transport/close.rs +++ b/commons/zenoh-protocol/src/transport/close.rs @@ -16,7 +16,7 @@ /// /// The [`Close`] message is sent in any of the following two cases: /// 1) in response to an INIT or OPEN message which are not accepted; -/// 2) at any time to arbitrarly close the transport with the corresponding zenoh node. +/// 2) at any time to arbitrarily close the transport with the corresponding zenoh node. /// /// The [`Close`] message flow is the following: /// diff --git a/commons/zenoh-protocol/src/transport/fragment.rs b/commons/zenoh-protocol/src/transport/fragment.rs index 3e80c9cfb..5af22db4f 100644 --- a/commons/zenoh-protocol/src/transport/fragment.rs +++ b/commons/zenoh-protocol/src/transport/fragment.rs @@ -18,7 +18,7 @@ use zenoh_buffers::ZSlice; /// # Fragment message /// /// The [`Fragment`] message is used to transmit on the wire large [`crate::zenoh::ZenohMessage`] -/// that require fragmentation because they are larger thatn the maximum batch size +/// that require fragmentation because they are larger than the maximum batch size /// (i.e. 2^16-1) and/or the link MTU. /// /// The [`Fragment`] message flow is the following: diff --git a/commons/zenoh-protocol/src/transport/frame.rs b/commons/zenoh-protocol/src/transport/frame.rs index 184784f9f..7afce036c 100644 --- a/commons/zenoh-protocol/src/transport/frame.rs +++ b/commons/zenoh-protocol/src/transport/frame.rs @@ -20,7 +20,7 @@ use alloc::vec::Vec; /// [`crate::net::protocol::message::ZenohMessage`]. I.e., the total length of the /// serialized [`crate::net::protocol::message::ZenohMessage`] (s) MUST be smaller /// than the maximum batch size (i.e. 2^16-1) and the link MTU. 
-/// The [`Frame`] message is used as means to aggreate multiple +/// The [`Frame`] message is used as means to aggregate multiple /// [`crate::net::protocol::message::ZenohMessage`] in a single atomic message that /// goes on the wire. By doing so, many small messages can be batched together and /// share common information like the sequence number. diff --git a/commons/zenoh-protocol/src/transport/keepalive.rs b/commons/zenoh-protocol/src/transport/keepalive.rs index 927b0cd46..cc9ccfad9 100644 --- a/commons/zenoh-protocol/src/transport/keepalive.rs +++ b/commons/zenoh-protocol/src/transport/keepalive.rs @@ -49,7 +49,7 @@ /// /// NOTE: In order to consider eventual packet loss, transmission latency and jitter, the time /// interval between two subsequent [`KeepAlive`] messages SHOULD be set to one fourth of -/// the lease time. This is in-line with the ITU-T G.8013/Y.1731 specification on continous +/// the lease time. This is in-line with the ITU-T G.8013/Y.1731 specification on continuous /// connectivity check which considers a link as failed when no messages are received in /// 3.5 times the target keep alive interval. /// diff --git a/commons/zenoh-protocol/src/transport/open.rs b/commons/zenoh-protocol/src/transport/open.rs index d793671b0..f899e8cc2 100644 --- a/commons/zenoh-protocol/src/transport/open.rs +++ b/commons/zenoh-protocol/src/transport/open.rs @@ -17,7 +17,7 @@ use zenoh_buffers::ZSlice; /// # Open message /// -/// After having succesfully complete the [`super::InitSyn`]-[`super::InitAck`] message exchange, +/// After having successfully completed the [`super::InitSyn`]-[`super::InitAck`] message exchange, /// the OPEN message is sent on a link to finalize the initialization of the link and /// associated transport with a zenoh node.
/// For convenience, we call [`OpenSyn`] and [`OpenAck`] an OPEN message with the A flag diff --git a/commons/zenoh-protocol/src/zenoh/put.rs b/commons/zenoh-protocol/src/zenoh/put.rs index 14674e9ad..ac18aaf00 100644 --- a/commons/zenoh-protocol/src/zenoh/put.rs +++ b/commons/zenoh-protocol/src/zenoh/put.rs @@ -66,7 +66,7 @@ pub mod ext { pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; /// # Shared Memory extension - /// Used to carry additional information about the shared-memory layour of data + /// Used to carry additional information about the shared-memory layout of data #[cfg(feature = "shared-memory")] pub type Shm = zextunit!(0x2, true); #[cfg(feature = "shared-memory")] diff --git a/commons/zenoh-protocol/src/zenoh/reply.rs b/commons/zenoh-protocol/src/zenoh/reply.rs index 2395e1e9b..0cdbcd2cd 100644 --- a/commons/zenoh-protocol/src/zenoh/reply.rs +++ b/commons/zenoh-protocol/src/zenoh/reply.rs @@ -74,7 +74,7 @@ pub mod ext { pub type ConsolidationType = crate::zenoh::query::ext::ConsolidationType; /// # Shared Memory extension - /// Used to carry additional information about the shared-memory layour of data + /// Used to carry additional information about the shared-memory layout of data #[cfg(feature = "shared-memory")] pub type Shm = zextunit!(0x3, true); #[cfg(feature = "shared-memory")] diff --git a/commons/zenoh-shm/src/lib.rs b/commons/zenoh-shm/src/lib.rs index 82f361438..a75e17448 100644 --- a/commons/zenoh-shm/src/lib.rs +++ b/commons/zenoh-shm/src/lib.rs @@ -62,7 +62,7 @@ impl PartialEq for Chunk { } } -/// Informations about a [`SharedMemoryBuf`]. +/// Information about a [`SharedMemoryBuf`]. /// /// This that can be serialized and can be used to retrieve the [`SharedMemoryBuf`] in a remote process. #[derive(Clone, Debug, PartialEq, Eq)] @@ -274,7 +274,7 @@ impl fmt::Debug for SharedMemoryReader { /// A shared memory segment manager. 
/// -/// Allows to access a shared memory segment and reserve some parts of this segment for writting. +/// Allows to access a shared memory segment and reserve some parts of this segment for writing. pub struct SharedMemoryManager { segment_path: String, size: usize, diff --git a/commons/zenoh-sync/src/condition.rs b/commons/zenoh-sync/src/condition.rs index 098aa0541..ba615d888 100644 --- a/commons/zenoh-sync/src/condition.rs +++ b/commons/zenoh-sync/src/condition.rs @@ -13,7 +13,7 @@ // use event_listener::{Event, EventListener}; use std::{pin::Pin, sync::MutexGuard}; -use tokio::sync::MutexGuard as AysncMutexGuard; +use tokio::sync::MutexGuard as AsyncMutexGuard; pub type ConditionWaiter = Pin>; /// This is a Condition Variable similar to that provided by POSIX. @@ -44,7 +44,7 @@ impl Condition { /// Waits for the condition to be notified #[inline] - pub async fn wait(&self, guard: AysncMutexGuard<'_, T>) { + pub async fn wait(&self, guard: AsyncMutexGuard<'_, T>) { let listener = self.event.listen(); drop(guard); listener.await; diff --git a/commons/zenoh-util/src/std_only/lib_loader.rs b/commons/zenoh-util/src/std_only/lib_loader.rs index 9c682e434..4f3621e1c 100644 --- a/commons/zenoh-util/src/std_only/lib_loader.rs +++ b/commons/zenoh-util/src/std_only/lib_loader.rs @@ -29,7 +29,7 @@ zconfigurable! { pub static ref LIB_DEFAULT_SEARCH_PATHS: String = ".:~/.zenoh/lib:/opt/homebrew/lib:/usr/local/lib:/usr/lib".to_string(); } -/// LibLoader allows search for librairies and to load them. +/// LibLoader allows search for libraries and to load them. #[derive(Clone, Debug)] pub struct LibLoader { search_paths: Vec, @@ -142,7 +142,7 @@ impl LibLoader { bail!("Library file '{}' not found", filename) } - /// Search and load all librairies with filename starting with [struct@LIB_PREFIX]+`prefix` and ending with [struct@LIB_SUFFIX]. + /// Search and load all libraries with filename starting with [struct@LIB_PREFIX]+`prefix` and ending with [struct@LIB_SUFFIX]. 
/// The result is a list of tuple with: /// * the [Library] /// * its full path diff --git a/commons/zenoh-util/src/std_only/mod.rs b/commons/zenoh-util/src/std_only/mod.rs index 1cb406374..bfd24b652 100644 --- a/commons/zenoh-util/src/std_only/mod.rs +++ b/commons/zenoh-util/src/std_only/mod.rs @@ -8,7 +8,7 @@ pub use timer::*; pub mod log; pub use log::*; -/// The "ZENOH_HOME" environement variable name +/// The "ZENOH_HOME" environment variable name pub const ZENOH_HOME_ENV_VAR: &str = "ZENOH_HOME"; const DEFAULT_ZENOH_HOME_DIRNAME: &str = ".zenoh"; diff --git a/commons/zenoh-util/src/std_only/net/mod.rs b/commons/zenoh-util/src/std_only/net/mod.rs index 83ab08d67..239cdd664 100644 --- a/commons/zenoh-util/src/std_only/net/mod.rs +++ b/commons/zenoh-util/src/std_only/net/mod.rs @@ -24,7 +24,7 @@ zconfigurable! { } #[cfg(windows)] -unsafe fn get_adapters_adresses(af_spec: i32) -> ZResult> { +unsafe fn get_adapters_addresses(af_spec: i32) -> ZResult> { use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; let mut ret; @@ -81,7 +81,7 @@ pub fn get_interface(name: &str) -> ZResult> { use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; - let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_INET)?; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); while let Some(iface) = next_iface { @@ -165,7 +165,7 @@ pub fn get_local_addresses(interface: Option<&str>) -> ZResult> { use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; - let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_UNSPEC)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_UNSPEC)?; let mut result = vec![]; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); @@ -245,7 +245,7 @@ pub fn get_unicast_addresses_of_interface(name: &str) -> ZResult> { use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; - let buffer 
= get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_INET)?; let mut addrs = vec![]; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); @@ -284,7 +284,7 @@ pub fn get_index_of_interface(addr: IpAddr) -> ZResult { use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; - let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_INET)?; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); while let Some(iface) = next_iface { @@ -327,7 +327,7 @@ pub fn get_interface_names_by_addr(addr: IpAddr) -> ZResult> { use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; - let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_UNSPEC)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_UNSPEC)?; if addr.is_unspecified() { let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); diff --git a/commons/zenoh-util/src/std_only/time_range.rs b/commons/zenoh-util/src/std_only/time_range.rs index 50e5542fc..886083b2f 100644 --- a/commons/zenoh-util/src/std_only/time_range.rs +++ b/commons/zenoh-util/src/std_only/time_range.rs @@ -41,7 +41,7 @@ const W_TO_SECS: f64 = D_TO_SECS * 7.0; /// - the "offset" syntax, which is written `now()`, and allows to specify a target instant as /// an offset applied to an instant of evaluation. These offset are resolved at the evaluation site. /// -/// In range syntax, omiting `` and/or `` implies that the range is unbounded in that direction. +/// In range syntax, omitting `` and/or `` implies that the range is unbounded in that direction. /// /// Exclusive bounds are represented by their respective delimiters pointing towards the exterior. /// Interior bounds are represented by the opposite. 
@@ -283,7 +283,7 @@ impl TimeExpr { }), } } - /// Substracts `duration` from `self`, returning `None` if `self` is a `Fixed(SystemTime)` and subsctracting the duration is not possible + /// Subtracts `duration` from `self`, returning `None` if `self` is a `Fixed(SystemTime)` and subtracting the duration is not possible /// because the result would be outside the bounds of the underlying data structure (see [`SystemTime::checked_sub`]). /// Otherwise returns `Some(time_expr)`. pub fn checked_sub(&self, duration: f64) -> Option { diff --git a/commons/zenoh-util/src/std_only/timer.rs b/commons/zenoh-util/src/std_only/timer.rs index 6e7dde065..e6eefd933 100644 --- a/commons/zenoh-util/src/std_only/timer.rs +++ b/commons/zenoh-util/src/std_only/timer.rs @@ -86,8 +86,8 @@ impl Eq for TimedEvent {} impl Ord for TimedEvent { fn cmp(&self, other: &Self) -> ComparisonOrdering { // The usual cmp is defined as: self.when.cmp(&other.when) - // This would make the events odered from largets to the smallest in the heap. - // However, we want the events to be ordered from the smallets to the largest. + // This would make the events ordered from largest to the smallest in the heap. + // However, we want the events to be ordered from the smallest to the largest. // As a consequence of this, we swap the comparison terms, converting the heap // from a max-heap into a min-heap. other.when.cmp(&self.when) diff --git a/examples/README.md b/examples/README.md index 0d38e3218..bd846a14f 100644 --- a/examples/README.md +++ b/examples/README.md @@ -213,7 +213,7 @@ Declares a liveliness token on a given key expression (`group1/zenoh-rs` by default). This token will be seen alive byt the `z_get_liveliness` and `z_sub_liveliness` until - user explicitely drops the token by pressing `'d'` or implicitely dropped by terminating + user explicitly drops the token by pressing `'d'` or implicitly dropped by terminating or killing the `z_liveliness` example. 
Typical usage: @@ -245,7 +245,7 @@ liveliness tokens being dropped) that match a given key expression (`group1/**` by default). Those tokens could be declared by the `z_liveliness` example. - Note: the `z_sub_liveliness` example will not receive informations about + Note: the `z_sub_liveliness` example will not receive information about matching liveliness tokens that were alive before it's start. Typical usage: diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 542cff3b6..3601680bf 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -51,7 +51,7 @@ async fn main() -> Result<(), zenoh::Error> { Err(_) => { tokio::time::sleep(Duration::from_millis(100)).await; println!( - "Afer failing allocation the GC collected: {} bytes -- retrying", + "After failing allocation the GC collected: {} bytes -- retrying", shm.garbage_collect() ); println!( @@ -67,7 +67,7 @@ async fn main() -> Result<(), zenoh::Error> { let prefix = format!("[{idx:4}] "); let prefix_len = prefix.as_bytes().len(); - // Retrive a mutable slice from the SharedMemoryBuf. + // Retrieve a mutable slice from the SharedMemoryBuf. // // This operation is marked unsafe since we cannot guarantee a single mutable reference // across multiple processes. Thus if you use it, and you'll inevitable have to use it, @@ -93,7 +93,7 @@ async fn main() -> Result<(), zenoh::Error> { let freed = shm.garbage_collect(); println!("The Gargabe collector freed {freed} bytes"); let defrag = shm.defragment(); - println!("De-framented {defrag} bytes"); + println!("De-fragmented {defrag} bytes"); } // Dropping the SharedMemoryBuf means to free it. 
drop(sbuf); diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index 2a3511b0b..7e2018b84 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -51,8 +51,8 @@ impl Stats { } fn print_round(&self) { let elapsed = self.round_start.elapsed().as_secs_f64(); - let throughtput = (self.round_size as f64) / elapsed; - println!("{throughtput} msg/s"); + let throughput = (self.round_size as f64) / elapsed; + println!("{throughput} msg/s"); } } impl Drop for Stats { @@ -62,8 +62,8 @@ impl Drop for Stats { }; let elapsed = global_start.elapsed().as_secs_f64(); let total = self.round_size * self.finished_rounds + self.round_count; - let throughtput = total as f64 / elapsed; - println!("Received {total} messages over {elapsed:.2}s: {throughtput}msg/s"); + let throughput = total as f64 / elapsed; + println!("Received {total} messages over {elapsed:.2}s: {throughput}msg/s"); } } diff --git a/io/zenoh-links/zenoh-link-udp/src/multicast.rs b/io/zenoh-links/zenoh-link-udp/src/multicast.rs index 59848b95c..94d79739b 100644 --- a/io/zenoh-links/zenoh-link-udp/src/multicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/multicast.rs @@ -275,7 +275,7 @@ impl LinkManagerMulticastUdp { .map_err(|e| zerror!("{}: {}", mcast_addr, e))?; } } - IpAddr::V6(src_ip6) => bail!("{}: unexepcted IPv6 source address", src_ip6), + IpAddr::V6(src_ip6) => bail!("{}: unexpected IPv6 source address", src_ip6), }, IpAddr::V6(dst_ip6) => { // Join default multicast group diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 090ef0a34..896495514 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -81,12 +81,12 @@ impl Invitation { } async fn expect(expected_suffix: u32, pipe: &mut PipeR) -> ZResult<()> { - let recived_suffix = Self::receive(pipe).await?; - if recived_suffix != expected_suffix { + let 
received_suffix = Self::receive(pipe).await?; + if received_suffix != expected_suffix { bail!( "Suffix mismatch: expected {} got {}", expected_suffix, - recived_suffix + received_suffix ) } Ok(()) @@ -244,7 +244,7 @@ async fn handle_incoming_connections( // read invitation from the request channel let suffix = Invitation::receive(request_channel).await?; - // gererate uplink and downlink names + // generate uplink and downlink names let (dedicated_downlink_path, dedicated_uplink_path) = get_dedicated_pipe_names(path_downlink, path_uplink, suffix); @@ -252,10 +252,10 @@ async fn handle_incoming_connections( let mut dedicated_downlink = PipeW::new(&dedicated_downlink_path).await?; let mut dedicated_uplink = PipeR::new(&dedicated_uplink_path, access_mode).await?; - // confirm over the dedicated chanel + // confirm over the dedicated channel Invitation::confirm(suffix, &mut dedicated_downlink).await?; - // got confirmation over the dedicated chanel + // got confirmation over the dedicated channel Invitation::expect(suffix, &mut dedicated_uplink).await?; // create Locators @@ -353,7 +353,7 @@ async fn create_pipe( // generate random suffix let suffix: u32 = rand::thread_rng().gen(); - // gererate uplink and downlink names + // generate uplink and downlink names let (path_downlink, path_uplink) = get_dedicated_pipe_names(path_downlink, path_uplink, suffix); // try create uplink and downlink pipes to ensure that the selected suffix is available @@ -390,7 +390,7 @@ impl UnicastPipeClient { // listener owns the request channel, so failure of this call means that there is nobody listening on the provided endpoint let mut request_channel = PipeW::new(&path_uplink).await?; - // create dedicated channel prerequisities. The creation code also ensures that nobody else would use the same channel concurrently + // create dedicated channel prerequisites. 
The creation code also ensures that nobody else would use the same channel concurrently let ( mut dedicated_downlink, dedicated_suffix, @@ -398,10 +398,10 @@ impl UnicastPipeClient { dedicated_uplink_path, ) = dedicate_pipe(&path_uplink, &path_downlink, access_mode).await?; - // invite the listener to our dedicated channel over the requet channel + // invite the listener to our dedicated channel over the request channel Invitation::send(dedicated_suffix, &mut request_channel).await?; - // read responce that should be sent over the dedicated channel, confirming that everything is OK + // read response that should be sent over the dedicated channel, confirming that everything is OK // on the listener's side and it is already working with the dedicated channel Invitation::expect(dedicated_suffix, &mut dedicated_downlink).await?; diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index fa1c2d9d0..1d95af7ea 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -368,7 +368,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { e })?; - // Update the endpoint with the acutal local path + // Update the endpoint with the actual local path endpoint = EndPoint::new( endpoint.protocol(), local_path_str, diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 4139a65a0..efae77698 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -327,7 +327,7 @@ impl WBatch { }) .map_err(|_| zerror!("Compression error"))?; - // Verify wether the resulting compressed data is smaller than the initial input + // Verify whether the resulting compressed data is smaller than the initial input if support.len() < self.buffer.len() { Ok(Finalize::Buffer) } else { diff --git a/io/zenoh-transport/src/common/pipeline.rs 
b/io/zenoh-transport/src/common/pipeline.rs index e3a4068b2..37351596c 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -250,7 +250,7 @@ impl StageIn { // Treat all messages as non-droppable once we start fragmenting batch = zgetbatch_rets!(true, tch.sn.set(sn).unwrap()); - // Serialize the message fragmnet + // Serialize the message fragment match batch.encode((&mut reader, &mut fragment)) { Ok(_) => { // Update the SN diff --git a/io/zenoh-transport/src/common/seq_num.rs b/io/zenoh-transport/src/common/seq_num.rs index f286d1474..ecbfd8a94 100644 --- a/io/zenoh-transport/src/common/seq_num.rs +++ b/io/zenoh-transport/src/common/seq_num.rs @@ -57,7 +57,7 @@ impl SeqNum { /// - 16_386 (i.e., 2^14) /// - 2_097_152 (i.e., 2^21) /// - /// This funtion will panic if `value` is out of bound w.r.t. `resolution`. That is if + /// This function will panic if `value` is out of bound w.r.t. `resolution`. That is if /// `value` is greater or equal than `resolution`. /// pub(crate) fn make(value: TransportSn, resolution: Bits) -> ZResult { @@ -179,7 +179,7 @@ impl SeqNumGenerator { /// As a consequence of wire zenoh's representation of sequence numbers /// this should be a multiple of 7. /// - /// This funtion will panic if `value` is out of bound w.r.t. `resolution`. That is if + /// This function will panic if `value` is out of bound w.r.t. `resolution`. That is if /// `value` is greater or equal than `resolution`. 
/// pub(crate) fn make(initial_sn: TransportSn, resolution: Bits) -> ZResult { diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index a52a35af8..2657f5cbd 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -74,7 +74,7 @@ use zenoh_task::TaskController; /// .lease(Duration::from_secs(1)) /// .keep_alive(4) // Send a KeepAlive every 250 ms /// .accept_timeout(Duration::from_secs(1)) -/// .accept_pending(10) // Set to 10 the number of simultanous pending incoming transports +/// .accept_pending(10) // Set to 10 the number of simultaneous pending incoming transports /// .max_sessions(5); // Allow max 5 transports open /// let mut resolution = Resolution::default(); /// resolution.set(Field::FrameSN, Bits::U8); diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index 4927c179d..6d662f287 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -63,7 +63,7 @@ impl TransportMulticastInner { || join.ext_qos.is_some() != peer.is_qos() { let e = format!( - "Ingoring Join on {} of peer: {}. Inconsistent parameters.", + "Ignoring Join on {} of peer: {}. Inconsistent parameters.", peer.locator, peer.zid, ); tracing::debug!("{}", e); @@ -81,7 +81,7 @@ impl TransportMulticastInner { ) -> ZResult<()> { if zread!(self.peers).len() >= self.manager.config.multicast.max_sessions { tracing::debug!( - "Ingoring Join on {} from peer: {}. Max sessions reached: {}.", + "Ignoring Join on {} from peer: {}. Max sessions reached: {}.", locator, join.zid, self.manager.config.multicast.max_sessions, @@ -91,7 +91,7 @@ impl TransportMulticastInner { if join.version != self.manager.config.version { tracing::debug!( - "Ingoring Join on {} from peer: {}. Unsupported version: {}. Expected: {}.", + "Ignoring Join on {} from peer: {}. Unsupported version: {}. 
Expected: {}.", locator, join.zid, join.version, @@ -102,7 +102,7 @@ impl TransportMulticastInner { if join.resolution != self.manager.config.resolution { tracing::debug!( - "Ingoring Join on {} from peer: {}. Unsupported SN resolution: {:?}. Expected: {:?}.", + "Ignoring Join on {} from peer: {}. Unsupported SN resolution: {:?}. Expected: {:?}.", locator, join.zid, join.resolution, @@ -113,7 +113,7 @@ impl TransportMulticastInner { if join.batch_size != batch_size { tracing::debug!( - "Ingoring Join on {} from peer: {}. Unsupported Batch Size: {:?}. Expected: {:?}.", + "Ignoring Join on {} from peer: {}. Unsupported Batch Size: {:?}. Expected: {:?}.", locator, join.zid, join.batch_size, @@ -124,7 +124,7 @@ impl TransportMulticastInner { if !self.manager.config.multicast.is_qos && join.ext_qos.is_some() { tracing::debug!( - "Ingoring Join on {} from peer: {}. QoS is not supported.", + "Ignoring Join on {} from peer: {}. QoS is not supported.", locator, join.zid, ); diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index ce9229db4..a901aba6e 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -429,7 +429,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Verify that the cookie is the one we sent if input.cookie_nonce != cookie.nonce { - let e = zerror!("Rejecting OpenSyn on: {}. Unkwown cookie.", self.link); + let e = zerror!("Rejecting OpenSyn on: {}. 
Unknown cookie.", self.link); return Err((e.into(), Some(close::reason::INVALID))); } diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 899887bea..708d0d39e 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -95,7 +95,7 @@ pub struct TransportManagerParamsUnicast { pub struct TransportManagerBuilderUnicast { // NOTE: In order to consider eventual packet loss and transmission latency and jitter, // set the actual keep_alive timeout to one fourth of the lease time. - // This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity + // This is in-line with the ITU-T G.8013/Y.1731 specification on continuous connectivity // check which considers a link as failed when no messages are received in 3.5 times the // target interval. pub(super) lease: Duration, diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 33cfbceb1..4ddacef6b 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -41,7 +41,7 @@ use zenoh_transport::{ TransportPeerEventHandler, }; -// These keys and certificates below are purposedly generated to run TLS and mTLS tests. +// These keys and certificates below are purposely generated to run TLS and mTLS tests. // // With 2 way authentication (mTLS), using TLS 1.3, we need two pairs of keys and certificates: one // for the "server" and another one for the "client". 
diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index d3ddbd43c..ca97e4791 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -87,7 +87,7 @@ impl Default for ReplicaConfig { // This will determine the time upto which replicas might be diverged // This can be different for each replica if not used to compute hot and warm publication_interval: Duration::from_secs(5), - // This indicates the uncertainity due to the network + // This indicates the uncertainty due to the network // The messages might still be in transit in the network propagation_delay: Duration::from_millis(200), // This is the chunk that you would like your data to be divide into in time. diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 1660d83c3..f185aaa25 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -102,7 +102,7 @@ //! //! async fn put(&mut self, key: Option, value: Value, timestamp: Timestamp) -> ZResult { //! // the key will be None if it exactly matched with the strip_prefix -//! // create a storge specific special structure to store it +//! // create a storage specific special structure to store it //! // Store the data with timestamp //! // @TODO: //! // store (key, value, timestamp) diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 48d200ffb..b17e4dcb9 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -224,7 +224,7 @@ impl StorageRuntimeInner { config.volume_id ); // let _ = async_std::task::block_on(storage.send(StorageMessage::Stop)); - let _ = storage.send(StorageMessage::Stop); // TODO: was previosuly spawning a task. do we need that? + let _ = storage.send(StorageMessage::Stop); // TODO: was previously spawning a task. do we need that? 
} } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 413147197..63352fab0 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -295,7 +295,7 @@ impl StorageService { && self.is_latest(&k, sample.get_timestamp().unwrap()).await)) { tracing::trace!( - "Sample `{}` identified as neded processing for key {}", + "Sample `{}` identified as needed processing for key {}", sample, k ); diff --git a/plugins/zenoh-plugin-trait/src/lib.rs b/plugins/zenoh-plugin-trait/src/lib.rs index 6d9ac35fe..b9dbb455a 100644 --- a/plugins/zenoh-plugin-trait/src/lib.rs +++ b/plugins/zenoh-plugin-trait/src/lib.rs @@ -25,13 +25,13 @@ //! //! The actual work of the plugin is performed by the instance, which is created by the [`start`](Plugin::start) function. //! -//! Plugins are loaded, started and stopped by [`PluginsManager`](crate::manager::PluginsManager). Stopping pluign is just dropping it's instance. +//! Plugins are loaded, started and stopped by [`PluginsManager`](crate::manager::PluginsManager). Stopping plugin is just dropping its instance. //! //! Plugins can be static and dynamic. //! //! Static plugin is just a type which implements [`Plugin`] trait. It can be added to [`PluginsManager`](crate::manager::PluginsManager) by [`PluginsManager::add_static_plugin`](crate::manager::PluginsManager::add_static_plugin) method. //! -//! Dynamic pluign is a shared library which exports set of C-repr (unmangled) functions which allows to check plugin compatibility and create plugin instance. These functiuons are defined automatically by [`declare_plugin`](crate::declare_plugin) macro. +//! Dynamic plugin is a shared library which exports set of C-repr (unmangled) functions which allows to check plugin compatibility and create plugin instance. 
These functions are defined automatically by [`declare_plugin`](crate::declare_plugin) macro. //! mod compatibility mod manager; diff --git a/plugins/zenoh-plugin-trait/src/vtable.rs b/plugins/zenoh-plugin-trait/src/vtable.rs index e1108f87f..74c7479c3 100644 --- a/plugins/zenoh-plugin-trait/src/vtable.rs +++ b/plugins/zenoh-plugin-trait/src/vtable.rs @@ -48,7 +48,7 @@ impl PluginVTable { /// This macro adds non-mangled functions which provides plugin version and loads it into the host. /// If plugin library should work also as static, consider calling this macro under feature condition /// -/// The funcitons declared by this macro are: +/// The functions declared by this macro are: /// /// - `get_plugin_loader_version` - returns `PLUGIN_LOADER_VERSION` const of the crate. The [`PluginsManager`](crate::manager::PluginsManager) /// will check if this version is compatible with the host. diff --git a/zenoh-ext/examples/examples/README.md b/zenoh-ext/examples/examples/README.md index 892bded1c..498a1ca6f 100644 --- a/zenoh-ext/examples/examples/README.md +++ b/zenoh-ext/examples/examples/README.md @@ -17,7 +17,7 @@ ### z_pub_cache - Declares a publisher and an assiciated publication cache with a given key expression. + Declares a publisher and an associated publication cache with a given key expression. All the publications are locally cached (with a configurable history size - i.e. max number of cached data per resource). The cache can be queried by a QueryingSubscriber at startup (see next example). Typical usage: diff --git a/zenoh-ext/examples/examples/z_pub_cache.rs b/zenoh-ext/examples/examples/z_pub_cache.rs index 58eb7962c..982829f84 100644 --- a/zenoh-ext/examples/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/examples/z_pub_cache.rs @@ -59,7 +59,7 @@ struct Args { /// The number of publications to keep in cache. history: usize, #[arg(short = 'o', long)] - /// Set `complete` option to true. 
This means that this queryable is ulitmate data source, no need to scan other queryables. + /// Set `complete` option to true. This means that this queryable is ultimate data source, no need to scan other queryables. complete: bool, #[arg(short = 'x', long)] /// An optional queryable prefix. diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index a1442fa5c..5d19964d1 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // -//! To manage groups and group memeberships +//! To manage groups and group memberships use flume::{Receiver, Sender}; use futures::prelude::*; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 344fe99d3..431ccd2dd 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -202,7 +202,7 @@ impl<'a> PublicationCache<'a> { } }, - // on query, reply with cach content + // on query, reply with cached content query = quer_recv.recv_async() => { if let Ok(query) = query { if !query.selector().key_expr.as_str().contains('*') { diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 4d97670e1..3c738b7da 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -96,7 +96,7 @@ impl<'a, 'b, KeySpace> QueryingSubscriberBuilder<'a, 'b, KeySpace, DefaultHandle self.callback(locked(callback)) } - /// Use the given handler to recieve Samples. + /// Use the given handler to receive Samples. #[inline] pub fn with( self, @@ -585,9 +585,9 @@ where } } -/// A Subscriber that will run the given user defined `fetch` funtion at startup. +/// A Subscriber that will run the given user defined `fetch` function at startup. 
/// -/// The user defined `fetch` funtion should fetch some samples and return them through the callback funtion +/// The user defined `fetch` function should fetch some samples and return them through the callback function /// (it could typically be a Session::get()). Those samples will be merged with the received publications and made available in the receiver. /// Later on, new fetches can be performed again, calling [`FetchingSubscriber::fetch()`](super::FetchingSubscriber::fetch()). /// @@ -726,7 +726,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { /// Perform an additional `fetch`. /// - /// The provided `fetch` funtion should fetch some samples and return them through the callback funtion + /// The provided `fetch` function should fetch some samples and return them through the callback function /// (it could typically be a Session::get()). Those samples will be merged with the received publications and made available in the receiver. /// /// # Examples diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 192a0a312..5a9c05972 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -49,8 +49,8 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `FetchingSubscriberBuilder`), the `FetchingSubscriber` - /// will run the given `fetch` funtion. The user defined `fetch` funtion should fetch some samples and return them - /// through the callback funtion. Those samples will be merged with the received publications and made available in the receiver. + /// will run the given `fetch` function. The user defined `fetch` function should fetch some samples and return them + /// through the callback function. Those samples will be merged with the received publications and made available in the receiver. 
/// Later on, new fetches can be performed again, calling [`FetchingSubscriber::fetch()`](super::FetchingSubscriber::fetch()). /// /// A typical usage of the `FetchingSubscriber` is to retrieve publications that were made in the past, but stored in some zenoh Storage. @@ -133,8 +133,8 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `FetchingSubscriberBuilder`), the `FetchingSubscriber` - /// will run the given `fetch` funtion. The user defined `fetch` funtion should fetch some samples and return them - /// through the callback funtion. Those samples will be merged with the received publications and made available in the receiver. + /// will run the given `fetch` function. The user defined `fetch` function should fetch some samples and return them + /// through the callback function. Those samples will be merged with the received publications and made available in the receiver. /// Later on, new fetches can be performed again, calling [`FetchingSubscriber::fetch()`](super::FetchingSubscriber::fetch()). /// /// A typical usage of the `FetchingSubscriber` is to retrieve publications that were made in the past, but stored in some zenoh Storage. @@ -246,8 +246,8 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `FetchingSubscriberBuilder`), the `FetchingSubscriber` - /// will run the given `fetch` funtion. The user defined `fetch` funtion should fetch some samples and return them - /// through the callback funtion. Those samples will be merged with the received publications and made available in the receiver. 
+ /// will run the given `fetch` function. The user defined `fetch` function should fetch some samples and return them + /// through the callback function. Those samples will be merged with the received publications and made available in the receiver. /// Later on, new fetches can be performed again, calling [`FetchingSubscriber::fetch()`](super::FetchingSubscriber::fetch()). /// /// A typical usage of the fetching liveliness subscriber is to retrieve existing liveliness tokens while susbcribing to diff --git a/zenoh/src/info.rs b/zenoh/src/info.rs index 3e0efdf13..36910c666 100644 --- a/zenoh/src/info.rs +++ b/zenoh/src/info.rs @@ -154,7 +154,7 @@ impl<'a> AsyncResolve for PeersZidBuilder<'a> { } /// Struct returned by [`Session::info()`](crate::SessionDeclarations::info) which allows -/// to access informations about the current zenoh [`Session`](crate::Session). +/// to access information about the current zenoh [`Session`](crate::Session). /// /// # Examples /// ``` diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index b8837ba31..c3117561c 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -81,7 +81,7 @@ pub(crate) enum KeyExprInner<'a> { /// A possibly-owned version of [`keyexpr`] that may carry optimisations for use with a [`Session`] that may have declared it. /// -/// Check [`keyexpr`]'s documentation for detailed explainations of the Key Expression Language. +/// Check [`keyexpr`]'s documentation for detailed explanations of the Key Expression Language. #[repr(transparent)] #[derive(Clone, serde::Deserialize, serde::Serialize)] #[serde(from = "OwnedKeyExpr")] @@ -230,7 +230,7 @@ impl<'a> KeyExpr<'a> { /// Joins both sides, inserting a `/` in between them. /// - /// This should be your prefered method when concatenating path segments. + /// This should be your preferred method when concatenating path segments. 
/// /// This is notably useful for workspaces: /// ```rust /// @@ -264,7 +264,7 @@ impl<'a> KeyExpr<'a> { /// Performs string concatenation and returns the result as a [`KeyExpr`] if possible. /// - /// You should probably prefer [`KeyExpr::join`] as Zenoh may then take advantage of the hierachical separation it inserts. + /// You should probably prefer [`KeyExpr::join`] as Zenoh may then take advantage of the hierarchical separation it inserts. pub fn concat + ?Sized>(&self, s: &S) -> ZResult> { let s = s.as_ref(); self._concat(s) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 369321829..e8db68b79 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -45,7 +45,7 @@ //! ``` //! //! ### Subscribe -//! The example below shows how to consume values for a key expresison. +//! The example below shows how to consume values for a key expression. //! ```no_run //! use futures::prelude::*; //! use zenoh::prelude::r#async::*; diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 42f10517e..6ec4bbf73 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -194,7 +194,7 @@ pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc) { pub(crate) fn update_query_routes_from(tables: &mut Tables, res: &mut Arc) { update_query_routes(tables, res); let res = get_mut_unchecked(res); - for child in res.childs.values_mut() { + for child in res.children.values_mut() { update_query_routes_from(tables, child); } } diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 88c690802..34f122913 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -173,7 +173,7 @@ pub struct Resource { pub(crate) parent: Option>, pub(crate) suffix: String, pub(crate) nonwild_prefix: Option<(Arc, String)>, - pub(crate) childs: HashMap>, + pub(crate) children: HashMap>, 
pub(crate) context: Option, pub(crate) session_ctxs: HashMap>, } @@ -208,7 +208,7 @@ impl Resource { parent: Some(parent.clone()), suffix: String::from(suffix), nonwild_prefix, - childs: HashMap::new(), + children: HashMap::new(), context, session_ctxs: HashMap::new(), } @@ -282,7 +282,7 @@ impl Resource { parent: None, suffix: String::from(""), nonwild_prefix: None, - childs: HashMap::new(), + children: HashMap::new(), context: None, session_ctxs: HashMap::new(), }) @@ -292,7 +292,7 @@ impl Resource { let mut resclone = res.clone(); let mutres = get_mut_unchecked(&mut resclone); if let Some(ref mut parent) = mutres.parent { - if Arc::strong_count(res) <= 3 && res.childs.is_empty() { + if Arc::strong_count(res) <= 3 && res.children.is_empty() { // consider only childless resource held by only one external object (+ 1 strong count for resclone, + 1 strong count for res.parent to a total of 3 ) tracing::debug!("Unregister resource {}", res.expr()); if let Some(context) = mutres.context.as_mut() { @@ -309,7 +309,7 @@ impl Resource { } mutres.nonwild_prefix.take(); { - get_mut_unchecked(parent).childs.remove(&res.suffix); + get_mut_unchecked(parent).children.remove(&res.suffix); } Resource::clean(parent); } @@ -318,11 +318,11 @@ impl Resource { pub fn close(self: &mut Arc) { let r = get_mut_unchecked(self); - for c in r.childs.values_mut() { + for c in r.children.values_mut() { Self::close(c); } r.parent.take(); - r.childs.clear(); + r.children.clear(); r.nonwild_prefix.take(); r.session_ctxs.clear(); } @@ -331,7 +331,7 @@ impl Resource { pub fn print_tree(from: &Arc) -> String { let mut result = from.expr(); result.push('\n'); - for child in from.childs.values() { + for child in from.children.values() { result.push_str(&Resource::print_tree(child)); } result @@ -351,7 +351,7 @@ impl Resource { None => (suffix, ""), }; - match get_mut_unchecked(from).childs.get_mut(chunk) { + match get_mut_unchecked(from).children.get_mut(chunk) { Some(res) => 
Resource::make_resource(tables, res, rest), None => { let mut new = Arc::new(Resource::new(from, chunk, None)); @@ -360,7 +360,7 @@ impl Resource { } let res = Resource::make_resource(tables, &mut new, rest); get_mut_unchecked(from) - .childs + .children .insert(String::from(chunk), new); res } @@ -376,7 +376,7 @@ impl Resource { None => (suffix, ""), }; - match get_mut_unchecked(from).childs.get_mut(chunk) { + match get_mut_unchecked(from).children.get_mut(chunk) { Some(res) => Resource::make_resource(tables, res, rest), None => { let mut new = Arc::new(Resource::new(from, chunk, None)); @@ -385,7 +385,7 @@ impl Resource { } let res = Resource::make_resource(tables, &mut new, rest); get_mut_unchecked(from) - .childs + .children .insert(String::from(chunk), new); res } @@ -405,7 +405,7 @@ impl Resource { None => (suffix, ""), }; - match from.childs.get(chunk) { + match from.children.get(chunk) { Some(res) => Resource::get_resource(res, rest), None => None, } @@ -418,7 +418,7 @@ impl Resource { None => (suffix, ""), }; - match from.childs.get(chunk) { + match from.children.get(chunk) { Some(res) => Resource::get_resource(res, rest), None => None, } @@ -516,7 +516,7 @@ impl Resource { ) -> WireExpr<'a> { if checkchilds && !suffix.is_empty() { let (chunk, rest) = suffix.split_at(suffix.find('/').unwrap_or(suffix.len())); - if let Some(child) = prefix.childs.get(chunk) { + if let Some(child) = prefix.children.get(chunk) { return get_best_key_(child, rest, sid, true); } } @@ -550,7 +550,7 @@ impl Resource { if from.context.is_some() { matches.push(Arc::downgrade(from)); } - for child in from.childs.values() { + for child in from.children.values() { recursive_push(child, matches) } } @@ -560,7 +560,7 @@ impl Resource { matches: &mut Vec>, ) { if from.parent.is_none() || from.suffix == "/" { - for child in from.childs.values() { + for child in from.children.values() { get_matches_from(key_expr, child, matches); } return; @@ -582,12 +582,12 @@ impl Resource { 
matches.push(Arc::downgrade(from)); } if suffix.as_bytes() == b"**" { - for child in from.childs.values() { + for child in from.children.values() { get_matches_from(key_expr, child, matches) } } if let Some(child) = - from.childs.get("/**").or_else(|| from.childs.get("**")) + from.children.get("/**").or_else(|| from.children.get("**")) { if child.context.is_some() { matches.push(Arc::downgrade(child)) @@ -599,7 +599,7 @@ impl Resource { Some(rest) => { let recheck_keyexpr_one_level_lower = chunk.as_bytes() == b"**" || suffix.as_bytes() == b"**"; - for child in from.childs.values() { + for child in from.children.values() { get_matches_from(rest, child, matches); if recheck_keyexpr_one_level_lower { get_matches_from(key_expr, child, matches) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index d75c8faf1..808acef23 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -148,11 +148,11 @@ impl HatTables { let mut tables = zwrite!(tables_ref.tables); tracing::trace!("Compute trees"); - let new_childs = hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(); + let new_children = hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(); tracing::trace!("Compute routes"); - pubsub::pubsub_tree_change(&mut tables, &new_childs); - queries::queries_tree_change(&mut tables, &new_childs); + pubsub::pubsub_tree_change(&mut tables, &new_children); + queries::queries_tree_change(&mut tables, &new_children); tracing::trace!("Computations completed"); hat_mut!(tables).peers_trees_task = None; diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index 16844643c..7d6e3d285 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -101,7 +101,7 @@ pub(super) struct Changes { #[derive(Clone)] pub(super) struct Tree { 
pub(super) parent: Option, - pub(super) childs: Vec, + pub(super) children: Vec, pub(super) directions: Vec>, } @@ -152,7 +152,7 @@ impl Network { links: VecMap::new(), trees: vec![Tree { parent: None, - childs: vec![], + children: vec![], directions: vec![None], }], distances: vec![0.0], @@ -890,12 +890,13 @@ impl Network { let indexes = self.graph.node_indices().collect::>(); let max_idx = indexes.iter().max().unwrap(); - let old_childs: Vec> = self.trees.iter().map(|t| t.childs.clone()).collect(); + let old_children: Vec> = + self.trees.iter().map(|t| t.children.clone()).collect(); self.trees.clear(); self.trees.resize_with(max_idx.index() + 1, || Tree { parent: None, - childs: vec![], + children: vec![], directions: vec![], }); @@ -929,7 +930,7 @@ impl Network { for idx in &indexes { if let Some(parent_idx) = paths.predecessors[idx.index()] { if parent_idx == self.idx { - self.trees[tree_root_idx.index()].childs.push(*idx); + self.trees[tree_root_idx.index()].children.push(*idx); } } } @@ -967,22 +968,22 @@ impl Network { } } - let mut new_childs = Vec::with_capacity(self.trees.len()); - new_childs.resize(self.trees.len(), vec![]); + let mut new_children = Vec::with_capacity(self.trees.len()); + new_children.resize(self.trees.len(), vec![]); - for i in 0..new_childs.len() { - new_childs[i] = if i < old_childs.len() { + for i in 0..new_children.len() { + new_children[i] = if i < old_children.len() { self.trees[i] - .childs + .children .iter() - .filter(|idx| !old_childs[i].contains(idx)) + .filter(|idx| !old_children[i].contains(idx)) .cloned() .collect() } else { - self.trees[i].childs.clone() + self.trees[i].children.clone() }; } - new_childs + new_children } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index f0f8b7711..232e24167 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -37,16 +37,16 @@ use 
zenoh_protocol::{ use zenoh_sync::get_mut_unchecked; #[inline] -fn send_sourced_subscription_to_net_childs( +fn send_sourced_subscription_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, sub_info: &SubscriberInfo, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -135,10 +135,10 @@ fn propagate_sourced_subscription( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_sourced_subscription_to_net_childs( + send_sourced_subscription_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, sub_info, @@ -274,15 +274,15 @@ fn client_subs(res: &Arc) -> Vec> { } #[inline] -fn send_forget_sourced_subscription_to_net_childs( +fn send_forget_sourced_subscription_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: Option, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -344,10 +344,10 @@ fn propagate_forget_sourced_subscription( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_forget_sourced_subscription_to_net_childs( + send_forget_sourced_subscription_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, Some(tree_sid.index() as NodeId), @@ -499,10 +499,10 @@ pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId) { } } -pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec]) { - // propagate subs to new childs - for (tree_sid, tree_childs) in 
new_childs.iter().enumerate() { - if !tree_childs.is_empty() { +pub(super) fn pubsub_tree_change(tables: &mut Tables, new_children: &[Vec]) { + // propagate subs to new children + for (tree_sid, tree_children) in new_children.iter().enumerate() { + if !tree_children.is_empty() { let net = hat!(tables).peers_net.as_ref().unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { @@ -518,10 +518,10 @@ pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec, face: &Arc) } #[inline] -fn send_sourced_queryable_to_net_childs( +fn send_sourced_queryable_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, qabl_info: &QueryableInfo, src_face: Option<&mut Arc>, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -203,10 +203,10 @@ fn propagate_sourced_queryable( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_sourced_queryable_to_net_childs( + send_sourced_queryable_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, qabl_info, src_face, @@ -330,15 +330,15 @@ fn client_qabls(res: &Arc) -> Vec> { } #[inline] -fn send_forget_sourced_queryable_to_net_childs( +fn send_forget_sourced_queryable_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -401,10 +401,10 @@ fn propagate_forget_sourced_queryable( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_forget_sourced_queryable_to_net_childs( + 
send_forget_sourced_queryable_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, tree_sid.index() as NodeId, @@ -557,10 +557,10 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId) { } } -pub(super) fn queries_tree_change(tables: &mut Tables, new_childs: &[Vec]) { - // propagate qabls to new childs - for (tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { +pub(super) fn queries_tree_change(tables: &mut Tables, new_children: &[Vec]) { + // propagate qabls to new children + for (tree_sid, tree_children) in new_children.iter().enumerate() { + if !tree_children.is_empty() { let net = hat!(tables).peers_net.as_ref().unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { @@ -571,10 +571,10 @@ pub(super) fn queries_tree_change(tables: &mut Tables, new_childs: &[Vec hat_mut!(tables) .routers_net .as_mut() @@ -277,8 +277,8 @@ impl HatTables { }; tracing::trace!("Compute routes"); - pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); - queries::queries_tree_change(&mut tables, &new_childs, net_type); + pubsub::pubsub_tree_change(&mut tables, &new_children, net_type); + queries::queries_tree_change(&mut tables, &new_children, net_type); tracing::trace!("Computations completed"); match net_type { diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs index 5089ce989..e8e3a56aa 100644 --- a/zenoh/src/net/routing/hat/router/network.rs +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -100,7 +100,7 @@ pub(super) struct Changes { #[derive(Clone)] pub(super) struct Tree { pub(super) parent: Option, - pub(super) childs: Vec, + pub(super) children: Vec, pub(super) directions: Vec>, } @@ -151,7 +151,7 @@ impl Network { links: VecMap::new(), trees: vec![Tree { parent: None, - childs: vec![], + children: vec![], directions: vec![None], }], distances: 
vec![0.0], @@ -893,12 +893,13 @@ impl Network { let indexes = self.graph.node_indices().collect::>(); let max_idx = indexes.iter().max().unwrap(); - let old_childs: Vec> = self.trees.iter().map(|t| t.childs.clone()).collect(); + let old_children: Vec> = + self.trees.iter().map(|t| t.children.clone()).collect(); self.trees.clear(); self.trees.resize_with(max_idx.index() + 1, || Tree { parent: None, - childs: vec![], + children: vec![], directions: vec![], }); @@ -932,7 +933,7 @@ impl Network { for idx in &indexes { if let Some(parent_idx) = paths.predecessors[idx.index()] { if parent_idx == self.idx { - self.trees[tree_root_idx.index()].childs.push(*idx); + self.trees[tree_root_idx.index()].children.push(*idx); } } } @@ -970,23 +971,23 @@ impl Network { } } - let mut new_childs = Vec::with_capacity(self.trees.len()); - new_childs.resize(self.trees.len(), vec![]); + let mut new_children = Vec::with_capacity(self.trees.len()); + new_children.resize(self.trees.len(), vec![]); - for i in 0..new_childs.len() { - new_childs[i] = if i < old_childs.len() { + for i in 0..new_children.len() { + new_children[i] = if i < old_children.len() { self.trees[i] - .childs + .children .iter() - .filter(|idx| !old_childs[i].contains(idx)) + .filter(|idx| !old_children[i].contains(idx)) .cloned() .collect() } else { - self.trees[i].childs.clone() + self.trees[i].children.clone() }; } - new_childs + new_children } #[inline] diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index b7d00227c..e8c6cb4e6 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -37,16 +37,16 @@ use zenoh_protocol::{ use zenoh_sync::get_mut_unchecked; #[inline] -fn send_sourced_subscription_to_net_childs( +fn send_sourced_subscription_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, sub_info: &SubscriberInfo, routing_context: 
NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -153,10 +153,10 @@ fn propagate_sourced_subscription( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_sourced_subscription_to_net_childs( + send_sourced_subscription_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, sub_info, @@ -341,15 +341,15 @@ fn client_subs(res: &Arc) -> Vec> { } #[inline] -fn send_forget_sourced_subscription_to_net_childs( +fn send_forget_sourced_subscription_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: Option, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -453,10 +453,10 @@ fn propagate_forget_sourced_subscription( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_forget_sourced_subscription_to_net_childs( + send_forget_sourced_subscription_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, Some(tree_sid.index() as NodeId), @@ -710,12 +710,12 @@ pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: pub(super) fn pubsub_tree_change( tables: &mut Tables, - new_childs: &[Vec], + new_children: &[Vec], net_type: WhatAmI, ) { - // propagate subs to new childs - for (tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { + // propagate subs to new children + for (tree_sid, tree_children) in new_children.iter().enumerate() { + if !tree_children.is_empty() { let net = hat!(tables).get_net(net_type).unwrap(); let tree_idx = 
NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { @@ -737,10 +737,10 @@ pub(super) fn pubsub_tree_change( reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers mode: Mode::Push, }; - send_sourced_subscription_to_net_childs( + send_sourced_subscription_to_net_children( tables, net, - tree_childs, + tree_children, res, None, &sub_info, diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 28ff0800d..76ddba723 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -188,16 +188,16 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) } #[inline] -fn send_sourced_queryable_to_net_childs( +fn send_sourced_queryable_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, qabl_info: &QueryableInfo, src_face: Option<&mut Arc>, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -285,10 +285,10 @@ fn propagate_sourced_queryable( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_sourced_queryable_to_net_childs( + send_sourced_queryable_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, qabl_info, src_face, @@ -471,15 +471,15 @@ fn client_qabls(res: &Arc) -> Vec> { } #[inline] -fn send_forget_sourced_queryable_to_net_childs( +fn send_forget_sourced_queryable_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut 
someface) => { @@ -584,10 +584,10 @@ fn propagate_forget_sourced_queryable( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_forget_sourced_queryable_to_net_childs( + send_forget_sourced_queryable_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, tree_sid.index() as NodeId, @@ -932,12 +932,12 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links pub(super) fn queries_tree_change( tables: &mut Tables, - new_childs: &[Vec], + new_children: &[Vec], net_type: WhatAmI, ) { - // propagate qabls to new childs - for (tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { + // propagate qabls to new children + for (tree_sid, tree_children) in new_children.iter().enumerate() { + if !tree_children.is_empty() { let net = hat!(tables).get_net(net_type).unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { @@ -954,10 +954,10 @@ pub(super) fn queries_tree_change( _ => &res_hat!(res).peer_qabls, }; if let Some(qabl_info) = qabls.get(&tree_id) { - send_sourced_queryable_to_net_childs( + send_sourced_queryable_to_net_children( tables, net, - tree_childs, + tree_children, res, qabl_info, None, diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs index cda132e80..34c59ac07 100644 --- a/zenoh/src/net/routing/interceptor/downsampling.rs +++ b/zenoh/src/net/routing/interceptor/downsampling.rs @@ -149,11 +149,11 @@ impl InterceptorTrait for DownsamplingInterceptor { return None; } } else { - tracing::debug!("unxpected cache ID {}", id); + tracing::debug!("unexpected cache ID {}", id); } } } else { - tracing::debug!("unxpected cache type {:?}", ctx.full_expr()); + tracing::debug!("unexpected cache type {:?}", ctx.full_expr()); } } } diff --git a/zenoh/src/net/runtime/adminspace.rs 
b/zenoh/src/net/runtime/adminspace.rs index 2a8ec088b..0ba661c8f 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -620,7 +620,7 @@ fn metrics(context: &AdminContext, query: Query) { .unwrap(); #[allow(unused_mut)] let mut metrics = format!( - r#"# HELP zenoh_build Informations about zenoh. + r#"# HELP zenoh_build Information about zenoh. # TYPE zenoh_build gauge zenoh_build{{version="{}"}} 1 "#, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 364891460..c3f8815a5 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -362,7 +362,7 @@ impl TransportEventHandler for RuntimeTransportEventHandler { .state .router .new_transport_multicast(transport.clone())?; - Ok(Arc::new(RuntimeMuticastGroup { + Ok(Arc::new(RuntimeMulticastGroup { runtime: runtime.clone(), transport, slave_handlers, @@ -419,20 +419,20 @@ impl TransportPeerEventHandler for RuntimeSession { } } -pub(super) struct RuntimeMuticastGroup { +pub(super) struct RuntimeMulticastGroup { pub(super) runtime: Runtime, pub(super) transport: TransportMulticast, pub(super) slave_handlers: Vec>, } -impl TransportMulticastEventHandler for RuntimeMuticastGroup { +impl TransportMulticastEventHandler for RuntimeMulticastGroup { fn new_peer(&self, peer: TransportPeer) -> ZResult> { let slave_handlers: Vec> = self .slave_handlers .iter() .filter_map(|handler| handler.new_peer(peer.clone()).ok()) .collect(); - Ok(Arc::new(RuntimeMuticastSession { + Ok(Arc::new(RuntimeMulticastSession { main_handler: self .runtime .state @@ -459,12 +459,12 @@ impl TransportMulticastEventHandler for RuntimeMuticastGroup { } } -pub(super) struct RuntimeMuticastSession { +pub(super) struct RuntimeMulticastSession { pub(super) main_handler: Arc, pub(super) slave_handlers: Vec>, } -impl TransportPeerEventHandler for RuntimeMuticastSession { +impl TransportPeerEventHandler for RuntimeMulticastSession { fn handle_message(&self, msg: NetworkMessage) -> 
ZResult<()> { self.main_handler.handle_message(msg) } diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index 75ad7bdf9..798a3fc69 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -83,7 +83,7 @@ impl Runtime { } } } else { - bail!("No peer specified and multicast scouting desactivated!") + bail!("No peer specified and multicast scouting deactivated!") } } _ => self.connect_peers(&peers, true).await, @@ -335,10 +335,10 @@ impl Runtime { pub(crate) async fn update_peers(&self) -> ZResult<()> { let peers = { self.state.config.lock().connect().endpoints().clone() }; - let tranports = self.manager().get_transports_unicast().await; + let transports = self.manager().get_transports_unicast().await; if self.state.whatami == WhatAmI::Client { - for transport in tranports { + for transport in transports { let should_close = if let Ok(Some(orch_transport)) = transport.get_callback() { if let Some(orch_transport) = orch_transport .as_any() @@ -361,7 +361,7 @@ impl Runtime { } } else { for peer in peers { - if !tranports.iter().any(|transport| { + if !transports.iter().any(|transport| { if let Ok(Some(orch_transport)) = transport.get_callback() { if let Some(orch_transport) = orch_transport .as_any() diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index f5e65f0bd..bc889d720 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -638,13 +638,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives1.get_last_name().is_some()); assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_wr1"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(21, "/z1_wr1".to_string())); - // functionnal check + // functional check assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z1_wr1"); // mapping 
strategy check @@ -672,13 +672,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives1.get_last_name().is_some()); assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_wr2"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(21, "/z1_wr2".to_string())); - // functionnal check + // functional check assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z1_wr2"); // mapping strategy check @@ -706,13 +706,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives0.get_last_name().is_some()); assert_eq!(primitives0.get_last_name().unwrap(), "test/client/**"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(11, "/**".to_string())); - // functionnal check + // functional check assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), "test/client/**"); // mapping strategy check @@ -740,13 +740,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives1.get_last_name().is_some()); assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_pub1"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(21, "/z1_pub1".to_string())); - // functionnal check + // functional check assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z1_pub1"); // mapping strategy check @@ -774,13 +774,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives0.get_last_name().is_some()); assert_eq!(primitives0.get_last_name().unwrap(), "test/client/z2_pub1"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(11, "/z2_pub1".to_string())); - // functionnal check + // functional check 
assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z2_pub1"); // mapping strategy check diff --git a/zenoh/src/plugins/sealed.rs b/zenoh/src/plugins/sealed.rs index a3bfdc3aa..8bfc1f1da 100644 --- a/zenoh/src/plugins/sealed.rs +++ b/zenoh/src/plugins/sealed.rs @@ -100,14 +100,14 @@ pub trait RunningPluginTrait: Send + Sync + PluginControl { /// * `Ok(Vec)`: the list of responses to the query. For example if plugins can return information on subleys "foo", "bar", "foo/buzz" and "bar/buzz" /// and it's requested with the query "@/router/ROUTER_ID/plugins/PLUGIN_NAME/*", it should return only information on "foo" and "bar" subkeys, but not on "foo/buzz" and "bar/buzz" /// as they doesn't match the query. - /// * `Err(ZError)`: Problem occured when processing the query. + /// * `Err(ZError)`: Problem occurred when processing the query. /// /// If plugin implements subplugins (as the storage plugin), then it should also reply with information about its subplugins with the same rules. /// /// TODO: /// * add example - /// * rework the admin space: rework "with_extented_string" function, provide it as utility for plugins - /// * reorder paramaters: plugin_status_key should be first as it describes the root of pluginb's admin space + /// * rework the admin space: rework "with_extended_string" function, provide it as utility for plugins + /// * reorder parameters: plugin_status_key should be first as it describes the root of plugin's admin space /// * Instead of ZResult return just Vec. Check, do we really need ZResult? If yes, make it separate for each status record. /// fn adminspace_getter<'a>( diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index f5af22d0e..9373fa021 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -281,7 +281,7 @@ impl<'a> Publisher<'a> { /// pointer to it (`Arc`). This is equivalent to `Arc::new(Publisher)`. 
/// /// This is useful to share ownership of the `Publisher` between several threads - /// and tasks. It also alows to create [`MatchingListener`] with static + /// and tasks. It also allows to create [`MatchingListener`] with static /// lifetime that can be moved to several threads and tasks. /// /// Note: the given zenoh `Publisher` will be undeclared when the last reference to diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 4bef4bca1..ae9cd7846 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -55,7 +55,7 @@ pub(crate) struct DataInfo { pub qos: QoS, } -/// Informations on the source of a zenoh [`Sample`]. +/// Information on the source of a zenoh [`Sample`]. #[zenoh_macros::unstable] #[derive(Debug, Clone)] pub struct SourceInfo { @@ -565,7 +565,7 @@ impl QoS { self } - /// Sets express flag vlaue. + /// Sets express flag value. pub fn with_express(mut self, is_express: bool) -> Self { self.inner.set_is_express(is_express); self diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 2a9a38c02..a5f761a32 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -161,7 +161,7 @@ impl<'a> Selector<'a> { selector.push('&') } use std::fmt::Write; - write!(selector, "{TIME_RANGE_KEY}={time_range}").unwrap(); // This unwrap is safe because `String: Write` should be infallibe. + write!(selector, "{TIME_RANGE_KEY}={time_range}").unwrap(); // This unwrap is safe because `String: Write` should be infallible. } pub fn remove_time_range(&mut self) { @@ -328,7 +328,7 @@ pub trait Parameters<'a> { where ::Item: Parameter; - /// Extracts all parameters into a HashMap, returning an error if duplicate parameters arrise. + /// Extracts all parameters into a HashMap, returning an error if duplicate parameters arise. 
fn decode_into_map(&'a self) -> ZResult> where ::Item: Parameter, diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 368cded24..0763018c7 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -435,7 +435,7 @@ impl Session { /// pointer to it (`Arc`). This is equivalent to `Arc::new(session)`. /// /// This is useful to share ownership of the `Session` between several threads - /// and tasks. It also alows to create [`Subscriber`](Subscriber) and + /// and tasks. It also allows to create [`Subscriber`](Subscriber) and /// [`Queryable`](Queryable) with static lifetime that can be moved to several /// threads and tasks /// @@ -552,7 +552,7 @@ impl Session { /// The returned configuration [`Notifier`](Notifier) can be used to read the current /// zenoh configuration through the `get` function or /// modify the zenoh configuration through the `insert`, - /// or `insert_json5` funtion. + /// or `insert_json5` function. /// /// # Examples /// ### Read current zenoh configuration @@ -1686,7 +1686,7 @@ impl Session { } } Err(err) => { - tracing::error!("Received Data for unkown key_expr: {}", err); + tracing::error!("Received Data for unknown key_expr: {}", err); return; } } @@ -1920,7 +1920,7 @@ impl Session { ) } Err(err) => { - error!("Received Query for unkown key_expr: {}", err); + error!("Received Query for unknown key_expr: {}", err); return; } } @@ -2167,7 +2167,7 @@ impl Primitives for Session { } Err(err) => { tracing::error!( - "Received DeclareSubscriber for unkown wire_expr: {}", + "Received DeclareSubscriber for unknown wire_expr: {}", err ) } @@ -2204,7 +2204,7 @@ impl Primitives for Session { } Err(err) => { tracing::error!( - "Received Forget Subscriber for unkown key_expr: {}", + "Received Forget Subscriber for unknown key_expr: {}", err ) } @@ -2327,7 +2327,7 @@ impl Primitives for Session { callback(new_reply); } None => { - tracing::warn!("Received ReplyData for unkown Query: {}", msg.rid); + tracing::warn!("Received ReplyData for 
unknown Query: {}", msg.rid); } } } @@ -2336,7 +2336,7 @@ impl Primitives for Session { let key_expr = match state.remote_key_to_expr(&msg.wire_expr) { Ok(key) => key.into_owned(), Err(e) => { - error!("Received ReplyData for unkown key_expr: {}", e); + error!("Received ReplyData for unknown key_expr: {}", e); return; } }; @@ -2488,7 +2488,7 @@ impl Primitives for Session { } } None => { - tracing::warn!("Received ReplyData for unkown Query: {}", msg.rid); + tracing::warn!("Received ReplyData for unknown Query: {}", msg.rid); } } } @@ -2513,7 +2513,7 @@ impl Primitives for Session { } } None => { - warn!("Received ResponseFinal for unkown Request: {}", msg.rid); + warn!("Received ResponseFinal for unknown Request: {}", msg.rid); } } } @@ -2680,7 +2680,7 @@ pub trait SessionDeclarations<'s, 'a> { /// ``` #[zenoh_macros::unstable] fn liveliness(&'s self) -> Liveliness<'a>; - /// Get informations about the zenoh [`Session`](Session). + /// Get information about the zenoh [`Session`](Session). /// /// # Examples /// ``` diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index dc53120ff..c4ecd6cbd 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -81,7 +81,7 @@ pub(crate) struct SubscriberInner<'a> { /// A [`PullMode`] subscriber that provides data through a callback. /// -/// CallbackPullSubscribers only provide data when explicitely pulled by the +/// CallbackPullSubscribers only provide data when explicitly pulled by the /// application with the [`pull`](CallbackPullSubscriber::pull) function. /// CallbackPullSubscribers can be created from a zenoh [`Session`](crate::Session) /// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function, @@ -671,7 +671,7 @@ pub struct Subscriber<'a, Receiver> { /// A [`PullMode`] subscriber that provides data through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). 
/// -/// PullSubscribers only provide data when explicitely pulled by the +/// PullSubscribers only provide data when explicitly pulled by the /// application with the [`pull`](PullSubscriber::pull) function. /// PullSubscribers can be created from a zenoh [`Session`](crate::Session) /// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function, diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index 91614fe43..234cb5045 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -133,7 +133,7 @@ fn retry_config_const_period() { } #[test] -fn retry_config_infinit_period() { +fn retry_config_infinite_period() { let mut config = Config::default(); config .insert_json5( diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 8d83d6a10..be479756b 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -320,7 +320,7 @@ impl Recipe { // node_task_tracker.close(); // node_task_tracker.wait().await; - // Close the session once all the task assoicated with the node are done. + // Close the session once all the task associated with the node are done. Arc::try_unwrap(session) .unwrap() .close() diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 471b78380..850676d90 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -131,7 +131,7 @@ fn config_from_args(args: &Args) -> Config { if let Some(id) = &args.id { config.set_id(id.parse().unwrap()).unwrap(); } - // apply '--rest-http-port' to config only if explicitly set (overwritting config), + // apply '--rest-http-port' to config only if explicitly set (overwriting config), // or if no config file is set (to apply its default value) if args.rest_http_port.is_some() || args.config.is_none() { let value = args.rest_http_port.as_deref().unwrap_or("8000");