From 8d854724043fd199b8231596e00077ee2b4b6832 Mon Sep 17 00:00:00 2001 From: Eliza Weisman Date: Mon, 4 Sep 2023 13:24:34 -0700 Subject: [PATCH] feat(maitake): introduce a separate `maitake-sync` crate (#462) The `maitake` crate provides async synchronization primitives in a `maitake::sync` module. These APIs don't require the rest of the runtime, and would be nice to be able to use in other crates. `maitake` is not yet published to crates.io, as it contains Git-only dependencies. However, the `sync` module can be implemented entirely using published code, and therefore, a crate containing only `maitake::sync` could be published. This branch introduces a new `maitake-sync` crate, containing the synchronization primitives from `maitake::sync`. The `maitake::sync` module now re-exports `maitake-sync`, so there should be no impact for existing uses of the crate. The new `maitake-sync` crate can easily be published to crates.io, and depended on from released code. --- .github/workflows/ci.yml | 6 +- Cargo.lock | 63 ++- Cargo.toml | 1 + cordyceps/Cargo.toml | 2 +- maitake-sync/Cargo.toml | 58 ++ maitake-sync/LICENSE | 21 + maitake-sync/README.md | 173 ++++++ maitake-sync/src/lib.rs | 75 +++ maitake-sync/src/loom.rs | 496 ++++++++++++++++++ .../src/sync => maitake-sync/src}/mutex.rs | 40 +- .../sync => maitake-sync/src}/mutex/tests.rs | 2 +- .../src/sync => maitake-sync/src}/rwlock.rs | 88 ++-- .../sync => maitake-sync/src}/rwlock/owned.rs | 38 +- .../sync => maitake-sync/src}/rwlock/tests.rs | 0 .../src}/rwlock/tests/loom.rs | 0 .../src}/rwlock/tests/sequential.rs | 0 .../sync => maitake-sync/src}/semaphore.rs | 62 ++- .../src}/semaphore/tests.rs | 2 +- .../src}/semaphore/tests/alloc_tests.rs | 42 +- .../src}/semaphore/tests/loom.rs | 0 maitake-sync/src/spin.rs | 37 ++ maitake-sync/src/spin/mutex.rs | 261 +++++++++ .../sync => maitake-sync/src/spin}/once.rs | 15 +- maitake-sync/src/util.rs | 267 ++++++++++ maitake-sync/src/util/backoff.rs | 70 +++ maitake-sync/src/util/cache_pad.rs | 108 ++++ maitake-sync/src/util/fmt.rs | 88 ++++ maitake-sync/src/util/maybe_uninit.rs | 435 +++++++++++++++ .../src/util/wake_batch.rs | 2 +- .../sync => maitake-sync/src}/wait_cell.rs | 118 +---- .../src/sync => maitake-sync/src}/wait_map.rs | 26 +- .../src}/wait_map/tests.rs | 0 .../src}/wait_map/tests/alloc_tests.rs | 325 ++++++------ .../src}/wait_map/tests/loom.rs | 0 .../sync => maitake-sync/src}/wait_queue.rs | 128 +++-- .../src}/wait_queue/tests.rs | 0 .../src}/wait_queue/tests/alloc_tests.rs | 113 ++-- .../src}/wait_queue/tests/loom.rs | 0 maitake/Cargo.toml | 9 +- maitake/src/lib.rs | 45 +- maitake/src/scheduler/tests.rs | 60 ++- maitake/src/sync.rs | 93 ---- maitake/src/task/state.rs | 12 +- maitake/src/task/tests/alloc_tests.rs | 12 +- maitake/src/trace.rs | 12 - maitake/src/util.rs | 25 - util/Cargo.toml | 8 +- util/src/macros.rs | 52 -- util/src/sync.rs | 15 +- 49 files changed, 2740 insertions(+), 765 deletions(-) create mode 100644 maitake-sync/Cargo.toml create mode 100644 maitake-sync/LICENSE create mode 100644 maitake-sync/README.md create mode 100644 maitake-sync/src/lib.rs create mode 100644 maitake-sync/src/loom.rs rename {maitake/src/sync => maitake-sync/src}/mutex.rs (93%) rename {maitake/src/sync => maitake-sync/src}/mutex/tests.rs (98%) rename {maitake/src/sync => maitake-sync/src}/rwlock.rs (91%) rename {maitake/src/sync => maitake-sync/src}/rwlock/owned.rs (96%) rename {maitake/src/sync => maitake-sync/src}/rwlock/tests.rs (100%) rename {maitake/src/sync => 
maitake-sync/src}/rwlock/tests/loom.rs (100%) rename {maitake/src/sync => maitake-sync/src}/rwlock/tests/sequential.rs (100%) rename {maitake/src/sync => maitake-sync/src}/semaphore.rs (97%) rename {maitake/src/sync => maitake-sync/src}/semaphore/tests.rs (89%) rename {maitake/src/sync => maitake-sync/src}/semaphore/tests/alloc_tests.rs (69%) rename {maitake/src/sync => maitake-sync/src}/semaphore/tests/loom.rs (100%) create mode 100644 maitake-sync/src/spin.rs create mode 100644 maitake-sync/src/spin/mutex.rs rename {util/src/sync => maitake-sync/src/spin}/once.rs (97%) create mode 100644 maitake-sync/src/util.rs create mode 100644 maitake-sync/src/util/backoff.rs create mode 100644 maitake-sync/src/util/cache_pad.rs create mode 100644 maitake-sync/src/util/fmt.rs create mode 100644 maitake-sync/src/util/maybe_uninit.rs rename {maitake => maitake-sync}/src/util/wake_batch.rs (98%) rename {maitake/src/sync => maitake-sync/src}/wait_cell.rs (89%) rename {maitake/src/sync => maitake-sync/src}/wait_map.rs (98%) rename {maitake/src/sync => maitake-sync/src}/wait_map/tests.rs (100%) rename {maitake/src/sync => maitake-sync/src}/wait_map/tests/alloc_tests.rs (58%) rename {maitake/src/sync => maitake-sync/src}/wait_map/tests/loom.rs (100%) rename {maitake/src/sync => maitake-sync/src}/wait_queue.rs (95%) rename {maitake/src/sync => maitake-sync/src}/wait_queue/tests.rs (100%) rename {maitake/src/sync => maitake-sync/src}/wait_queue/tests/alloc_tests.rs (58%) rename {maitake/src/sync => maitake-sync/src}/wait_queue/tests/loom.rs (100%) delete mode 100644 maitake/src/sync.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8494c63b..2e593876 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,6 +59,8 @@ jobs: maitake: paths: - 'maitake/**/*.rs' + - 'maitake-sync/**/*.rs' + - 'maitake-sync/Cargo.toml' - 'maitake/Cargo.toml' - '.github/workflows/ci.yml' - 'justfile' @@ -275,8 +277,10 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: install nextest uses: taiki-e/install-action@nextest - - name: run loom tests + - name: run loom tests (maitake) run: just loom maitake + - name: run loom tests (maitake-sync) + run: just loom maitake-sync ### mycelium-util ### diff --git a/Cargo.lock b/Cargo.lock index 47fc4229..44b5cfde 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -109,7 +109,7 @@ dependencies = [ "parking", "polling", "slab", - "socket2", + "socket2 0.4.9", "waker-fn", "windows-sys 0.42.0", ] @@ -1075,7 +1075,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2", + "socket2 0.4.9", "tokio", "tower-service", "tracing 0.1.37", @@ -1215,9 +1215,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.139" +version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" [[package]] name = "libgit2-sys" @@ -1281,9 +1281,9 @@ dependencies = [ [[package]] name = "loom" -version = "0.5.6" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff50ecb28bb86013e935fb6683ab1f6d3a20016f123c76fd4c27470076ac30f5" +checksum = "86a17963e5073acf8d3e2637402657c6b467218f36fe10d696b3e1095ae019bf" dependencies = [ "cfg-if", "generator", @@ -1304,6 +1304,7 @@ dependencies = [ "futures", "futures-util", "loom", + "maitake-sync", "mycelium-bitfield", 
"mycelium-util", "pin-project", @@ -1316,6 +1317,24 @@ dependencies = [ "tracing-subscriber 0.3.16", ] +[[package]] +name = "maitake-sync" +version = "0.1.0" +dependencies = [ + "cordyceps", + "futures", + "futures-util", + "loom", + "mycelium-bitfield", + "pin-project", + "portable-atomic", + "proptest", + "tokio", + "tokio-test", + "tracing 0.1.37", + "tracing-subscriber 0.3.16", +] + [[package]] name = "matchers" version = "0.1.0" @@ -1474,6 +1493,7 @@ version = "0.1.0" dependencies = [ "cordyceps", "loom", + "maitake-sync", "mycelium-bitfield", "proptest", "tracing 0.1.37", @@ -1686,9 +1706,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.9" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -2112,6 +2132,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "socket2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "strsim" version = "0.10.0" @@ -2262,20 +2292,19 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.26.0" +version = "1.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" +checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" dependencies = [ - "autocfg", + "backtrace", "bytes", "libc", - "memchr", "mio", "pin-project-lite", - "socket2", + "socket2 0.5.3", "tokio-macros", "tracing 0.1.37", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2290,13 +2319,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.13", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index e43ec50f..10ef8e5d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ members = [ "hal-x86_64", "inoculate", "maitake", + "maitake-sync", "mycotest", "trace", "pci", diff --git a/cordyceps/Cargo.toml b/cordyceps/Cargo.toml index e75acf6f..93e47fa2 100644 --- a/cordyceps/Cargo.toml +++ b/cordyceps/Cargo.toml @@ -30,7 +30,7 @@ tracing-subscriber = { version = "0.3", features = ["fmt"] } pin-project = "1" [target.'cfg(loom)'.dependencies] -loom = "0.5.5" +loom = "0.7" tracing = { version = "0.1" } [package.metadata.docs.rs] diff --git a/maitake-sync/Cargo.toml b/maitake-sync/Cargo.toml new file mode 100644 index 00000000..27bd7d35 --- /dev/null +++ b/maitake-sync/Cargo.toml @@ -0,0 +1,58 @@ +[package] +name = "maitake-sync" +version = "0.1.0" +authors = [ + "Eliza Weisman ", +] +description = "No-std async synchronization primitives from Maitake" +repository = "https://github.com/hawkw/mycelium" +documentation = "https://docs.rs/maitake-sync" +homepage = "https://mycelium.elizas.website" +license = "MIT" +readme = "README.md" +keywords = ["async", "no_std", "sync", "mutex", "rwlock"] +categories = [ + "no-std", + "async", + 
"concurrency" +] +edition = "2021" +rust-version = "1.61.0" +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[features] +default = ["alloc"] +alloc = ["cordyceps/alloc"] +no-cache-pad = ["cordyceps/no-cache-pad"] +core-error = [] + +[dependencies] +mycelium-bitfield = { version = "0.1.3", path = "../bitfield" } +cordyceps = { version = "0.3.0", path = "../cordyceps" } +pin-project = "1" +portable-atomic = "1.2" +tracing = { version = "0.1", default_features = false, optional = true } + +# this is a normal dependency, rather than a dev dependency, so that +# `maitake-sync` may be used in other crates' loom tests as well (but only when +# the cfg is enabled). +[target.'cfg(loom)'.dependencies] +loom = { version = "0.7", default_features = false } + +[dev-dependencies] +futures-util = "0.3" +futures = "0.3" +tokio-test = "0.4" +tracing = { version = "0.1", default_features = false, features = ["std"] } +tracing-subscriber = { version = "0.3.11", features = ["fmt", "env-filter"] } + +[target.'cfg(not(loom))'.dev-dependencies] +proptest = "1" +tokio = { version = "1.32", features = ["rt", "macros"] } + +[target.'cfg(loom)'.dev-dependencies] +loom = { version = "0.7", features = ["futures", "checkpoint"] } + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] \ No newline at end of file diff --git a/maitake-sync/LICENSE b/maitake-sync/LICENSE new file mode 100644 index 00000000..8e911184 --- /dev/null +++ b/maitake-sync/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Eliza Weisman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/maitake-sync/README.md b/maitake-sync/README.md new file mode 100644 index 00000000..47bdf26a --- /dev/null +++ b/maitake-sync/README.md @@ -0,0 +1,173 @@ +# maitake-sync + +🎶🍄 ["Dancing mushroom"][maitake-wiki] — asynchronous synchronization +primitives from [`maitake`] + +[![crates.io][crates-badge]][crates-url] +[![Documentation][docs-badge]][docs-url] +[![Documentation (HEAD)][docs-main-badge]][docs-main-url] +[![MIT licensed][mit-badge]][mit-url] +[![Test Status][tests-badge]][tests-url] +[![Sponsor @hawkw on GitHub Sponsors][sponsor-badge]][sponsor-url] + +[crates-badge]: https://img.shields.io/crates/v/maitake-sync.svg +[crates-url]: https://crates.io/crates/maitake-sync +[docs-badge]: https://docs.rs/maitake-sync/badge.svg +[docs-url]: https://docs.rs/maitake-sync +[docs-main-badge]: https://img.shields.io/netlify/3ec00bb5-251a-4f83-ac7f-3799d95db0e6?label=docs%20%28main%20branch%29 +[docs-main-url]: https://mycelium.elizas.website/maitake-sync +[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg +[mit-url]: ../LICENSE +[tests-badge]: https://github.com/hawkw/mycelium/actions/workflows/ci.yml/badge.svg?branch=main +[tests-url]: https://github.com/hawkw/mycelium/actions/workflows/ci.yml +[sponsor-badge]: https://img.shields.io/badge/sponsor-%F0%9F%A4%8D-ff69b4 +[sponsor-url]: https://github.com/sponsors/hawkw +[maitake-wiki]: https://en.wikipedia.org/wiki/Grifola_frondosa + +## what is it? + +This library is a collection of synchronization primitives for asynchronous Rust +software based on [`core::task`] and [`core::future`], with a focus on +supporting `#![no_std]` projects. It was initially developed as part of +[`maitake`], an "async runtime construction kit" intended for use in the +[mycelium] and [mnemOS] operating systems, but it may be useful for other +projects as well. + +> **Note** +> +> This is a hobby project. I'm working on it in my spare time, for my own +> personal use. I'm very happy to share it with the broader Rust community, and +> [contributions] and [bug reports] are always welcome. However, please remember +> that I'm working on this library _for fun_, and if it stops being fun...well, +> you get the idea. +> +> Anyway, feel free to use and enjoy this crate, and to contribute back as much +> as you want to! + +[contributions]: https://github.com/hawkw/mycelium/compare +[bug reports]: https://github.com/hawkw/mycelium/issues/new + +[_Synchronization primitives_][primitives] are tools for implementing +synchronization between [tasks][`core::task`]: to control which tasks can run at +any given time, and in what order, and to coordinate tasks' access to shared +resources. Typically, this synchronization involves some form of _waiting_. In +asynchronous systems, synchronization primitives allow tasks to wait by yielding +to the runtime scheduler, so that other tasks may run while they are waiting.
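To make the "wait by yielding" behavior described above concrete, here is a minimal sketch (not taken from the patch itself) using the `Mutex` API introduced later in this diff, driven by the `futures` executor that the patch's own doc tests use:

```rust
use maitake_sync::Mutex;

async fn example() {
    let lock = Mutex::new(1);
    // `lock().await` yields to the async scheduler until the mutex is
    // available, instead of blocking the thread or spinning.
    let mut guard = lock.lock().await;
    *guard += 1;
    assert_eq!(*guard, 2);
}

fn main() {
    // Any executor works; `futures::executor::block_on` is what the doc
    // examples in this patch use.
    futures::executor::block_on(example());
}
```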
+ +## a tour of `maitake-sync` + +The following synchronization primitives are provided: + +- [`Mutex`]: a fairly queued, asynchronous [mutual exclusion lock], for + protecting shared data +- [`RwLock`]: a fairly queued, asynchronous [readers-writer lock], which + allows concurrent read access to shared data while ensuring write + access is exclusive +- [`Semaphore`]: an asynchronous [counting semaphore], for limiting the + number of tasks which may run concurrently +- [`WaitCell`], a cell that stores a *single* waiting task's [`Waker`], so + that the task can be woken by another task, +- [`WaitQueue`], a queue of waiting tasks, which are woken in first-in, + first-out order +- [`WaitMap`], a set of waiting tasks associated with keys, in which a task + can be woken by its key + +In addition, the [`util` module] contains a collection of general-purpose +utilities for implementing synchronization primitives, and the [`spin` module] +contains implementations of *non-async*, spinning-based synchronization +primitives. + +[`core::task`]: https://doc.rust-lang.org/stable/core/task/index.html +[`core::future`]: https://doc.rust-lang.org/stable/core/future/index.html +[`maitake`]: https://mycelium.elizas.website/maitake +[mycelium]: https://github.com/hawkw/mycelium +[mnemOS]: https://mnemos.dev +[primitives]: https://wiki.osdev.org/Synchronization_Primitives +[mutual exclusion lock]: https://en.wikipedia.org/wiki/Mutual_exclusion +[readers-writer lock]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock +[counting semaphore]: https://en.wikipedia.org/wiki/Semaphore_(programming) +[`Waker`]: core::task::Waker +[`Mutex`]: https://docs.rs/maitake-sync/latest/maitake_sync/struct.Mutex.html +[`RwLock`]: https://docs.rs/maitake-sync/latest/maitake_sync/struct.RwLock.html +[`Semaphore`]: https://docs.rs/maitake-sync/latest/maitake_sync/struct.Semaphore.html +[`WaitCell`]: https://docs.rs/maitake-sync/latest/maitake_sync/struct.WaitCell.html +[`WaitQueue`]: + https://docs.rs/maitake-sync/latest/maitake_sync/struct.WaitQueue.html +[`WaitMap`]: + https://docs.rs/maitake-sync/latest/maitake_sync/struct.WaitMap.html +[`util` module]: + https://docs.rs/maitake-sync/latest/maitake_sync/util/index.html +[`spin` module]: + https://docs.rs/maitake-sync/latest/maitake_sync/spin/index.html + +## usage considerations + +`maitake-sync` is intended primarily for use in bare-metal projects, such as +operating systems, operating system components, and embedded systems. These +bare-metal systems typically do not use the Rust standard library, so +`maitake-sync` supports `#![no_std]` by default, and the use of [`liballoc`] is +feature-flagged for systems where [`liballoc`] is unavailable. + +### support for atomic operations + +In general, `maitake-sync` is a platform-agnostic library. It does not interact +directly with the underlying hardware, or use platform-specific features. +However, one aspect of `maitake-sync`'s implementation may differ slightly +across different target architectures: `maitake-sync` relies on atomic +operations on integers. Sometimes, atomic operations on integers of specific widths +are needed (e.g., [`AtomicU64`]), which may not be available on all architectures. + +In order to work on architectures which lack atomic operations on 64-bit +integers, `maitake-sync` uses the [`portable-atomic`] crate by Taiki Endo. This +crate polyfills atomic operations on integers larger than the platform's +pointer width, when these are not supported in hardware.
+ +In most cases, users of `maitake-sync` don't need to be aware of `maitake-sync`'s use of +`portable-atomic`. If compiling `maitake-sync` for a target architecture that has +native support for 64-bit atomic operations (such as `x86_64` or `aarch64`), the +native atomics are used automatically. Similarly, if compiling `maitake-sync` for any +target that has atomic compare-and-swap operations on any size integer, but +lacks 64-bit atomics (i.e., 32-bit x86 targets like `i686`, or 32-bit ARM +targets with atomic operations), the `portable-atomic` polyfill is used +automatically. Finally, when compiling for target architectures which lack +atomic operations because they are *always* single-core, such as MSP430 or AVR +microcontrollers, `portable-atomic` simply uses unsynchronized operations with +interrupts temporarily disabled. + +**The only case where the user must be aware of `portable-atomic` is when +compiling for targets which lack atomic operations but are not guaranteed to +always be single-core**. This includes ARMv6-M (`thumbv6m`), pre-v6 ARM (e.g., +`thumbv4t`, `thumbv5te`), and RISC-V targets without the A extension. On these +architectures, the user must manually enable the [`RUSTFLAGS`] configuration +[`--cfg portable_atomic_unsafe_assume_single_core`][single-core] if (and **only +if**) the specific target hardware is known to be single-core. Enabling this cfg +is unsafe, as it will cause unsound behavior on multi-core systems using these +architectures. + +Additional configurations for some single-core systems, which determine the +specific sets of interrupts that `portable-atomic` will disable when entering a +critical section, are described [here][interrupt-cfgs]. + +[`AtomicU64`]: https://doc.rust-lang.org/stable/core/sync/atomic/struct.AtomicU64.html +[`portable-atomic`]: https://crates.io/crates/portable-atomic +[`RUSTFLAGS`]: https://doc.rust-lang.org/cargo/reference/config.html#buildrustflags +[single-core]: https://docs.rs/portable-atomic/latest/portable_atomic/#optional-cfg +[interrupt-cfgs]: https://github.com/taiki-e/portable-atomic/blob/HEAD/src/imp/interrupt/README.md + +## features + +The following features are available (this list is incomplete; you can help by [expanding it].) + +[expanding it]: https://github.com/hawkw/mycelium/edit/main/maitake-sync/README.md + +| Feature | Default | Explanation | +| :--- | :--- | :--- | +| `alloc` | `true` | Enables [`liballoc`] dependency | +| `no-cache-pad` | `false` | Inhibits cache padding for the [`CachePadded`] struct. When this feature is NOT enabled, the size will be determined based on target platform. | +| `tracing` | `false` | Enables support for [`tracing`] diagnostics. Requires `liballoc`. | +| `core-error` | `false` | Enables implementations of the [`core::error::Error` trait][core-error] for `maitake-sync`'s error types. *Requires a nightly Rust toolchain*.
| + +[`liballoc`]: https://doc.rust-lang.org/alloc/ +[`CachePadded`]: https://docs.rs/maitake-sync/latest/maitake_sync/util/struct.CachePadded.html +[`tracing`]: https://crates.io/crates/tracing +[core-error]: https://doc.rust-lang.org/stable/core/error/index.html \ No newline at end of file diff --git a/maitake-sync/src/lib.rs b/maitake-sync/src/lib.rs new file mode 100644 index 00000000..5eb6fea8 --- /dev/null +++ b/maitake-sync/src/lib.rs @@ -0,0 +1,75 @@ +#![doc = include_str!("../README.md")] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg, doc_cfg_hide))] +#![cfg_attr(docsrs, doc(cfg_hide(docsrs, loom)))] +#![cfg_attr(not(test), no_std)] +#![cfg_attr(feature = "core-error", feature(error_in_core))] +#![warn(missing_docs, missing_debug_implementations)] + +#[cfg(feature = "alloc")] +extern crate alloc; + +pub(crate) mod loom; + +#[macro_use] +pub mod util; + +pub mod mutex; +pub mod rwlock; +pub mod semaphore; +pub mod spin; +pub mod wait_cell; +pub mod wait_map; +pub mod wait_queue; + +#[cfg(feature = "alloc")] +#[doc(inline)] +pub use self::mutex::OwnedMutexGuard; +#[doc(inline)] +pub use self::mutex::{Mutex, MutexGuard}; +#[cfg(feature = "alloc")] +#[doc(inline)] +pub use self::rwlock::{OwnedRwLockReadGuard, OwnedRwLockWriteGuard}; +#[doc(inline)] +pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +#[doc(inline)] +pub use self::semaphore::Semaphore; +#[doc(inline)] +pub use self::wait_cell::WaitCell; +#[doc(inline)] +pub use self::wait_map::WaitMap; +#[doc(inline)] +pub use self::wait_queue::WaitQueue; + +use core::task::Poll; + +/// An error indicating that a [`WaitCell`], [`WaitQueue`] or [`Semaphore`] was +/// closed while attempting to register a waiting task. +/// +/// This error is returned by the [`WaitCell::wait`], [`WaitQueue::wait`] and +/// [`Semaphore::acquire`] methods. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct Closed(()); + +/// The result of waiting on a [`WaitQueue`] or [`Semaphore`]. +pub type WaitResult = Result; + +pub(crate) const fn closed() -> Poll> { + Poll::Ready(Err(Closed::new())) +} + +impl Closed { + pub(crate) const fn new() -> Self { + Self(()) + } +} + +impl core::fmt::Display for Closed { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.pad("closed") + } +} + +feature! { + #![feature = "core-error"] + impl core::error::Error for Closed {} +} diff --git a/maitake-sync/src/loom.rs b/maitake-sync/src/loom.rs new file mode 100644 index 00000000..86263e46 --- /dev/null +++ b/maitake-sync/src/loom.rs @@ -0,0 +1,496 @@ +#[allow(unused_imports)] +pub(crate) use self::inner::*; + +#[cfg(loom)] +mod inner { + #![allow(dead_code)] + #![allow(unused_imports)] + + #[cfg(feature = "alloc")] + pub(crate) mod alloc { + use super::sync::Arc; + use core::{ + future::Future, + pin::Pin, + task::{Context, Poll}, + }; + pub(crate) use loom::alloc::*; + + #[derive(Debug)] + #[pin_project::pin_project] + pub(crate) struct TrackFuture { + #[pin] + inner: F, + track: Arc<()>, + } + + impl Future for TrackFuture { + type Output = TrackFuture; + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + this.inner.poll(cx).map(|inner| TrackFuture { + inner, + track: this.track.clone(), + }) + } + } + + impl TrackFuture { + /// Wrap a `Future` in a `TrackFuture` that participates in Loom's + /// leak checking. + #[track_caller] + pub(crate) fn new(inner: F) -> Self { + Self { + inner, + track: Arc::new(()), + } + } + + /// Stop tracking this future, and return the inner value. 
+ pub(crate) fn into_inner(self) -> F { + self.inner + } + } + + #[track_caller] + pub(crate) fn track_future(inner: F) -> TrackFuture { + TrackFuture::new(inner) + } + + // PartialEq impl so that `assert_eq!(..., Ok(...))` works + impl PartialEq for TrackFuture { + fn eq(&self, other: &Self) -> bool { + self.inner == other.inner + } + } + } + + #[cfg(test)] + pub(crate) use loom::future; + pub(crate) use loom::{cell, hint, model, thread}; + + pub(crate) mod sync { + pub(crate) use loom::sync::*; + + pub(crate) mod spin { + pub(crate) use loom::sync::MutexGuard; + + /// Mock version of mycelium's spinlock, but using + /// `loom::sync::Mutex`. The API is slightly different, since the + /// mycelium mutex does not support poisoning. + #[derive(Debug)] + pub(crate) struct Mutex(loom::sync::Mutex); + + impl Mutex { + #[track_caller] + pub(crate) fn new(t: T) -> Self { + Self(loom::sync::Mutex::new(t)) + } + + #[track_caller] + pub fn try_lock(&self) -> Option> { + self.0.try_lock().ok() + } + + #[track_caller] + pub fn lock(&self) -> MutexGuard<'_, T> { + self.0.lock().expect("loom mutex will never poison") + } + } + } + } +} + +#[cfg(not(loom))] +mod inner { + #![allow(dead_code, unused_imports)] + pub(crate) mod sync { + #[cfg(feature = "alloc")] + pub use alloc::sync::*; + pub use core::sync::*; + + pub use crate::spin; + } + + pub(crate) mod atomic { + pub use portable_atomic::*; + } + + pub(crate) use portable_atomic::hint; + + #[cfg(test)] + pub(crate) mod thread { + pub(crate) use std::thread::{yield_now, JoinHandle}; + pub(crate) fn spawn(f: F) -> JoinHandle + where + F: FnOnce() -> T + Send + 'static, + T: Send + 'static, + { + let track = super::alloc::track::Registry::current(); + std::thread::spawn(move || { + let _tracking = track.map(|track| track.set_default()); + f() + }) + } + } + + #[cfg(test)] + pub(crate) mod model { + #[non_exhaustive] + #[derive(Default)] + pub(crate) struct Builder { + pub(crate) max_threads: usize, + pub(crate) max_branches: usize, + pub(crate) max_permutations: Option, + // pub(crate) max_duration: Option, + pub(crate) preemption_bound: Option, + // pub(crate) checkpoint_file: Option, + pub(crate) checkpoint_interval: usize, + pub(crate) location: bool, + pub(crate) log: bool, + } + + impl Builder { + pub(crate) fn new() -> Self { + Self::default() + } + + pub(crate) fn check(&self, f: impl FnOnce()) { + let registry = super::alloc::track::Registry::default(); + let _tracking = registry.set_default(); + f(); + registry.check(); + } + } + } + + #[cfg(test)] + pub(crate) fn model(f: impl FnOnce()) { + let _trace = crate::util::test::trace_init(); + model::Builder::new().check(f) + } + + pub(crate) mod cell { + #[derive(Debug)] + pub(crate) struct UnsafeCell(core::cell::UnsafeCell); + + impl UnsafeCell { + pub const fn new(data: T) -> UnsafeCell { + UnsafeCell(core::cell::UnsafeCell::new(data)) + } + } + + impl UnsafeCell { + #[inline(always)] + pub fn with(&self, f: F) -> R + where + F: FnOnce(*const T) -> R, + { + f(self.0.get()) + } + + #[inline(always)] + pub fn with_mut(&self, f: F) -> R + where + F: FnOnce(*mut T) -> R, + { + f(self.0.get()) + } + + #[inline(always)] + pub(crate) fn get(&self) -> ConstPtr { + ConstPtr(self.0.get()) + } + + #[inline(always)] + pub(crate) fn get_mut(&self) -> MutPtr { + MutPtr(self.0.get()) + } + } + + #[derive(Debug)] + pub(crate) struct ConstPtr(*const T); + + impl ConstPtr { + #[inline(always)] + pub(crate) unsafe fn deref(&self) -> &T { + &*self.0 + } + + #[inline(always)] + pub fn with(&self, f: F) -> R + where + 
F: FnOnce(*const T) -> R, + { + f(self.0) + } + } + + #[derive(Debug)] + pub(crate) struct MutPtr(*mut T); + + impl MutPtr { + // Clippy knows that it's Bad and Wrong to construct a mutable reference + // from an immutable one...but this function is intended to simulate a raw + // pointer, so we have to do that here. + #[allow(clippy::mut_from_ref)] + #[inline(always)] + pub(crate) unsafe fn deref(&self) -> &mut T { + &mut *self.0 + } + + #[inline(always)] + pub fn with(&self, f: F) -> R + where + F: FnOnce(*mut T) -> R, + { + f(self.0) + } + } + } + + pub(crate) mod alloc { + #[cfg(test)] + use core::{ + future::Future, + pin::Pin, + task::{Context, Poll}, + }; + + #[cfg(test)] + use std::sync::Arc; + #[cfg(test)] + pub(in crate::loom) mod track { + use std::{ + cell::RefCell, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Mutex, Weak, + }, + }; + + #[derive(Clone, Debug, Default)] + pub(crate) struct Registry(Arc>); + + #[derive(Debug, Default)] + struct RegistryInner { + tracks: Vec>, + next_id: usize, + } + + #[derive(Debug)] + pub(super) struct TrackData { + was_leaked: AtomicBool, + type_name: &'static str, + location: &'static core::panic::Location<'static>, + id: usize, + } + + thread_local! { + static REGISTRY: RefCell> = RefCell::new(None); + } + + impl Registry { + pub(in crate::loom) fn current() -> Option { + REGISTRY.with(|current| current.borrow().clone()) + } + + pub(in crate::loom) fn set_default(&self) -> impl Drop { + struct Unset(Option); + impl Drop for Unset { + fn drop(&mut self) { + let _ = + REGISTRY.try_with(|current| *current.borrow_mut() = self.0.take()); + } + } + + REGISTRY.with(|current| { + let mut current = current.borrow_mut(); + let unset = Unset(current.clone()); + *current = Some(self.clone()); + unset + }) + } + + #[track_caller] + pub(super) fn start_tracking() -> Option> { + // we don't use `Option::map` here because it creates a + // closure, which breaks `#[track_caller]`, since the caller + // of `insert` becomes the closure, which cannot have a + // `#[track_caller]` attribute on it. 
+ #[allow(clippy::manual_map)] + match Self::current() { + Some(registry) => Some(registry.insert::()), + _ => None, + } + } + + #[track_caller] + pub(super) fn insert(&self) -> Arc { + let mut inner = self.0.lock().unwrap(); + let id = inner.next_id; + inner.next_id += 1; + let location = core::panic::Location::caller(); + let type_name = std::any::type_name::(); + let data = Arc::new(TrackData { + type_name, + location, + id, + was_leaked: AtomicBool::new(false), + }); + let weak = Arc::downgrade(&data); + tracing::trace!( + target: "maitake_sync::alloc", + id, + "type" = %type_name, + %location, + "started tracking allocation", + ); + inner.tracks.push(weak); + data + } + + pub(in crate::loom) fn check(&self) { + let leaked = self + .0 + .lock() + .unwrap() + .tracks + .iter() + .filter_map(|weak| { + let data = weak.upgrade()?; + data.was_leaked.store(true, Ordering::SeqCst); + Some(format!( + " - id {}, {} allocated at {}", + data.id, data.type_name, data.location + )) + }) + .collect::>(); + if !leaked.is_empty() { + let leaked = leaked.join("\n "); + panic!("the following allocations were leaked:\n {leaked}"); + } + } + } + + impl Drop for TrackData { + fn drop(&mut self) { + if !self.was_leaked.load(Ordering::SeqCst) { + tracing::trace!( + target: "maitake_sync::alloc", + id = self.id, + "type" = %self.type_name, + location = %self.location, + "dropped all references to a tracked allocation", + ); + } + } + } + } + + #[cfg(test)] + #[derive(Debug)] + #[pin_project::pin_project] + pub(crate) struct TrackFuture { + #[pin] + inner: F, + track: Option>, + } + + #[cfg(test)] + impl Future for TrackFuture { + type Output = TrackFuture; + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + this.inner.poll(cx).map(|inner| TrackFuture { + inner, + track: this.track.clone(), + }) + } + } + + #[cfg(test)] + impl TrackFuture { + /// Wrap a `Future` in a `TrackFuture` that participates in Loom's + /// leak checking. + #[track_caller] + pub(crate) fn new(inner: F) -> Self { + let track = track::Registry::start_tracking::(); + Self { inner, track } + } + + /// Stop tracking this future, and return the inner value. 
+ pub(crate) fn into_inner(self) -> F { + self.inner + } + } + + #[cfg(test)] + #[track_caller] + pub(crate) fn track_future(inner: F) -> TrackFuture { + TrackFuture::new(inner) + } + + // PartialEq impl so that `assert_eq!(..., Ok(...))` works + #[cfg(test)] + impl PartialEq for TrackFuture { + fn eq(&self, other: &Self) -> bool { + self.inner == other.inner + } + } + + /// Track allocations, detecting leaks + #[derive(Debug, Default)] + pub struct Track { + value: T, + + #[cfg(test)] + track: Option>, + } + + impl Track { + /// Track a value for leaks + #[inline(always)] + #[track_caller] + pub fn new(value: T) -> Track { + Track { + value, + + #[cfg(test)] + track: track::Registry::start_tracking::(), + } + } + + /// Get a reference to the value + #[inline(always)] + pub fn get_ref(&self) -> &T { + &self.value + } + + /// Get a mutable reference to the value + #[inline(always)] + pub fn get_mut(&mut self) -> &mut T { + &mut self.value + } + + /// Stop tracking the value for leaks + #[inline(always)] + pub fn into_inner(self) -> T { + self.value + } + } + } + + #[cfg(test)] + pub(crate) mod future { + pub(crate) use tokio_test::block_on; + } + + #[cfg(test)] + pub(crate) fn traceln(args: std::fmt::Arguments) { + eprintln!("{args}"); + } + + #[cfg(not(test))] + pub(crate) fn traceln(_: core::fmt::Arguments) {} +} diff --git a/maitake/src/sync/mutex.rs b/maitake-sync/src/mutex.rs similarity index 93% rename from maitake/src/sync/mutex.rs rename to maitake-sync/src/mutex.rs index e504744c..aca67123 100644 --- a/maitake/src/sync/mutex.rs +++ b/maitake-sync/src/mutex.rs @@ -5,7 +5,8 @@ //! [mutual exclusion lock]: https://en.wikipedia.org/wiki/Mutual_exclusion use crate::{ loom::cell::{MutPtr, UnsafeCell}, - sync::wait_queue::{self, WaitQueue}, + util::fmt, + wait_queue::{self, WaitQueue}, }; use core::{ future::Future, @@ -13,7 +14,6 @@ use core::{ pin::Pin, task::{Context, Poll}, }; -use mycelium_util::{fmt, unreachable_unchecked}; use pin_project::pin_project; #[cfg(test)] @@ -31,8 +31,8 @@ mod tests; /// [`lock`] method will wait by causing the current [task] to yield until the /// shared data is available. This is in contrast to *blocking* mutices, such as /// [`std::sync::Mutex`], which wait by blocking the current thread[^1], or -/// *spinlock* based mutices, such as [`mycelium_util::sync::spin::Mutex`], -/// which wait by spinning in a busy loop. +/// *spinlock* based mutices, such as [`spin::Mutex`], which wait by spinning +/// in a busy loop. /// /// The [`futures-util`] crate also provides an implementation of an asynchronous /// mutex, [`futures_util::lock::Mutex`]. However, this mutex requires the Rust @@ -50,18 +50,18 @@ mod tests; /// will not acquire the lock until every other task ahead of it in the queue /// has had a chance to lock the shared data. Again, this is in contrast to /// [`std::sync::Mutex`], where fairness depends on the underlying OS' locking -/// primitives; and [`mycelium_util::sync::spin::Mutex`] and -/// [`futures_util::lock::Mutex`], which will never guarantee fairness. +/// primitives; and [`spin::Mutex`] and [`futures_util::lock::Mutex`], which +/// will never guarantee fairness. /// /// Finally, this mutex does not implement [poisoning][^3], unlike /// [`std::sync::Mutex`]. /// /// [^1]: And therefore require an operating system to manage threading. 
/// -/// [^2]: The [tasks][crate::task::Task] themselves must, of course, be stored +/// [^2]: The [tasks](core::task) themselves must, of course, be stored /// somewhere, but this need not be a heap allocation in systems with a -/// fixed set of statically-allocated tasks. And, when tasks *are* -/// heap-allocated, these allocations [need not be provided by +/// fixed set of statically-allocated tasks. And, when tasks *are* +/// heap-allocated, these allocations [need not be provided by /// `liballoc`][storage]. /// /// [^3]: In fact, this mutex _cannot_ implement poisoning, as poisoning @@ -72,17 +72,17 @@ mod tests; /// [RAII guards]: MutexGuard /// [`lock`]: Self::lock /// [`try_lock`]: Self::try_lock -/// [task]: crate::task +/// [task]: core::task /// [fairly queued]: https://en.wikipedia.org/wiki/Unbounded_nondeterminism#Fairness /// [`std::sync::Mutex`]: https://doc.rust-lang.org/stable/std/sync/struct.Mutex.html -/// [`mycelium_util::sync::spin::Mutex`]: https://mycelium.elizas.website/mycelium_util/sync/spin/struct.mutex +/// [`spin::Mutex`]: crate::spin::Mutex /// [`futures-util`]: https://crates.io/crate/futures-util /// [`futures_util::lock::Mutex`]: https://docs.rs/futures-util/latest/futures_util/lock/struct.Mutex.html -/// [intrusive linked list]: crate::sync::WaitQueue#implementation-notes +/// [intrusive linked list]: crate::WaitQueue#implementation-notes /// [poisoning]: https://doc.rust-lang.org/stable/std/sync/struct.Mutex.html#poisoning // for some reason, intra-doc links don't work in footnotes? -/// [storage]: ../task/trait.Storage.html -/// [no-unwinding]: ../index.html#maitake-does-not-support-unwinding +/// [storage]: https://mycelium.elizas.website/maitake/task/trait.Storage.html +/// [no-unwinding]: https://mycelium.elizas.website/maitake/index.html#maitake-does-not-support-unwinding pub struct Mutex { wait: WaitQueue, @@ -141,14 +141,14 @@ impl Mutex { /// # Examples /// /// ``` - /// use maitake::sync::Mutex; + /// use maitake_sync::Mutex; /// /// let lock = Mutex::new(42); /// ``` /// /// As this is a `const fn`, it may be used in a `static` initializer: /// ``` - /// use maitake::sync::Mutex; + /// use maitake_sync::Mutex; /// /// static GLOBAL_LOCK: Mutex = Mutex::new(42); /// ``` @@ -176,7 +176,7 @@ impl Mutex { /// # Examples /// /// ``` - /// use maitake::sync::Mutex; + /// use maitake_sync::Mutex; /// /// async fn example() { /// let mutex = Mutex::new(1); @@ -204,7 +204,7 @@ impl Mutex { /// # Examples /// /// ``` - /// use maitake::sync::Mutex; + /// use maitake_sync::Mutex; /// # async fn dox() -> Option<()> { /// /// let mutex = Mutex::new(1); @@ -374,7 +374,7 @@ feature! { /// # // in these examples, rather than `std`...but i don't want to make /// # // the tests actually `#![no_std]`... /// # use std as alloc; - /// use maitake::sync::Mutex; + /// use maitake_sync::Mutex; /// use alloc::sync::Arc; /// /// # fn main() { @@ -422,7 +422,7 @@ feature! { /// # // in these examples, rather than `std`...but i don't want to make /// # // the tests actually `#![no_std]`... 
/// # use std as alloc; - /// use maitake::sync::Mutex; + /// use maitake_sync::Mutex; /// use alloc::sync::Arc; /// /// # fn main() { diff --git a/maitake/src/sync/mutex/tests.rs b/maitake-sync/src/mutex/tests.rs similarity index 98% rename from maitake/src/sync/mutex/tests.rs rename to maitake-sync/src/mutex/tests.rs index 12e2fb5c..813b7f65 100644 --- a/maitake/src/sync/mutex/tests.rs +++ b/maitake-sync/src/mutex/tests.rs @@ -1,5 +1,5 @@ use crate::loom::{self, future}; -use crate::sync::Mutex; +use crate::Mutex; #[test] fn basic_single_threaded() { diff --git a/maitake/src/sync/rwlock.rs b/maitake-sync/src/rwlock.rs similarity index 91% rename from maitake/src/sync/rwlock.rs rename to maitake-sync/src/rwlock.rs index f95931a3..88d9e573 100644 --- a/maitake/src/sync/rwlock.rs +++ b/maitake-sync/src/rwlock.rs @@ -4,9 +4,11 @@ //! //! [readers-writer lock]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock use super::semaphore::{self, Semaphore}; -use crate::loom::cell::{self, UnsafeCell}; +use crate::{ + loom::cell::{self, UnsafeCell}, + util::fmt, +}; use core::ops::{Deref, DerefMut}; -use mycelium_util::fmt; #[cfg(test)] mod tests; @@ -58,34 +60,30 @@ mod tests; /// # Examples /// /// ``` -/// use maitake::sync::RwLock; +/// use maitake_sync::RwLock; /// -/// async fn example() { -/// let lock = RwLock::new(5); +/// # async fn example() { +/// let lock = RwLock::new(5); /// -/// // many reader locks can be held at once -/// { -/// let r1 = lock.read().await; -/// let r2 = lock.read().await; -/// assert_eq!(*r1, 5); -/// assert_eq!(*r2, 5); -/// } // read locks are dropped at this point +/// // many reader locks can be held at once +/// { +/// let r1 = lock.read().await; +/// let r2 = lock.read().await; +/// assert_eq!(*r1, 5); +/// assert_eq!(*r2, 5); +/// } // read locks are dropped at this point /// -/// // only one write lock may be held, however -/// { -/// let mut w = lock.write().await; -/// *w += 1; -/// assert_eq!(*w, 6); -/// } // write lock is dropped here -/// } -/// -/// # use maitake::scheduler::Scheduler; -/// # let scheduler = std::sync::Arc::new(Scheduler::new()); -/// # scheduler.spawn(example()); -/// # scheduler.tick(); +/// // only one write lock may be held, however +/// { +/// let mut w = lock.write().await; +/// *w += 1; +/// assert_eq!(*w, 6); +/// } // write lock is dropped here +/// # } +/// # futures::executor::block_on(example()); /// ``` /// -/// [`Mutex`]: crate::sync::Mutex +/// [`Mutex`]: crate::Mutex /// [`read`]: Self::read /// [`write`]: Self::write /// [readers-writer lock]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock @@ -172,7 +170,7 @@ impl RwLock { /// # Examples /// /// ``` - /// use maitake::sync::RwLock; + /// use maitake_sync::RwLock; /// /// let lock = RwLock::new(5); /// # drop(lock) @@ -182,7 +180,7 @@ impl RwLock { /// initializers: /// /// ``` - /// use maitake::sync::RwLock; + /// use maitake_sync::RwLock; /// /// static LOCK: RwLock = RwLock::new(5); /// ``` @@ -224,25 +222,26 @@ impl RwLock { /// # Examples /// /// ``` - /// # fn main() { + /// # #[tokio::main(flavor="current_thread")] + /// # async fn test() { /// # // since we are targeting no-std, it makes more sense to use `alloc` /// # // in these examples, rather than `std`...but i don't want to make /// # // the tests actually `#![no_std]`... 
/// # use std as alloc; - /// use maitake::scheduler::Scheduler; - /// use maitake::sync::RwLock; + /// # use tokio::task; + /// use maitake_sync::RwLock; /// use alloc::sync::Arc; /// - /// let scheduler = Arc::new(Scheduler::new()); - /// /// let lock = Arc::new(RwLock::new(1)); + /// /// // hold the lock for reading in `main`. /// let n = lock /// .try_read() /// .expect("read lock must be acquired, as the lock is unlocked"); /// assert_eq!(*n, 1); /// - /// scheduler.spawn({ + /// # let task2 = + /// task::spawn({ /// let lock = lock.clone(); /// async move { /// // While main has an active read lock, this task can acquire @@ -252,8 +251,9 @@ impl RwLock { /// } /// }); /// - /// scheduler.tick(); + /// # task2.await.unwrap(); /// # } + /// # test(); /// ``` /// /// [priority policy]: Self#priority-policy @@ -289,26 +289,26 @@ impl RwLock { /// # Examples /// /// ``` - /// # fn main() { + /// # #[tokio::main(flavor="current_thread")] + /// # async fn test() { /// # // since we are targeting no-std, it makes more sense to use `alloc` /// # // in these examples, rather than `std`...but i don't want to make /// # // the tests actually `#![no_std]`... /// # use std as alloc; - /// use maitake::scheduler::Scheduler; - /// use maitake::sync::RwLock; + /// # use tokio::task; + /// use maitake_sync::RwLock; /// use alloc::sync::Arc; /// - /// let scheduler = Arc::new(Scheduler::new()); - /// /// let lock = Arc::new(RwLock::new(1)); /// - /// scheduler.spawn(async move { + /// # let task = + /// task::spawn(async move { /// let mut guard = lock.write().await; /// *guard += 1; /// }); - /// - /// scheduler.tick(); + /// # task.await.unwrap() /// # } + /// # test(); /// ``` pub async fn write(&self) -> RwLockWriteGuard<'_, T> { let _permit = self @@ -334,7 +334,7 @@ impl RwLock { /// # Examples /// /// ``` - /// use maitake::sync::RwLock; + /// use maitake_sync::RwLock; /// /// let lock = RwLock::new(1); /// @@ -374,7 +374,7 @@ impl RwLock { /// # Examples /// /// ``` - /// use maitake::sync::RwLock; + /// use maitake_sync::RwLock; /// /// let lock = RwLock::new(1); /// diff --git a/maitake/src/sync/rwlock/owned.rs b/maitake-sync/src/rwlock/owned.rs similarity index 96% rename from maitake/src/sync/rwlock/owned.rs rename to maitake-sync/src/rwlock/owned.rs index 0e67d929..7400cfb4 100644 --- a/maitake/src/sync/rwlock/owned.rs +++ b/maitake-sync/src/rwlock/owned.rs @@ -1,5 +1,5 @@ use super::*; -use crate::sync::Semaphore; +use crate::Semaphore; use alloc::sync::Arc; /// Owned [RAII] structure used to release the shared read access of a @@ -127,17 +127,16 @@ impl RwLock { /// # Examples /// /// ``` - /// # fn main() { + /// # use tokio::task; + /// # #[tokio::main(flavor = "current_thread")] + /// # async fn test() { /// # // since we are targeting no-std, it makes more sense to use `alloc` /// # // in these examples, rather than `std`...but i don't want to make /// # // the tests actually `#![no_std]`... /// # use std as alloc; - /// use maitake::scheduler::Scheduler; - /// use maitake::sync::RwLock; + /// use maitake_sync::RwLock; /// use alloc::sync::Arc; /// - /// let scheduler = Arc::new(Scheduler::new()); - /// /// let lock = Arc::new(RwLock::new(1)); /// // hold the lock for reading in `main`. 
/// let n = lock @@ -145,7 +144,8 @@ impl RwLock { /// .expect("read lock must be acquired, as the lock is unlocked"); /// assert_eq!(*n, 1); /// - /// scheduler.spawn({ + /// # let task = + /// task::spawn({ /// let lock = lock.clone(); /// async move { /// // While main has an active read lock, this task can acquire @@ -154,9 +154,9 @@ impl RwLock { /// assert_eq!(*n, 1); /// } /// }); - /// - /// scheduler.tick(); + /// # task.await.unwrap(); /// # } + /// # test(); /// ``` /// /// [priority policy]: Self#priority-policy @@ -193,26 +193,26 @@ impl RwLock { /// # Examples /// /// ``` - /// # fn main() { + /// # use tokio::task; + /// # #[tokio::main(flavor = "current_thread")] + /// # async fn test() { /// # // since we are targeting no-std, it makes more sense to use `alloc` /// # // in these examples, rather than `std`...but i don't want to make /// # // the tests actually `#![no_std]`... /// # use std as alloc; - /// use maitake::scheduler::Scheduler; - /// use maitake::sync::RwLock; + /// use maitake_sync::RwLock; /// use alloc::sync::Arc; /// - /// let scheduler = Arc::new(Scheduler::new()); - /// /// let lock = Arc::new(RwLock::new(1)); /// - /// scheduler.spawn(async move { + /// # let task = + /// task::spawn(async move { /// let mut guard = lock.write_owned().await; /// *guard += 1; /// }); - /// - /// scheduler.tick(); + /// # task.await.unwrap(); /// # } + /// # test(); /// ``` /// /// [guard]: OwnedRwLockWriteGuard @@ -246,7 +246,7 @@ impl RwLock { /// # // in these examples, rather than `std`...but i don't want to make /// # // the tests actually `#![no_std]`... /// # use std as alloc; - /// use maitake::sync::RwLock; + /// use maitake_sync::RwLock; /// use alloc::sync::Arc; /// /// let lock = Arc::new(RwLock::new(1)); @@ -293,7 +293,7 @@ impl RwLock { /// # // in these examples, rather than `std`...but i don't want to make /// # // the tests actually `#![no_std]`... 
/// # use std as alloc; - /// use maitake::sync::RwLock; + /// use maitake_sync::RwLock; /// use alloc::sync::Arc; /// /// let lock = Arc::new(RwLock::new(1)); diff --git a/maitake/src/sync/rwlock/tests.rs b/maitake-sync/src/rwlock/tests.rs similarity index 100% rename from maitake/src/sync/rwlock/tests.rs rename to maitake-sync/src/rwlock/tests.rs diff --git a/maitake/src/sync/rwlock/tests/loom.rs b/maitake-sync/src/rwlock/tests/loom.rs similarity index 100% rename from maitake/src/sync/rwlock/tests/loom.rs rename to maitake-sync/src/rwlock/tests/loom.rs diff --git a/maitake/src/sync/rwlock/tests/sequential.rs b/maitake-sync/src/rwlock/tests/sequential.rs similarity index 100% rename from maitake/src/sync/rwlock/tests/sequential.rs rename to maitake-sync/src/rwlock/tests/sequential.rs diff --git a/maitake/src/sync/semaphore.rs b/maitake-sync/src/semaphore.rs similarity index 97% rename from maitake/src/sync/semaphore.rs rename to maitake-sync/src/semaphore.rs index 88d80a82..ca5a2d09 100644 --- a/maitake/src/sync/semaphore.rs +++ b/maitake-sync/src/semaphore.rs @@ -12,8 +12,8 @@ use crate::{ spin::{Mutex, MutexGuard}, }, }, - sync::{self, WaitResult}, - util::WakeBatch, + util::{fmt, CachePadded, WakeBatch}, + WaitResult, }; use cordyceps::{ list::{self, List}, @@ -27,7 +27,6 @@ use core::{ ptr::{self, NonNull}, task::{Context, Poll, Waker}, }; -use mycelium_util::{fmt, sync::CachePadded}; use pin_project::{pin_project, pinned_drop}; #[cfg(test)] @@ -63,18 +62,22 @@ mod tests; /// Using a semaphore to limit concurrency: /// /// ``` +/// # use tokio::task; +/// # #[tokio::main(flavor = "current_thread")] +/// # async fn test() { /// # use std as alloc; -/// use maitake::{scheduler::Scheduler, sync::Semaphore}; +/// use maitake_sync::Semaphore; /// use alloc::sync::Arc; /// -/// let scheduler = Scheduler::new(); +/// # let mut tasks = Vec::new(); /// // Allow 4 tasks to run concurrently at a time. /// let semaphore = Arc::new(Semaphore::new(4)); /// /// for _ in 0..8 { /// // Clone the `Arc` around the semaphore. /// let semaphore = semaphore.clone(); -/// scheduler.spawn(async move { +/// # let t = +/// task::spawn(async move { /// // Acquire a permit from the semaphore, returning a RAII guard that /// // releases the permit back to the semaphore when dropped. /// // @@ -87,9 +90,11 @@ mod tests; /// /// // do some work... /// }); +/// # tasks.push(t); /// } -/// -/// scheduler.tick(); +/// # for task in tasks { task.await.unwrap() }; +/// # } +/// # test(); /// ``` /// /// A semaphore may also be used to cause a task to run once all of a set of @@ -101,20 +106,22 @@ mod tests; /// For example: /// /// ``` +/// # use tokio::task; +/// # #[tokio::main(flavor = "current_thread")] +/// # async fn test() { /// # use std as alloc; -/// use maitake::{scheduler::Scheduler, sync::Semaphore}; +/// use maitake_sync::Semaphore; /// use alloc::sync::Arc; /// /// // How many tasks will we be waiting for the completion of? /// const TASKS: usize = 4; /// -/// let scheduler = Scheduler::new(); -/// /// // Create the semaphore with 0 permits. /// let semaphore = Arc::new(Semaphore::new(0)); /// /// // Spawn the "B" task that will wait for the 4 "A" tasks to complete. 
-/// scheduler.spawn({ +/// # let b_task = +/// task::spawn({ /// let semaphore = semaphore.clone(); /// async move { /// println!("Task B starting..."); @@ -132,9 +139,11 @@ mod tests; /// } /// }); /// +/// # let mut tasks = Vec::new(); /// for i in 0..TASKS { /// let semaphore = semaphore.clone(); -/// scheduler.spawn(async move { +/// # let t = +/// task::spawn(async move { /// println!("Task A {i} starting..."); /// /// // Add a single permit to the semaphore. Once all 4 tasks have @@ -146,9 +155,13 @@ mod tests; /// /// println!("Task A {i} done"); /// }); +/// # tasks.push(t); /// } /// -/// scheduler.tick(); +/// # for t in tasks { t.await.unwrap() }; +/// # b_task.await.unwrap(); +/// # } +/// # test(); /// ``` /// /// [counting semaphore]: https://en.wikipedia.org/wiki/Semaphore_(programming) @@ -322,21 +335,14 @@ impl Semaphore { /// were requested. If an [`Acquire`] future is dropped before it completes, /// the task will lose its place in the queue. /// - /// [`Closed`]: crate::sync::Closed + /// [`Closed`]: crate::Closed /// [closed]: Semaphore::close pub fn acquire(&self, permits: usize) -> Acquire<'_> { Acquire { semaphore: self, queued: false, permits, - waiter: Waiter { - node: UnsafeCell::new(Node { - links: list::Links::new(), - waker: None, - _pin: PhantomPinned, - }), - remaining_permits: RemainingPermits(AtomicUsize::new(permits)), - }, + waiter: Waiter::new(permits), } } @@ -375,7 +381,7 @@ impl Semaphore { /// - `Err(`[`TryAcquireError::InsufficientPermits`]`)` if the semaphore had /// fewer than `permits` permits available. /// - /// [`Closed`]: crate::sync::Closed + /// [`Closed`]: crate::Closed /// [closed]: Semaphore::close pub fn try_acquire(&self, permits: usize) -> Result, TryAcquireError> { trace!(permits, "Semaphore::try_acquire"); @@ -431,7 +437,7 @@ impl Semaphore { let mut waiters = loop { // semaphore has closed if sem_curr == Self::CLOSED { - return sync::closed(); + return crate::closed(); } // the total number of permits currently available to this waiter @@ -519,7 +525,7 @@ impl Semaphore { queued, "Semaphore::poll_acquire -> semaphore closed" ); - return sync::closed(); + return crate::closed(); } // add permits to the waiter, returning whether we added enough to wake @@ -861,7 +867,7 @@ feature! { /// completes, the task will lose its place in the queue. /// /// [`acquire`]: Semaphore::acquire - /// [`Closed`]: crate::sync::Closed + /// [`Closed`]: crate::Closed /// [closed]: Semaphore::close pub fn acquire_owned(self: &Arc, permits: usize) -> AcquireOwned { AcquireOwned { @@ -891,7 +897,7 @@ feature! 
{ /// /// /// [`try_acquire`]: Semaphore::try_acquire - /// [`Closed`]: crate::sync::Closed + /// [`Closed`]: crate::Closed /// [closed]: Semaphore::close pub fn try_acquire_owned(self: &Arc, permits: usize) -> Result { trace!(permits, "Semaphore::try_acquire_owned"); diff --git a/maitake/src/sync/semaphore/tests.rs b/maitake-sync/src/semaphore/tests.rs similarity index 89% rename from maitake/src/sync/semaphore/tests.rs rename to maitake-sync/src/semaphore/tests.rs index 9d53b4cf..0cc68dc7 100644 --- a/maitake/src/sync/semaphore/tests.rs +++ b/maitake-sync/src/semaphore/tests.rs @@ -13,7 +13,7 @@ fn permit_is_send_and_sync() { #[test] fn acquire_is_send_and_sync() { - util::test::assert_send_sync::>(); + util::test::assert_send_sync::>(); } #[cfg(feature = "alloc")] diff --git a/maitake/src/sync/semaphore/tests/alloc_tests.rs b/maitake-sync/src/semaphore/tests/alloc_tests.rs similarity index 69% rename from maitake/src/sync/semaphore/tests/alloc_tests.rs rename to maitake-sync/src/semaphore/tests/alloc_tests.rs index 39eef71d..82c8edda 100644 --- a/maitake/src/sync/semaphore/tests/alloc_tests.rs +++ b/maitake-sync/src/semaphore/tests/alloc_tests.rs @@ -1,13 +1,11 @@ use super::*; -use crate::scheduler::Scheduler; use alloc::sync::Arc; -use core::sync::atomic::{AtomicBool, AtomicUsize}; +use core::sync::atomic::AtomicUsize; -#[test] -fn basic_concurrency_limit() { +#[tokio::test] +async fn basic_concurrency_limit() { const TASKS: usize = 8; const CONCURRENCY_LIMIT: usize = 4; - let scheduler = Scheduler::new(); let semaphore = Arc::new(Semaphore::new(CONCURRENCY_LIMIT)); let running = Arc::new(AtomicUsize::new(0)); let completed = Arc::new(AtomicUsize::new(0)); @@ -16,14 +14,14 @@ fn basic_concurrency_limit() { let semaphore = semaphore.clone(); let running = running.clone(); let completed = completed.clone(); - scheduler.spawn(async move { + tokio::spawn(async move { let permit = semaphore .acquire(1) .await .expect("semaphore will not be closed"); assert!(test_dbg!(running.fetch_add(1, Relaxed)) < CONCURRENCY_LIMIT); - crate::future::yield_now().await; + tokio::task::yield_now().await; drop(permit); assert!(test_dbg!(running.fetch_sub(1, Relaxed)) <= CONCURRENCY_LIMIT); @@ -32,26 +30,23 @@ fn basic_concurrency_limit() { } while completed.load(Relaxed) < TASKS { - scheduler.tick(); assert!(test_dbg!(running.load(Relaxed)) <= CONCURRENCY_LIMIT); + tokio::task::yield_now().await; } } -#[test] -fn countdown() { +#[tokio::test] +async fn countdown() { const TASKS: usize = 4; let _trace = crate::util::trace_init(); - let scheduler = Scheduler::new(); let semaphore = Arc::new(Semaphore::new(0)); let a_done = Arc::new(AtomicUsize::new(0)); - let b_done = Arc::new(AtomicBool::new(false)); - scheduler.spawn({ + let b = tokio::spawn({ let semaphore = semaphore.clone(); - let b_done = b_done.clone(); let a_done = a_done.clone(); async move { - info!("Task B starting..."); + tracing::info!("Task B starting..."); // Since the semaphore is created with 0 permits, this will // wait until all 4 "A" tasks have completed. @@ -63,31 +58,26 @@ fn countdown() { // ... do some work ... 
- info!("Task B done!"); - b_done.store(true, Relaxed); + tracing::info!("Task B done!"); } }); for i in 0..TASKS { let semaphore = semaphore.clone(); let a_done = a_done.clone(); - scheduler.spawn(async move { - info!("Task A {i} starting..."); + tokio::spawn(async move { + tracing::info!("Task A {i} starting..."); - crate::future::yield_now().await; + tokio::task::yield_now().await; a_done.fetch_add(1, Relaxed); semaphore.add_permits(1); // ... do some work ... - info!("Task A {i} done"); + tracing::info!("Task A {i} done"); }); } - while !b_done.load(Relaxed) { - scheduler.tick(); - } - + b.await.unwrap(); assert_eq!(a_done.load(Relaxed), TASKS); - assert!(b_done.load(Relaxed)); } diff --git a/maitake/src/sync/semaphore/tests/loom.rs b/maitake-sync/src/semaphore/tests/loom.rs similarity index 100% rename from maitake/src/sync/semaphore/tests/loom.rs rename to maitake-sync/src/semaphore/tests/loom.rs diff --git a/maitake-sync/src/spin.rs b/maitake-sync/src/spin.rs new file mode 100644 index 00000000..66ef22dd --- /dev/null +++ b/maitake-sync/src/spin.rs @@ -0,0 +1,37 @@ +//! Synchronous spinning-based synchronization primitives. +//! +//! The synchronization primitives in `maitake-sync` are _asynchronous_. They +//! are designed to be used with [`core::task`] and [`core::future`], and when +//! it is necessary to wait for another task to complete some work for the +//! current task to proceed, `maitake`'s synchronization primitives wait by +//! *yielding* to the asynchronous task scheduler to allow another task to +//! proceed. +//! +//! This module, on the other hand, provides _synchronous_ (or _blocking_) +//! synchronization primitives. Rather than yielding to the runtime, these +//! synchronization primitives will block the current CPU core (or thread, if +//! running in an environment with threads) until they are woken by other cores. +//! This is performed by *spinning*: issuing yield or pause instructions in a +//! loop until some value changes. These synchronization primitives are, in some +//! cases, necessary to implement the async synchronization primitives that form +//! `maitake-sync`'s core APIs. They are also exposed publicly so they can be +//! used in other projects, when a spinlock-based synchronization primitive is +//! needed. +//! +//! This module provides the following APIs: +//! +//! - [`Mutex`]: a synchronous [mutual exclusion] spinlock. +//! - [`InitOnce`]: a cell storing a [`MaybeUninit`](core::mem::MaybeUninit) +//! value which must be manually initialized prior to use. +//! - [`Lazy`]: an [`InitOnce`] cell coupled with an initializer function. The +//! [`Lazy`] cell ensures the initializer is called to initialize the +//! value the first time it is accessed. +//! +//! [mutual exclusion lock]: https://en.wikipedia.org/wiki/Mutual_exclusion +mod mutex; +pub mod once; + +pub use self::{ + mutex::*, + once::{InitOnce, Lazy}, +}; diff --git a/maitake-sync/src/spin/mutex.rs b/maitake-sync/src/spin/mutex.rs new file mode 100644 index 00000000..84b0b566 --- /dev/null +++ b/maitake-sync/src/spin/mutex.rs @@ -0,0 +1,261 @@ +use crate::{ + loom::{ + cell::{MutPtr, UnsafeCell}, + sync::atomic::{AtomicBool, Ordering::*}, + }, + util::Backoff, +}; +use core::{ + fmt, + ops::{Deref, DerefMut}, +}; + +/// A spinlock-based mutual exclusion lock for protecting shared data +/// +/// This mutex will spin with an exponential backoff while waiting for the lock +/// to become available. Each mutex has a type parameter which represents +/// the data that it is protecting. 
The data can only be accessed through the +/// RAII guards returned from [`lock`] and [`try_lock`], which guarantees that +/// the data is only ever accessed when the mutex is locked. +/// +/// # Fairness +/// +/// This is *not* a fair mutex. +/// +/// # Loom-specific behavior +/// +/// When `cfg(loom)` is enabled, this mutex will use Loom's simulated atomics, +/// checked `UnsafeCell`, and simulated spin loop hints. +/// +/// [`lock`]: Mutex::lock +/// [`try_lock`]: Mutex::try_lock +#[derive(Debug)] +pub struct Mutex { + locked: AtomicBool, + data: UnsafeCell, +} + +/// An RAII implementation of a "scoped lock" of a mutex. When this structure is +/// dropped (falls out of scope), the lock will be unlocked. +/// +/// The data protected by the mutex can be accessed through this guard via its +/// [`Deref`] and [`DerefMut`] implementations. +/// +/// This structure is created by the [`lock`] and [`try_lock`] methods on +/// [`Mutex`]. +/// +/// [`lock`]: Mutex::lock +/// [`try_lock`]: Mutex::try_lock +pub struct MutexGuard<'a, T> { + ptr: MutPtr, + locked: &'a AtomicBool, +} + +impl Mutex { + loom_const_fn! { + /// Returns a new `Mutex` protecting the provided `data`. + /// + /// The returned `Mutex` is in an unlocked state, ready for use. + /// + /// # Examples + /// + /// ``` + /// use maitake_sync::spin::Mutex; + /// + /// let mutex = Mutex::new(0); + /// ``` + #[must_use] + pub fn new(data: T) -> Self { + Self { + locked: AtomicBool::new(false), + data: UnsafeCell::new(data), + } + } + } + + /// Attempts to acquire this lock without spinning + /// + /// If the lock could not be acquired at this time, then [`None`] is returned. + /// Otherwise, an RAII guard is returned. The lock will be unlocked when the + /// guard is dropped. + /// + /// This function will never spin. + #[must_use] + #[cfg_attr(test, track_caller)] + pub fn try_lock(&self) -> Option> { + if test_dbg!(self + .locked + .compare_exchange(false, true, Acquire, Acquire) + .is_ok()) + { + Some(MutexGuard { + ptr: self.data.get_mut(), + locked: &self.locked, + }) + } else { + None + } + } + + /// Acquires a mutex, spinning until it is locked. + /// + /// This function will spin until the mutex is available to lock. Upon + /// returning, the thread is the only thread with the lock + /// held. An RAII guard is returned to allow scoped unlock of the lock. When + /// the guard goes out of scope, the mutex will be unlocked. + #[cfg_attr(test, track_caller)] + pub fn lock(&self) -> MutexGuard<'_, T> { + let mut boff = Backoff::default(); + while test_dbg!(self + .locked + .compare_exchange(false, true, Acquire, Acquire) + .is_err()) + { + while test_dbg!(self.locked.load(Relaxed)) { + boff.spin(); + } + } + + MutexGuard { + ptr: self.data.get_mut(), + locked: &self.locked, + } + } + + /// Forcibly unlock the mutex. + /// + /// If a lock is currently held, it will be released, regardless of who's + /// holding it. Of course, this is **outrageously, disgustingly unsafe** and + /// you should never do it. + /// + /// # Safety + /// + /// This deliberately violates mutual exclusion. + /// + /// Only call this method when it is _guaranteed_ that no stack frame that + /// has previously locked the mutex will ever continue executing. + /// Essentially, this is only okay to call when the kernel is oopsing and + /// all code running on other cores has already been killed. 
+ pub unsafe fn force_unlock(&self) { + self.locked.store(false, Release); + } +} + +unsafe impl Send for Mutex {} +unsafe impl Sync for Mutex {} + +// === impl MutexGuard === + +impl<'a, T> Deref for MutexGuard<'a, T> { + type Target = T; + #[inline] + fn deref(&self) -> &Self::Target { + unsafe { + // Safety: we are holding the lock, so it is okay to dereference the + // mut pointer. + &*self.ptr.deref() + } + } +} + +impl<'a, T> DerefMut for MutexGuard<'a, T> { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { + // Safety: we are holding the lock, so it is okay to dereference the + // mut pointer. + self.ptr.deref() + } + } +} + +impl<'a, T, R: ?Sized> AsRef for MutexGuard<'a, T> +where + T: AsRef, +{ + #[inline] + fn as_ref(&self) -> &R { + self.deref().as_ref() + } +} + +impl<'a, T, R: ?Sized> AsMut for MutexGuard<'a, T> +where + T: AsMut, +{ + #[inline] + fn as_mut(&mut self) -> &mut R { + self.deref_mut().as_mut() + } +} + +impl<'a, T> Drop for MutexGuard<'a, T> { + fn drop(&mut self) { + test_dbg!(self.locked.store(false, Release)); + } +} + +impl<'a, T: fmt::Debug> fmt::Debug for MutexGuard<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.deref().fmt(f) + } +} + +impl<'a, T: fmt::Display> fmt::Display for MutexGuard<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.deref().fmt(f) + } +} + +#[cfg(test)] +mod tests { + use crate::loom::{self, thread}; + use std::prelude::v1::*; + use std::sync::Arc; + + use super::*; + + #[test] + fn multithreaded() { + loom::model(|| { + let mutex = Arc::new(Mutex::new(String::new())); + let mutex2 = mutex.clone(); + + let t1 = thread::spawn(move || { + tracing::info!("t1: locking..."); + let mut lock = mutex2.lock(); + tracing::info!("t1: locked"); + lock.push_str("bbbbb"); + tracing::info!("t1: dropping..."); + }); + + { + tracing::info!("t2: locking..."); + let mut lock = mutex.lock(); + tracing::info!("t2: locked"); + lock.push_str("bbbbb"); + tracing::info!("t2: dropping..."); + } + t1.join().unwrap(); + }); + } + + #[test] + fn try_lock() { + loom::model(|| { + let mutex = Mutex::new(42); + // First lock succeeds + let a = mutex.try_lock(); + assert_eq!(a.as_ref().map(|r| **r), Some(42)); + + // Additional lock failes + let b = mutex.try_lock(); + assert!(b.is_none()); + + // After dropping lock, it succeeds again + ::core::mem::drop(a); + let c = mutex.try_lock(); + assert_eq!(c.as_ref().map(|r| **r), Some(42)); + }); + } +} diff --git a/util/src/sync/once.rs b/maitake-sync/src/spin/once.rs similarity index 97% rename from util/src/sync/once.rs rename to maitake-sync/src/spin/once.rs index a9062cdd..79aaa105 100644 --- a/util/src/sync/once.rs +++ b/maitake-sync/src/spin/once.rs @@ -2,18 +2,14 @@ //! //! This module provides: //! -//! - [`InitOnce`]: a cell storing a [`CheckedMaybeUninit`] value which must be -//! manually initialized prior to use. +//! - [`InitOnce`]: a cell storing a [`MaybeUninit`](core::mem::MaybeUninit) +//! value which must be manually initialized prior to use. //! - [`Lazy`]: an [`InitOnce`] cell coupled with an initializer function. The //! [`Lazy`] cell ensures the initializer is called to initialize the //! value the first time it is accessed. 
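Reviewer note: for context on how the relocated `InitOnce` and `Lazy` cells are consumed from the new crate path, here is a minimal sketch. It assumes `InitOnce` exposes `uninit`/`init`/`get` and `Lazy` exposes `new`/`get`, as the module docs above describe; those method names and the `maitake_sync::spin` import path are assumptions about the moved API, not part of this diff.

```rust
use maitake_sync::spin::{InitOnce, Lazy};

// A cell that must be initialized exactly once before it is read
// (`uninit`, `init`, and `get` are assumed method names).
static MAX_CORES: InitOnce<usize> = InitOnce::uninit();

fn bring_up(cores: usize) {
    // Explicit one-time initialization; a second `init` would be an error.
    MAX_CORES.init(cores);
    assert_eq!(*MAX_CORES.get(), cores);

    // A `Lazy` cell runs its initializer the first time it is accessed
    // (`new` and `get` are assumed method names).
    let default_stack = Lazy::new(|| 8 * 1024);
    assert_eq!(*default_stack.get(), 8 * 1024);
}
```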
use crate::{ - mem::CheckedMaybeUninit, - sync::{ - atomic::{AtomicU8, Ordering}, - spin::Backoff, - }, - unreachable_unchecked, + loom::sync::atomic::{AtomicU8, Ordering}, + util::{Backoff, CheckedMaybeUninit}, }; use core::{ any, @@ -407,4 +403,5 @@ impl fmt::Display for TryInitError { } } -impl crate::error::Error for TryInitError {} +#[cfg(feature = "core-error")] +impl core::error::Error for TryInitError {} diff --git a/maitake-sync/src/util.rs b/maitake-sync/src/util.rs new file mode 100644 index 00000000..031217ab --- /dev/null +++ b/maitake-sync/src/util.rs @@ -0,0 +1,267 @@ +//! Reusable utilities for synchronization primitives. +//! +//! This module contains utility code used in the implementation of the +//! synchronization primitives provided by `maitake-sync`. To enable code reuse, +//! some of these utilities are exposed as public APIs in this module, so that +//! projects depending on `maitake-sync` can use them as well. +//! +//! This module exposes the following APIs: +//! +//! - [`Backoff`]: exponential backoff for spin loops +//! - [`CachePadded`]: pads and aligns a value to the size of a cache line +mod backoff; +mod cache_pad; +pub(crate) mod fmt; +mod maybe_uninit; +mod wake_batch; + +pub use self::{backoff::Backoff, cache_pad::CachePadded}; +pub(crate) use self::{maybe_uninit::CheckedMaybeUninit, wake_batch::WakeBatch}; + +#[cfg(any(test, feature = "tracing"))] +macro_rules! trace { + ($($t:tt)*) => { tracing::trace!($($t)*) } +} + +#[cfg(not(any(test, feature = "tracing")))] +macro_rules! trace { + ($($t:tt)*) => {}; +} + +#[cfg(all(not(test), not(all(maitake_ultraverbose, feature = "tracing"))))] +macro_rules! test_dbg { + ($e:expr) => { + $e + }; +} + +#[cfg(any(test, all(maitake_ultraverbose, feature = "tracing")))] +macro_rules! test_dbg { + ($e:expr) => { + match $e { + e => { + tracing::debug!( + location = %core::panic::Location::caller(), + "{} = {:?}", + stringify!($e), + &e + ); + e + } + } + }; +} + +#[cfg(all(not(test), not(all(maitake_ultraverbose, feature = "tracing"))))] +macro_rules! test_debug { + ($($t:tt)*) => {}; +} + +#[cfg(any(test, all(maitake_ultraverbose, feature = "tracing")))] +macro_rules! test_debug { + ($($t:tt)*) => { tracing::debug!($($t)*) } +} + +#[cfg(all(not(test), not(all(maitake_ultraverbose, feature = "tracing"))))] +macro_rules! test_trace { + ($($t:tt)*) => {}; +} + +#[cfg(any(test, all(maitake_ultraverbose, feature = "tracing")))] +macro_rules! test_trace { + ($($t:tt)*) => { tracing::trace!($($t)*) } +} + +#[cfg(all(not(test), not(all(maitake_ultraverbose, feature = "tracing"))))] +macro_rules! enter_test_debug_span { + ($($args:tt)+) => {}; +} + +#[cfg(any(test, all(maitake_ultraverbose, feature = "tracing")))] +macro_rules! enter_test_debug_span { + ($($args:tt)+) => { + let _span = tracing::debug_span!($($args)+).entered(); + }; +} + +macro_rules! fmt_bits { + ($self: expr, $f: expr, $has_states: ident, $($name: ident),+) => { + $( + if $self.contains(Self::$name) { + if $has_states { + $f.write_str(" | ")?; + } + $f.write_str(stringify!($name))?; + $has_states = true; + } + )+ + + }; +} + +macro_rules! feature { + ( + #![$meta:meta] + $($item:item)* + ) => { + $( + #[cfg($meta)] + #[cfg_attr(docsrs, doc(cfg($meta)))] + $item + )* + } +} + +macro_rules! 
loom_const_fn { + ( + $(#[$meta:meta])* + $vis:vis unsafe fn $name:ident($($arg:ident: $T:ty),*) -> $Ret:ty $body:block + ) => { + $(#[$meta])* + #[cfg(not(loom))] + $vis const unsafe fn $name($($arg: $T),*) -> $Ret $body + + $(#[$meta])* + #[cfg(loom)] + $vis unsafe fn $name($($arg: $T),*) -> $Ret $body + }; + ( + $(#[$meta:meta])* + $vis:vis fn $name:ident($($arg:ident: $T:ty),*) -> $Ret:ty $body:block + ) => { + $(#[$meta])* + #[cfg(not(loom))] + $vis const fn $name($($arg: $T),*) -> $Ret $body + + $(#[$meta])* + #[cfg(loom)] + $vis fn $name($($arg: $T),*) -> $Ret $body + } +} + +/// Indicates unreachable code that we are confident is *truly* unreachable. +/// +/// This is essentially a compromise between `core::unreachable!()` and +/// `core::hint::unreachable_unchecked()`. In debug mode builds and in tests, +/// this expands to `unreachable!()`, causing a panic. However, in release mode +/// non-test builds, this expands to `unreachable_unchecked`. Thus, this is a +/// somewhat safer form of `unreachable_unchecked` that will allow cases where +/// `unreachable_unchecked` would be invalid to be detected early. +/// +/// Nonetheless, this must still be used with caution! If code is not adequately +/// tested, it is entirely possible for the `unreachable_unchecked` to be +/// reached in a scenario that was not reachable in tests. +macro_rules! unreachable_unchecked { + () => ({ + #[cfg(any(test, debug_assertions))] + panic!( + concat!( + env!("CARGO_PKG_NAME"), + "internal error: entered unreachable code \n", + "/!\\ EXTREMELY SERIOUS WARNING: in release mode, this would have been\n", + " `unreachable_unchecked`! This could result in undefine behavior.\n", + " Please double- or triple-check any assumptions about code which\n,", + " could lead to this being triggered." + ), + ); + #[allow(unreachable_code)] // lol + { + core::hint::unreachable_unchecked(); + } + }); + ($msg:expr) => ({ + unreachable_unchecked!("{}", $msg) + }); + ($msg:expr,) => ({ + unreachable_unchecked!($msg) + }); + ($fmt:expr, $($arg:tt)*) => ({ + #[cfg(any(test, debug_assertions))] + panic!( + concat!( + env!("CARGO_PKG_NAME"), + "internal error: entered unreachable code: ", + $fmt, + "\n/!\\ EXTREMELY SERIOUS WARNING: in release mode, this would have been \n\ + \x32 `unreachable_unchecked`! This could result in undefine behavior. \n\ + \x32 Please double- or triple-check any assumptions about code which \n\ + \x32 could lead to this being triggered." + ), + $($arg)* + ); + #[allow(unreachable_code)] // lol + { + core::hint::unreachable_unchecked(); + } + }); +} + +#[cfg(all(test, not(loom)))] +pub(crate) use self::test::trace_init; + +#[cfg(test)] +pub(crate) mod test { + /// A guard that represents the tracing default subscriber guard + /// + /// *should* be held until the end of the test, to ensure that tracing messages + /// actually make it to the fmt subscriber for the entire test. + /// + /// Exists to abstract over tracing 01/02 guard type differences. 
+ #[must_use] + #[cfg(all(test, not(loom)))] + pub struct TestGuard { + _x1: tracing::subscriber::DefaultGuard, + } + + /// Initialize tracing with a default filter directive + /// + /// Returns a [TestGuard] that must be held for the duration of test to ensure + /// tracing messages are correctly output + + #[cfg(all(test, not(loom)))] + pub(crate) fn trace_init() -> TestGuard { + trace_init_with_default("maitake=debug,cordyceps=debug") + } + + /// Initialize tracing with the given filter directive + /// + /// Returns a [TestGuard] that must be held for the duration of test to ensure + /// tracing messages are correctly output + #[cfg(all(test, not(loom)))] + pub(crate) fn trace_init_with_default(default: &str) -> TestGuard { + use tracing_subscriber::{ + filter::{EnvFilter, LevelFilter}, + util::SubscriberInitExt, + }; + const ENV: &str = if cfg!(loom) { "LOOM_LOG" } else { "RUST_LOG" }; + + let env = std::env::var(ENV).unwrap_or_default(); + let builder = EnvFilter::builder().with_default_directive(LevelFilter::INFO.into()); + let filter = if env.is_empty() { + builder + .parse(default) + .unwrap() + // enable "loom=info" if using the default, so that we get + // loom's thread number and iteration count traces. + .add_directive("loom=info".parse().unwrap()) + } else { + builder.parse_lossy(env) + }; + let collector = tracing_subscriber::fmt() + .with_env_filter(filter) + .with_test_writer() + .without_time() + .finish(); + + TestGuard { + _x1: collector.set_default(), + } + } + + #[allow(dead_code)] + pub(crate) fn assert_send() {} + + #[allow(dead_code)] + pub(crate) fn assert_sync() {} + pub(crate) fn assert_send_sync() {} +} diff --git a/maitake-sync/src/util/backoff.rs b/maitake-sync/src/util/backoff.rs new file mode 100644 index 00000000..2c49976f --- /dev/null +++ b/maitake-sync/src/util/backoff.rs @@ -0,0 +1,70 @@ +use crate::loom::hint; + +/// An [exponential backoff] for spin loops. +/// +/// This is a helper struct for spinning in a busy loop, with an exponentially +/// increasing number of spins up to a maximum value. +/// +/// [exponential backoff]: https://en.wikipedia.org/wiki/Exponential_backoff +#[derive(Debug, Copy, Clone)] +pub struct Backoff { + exp: u8, + max: u8, +} + +// === impl Backoff === + +impl Backoff { + /// The default maximum exponent (2^8). + /// + /// This is the maximum exponent returned by [`Backoff::new()`] and + /// [`Backoff::default()`]. To override the maximum exponent, use + /// [`Backoff::with_max_exponent()`]. + pub const DEFAULT_MAX_EXPONENT: u8 = 8; + + /// Returns a new exponential backoff with the maximum exponent set to + /// [`Self::DEFAULT_MAX_EXPONENT`]. + #[must_use] + pub const fn new() -> Self { + Self { + exp: 0, + max: Self::DEFAULT_MAX_EXPONENT, + } + } + + /// Returns a new exponential backoff with the provided max exponent. + #[must_use] + pub fn with_max_exponent(max: u8) -> Self { + assert!(max <= Self::DEFAULT_MAX_EXPONENT); + Self { exp: 0, max } + } + + /// Backs off in a spin loop. + /// + /// This should be used when an operation needs to be retried because + /// another thread or core made progress. Depending on the target + /// architecture, this will generally issue a sequence of `yield` or `pause` + /// instructions. + /// + /// Each time this function is called, it will issue `2^exp` [spin loop + /// hints], where `exp` is the current exponent value (starting at 0). If + /// `exp` is less than the configured maximum exponent, the exponent is + /// incremented once the spin is complete. 
+ #[inline(always)] + pub fn spin(&mut self) { + // Issue 2^exp pause instructions. + for _ in 0..1 << self.exp { + hint::spin_loop(); + } + + if self.exp < self.max { + self.exp += 1 + } + } +} + +impl Default for Backoff { + fn default() -> Self { + Self::new() + } +} diff --git a/maitake-sync/src/util/cache_pad.rs b/maitake-sync/src/util/cache_pad.rs new file mode 100644 index 00000000..bde21b6e --- /dev/null +++ b/maitake-sync/src/util/cache_pad.rs @@ -0,0 +1,108 @@ +use core::{ + fmt, + ops::{Deref, DerefMut}, +}; + +pub use self::inner::CachePadded; + +/// When configured not to pad to cache alignment, just provide a no-op wrapper struct +/// This feature is useful for platforms with no data cache, such as many Cortex-M +/// targets. +#[cfg(feature = "no-cache-pad")] +mod inner { + /// Aligns the wrapped value to the size of a cache line. + /// + /// This is used to avoid [false sharing] for values that may be + /// accessed concurrently. + /// + /// # Size/Alignment + /// + /// The size and alignment of this type depends on the target architecture, + /// and on whether or not the `no-cache-pad` feature flag is enabled. + /// + /// When the `no-cache-pad` crate feature flag is enabled, this is simply a + /// no-op wrapper struct. This is intended for use on useful for platforms + /// with no data cache, such as many Cortex-M targets. + /// + /// In other cases, this type is always aligned to the size of a cache line, + /// based on the target architecture. On `x86_64`/`aarch64`, a cache line is + /// 128 bytes. On all other targets, a cache line is assumed to 64 bytes + /// long. This type's size will always be a multiple of the cache line size; + /// if the wrapped type is longer than the alignment of a cache line, then + /// this type will be padded to multiple cache lines. + /// + /// [false sharing]: https://en.wikipedia.org/wiki/False_sharing + #[derive(Clone, Copy, Default, Hash, PartialEq, Eq)] + pub struct CachePadded(pub(super) T); +} + +/// When not inhibited, determine cache alignment based on target architecture. +/// Align to 128 bytes on 64-bit x86/ARM targets, otherwise align to 64 bytes. +#[cfg(not(feature = "no-cache-pad"))] +mod inner { + /// Aligns the wrapped value to the size of a cache line. + /// + /// This is used to avoid [false sharing] for values that may be + /// accessed concurrently. + /// + /// # Size/Alignment + /// + /// The size and alignment of this type depends on the target architecture, + /// and on whether or not the `no-cache-pad` feature flag is enabled. + /// + /// When the `no-cache-pad` crate feature flag is enabled, this is simply a + /// no-op wrapper struct. This is intended for use on useful for platforms + /// with no data cache, such as many Cortex-M targets. + /// + /// In other cases, this type is always aligned to the size of a cache line, + /// based on the target architecture. On `x86_64`/`aarch64`, a cache line is + /// 128 bytes. On all other targets, a cache line is assumed to 64 bytes + /// long. This type's size will always be a multiple of the cache line size; + /// if the wrapped type is longer than the alignment of a cache line, then + /// this type will be padded to multiple cache lines. 
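Reviewer note: since `CachePadded` is one of the utilities this patch exposes publicly (assuming the `util` module is re-exported from the crate root, as its module docs state), a minimal sketch of the padding pattern described above; the struct and field names are illustrative only, not taken from this diff.

```rust
use core::sync::atomic::{AtomicUsize, Ordering};
// Assumed public path, per the `util` module docs in this patch.
use maitake_sync::util::CachePadded;

/// Illustrative only: keep two hot counters on separate cache lines so
/// that writers on different cores do not falsely share a line.
struct Counters {
    enqueued: CachePadded<AtomicUsize>,
    dequeued: CachePadded<AtomicUsize>,
}

impl Counters {
    const fn new() -> Self {
        Self {
            enqueued: CachePadded::new(AtomicUsize::new(0)),
            dequeued: CachePadded::new(AtomicUsize::new(0)),
        }
    }
}

fn record_enqueue(counters: &Counters) {
    // `CachePadded` derefs to the wrapped value.
    counters.enqueued.fetch_add(1, Ordering::Relaxed);
}
```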
+ /// + /// [false sharing]: https://en.wikipedia.org/wiki/False_sharing + #[cfg_attr(any(target_arch = "x86_64", target_arch = "aarch64"), repr(align(128)))] + #[cfg_attr( + not(any(target_arch = "x86_64", target_arch = "aarch64")), + repr(align(64)) + )] + #[derive(Clone, Copy, Default, Hash, PartialEq, Eq)] + pub struct CachePadded(pub(super) T); +} + +// === impl CachePadded === + +impl CachePadded { + /// Pads `value` to the length of a cache line. + pub const fn new(value: T) -> Self { + Self(value) + } + + /// Unwraps the inner value and returns it. + pub fn into_inner(self) -> T { + self.0 + } +} + +impl Deref for CachePadded { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + &self.0 + } +} + +impl DerefMut for CachePadded { + #[inline] + fn deref_mut(&mut self) -> &mut T { + &mut self.0 + } +} + +impl fmt::Debug for CachePadded { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} diff --git a/maitake-sync/src/util/fmt.rs b/maitake-sync/src/util/fmt.rs new file mode 100644 index 00000000..e42da133 --- /dev/null +++ b/maitake-sync/src/util/fmt.rs @@ -0,0 +1,88 @@ +pub use core::fmt::*; + +/// A wrapper type that formats the wrapped value using a provided function. +/// +/// This is used to implement the `ptr` and `display` util functions. +pub(crate) struct FormatWith) -> Result> +where + F: Fn(&T, &mut Formatter<'_>) -> Result, +{ + value: T, + fmt: F, +} + +#[derive(Clone)] +pub(crate) struct FmtOption<'a, T> { + opt: Option<&'a T>, + or_else: &'a str, +} + +// === impl FormatWith === + +#[cfg(any(test, feature = "tracing"))] +#[inline] +#[must_use] +pub(crate) fn ptr(value: T) -> FormatWith { + FormatWith { + value, + fmt: Pointer::fmt, + } +} + +#[inline] +#[must_use] +pub(crate) fn display(value: T) -> FormatWith { + FormatWith { + value, + fmt: Display::fmt, + } +} + +impl Debug for FormatWith +where + F: Fn(&T, &mut Formatter<'_>) -> Result, +{ + #[inline] + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + (self.fmt)(&self.value, f) + } +} + +// === impl FmtOption ==== + +#[must_use] +#[inline] +pub(crate) const fn opt(value: &Option) -> FmtOption<'_, T> { + FmtOption { + opt: value.as_ref(), + or_else: "", + } +} + +impl<'a, T> FmtOption<'a, T> { + #[must_use] + #[inline] + pub(crate) fn or_else(self, or_else: &'a str) -> Self { + Self { or_else, ..self } + } +} + +impl Debug for FmtOption<'_, T> { + #[inline] + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + match self.opt { + Some(val) => val.fmt(f), + None => f.write_str(self.or_else), + } + } +} + +impl Display for FmtOption<'_, T> { + #[inline] + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + match self.opt { + Some(val) => val.fmt(f), + None => f.write_str(self.or_else), + } + } +} diff --git a/maitake-sync/src/util/maybe_uninit.rs b/maitake-sync/src/util/maybe_uninit.rs new file mode 100644 index 00000000..1b2ebf66 --- /dev/null +++ b/maitake-sync/src/util/maybe_uninit.rs @@ -0,0 +1,435 @@ +#![allow(dead_code)] // most of this isn't used yet... +use core::{fmt, mem::MaybeUninit}; + +/// A checked version of [`core::mem::MaybeUninit`]. +/// +/// This is similar to [`core::mem::MaybeUninit`] in release builds. In debug +/// mode builds, it additionally stores a flag tracking whether the value is +/// initialized, and asserts that the cell is initialized when it is accessed. 
+/// +/// # Differences from `MaybeUninit` +/// +/// This type is **not** capable of tracking initialization of +/// partially-initialized values, so it lacks `core::mem::MaybeUninit`'s array +/// and slice methods. Additionally, it does not implement a version of +/// [`MaybeUninit::zeroed`], because it does not know whether a zeroed `T` is +/// valid or not. +pub struct CheckedMaybeUninit { + value: MaybeUninit, + #[cfg(debug_assertions)] + initialized: bool, +} + +impl CheckedMaybeUninit { + /// Creates a new `CheckedMaybeUninit` initialized with the given value. + /// It is safe to call [`assume_init`] on the return value of this function. + /// + /// Note that dropping a `CheckedMaybeUninit` will never call `T`'s drop code. + /// It is your responsibility to make sure `T` gets dropped if it got initialized. + /// + /// [`assume_init`]: Self::assume_init + #[must_use = "use `forget` to avoid running Drop code"] + #[inline(always)] + pub const fn new(val: T) -> Self { + Self { + value: MaybeUninit::new(val), + #[cfg(debug_assertions)] + initialized: true, + } + } + + /// Creates a new `CheckedMaybeUninit` in an uninitialized state. + /// + /// Note that dropping a `CheckedMaybeUninit` will never call `T`'s drop code. + /// It is your responsibility to make sure `T` gets dropped if it got initialized. + /// + /// See the [type-level documentation][CheckedMaybeUninit] for some examples. + #[must_use] + #[inline(always)] + pub const fn uninit() -> Self { + Self { + value: MaybeUninit::uninit(), + #[cfg(debug_assertions)] + initialized: false, + } + } + + /// Sets the value of the `CheckedMaybeUninit`. + /// + /// This overwrites any previous value without dropping it, so be careful + /// not to use this twice unless you want to skip running the destructor. + /// For your convenience, this also returns a mutable reference to the + /// (now safely initialized) contents of `self`. + /// + /// As the content is stored inside a `CheckedMaybeUninit`, the destructor is not + /// run for the inner data if the MaybeUninit leaves scope without a call to + /// [`assume_init`], [`assume_init_drop`], or similar. Code that receives + /// the mutable reference returned by this function needs to keep this in + /// mind. The safety model of Rust regards leaks as safe, but they are + /// usually still undesirable. This being said, the mutable reference + /// behaves like any other mutable reference would, so assigning a new value + /// to it will drop the old content. + /// + /// [`assume_init`]: Self::assume_init + /// [`assume_init_drop`]: Self::assume_init_drop + #[inline(always)] + pub fn write(&mut self, val: T) -> &mut T { + self.init().write(val) + } + + /// Gets a pointer to the contained value. Reading from this pointer or turning it + /// into a reference is undefined behavior unless the `CheckedMaybeUninit` is initialized. + /// Writing to memory that this pointer (non-transitively) points to is undefined behavior + /// (except inside an `UnsafeCell`). + #[inline(always)] + #[track_caller] + pub fn as_ptr(&self) -> *const T { + self.assert_init("as_ptr").as_ptr() + } + + /// Gets a mutable pointer to the contained value. Reading from this pointer or turning it + /// into a reference is undefined behavior unless the `CheckedMaybeUninit` is initialized. + #[inline(always)] + #[track_caller] + pub fn as_mut_ptr(&mut self) -> *mut T { + self.assert_init_mut("as_mut_ptr").as_mut_ptr() + } + + /// Extracts the value from the `CheckedMaybeUninit` container. 
This is a great way + /// to ensure that the data will get dropped, because the resulting `T` is + /// subject to the usual drop handling. + /// + /// # Safety + /// + /// It is up to the caller to guarantee that the `CheckedMaybeUninit` really is in an initialized + /// state. Calling this when the content is not yet fully initialized causes immediate undefined + /// behavior. The [type-level documentation][inv] contains more information about + /// this initialization invariant. + /// + /// [inv]: #initialization-invariant + /// + /// On top of that, remember that most types have additional invariants beyond merely + /// being considered initialized at the type level. For example, a `1`-initialized [`Vec`] + /// is considered initialized (under the current implementation; this does not constitute + /// a stable guarantee) because the only requirement the compiler knows about it + /// is that the data pointer must be non-null. Creating such a `Vec` does not cause + /// *immediate* undefined behavior, but will cause undefined behavior with most + /// safe operations (including dropping it). + /// + /// [`Vec`]: ../../std/vec/struct.Vec.html + /// + /// # Examples + /// + /// Correct usage of this method: + /// + /// ```rust + /// use std::mem::MaybeUninit; + /// + /// let mut x = MaybeUninit::::uninit(); + /// x.write(true); + /// let x_init = unsafe { x.assume_init() }; + /// assert_eq!(x_init, true); + /// ``` + /// + /// *Incorrect* usage of this method: + /// + /// ```rust,no_run + /// use std::mem::MaybeUninit; + /// + /// let x = MaybeUninit::>::uninit(); + /// let x_init = unsafe { x.assume_init() }; + /// // `x` had not been initialized yet, so this last line caused undefined behavior. ⚠️ + /// ``` + #[inline(always)] + #[track_caller] + pub unsafe fn assume_init(self) -> T { + self.assert_init_val("assume_init").assume_init() + } + + /// Reads the value from the `CheckedMaybeUninit` container. The resulting `T` is subject + /// to the usual drop handling. + /// + /// Whenever possible, it is preferable to use [`assume_init`] instead, which + /// prevents duplicating the content of the `CheckedMaybeUninit`. + /// + /// # Safety + /// + /// It is up to the caller to guarantee that the `CheckedMaybeUninit` really is in an initialized + /// state. Calling this when the content is not yet fully initialized causes undefined + /// behavior. The [type-level documentation][inv] contains more information about + /// this initialization invariant. + /// + /// Moreover, similar to the [`ptr::read`] function, this function creates a + /// bitwise copy of the contents, regardless whether the contained type + /// implements the [`Copy`] trait or not. When using multiple copies of the + /// data (by calling `assume_init_read` multiple times, or first calling + /// `assume_init_read` and then [`assume_init`]), it is your responsibility + /// to ensure that that data may indeed be duplicated. + /// + /// [inv]: #initialization-invariant + /// [`assume_init`]: MaybeUninit::assume_init + /// [`ptr::read`]: core::ptr::read + #[inline(always)] + #[track_caller] + pub unsafe fn assume_init_read(&self) -> T { + self.assert_init("assume_init_read").assume_init_read() + } + + /// Drops the contained value in place. + /// + /// If you have ownership of the `CheckedMaybeUninit`, you can also use + /// [`assume_init`] as an alternative. + /// + /// # Safety + /// + /// It is up to the caller to guarantee that the `CheckedMaybeUninit` really is + /// in an initialized state. 
Calling this when the content is not yet fully + /// initialized causes undefined behavior. + /// + /// On top of that, all additional invariants of the type `T` must be + /// satisfied, as the `Drop` implementation of `T` (or its members) may + /// rely on this. For example, setting a [`Vec`] to an invalid but + /// non-null address makes it initialized (under the current implementation; + /// this does not constitute a stable guarantee), because the only + /// requirement the compiler knows about it is that the data pointer must be + /// non-null. Dropping such a `Vec` however will cause undefined + /// behaviour. + /// + /// [`assume_init`]: MaybeUninit::assume_init + #[inline(always)] + #[track_caller] + pub unsafe fn assume_init_drop(&mut self) { + self.assert_init_mut("assume_init_drop").assume_init_drop() + } + + /// Gets a shared reference to the contained value. + /// + /// This can be useful when we want to access a `CheckedMaybeUninit` that has been + /// initialized but don't have ownership of the `CheckedMaybeUninit` (preventing the use + /// of `.assume_init()`). + /// + /// # Safety + /// + /// Calling this when the content is not yet fully initialized causes undefined + /// behavior: it is up to the caller to guarantee that the `CheckedMaybeUninit` really + /// is in an initialized state. + /// + /// # Examples + /// + /// ### Correct usage of this method: + /// + /// ```rust + /// use std::mem::MaybeUninit; + /// + /// let mut x = MaybeUninit::>::uninit(); + /// // Initialize `x`: + /// x.write(vec![1, 2, 3]); + /// // Now that our `CheckedMaybeUninit<_>` is known to be initialized, it is okay to + /// // create a shared reference to it: + /// let x: &Vec = unsafe { + /// // SAFETY: `x` has been initialized. + /// x.assume_init_ref() + /// }; + /// assert_eq!(x, &vec![1, 2, 3]); + /// ``` + /// + /// ### *Incorrect* usages of this method: + /// + /// ```rust,no_run + /// use std::mem::MaybeUninit; + /// + /// let x = MaybeUninit::>::uninit(); + /// let x_vec: &Vec = unsafe { x.assume_init_ref() }; + /// // We have created a reference to an uninitialized vector! This is undefined behavior. ⚠️ + /// ``` + /// + /// ```rust,no_run + /// use std::{cell::Cell, mem::MaybeUninit}; + /// + /// let b = MaybeUninit::>::uninit(); + /// // Initialize the `CheckedMaybeUninit` using `Cell::set`: + /// unsafe { + /// b.assume_init_ref().set(true); + /// // ^^^^^^^^^^^^^^^ + /// // Reference to an uninitialized `Cell`: UB! + /// } + /// ``` + #[track_caller] + #[inline(always)] + pub unsafe fn assume_init_ref(&self) -> &T { + self.assert_init("assume_init_ref").assume_init_ref() + } + + /// Gets a mutable (unique) reference to the contained value. + /// + /// This can be useful when we want to access a `CheckedMaybeUninit` that has been + /// initialized but don't have ownership of the `CheckedMaybeUninit` (preventing the use + /// of `.assume_init()`). + /// + /// # Safety + /// + /// Calling this when the content is not yet fully initialized causes undefined + /// behavior: it is up to the caller to guarantee that the `CheckedMaybeUninit` really + /// is in an initialized state. For instance, `.assume_init_mut()` cannot be used to + /// initialize a `CheckedMaybeUninit`. 
+ /// + /// # Examples + /// + /// ### Correct usage of this method: + /// + /// ```rust + /// # #![allow(unexpected_cfgs)] + /// use std::mem::MaybeUninit; + /// + /// # unsafe extern "C" fn initialize_buffer(buf: *mut [u8; 1024]) { *buf = [0; 1024] } + /// # #[cfg(FALSE)] + /// extern "C" { + /// /// Initializes *all* the bytes of the input buffer. + /// fn initialize_buffer(buf: *mut [u8; 1024]); + /// } + /// + /// let mut buf = MaybeUninit::<[u8; 1024]>::uninit(); + /// + /// // Initialize `buf`: + /// unsafe { initialize_buffer(buf.as_mut_ptr()); } + /// // Now we know that `buf` has been initialized, so we could `.assume_init()` it. + /// // However, using `.assume_init()` may trigger a `memcpy` of the 1024 bytes. + /// // To assert our buffer has been initialized without copying it, we upgrade + /// // the `&mut MaybeUninit<[u8; 1024]>` to a `&mut [u8; 1024]`: + /// let buf: &mut [u8; 1024] = unsafe { + /// // SAFETY: `buf` has been initialized. + /// buf.assume_init_mut() + /// }; + /// + /// // Now we can use `buf` as a normal slice: + /// buf.sort_unstable(); + /// debug_assert!( + /// buf.windows(2).all(|pair| pair[0] <= pair[1]), + /// "buffer is sorted", + /// ); + /// ``` + /// + /// ### *Incorrect* usages of this method: + /// + /// You cannot use `.assume_init_mut()` to initialize a value: + /// + /// ```rust,no_run + /// use std::mem::MaybeUninit; + /// + /// let mut b = MaybeUninit::::uninit(); + /// unsafe { + /// *b.assume_init_mut() = true; + /// // We have created a (mutable) reference to an uninitialized `bool`! + /// // This is undefined behavior. ⚠️ + /// } + /// ``` + /// + /// For instance, you cannot [`Read`] into an uninitialized buffer: + /// + /// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html + /// + /// ```rust,no_run + /// use std::{io, mem::MaybeUninit}; + /// + /// fn read_chunk (reader: &'_ mut dyn io::Read) -> io::Result<[u8; 64]> + /// { + /// let mut buffer = MaybeUninit::<[u8; 64]>::uninit(); + /// reader.read_exact(unsafe { buffer.assume_init_mut() })?; + /// // ^^^^^^^^^^^^^^^^^^^^^^^^ + /// // (mutable) reference to uninitialized memory! + /// // This is undefined behavior. + /// Ok(unsafe { buffer.assume_init() }) + /// } + /// ``` + /// + /// Nor can you use direct field access to do field-by-field gradual initialization: + /// + /// ```rust,no_run + /// use std::{mem::MaybeUninit, ptr}; + /// + /// struct Foo { + /// a: u32, + /// b: u8, + /// } + /// + /// let foo: Foo = unsafe { + /// let mut foo = MaybeUninit::::uninit(); + /// ptr::write(&mut foo.assume_init_mut().a as *mut u32, 1337); + /// // ^^^^^^^^^^^^^^^^^^^^^ + /// // (mutable) reference to uninitialized memory! + /// // This is undefined behavior. + /// ptr::write(&mut foo.assume_init_mut().b as *mut u8, 42); + /// // ^^^^^^^^^^^^^^^^^^^^^ + /// // (mutable) reference to uninitialized memory! + /// // This is undefined behavior. + /// foo.assume_init() + /// }; + /// ``` + + #[inline(always)] + #[track_caller] + pub unsafe fn assume_init_mut(&mut self) -> &mut T { + self.assert_init_mut("assume_init_mut").assume_init_mut() + } + + #[inline(always)] + fn init(&mut self) -> &mut MaybeUninit { + #[cfg(debug_assertions)] + { + self.initialized = true; + } + &mut self.value + } + + #[inline(always)] + #[track_caller] + fn assert_init(&self, _method: &'static str) -> &MaybeUninit { + #[cfg(debug_assertions)] + debug_assert!( + self.initialized, + "`MaybeUninit::{_method}` called on a `MaybeUninit` cell that was not initialized! 
this is a bug!", + ); + &self.value + } + + #[inline(always)] + #[track_caller] + fn assert_init_mut(&mut self, _method: &'static str) -> &mut MaybeUninit { + #[cfg(debug_assertions)] + debug_assert!( + self.initialized, + "`MaybeUninit::{_method}` called on a `MaybeUninit` cell that was not initialized! this is a bug!", + ); + &mut self.value + } + + #[inline(always)] + #[track_caller] + fn assert_init_val(self, _method: &'static str) -> MaybeUninit { + #[cfg(debug_assertions)] + debug_assert!( + self.initialized, + "`MaybeUninit::{_method}` called on a `MaybeUninit` cell that was not initialized! this is a bug!", + ); + self.value + } +} + +impl fmt::Debug for CheckedMaybeUninit { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut s = f.debug_struct("CheckedMaybeUninit"); + #[cfg(debug_assertions)] + if self.initialized { + s.field("value", unsafe { &self.assume_init_ref() }); + } else { + s.field("value", &format_args!("")); + } + + #[cfg(not(test))] + { + s.field("value", &format_args!("")); + } + + s.finish() + } +} diff --git a/maitake/src/util/wake_batch.rs b/maitake-sync/src/util/wake_batch.rs similarity index 98% rename from maitake/src/util/wake_batch.rs rename to maitake-sync/src/util/wake_batch.rs index 1ee3511e..8c282791 100644 --- a/maitake/src/util/wake_batch.rs +++ b/maitake-sync/src/util/wake_batch.rs @@ -1,5 +1,5 @@ +use super::CheckedMaybeUninit; use core::{ptr, task::Waker}; -use mycelium_util::mem::CheckedMaybeUninit; /// A utility for waking multiple tasks in a batch, without reallocating. /// diff --git a/maitake/src/sync/wait_cell.rs b/maitake-sync/src/wait_cell.rs similarity index 89% rename from maitake/src/sync/wait_cell.rs rename to maitake-sync/src/wait_cell.rs index 8df6f7ca..5b7f5cf1 100644 --- a/maitake/src/sync/wait_cell.rs +++ b/maitake-sync/src/wait_cell.rs @@ -1,13 +1,16 @@ //! An atomically registered [`Waker`], for waking a single task. //! //! See the documentation for the [`WaitCell`] type for details. -use super::Closed; -use crate::loom::{ - cell::UnsafeCell, - sync::atomic::{ - AtomicUsize, - Ordering::{self, *}, +use crate::{ + loom::{ + cell::UnsafeCell, + sync::atomic::{ + AtomicUsize, + Ordering::{self, *}, + }, }, + util::{fmt, CachePadded}, + Closed, }; use core::{ future::Future, @@ -15,7 +18,6 @@ use core::{ pin::Pin, task::{self, Context, Poll, Waker}, }; -use mycelium_util::{fmt, sync::CachePadded}; /// An atomically registered [`Waker`]. /// @@ -255,7 +257,7 @@ impl WaitCell { /// # Examples /// /// ``` - /// use maitake::sync::WaitCell; + /// use maitake_sync::WaitCell; /// /// // Perform an operation that results in a concurrent wakeup, such as /// // unmasking an interrupt. 
@@ -538,33 +540,27 @@ impl fmt::Debug for State { #[cfg(all(feature = "alloc", not(loom), test))] mod tests { use super::*; - use crate::scheduler::Scheduler; use alloc::sync::Arc; use tokio_test::{assert_pending, assert_ready, assert_ready_ok, task}; #[test] fn wait_smoke() { - static COMPLETED: AtomicUsize = AtomicUsize::new(0); let _trace = crate::util::test::trace_init(); - let sched = Scheduler::new(); let wait = Arc::new(WaitCell::new()); - let wait2 = wait.clone(); - sched.spawn(async move { - wait2.wait().await.unwrap(); - COMPLETED.fetch_add(1, Ordering::Relaxed); + let mut task = task::spawn({ + let wait = wait.clone(); + async move { wait.wait().await } }); - let tick = sched.tick(); - assert_eq!(tick.completed, 0); - assert_eq!(COMPLETED.load(Ordering::Relaxed), 0); + assert_pending!(task.poll()); assert!(wait.wake()); - let tick = sched.tick(); - assert_eq!(tick.completed, 1); - assert_eq!(COMPLETED.load(Ordering::Relaxed), 1); + + assert!(task.is_woken()); + assert_ready_ok!(task.poll()); } /// Reproduces https://github.com/hawkw/mycelium/issues/449 @@ -690,66 +686,6 @@ mod tests { } } -#[cfg(test)] -pub(crate) mod test_util { - use super::*; - - use crate::loom::sync::atomic::{AtomicUsize, Ordering::Relaxed}; - use std::sync::Arc; - - #[derive(Debug)] - pub(crate) struct Chan { - num: AtomicUsize, - task: WaitCell, - num_notify: usize, - } - - impl Chan { - pub(crate) fn new(num_notify: usize) -> Arc { - Arc::new(Self { - num: AtomicUsize::new(0), - task: WaitCell::new(), - num_notify, - }) - } - - pub(crate) async fn wait(self: Arc) { - let this = Arc::downgrade(&self); - drop(self); - futures_util::future::poll_fn(move |cx| { - let Some(this) = this.upgrade() else {return Poll::Ready(()) }; - - let res = this.task.wait(); - futures_util::pin_mut!(res); - - if this.num_notify == this.num.load(Relaxed) { - return Poll::Ready(()); - } - - res.poll(cx).map(drop) - }) - .await - } - - pub(crate) fn wake(&self) { - self.num.fetch_add(1, Relaxed); - self.task.wake(); - } - - #[allow(dead_code)] - pub(crate) fn close(&self) { - self.num.fetch_add(1, Relaxed); - self.task.close(); - } - } - - impl Drop for Chan { - fn drop(&mut self) { - debug!(chan = ?fmt::alt(&self), "drop"); - } - } -} - #[cfg(all(loom, test))] mod loom { use super::*; @@ -765,19 +701,19 @@ mod loom { let closer = wait.clone(); thread::spawn(move || { - info!("waking"); + tracing::info!("waking"); waker.wake(); - info!("woken"); + tracing::info!("woken"); }); thread::spawn(move || { - info!("closing"); + tracing::info!("closing"); closer.close(); - info!("closed"); + tracing::info!("closed"); }); - info!("waiting"); + tracing::info!("waiting"); let _ = future::block_on(wait.wait()); - info!("wait'd"); + tracing::info!("wait'd"); }); } @@ -791,15 +727,15 @@ mod loom { thread::spawn({ let waker = cell.clone(); move || { - info!("waking"); + tracing::info!("waking"); waker.wake(); - info!("woken"); + tracing::info!("woken"); } }); - info!("waiting"); + tracing::info!("waiting"); wait.await.expect("wait should be woken, not closed"); - info!("wait'd"); + tracing::info!("wait'd"); }); }); } diff --git a/maitake/src/sync/wait_map.rs b/maitake-sync/src/wait_map.rs similarity index 98% rename from maitake/src/sync/wait_map.rs rename to maitake-sync/src/wait_map.rs index 9213d847..e605c137 100644 --- a/maitake/src/sync/wait_map.rs +++ b/maitake-sync/src/wait_map.rs @@ -10,7 +10,7 @@ use crate::{ spin::{Mutex, MutexGuard}, }, }, - util::WakeBatch, + util::{fmt, CachePadded, WakeBatch}, }; use cordyceps::{ list::{self, 
List}, @@ -26,8 +26,6 @@ use core::{ task::{Context, Poll, Waker}, }; use mycelium_bitfield::{enum_from_bits, FromBits}; -use mycelium_util::fmt; -use mycelium_util::sync::CachePadded; use pin_project::{pin_project, pinned_drop}; #[cfg(test)] @@ -91,12 +89,10 @@ const fn notified(data: T) -> Poll> { /// /// Waking a single task at a time by calling [`wake`][wake]: /// -/// ``` +/// ```ignore /// use std::sync::Arc; -/// use maitake::{ -/// scheduler::Scheduler, -/// sync::wait_map::{WaitMap, WakeOutcome}, -/// }; +/// use maitake::scheduler; +/// use maitake_sync::wait_map::{WaitMap, WakeOutcome}; /// /// const TASKS: usize = 10; /// @@ -237,12 +233,10 @@ impl<'map, 'wait, K: PartialEq, V> Wait<'map, K, V> { /// /// # Example /// - /// ``` + /// ```ignore /// use std::sync::Arc; - /// use maitake::{ - /// scheduler::Scheduler, - /// sync::wait_map::{WaitMap, WakeOutcome}, - /// }; + /// use maitake::scheduler; + /// use maitake_sync::wait_map::{WaitMap, WakeOutcome}; /// use futures_util::pin_mut; /// /// let scheduler = Scheduler::new(); @@ -380,7 +374,7 @@ enum State { /// *Note*: This *must* correspond to all state bits being set, as it's set /// via a [`fetch_or`]. /// - /// [`Closed`]: crate::sync::Closed + /// [`Closed`]: crate::Closed /// [`fetch_or`]: core::sync::atomic::AtomicUsize::fetch_or Closed = 0b11, } @@ -926,9 +920,7 @@ impl FromBits for State { bits if bits == Self::Closed as u8 => Self::Closed, _ => unsafe { // TODO(AJM): this isn't *totally* true anymore... - mycelium_util::unreachable_unchecked!( - "all potential 2-bit patterns should be covered!" - ) + unreachable_unchecked!("all potential 2-bit patterns should be covered!") }, }) } diff --git a/maitake/src/sync/wait_map/tests.rs b/maitake-sync/src/wait_map/tests.rs similarity index 100% rename from maitake/src/sync/wait_map/tests.rs rename to maitake-sync/src/wait_map/tests.rs diff --git a/maitake/src/sync/wait_map/tests/alloc_tests.rs b/maitake-sync/src/wait_map/tests/alloc_tests.rs similarity index 58% rename from maitake/src/sync/wait_map/tests/alloc_tests.rs rename to maitake-sync/src/wait_map/tests/alloc_tests.rs index 13d8dbe8..32bedd24 100644 --- a/maitake/src/sync/wait_map/tests/alloc_tests.rs +++ b/maitake-sync/src/wait_map/tests/alloc_tests.rs @@ -1,8 +1,8 @@ use super::super::*; use crate::loom::sync::Arc; -use crate::scheduler::Scheduler; use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use futures::{future::poll_fn, pin_mut, select_biased, FutureExt}; +use tokio_test::{assert_pending, assert_ready, assert_ready_err, task}; #[test] fn enqueue() { @@ -10,15 +10,16 @@ fn enqueue() { static COMPLETED: AtomicUsize = AtomicUsize::new(0); static ENQUEUED: AtomicUsize = AtomicUsize::new(0); - let scheduler = Scheduler::new(); let q = Arc::new(WaitMap::new()); // Create a waiter, but do not tick the scheduler yet - let q2 = q.clone(); - scheduler.spawn(async move { - let val = q2.wait(0).await.unwrap(); - COMPLETED.fetch_add(1, Ordering::Relaxed); - assert_eq!(val, 100); + let mut waiter1 = task::spawn({ + let q = q.clone(); + async move { + let val = q.wait(0).await.unwrap(); + COMPLETED.fetch_add(1, Ordering::Relaxed); + assert_eq!(val, 100); + } }); // Attempt to wake - but waiter is not enqueued yet @@ -27,17 +28,19 @@ fn enqueue() { assert_eq!(ENQUEUED.load(Ordering::Relaxed), 0); // Create a second waiter - this one that first checks for enqueued - let q3 = q.clone(); - scheduler.spawn(async move { - let wait = q3.wait(1); + let mut waiter2 = task::spawn({ + let q = q.clone(); + async move { + 
let wait = q.wait(1); - pin_mut!(wait); - wait.as_mut().enqueue().await.unwrap(); - ENQUEUED.fetch_add(1, Ordering::Relaxed); + pin_mut!(wait); + wait.as_mut().enqueue().await.unwrap(); + ENQUEUED.fetch_add(1, Ordering::Relaxed); - let val = wait.await.unwrap(); - COMPLETED.fetch_add(1, Ordering::Relaxed); - assert_eq!(val, 101); + let val = wait.await.unwrap(); + COMPLETED.fetch_add(1, Ordering::Relaxed); + assert_eq!(val, 101); + } }); // Attempt to wake - but waiter is not enqueued yet @@ -47,20 +50,20 @@ fn enqueue() { assert_eq!(ENQUEUED.load(Ordering::Relaxed), 0); // Tick once, we can see the second task moved into the enqueued state - let tick = scheduler.tick(); - assert_eq!(tick.completed, 0); + assert_pending!(waiter1.poll()); + assert_pending!(waiter2.poll()); assert_eq!(COMPLETED.load(Ordering::Relaxed), 0); assert_eq!(ENQUEUED.load(Ordering::Relaxed), 1); - assert!(!tick.has_remaining); assert!(matches!(q.wake(&0, 100), WakeOutcome::Woke)); assert!(matches!(q.wake(&1, 101), WakeOutcome::Woke)); + assert!(waiter1.is_woken()); + assert!(waiter2.is_woken()); - let tick = scheduler.tick(); - assert_eq!(tick.completed, 2); + assert_ready!(waiter1.poll()); + assert_ready!(waiter2.poll()); assert_eq!(COMPLETED.load(Ordering::Relaxed), 2); assert_eq!(ENQUEUED.load(Ordering::Relaxed), 1); - assert!(!tick.has_remaining); } #[test] @@ -68,53 +71,51 @@ fn duplicate() { let _trace = crate::util::trace_init(); static COMPLETED: AtomicUsize = AtomicUsize::new(0); static ENQUEUED: AtomicUsize = AtomicUsize::new(0); - static ERRORED: AtomicUsize = AtomicUsize::new(0); - let scheduler = Scheduler::new(); let q = Arc::new(WaitMap::new()); // Create a waiter, but do not tick the scheduler yet - let q2 = q.clone(); - scheduler.spawn(async move { - let wait = q2.wait(0); + let mut waiter1 = task::spawn({ + let q = q.clone(); + async move { + let wait = q.wait(0); - pin_mut!(wait); - wait.as_mut().enqueue().await.unwrap(); - ENQUEUED.fetch_add(1, Ordering::Relaxed); + pin_mut!(wait); + wait.as_mut().enqueue().await.unwrap(); + ENQUEUED.fetch_add(1, Ordering::Relaxed); - let val = wait.await.unwrap(); - COMPLETED.fetch_add(1, Ordering::Relaxed); - assert_eq!(val, 100); + let val = wait.await.unwrap(); + COMPLETED.fetch_add(1, Ordering::Relaxed); + assert_eq!(val, 100); + } }); // Create a waiter, but do not tick the scheduler yet - let q3 = q.clone(); - scheduler.spawn(async move { - // Duplicate key! - let wait = q3.wait(0); - - pin_mut!(wait); - let result = wait.as_mut().enqueue().await; - assert!(matches!(result, Err(WaitError::Duplicate))); - ERRORED.fetch_add(1, Ordering::Relaxed); + let mut waiter2 = task::spawn({ + let q = q.clone(); + async move { + // Duplicate key! 
+ let wait = q.wait(0); + + pin_mut!(wait); + wait.as_mut().enqueue().await + } }); // Tick once, we can see the second task moved into the enqueued state - let tick = scheduler.tick(); - assert_eq!(tick.completed, 1); + assert_pending!(waiter1.poll()); + let err = assert_ready_err!(waiter2.poll()); + assert!(matches!(err, WaitError::Duplicate)); assert_eq!(COMPLETED.load(Ordering::Relaxed), 0); assert_eq!(ENQUEUED.load(Ordering::Relaxed), 1); - assert_eq!(ERRORED.load(Ordering::Relaxed), 1); - assert!(!tick.has_remaining); assert!(matches!(q.wake(&0, 100), WakeOutcome::Woke)); - let tick = scheduler.tick(); - assert_eq!(tick.completed, 1); + assert!(waiter1.is_woken()); + assert_ready!(waiter1.poll()); + assert_eq!(COMPLETED.load(Ordering::Relaxed), 1); assert_eq!(ENQUEUED.load(Ordering::Relaxed), 1); - assert_eq!(ERRORED.load(Ordering::Relaxed), 1); - assert!(!tick.has_remaining); } #[test] @@ -122,32 +123,33 @@ fn close() { let _trace = crate::util::trace_init(); static COMPLETED: AtomicUsize = AtomicUsize::new(0); - let scheduler = Scheduler::new(); let q: Arc> = Arc::new(WaitMap::new()); const TASKS: usize = 10; - for i in 0..TASKS { - let wait = q.wait_owned(i); - scheduler.spawn(async move { - wait.await.expect_err("dropping the queue must close it"); - COMPLETED.fetch_add(1, Ordering::Relaxed); - }); - } + let mut tasks = (0..TASKS) + .map(|i| { + let wait = q.wait_owned(i); + task::spawn(async move { + wait.await.expect_err("dropping the queue must close it"); + COMPLETED.fetch_add(1, Ordering::Relaxed); + }) + }) + .collect::>(); - let tick = scheduler.tick(); + for task in &mut tasks { + assert_pending!(task.poll()); + } - assert_eq!(tick.completed, 0); assert_eq!(COMPLETED.load(Ordering::Relaxed), 0); - assert!(!tick.has_remaining); q.close(); - let tick = scheduler.tick(); + for task in &mut tasks { + assert_ready!(task.poll()); + } - assert_eq!(tick.completed, TASKS); assert_eq!(COMPLETED.load(Ordering::Relaxed), TASKS); - assert!(!tick.has_remaining); } #[test] @@ -155,43 +157,44 @@ fn wake_one() { let _trace = crate::util::trace_init(); static COMPLETED: AtomicUsize = AtomicUsize::new(0); - let scheduler = Scheduler::new(); let q = Arc::new(WaitMap::new()); const TASKS: usize = 10; - for i in 0..TASKS { - let q = q.clone(); - scheduler.spawn(async move { - let val = q.wait(i).await.unwrap(); - COMPLETED.fetch_add(1, Ordering::Relaxed); + let mut tasks = (0..TASKS) + .map(|i| { + let q = q.clone(); + task::spawn(async move { + let val = q.wait(i).await.unwrap(); + COMPLETED.fetch_add(1, Ordering::Relaxed); + + assert_eq!(val, 100 + i); + + if i < (TASKS - 1) { + assert!(matches!(q.wake(&(i + 1), 100 + i + 1), WakeOutcome::Woke)); + } else { + assert!(matches!( + q.wake(&(i + 1), 100 + i + 1), + WakeOutcome::NoMatch(_) + )); + } + }) + }) + .collect::>(); - assert_eq!(val, 100 + i); - - if i < (TASKS - 1) { - assert!(matches!(q.wake(&(i + 1), 100 + i + 1), WakeOutcome::Woke)); - } else { - assert!(matches!( - q.wake(&(i + 1), 100 + i + 1), - WakeOutcome::NoMatch(_) - )); - } - }); + for task in &mut tasks { + assert_pending!(task.poll()); } - - let tick = scheduler.tick(); - - assert_eq!(tick.completed, 0); assert_eq!(COMPLETED.load(Ordering::Relaxed), 0); - assert!(!tick.has_remaining); q.wake(&0, 100); - let tick = scheduler.tick(); + for task in &mut tasks { + assert!(task.is_woken()); + assert_ready!(task.poll()); + } - assert_eq!(tick.completed, TASKS); assert_eq!(COMPLETED.load(Ordering::Relaxed), TASKS); - assert!(!tick.has_remaining); } #[derive(Debug)] @@ -230,41 
+233,41 @@ fn drop_no_wake() { static KEY_DROPS: AtomicUsize = AtomicUsize::new(0); static VAL_DROPS: AtomicUsize = AtomicUsize::new(0); - let scheduler = Scheduler::new(); let q = Arc::new(WaitMap::::new()); const TASKS: usize = 10; - for i in 0..TASKS { - let q = q.clone(); - scheduler.spawn(async move { - q.wait(CountDropKey { - idx: i, - cnt: &KEY_DROPS, + let mut tasks = (0..TASKS) + .map(|i| { + let q = q.clone(); + task::spawn(async move { + q.wait(CountDropKey { + idx: i, + cnt: &KEY_DROPS, + }) + .await + .unwrap_err(); + COMPLETED.fetch_add(1, Ordering::Relaxed); }) - .await - .unwrap_err(); - COMPLETED.fetch_add(1, Ordering::Relaxed); - }); - } + }) + .collect::>(); - let tick = scheduler.tick(); - - assert_eq!(tick.completed, 0); + for task in &mut tasks { + assert_pending!(task.poll()); + } assert_eq!(COMPLETED.load(Ordering::Relaxed), 0); assert_eq!(KEY_DROPS.load(Ordering::Relaxed), 0); assert_eq!(VAL_DROPS.load(Ordering::Relaxed), 0); - assert!(!tick.has_remaining); q.close(); - let tick = scheduler.tick(); - - assert_eq!(tick.completed, TASKS); + for task in &mut tasks { + assert!(task.is_woken()); + assert_ready!(task.poll()); + } assert_eq!(COMPLETED.load(Ordering::Relaxed), TASKS); assert_eq!(KEY_DROPS.load(Ordering::Relaxed), TASKS); assert_eq!(VAL_DROPS.load(Ordering::Relaxed), 0); - assert!(!tick.has_remaining); } #[test] @@ -274,32 +277,32 @@ fn drop_wake_completed() { static KEY_DROPS: AtomicUsize = AtomicUsize::new(0); static VAL_DROPS: AtomicUsize = AtomicUsize::new(0); static DONT_CARE: AtomicUsize = AtomicUsize::new(0); - - let scheduler = Scheduler::new(); let q = Arc::new(WaitMap::::new()); const TASKS: usize = 10; - for i in 0..TASKS { - let q = q.clone(); - scheduler.spawn(async move { - q.wait(CountDropKey { - idx: i, - cnt: &KEY_DROPS, + let mut tasks = (0..TASKS) + .map(|i| { + let q = q.clone(); + task::spawn(async move { + q.wait(CountDropKey { + idx: i, + cnt: &KEY_DROPS, + }) + .await + .unwrap(); + COMPLETED.fetch_add(1, Ordering::Relaxed); }) - .await - .unwrap(); - COMPLETED.fetch_add(1, Ordering::Relaxed); - }); - } + }) + .collect::>(); - let tick = scheduler.tick(); + for task in &mut tasks { + assert_pending!(task.poll()); + } - assert_eq!(tick.completed, 0); assert_eq!(COMPLETED.load(Ordering::Relaxed), 0); assert_eq!(KEY_DROPS.load(Ordering::Relaxed), 0); assert_eq!(VAL_DROPS.load(Ordering::Relaxed), 0); - assert!(!tick.has_remaining); for i in 0..TASKS { q.wake( @@ -315,13 +318,14 @@ fn drop_wake_completed() { assert_eq!(KEY_DROPS.load(Ordering::Relaxed), 0); assert_eq!(VAL_DROPS.load(Ordering::Relaxed), 0); - let tick = scheduler.tick(); + for task in &mut tasks { + assert!(task.is_woken()); + assert_ready!(task.poll()); + } - assert_eq!(tick.completed, TASKS); assert_eq!(COMPLETED.load(Ordering::Relaxed), TASKS); assert_eq!(KEY_DROPS.load(Ordering::Relaxed), TASKS); assert_eq!(VAL_DROPS.load(Ordering::Relaxed), TASKS); - assert!(!tick.has_remaining); } #[test] @@ -333,46 +337,47 @@ fn drop_wake_bailed() { static DONT_CARE: AtomicUsize = AtomicUsize::new(0); static BAIL: AtomicBool = AtomicBool::new(false); - let scheduler = Scheduler::new(); let q = Arc::new(WaitMap::::new()); const TASKS: usize = 10; - for i in 0..TASKS { - let q = q.clone(); - scheduler.spawn(async move { - let mut bail_fut = poll_fn(|_| match BAIL.load(Ordering::Relaxed) { - false => Poll::Pending, - true => Poll::Ready(()), - }) - .fuse(); - - let wait_fut = q - .wait(CountDropKey { - idx: i, - cnt: &KEY_DROPS, + let mut tasks = (0..TASKS) + .map(|i| { + let q = 
q.clone(); + task::spawn(async move { + let mut bail_fut = poll_fn(|_| match BAIL.load(Ordering::Relaxed) { + false => Poll::Pending, + true => Poll::Ready(()), }) .fuse(); - pin_mut!(wait_fut); - - // NOTE: `select_baised is used specifically to ensure the bail - // future is processed first. - select_biased! { - _a = bail_fut => {}, - _b = wait_fut => { - COMPLETED.fetch_add(1, Ordering::Relaxed); + + let wait_fut = q + .wait(CountDropKey { + idx: i, + cnt: &KEY_DROPS, + }) + .fuse(); + pin_mut!(wait_fut); + + // NOTE: `select_biased!` is used specifically to ensure the bail + // future is processed first. + select_biased! { + _a = bail_fut => {}, + _b = wait_fut => { + COMPLETED.fetch_add(1, Ordering::Relaxed); + } } - } - }); - } + }) + }) + .collect::<Vec<_>>(); - let tick = scheduler.tick(); + for task in &mut tasks { + assert_pending!(task.poll()); + } - assert_eq!(tick.completed, 0); assert_eq!(COMPLETED.load(Ordering::Relaxed), 0); assert_eq!(KEY_DROPS.load(Ordering::Relaxed), 0); assert_eq!(VAL_DROPS.load(Ordering::Relaxed), 0); - assert!(!tick.has_remaining); for i in 0..TASKS { q.wake( @@ -389,11 +394,11 @@ fn drop_wake_bailed() { assert_eq!(VAL_DROPS.load(Ordering::Relaxed), 0); BAIL.store(true, Ordering::Relaxed); - let tick = scheduler.tick(); - - assert_eq!(tick.completed, TASKS); + for task in &mut tasks { + assert!(task.is_woken()); + assert_ready!(task.poll()); + } assert_eq!(COMPLETED.load(Ordering::Relaxed), 0); assert_eq!(KEY_DROPS.load(Ordering::Relaxed), TASKS); assert_eq!(VAL_DROPS.load(Ordering::Relaxed), TASKS); - assert!(!tick.has_remaining); } diff --git a/maitake/src/sync/wait_map/tests/loom.rs b/maitake-sync/src/wait_map/tests/loom.rs similarity index 100% rename from maitake/src/sync/wait_map/tests/loom.rs rename to maitake-sync/src/wait_map/tests/loom.rs diff --git a/maitake/src/sync/wait_queue.rs b/maitake-sync/src/wait_queue.rs similarity index 95% rename from maitake/src/sync/wait_queue.rs rename to maitake-sync/src/wait_queue.rs index cacf3135..0fdd3348 100644 --- a/maitake/src/sync/wait_queue.rs +++ b/maitake-sync/src/wait_queue.rs @@ -2,6 +2,8 @@ //! all at once). //! //! See the [`WaitQueue`] type's documentation for details.
+#[cfg(any(test, maitake_ultraverbose))] +use crate::util::fmt; use crate::{ loom::{ cell::UnsafeCell, @@ -10,8 +12,8 @@ use crate::{ spin::{Mutex, MutexGuard}, }, }, - sync::{self, WaitResult}, - util::WakeBatch, + util::{CachePadded, WakeBatch}, + WaitResult, }; use cordyceps::{ list::{self, List}, @@ -26,9 +28,6 @@ use core::{ task::{Context, Poll, Waker}, }; use mycelium_bitfield::{bitfield, enum_from_bits, FromBits}; -#[cfg(any(test, maitake_ultraverbose))] -use mycelium_util::fmt; -use mycelium_util::sync::CachePadded; use pin_project::{pin_project, pinned_drop}; #[cfg(test)] @@ -53,9 +52,10 @@ mod tests; /// /// Waking a single task at a time by calling [`wake`][wake]: /// -/// ``` +/// ```ignore /// use std::sync::Arc; -/// use maitake::{scheduler::Scheduler, sync::WaitQueue}; +/// use maitake::scheduler::Scheduler; +/// use maitake_sync::WaitQueue; /// /// const TASKS: usize = 10; /// @@ -99,9 +99,10 @@ mod tests; /// /// Waking all tasks using [`wake_all`][wake_all]: /// -/// ``` +/// ```ignore /// use std::sync::Arc; -/// use maitake::{scheduler::Scheduler, sync::WaitQueue}; +/// use maitake::scheduler::Scheduler; +/// use maitake_sync::WaitQueue; /// /// const TASKS: usize = 10; /// @@ -171,7 +172,7 @@ mod tests; /// [`UnsafeCell`]: core::cell::UnsafeCell /// [ilist]: cordyceps::List /// [intrusive]: https://fuchsia.dev/fuchsia-src/development/languages/c-cpp/fbl_containers_guide/introduction -/// [mutex]: crate::sync::Mutex +/// [mutex]: crate::Mutex /// [2]: https://www.1024cores.net/home/lock-free-algorithms/queues/intrusive-mpsc-node-based-queue #[derive(Debug)] pub struct WaitQueue { @@ -322,7 +323,7 @@ enum State { /// *Note*: This *must* correspond to all state bits being set, as it's set /// via a [`fetch_or`]. /// - /// [`Closed`]: crate::sync::Closed + /// [`Closed`]: crate::Closed /// [`fetch_or`]: core::sync::atomic::AtomicUsize::fetch_or Closed = 0b11, } @@ -387,15 +388,15 @@ impl WaitQueue { /// # Examples /// /// ``` + /// # use tokio::task; + /// # #[tokio::main(flavor = "current_thread")] + /// # async fn test() { /// use std::sync::Arc; - /// use maitake::{scheduler::Scheduler, sync::WaitQueue}; - /// - /// // In order to spawn tasks, we need a `Scheduler` instance. - /// let scheduler = Scheduler::new(); + /// use maitake_sync::WaitQueue; /// /// let queue = Arc::new(WaitQueue::new()); /// - /// scheduler.spawn({ + /// let waiter = task::spawn({ /// // clone the queue to move into the spawned task /// let queue = queue.clone(); /// async move { @@ -404,15 +405,12 @@ impl WaitQueue { /// } /// }); /// - /// let waiter = scheduler.spawn(async move { - /// println!("waking task..."); - /// queue.wake(); - /// }); - /// - /// // run the scheduler so that the spawned tasks can run. - /// scheduler.tick(); + /// println!("waking task..."); + /// queue.wake(); /// - /// assert!(waiter.is_complete()); + /// waiter.await.unwrap(); + /// # } + /// # test(); /// ``` #[inline] pub fn wake(&self) { @@ -466,16 +464,16 @@ impl WaitQueue { /// # Examples /// /// ``` - /// use maitake::{scheduler::Scheduler, sync::WaitQueue}; + /// # use tokio::task; + /// # #[tokio::main(flavor = "current_thread")] + /// # async fn test() { + /// use maitake_sync::WaitQueue; /// use std::sync::Arc; /// - /// // In order to spawn tasks, we need a `Scheduler` instance. - /// let scheduler = Scheduler::new(); - /// /// let queue = Arc::new(WaitQueue::new()); /// /// // spawn multiple tasks to wait on the queue. 
- /// let task1 = scheduler.spawn({ + /// let task1 = task::spawn({ /// let queue = queue.clone(); /// async move { /// println!("task 1 waiting..."); @@ -484,7 +482,7 @@ impl WaitQueue { /// } /// }); /// - /// let task2 = scheduler.spawn({ + /// let task2 = task::spawn({ /// let queue = queue.clone(); /// async move { /// println!("task 2 waiting..."); @@ -493,21 +491,24 @@ impl WaitQueue { /// } /// }); /// - /// // tick the scheduler so that both tasks register + /// // yield to the scheduler so that both tasks register /// // themselves to wait on the queue. - /// scheduler.tick(); + /// task::yield_now().await; /// /// // neither task will have been woken. - /// assert!(!task1.is_complete()); - /// assert!(!task2.is_complete()); + /// assert!(!task1.is_finished()); + /// assert!(!task2.is_finished()); /// /// // wake all tasks waiting on the queue. /// queue.wake_all(); /// - /// // tick the scheduler again so that the tasks can execute. - /// scheduler.tick(); - /// assert!(task1.is_complete()); - /// assert!(task2.is_complete()); + /// // yield to the scheduler again so that the tasks can execute. + /// task::yield_now().await; + /// + /// assert!(task1.is_finished()); + /// assert!(task2.is_finished()); + /// # } + /// # test(); /// ``` /// /// [`wake()`]: Self::wake @@ -615,15 +616,15 @@ impl WaitQueue { /// # Examples /// /// ``` + /// # use tokio::task; + /// # #[tokio::main(flavor = "current_thread")] + /// # async fn test() { /// use std::sync::Arc; - /// use maitake::{scheduler::Scheduler, sync::WaitQueue}; - /// - /// // In order to spawn tasks, we need a `Scheduler` instance. - /// let scheduler = Scheduler::new(); + /// use maitake_sync::WaitQueue; /// /// let queue = Arc::new(WaitQueue::new()); /// - /// scheduler.spawn({ + /// let waiter = task::spawn({ /// // clone the queue to move into the spawned task /// let queue = queue.clone(); /// async move { @@ -632,20 +633,17 @@ impl WaitQueue { /// } /// }); /// - /// let waiter = scheduler.spawn(async move { - /// println!("waking task..."); - /// queue.wake(); - /// }); - /// - /// // run the scheduler so that the spawned tasks can run. 
- /// scheduler.tick(); + /// println!("waking task..."); + /// queue.wake(); /// - /// assert!(waiter.is_complete()); + /// waiter.await.unwrap(); + /// # } + /// # test(); /// ``` /// /// [`wake()`]: Self::wake /// [`wake_all()`]: Self::wake_all - /// [`Closed`]: crate::sync::Closed + /// [`Closed`]: crate::Closed pub fn wait(&self) -> Wait<'_> { Wait { queue: self, @@ -664,7 +662,7 @@ impl WaitQueue { } match state.get(QueueState::STATE) { - State::Closed => sync::closed(), + State::Closed => crate::closed(), _ if state.get(QueueState::WAKE_ALLS) > initial_wake_alls => Poll::Ready(Ok(())), State::Empty | State::Waiting => Poll::Pending, State::Woken => Poll::Ready(Ok(())), @@ -914,7 +912,7 @@ impl Waiter { Err(actual) => queue_state = actual, } } - State::Closed => return sync::closed(), + State::Closed => return crate::closed(), } } @@ -958,7 +956,7 @@ impl Waiter { } Wakeup::Closed => { this.state.set(WaitStateBits::STATE, WaitState::Woken); - sync::closed() + crate::closed() } Wakeup::Empty => { if let Some(waker) = waker { @@ -1048,7 +1046,7 @@ impl Wait<'_> { /// # Examples /// /// ``` - /// use maitake::sync::WaitQueue; + /// use maitake_sync::WaitQueue; /// /// let queue1 = WaitQueue::new(); /// let queue2 = WaitQueue::new(); @@ -1071,7 +1069,7 @@ impl Wait<'_> { /// Two [`Wait`] futures waiting on the same [`WaitQueue`] return `true`: /// /// ``` - /// use maitake::sync::WaitQueue; + /// use maitake_sync::WaitQueue; /// /// let queue = WaitQueue::new(); /// @@ -1084,7 +1082,7 @@ impl Wait<'_> { /// Two [`Wait`] futures waiting on different [`WaitQueue`]s return `false`: /// /// ``` - /// use maitake::sync::WaitQueue; + /// use maitake_sync::WaitQueue; /// /// let queue1 = WaitQueue::new(); /// let queue2 = WaitQueue::new(); @@ -1191,9 +1189,7 @@ impl FromBits for State { bits if bits == Self::Woken as u8 => Self::Woken, bits if bits == Self::Closed as u8 => Self::Closed, _ => unsafe { - mycelium_util::unreachable_unchecked!( - "all potential 2-bit patterns should be covered!" - ) + unreachable_unchecked!("all potential 2-bit patterns should be covered!") }, }) } @@ -1269,7 +1265,7 @@ feature! { /// [`wake_all()`]: Self::wake_all /// [`wait()`]: Self::wait /// [closed]: Self::close - /// [`Closed`]: crate::sync::Closed + /// [`Closed`]: crate::Closed pub fn wait_owned(self: &Arc) -> WaitOwned { let waiter = self.waiter(); let queue = self.clone(); @@ -1280,13 +1276,13 @@ feature! { // === impl WaitOwned === impl WaitOwned { - /// Returns `true` if this `WaitOwned` future is waiting for a + /// Returns `true` if this `WaitOwned` future is waiting for a /// notification from the provided [`WaitQueue`]. /// /// # Examples /// /// ``` - /// use maitake::sync::WaitQueue; + /// use maitake_sync::WaitQueue; /// use std::sync::Arc; /// /// let queue1 = Arc::new(WaitQueue::new()); @@ -1311,7 +1307,7 @@ feature! { /// `true`: /// /// ``` - /// use maitake::sync::WaitQueue; + /// use maitake_sync::WaitQueue; /// use std::sync::Arc; /// /// let queue = Arc::new(WaitQueue::new()); @@ -1326,7 +1322,7 @@ feature! 
{ /// `false`: /// /// ``` - /// use maitake::sync::WaitQueue; + /// use maitake_sync::WaitQueue; /// use std::sync::Arc; /// /// let queue1 = Arc::new(WaitQueue::new()); diff --git a/maitake/src/sync/wait_queue/tests.rs b/maitake-sync/src/wait_queue/tests.rs similarity index 100% rename from maitake/src/sync/wait_queue/tests.rs rename to maitake-sync/src/wait_queue/tests.rs diff --git a/maitake/src/sync/wait_queue/tests/alloc_tests.rs b/maitake-sync/src/wait_queue/tests/alloc_tests.rs similarity index 58% rename from maitake/src/sync/wait_queue/tests/alloc_tests.rs rename to maitake-sync/src/wait_queue/tests/alloc_tests.rs index eda52ef0..802f575c 100644 --- a/maitake/src/sync/wait_queue/tests/alloc_tests.rs +++ b/maitake-sync/src/wait_queue/tests/alloc_tests.rs @@ -1,40 +1,41 @@ use super::*; use crate::loom::sync::Arc; -use crate::scheduler::Scheduler; use core::sync::atomic::{AtomicUsize, Ordering}; use tokio_test::{assert_pending, assert_ready, assert_ready_ok, task}; +const TASKS: usize = 10; + #[test] fn wake_all() { let _trace = crate::util::trace_init(); static COMPLETED: AtomicUsize = AtomicUsize::new(0); - let scheduler = Scheduler::new(); let q = Arc::new(WaitQueue::new()); - const TASKS: usize = 10; - - for _ in 0..TASKS { - let q = q.clone(); - scheduler.spawn(async move { - q.wait().await.unwrap(); - COMPLETED.fetch_add(1, Ordering::SeqCst); - }); + let mut tasks = (0..TASKS) + .map(|_| { + let q = q.clone(); + task::spawn(async move { + q.wait().await.unwrap(); + COMPLETED.fetch_add(1, Ordering::SeqCst); + }) + }) + .collect::<Vec<_>>(); + + for task in &mut tasks { + assert_pending!(task.poll()); } - let tick = scheduler.tick(); - - assert_eq!(tick.completed, 0); assert_eq!(COMPLETED.load(Ordering::SeqCst), 0); - assert!(!tick.has_remaining); q.wake_all(); - let tick = scheduler.tick(); + for task in &mut tasks { + assert!(task.is_woken()); + assert_ready!(task.poll()); + } - assert_eq!(tick.completed, TASKS); assert_eq!(COMPLETED.load(Ordering::SeqCst), TASKS); - assert!(!tick.has_remaining); } #[test] @@ -42,32 +43,30 @@ fn close() { let _trace = crate::util::trace_init(); static COMPLETED: AtomicUsize = AtomicUsize::new(0); - let scheduler = Scheduler::new(); let q = Arc::new(WaitQueue::new()); - const TASKS: usize = 10; - - for _ in 0..TASKS { - let wait = q.wait_owned(); - scheduler.spawn(async move { - wait.await.expect_err("dropping the queue must close it"); - COMPLETED.fetch_add(1, Ordering::SeqCst); - }); + let mut tasks = (0..TASKS) + .map(|_| { + let wait = q.wait_owned(); + task::spawn(async move { + wait.await.expect_err("dropping the queue must close it"); + COMPLETED.fetch_add(1, Ordering::SeqCst); + }) + }) + .collect::<Vec<_>>(); + + for task in &mut tasks { + assert_pending!(task.poll()); } - let tick = scheduler.tick(); - - assert_eq!(tick.completed, 0); - assert_eq!(COMPLETED.load(Ordering::SeqCst), 0); - assert!(!tick.has_remaining); - q.close(); - let tick = scheduler.tick(); + for task in &mut tasks { + assert!(task.is_woken()); + assert_ready!(task.poll()); + } - assert_eq!(tick.completed, TASKS); assert_eq!(COMPLETED.load(Ordering::SeqCst), TASKS); - assert!(!tick.has_remaining); } #[test] @@ -75,51 +74,51 @@ fn wake_one() { let _trace = crate::util::trace_init(); static COMPLETED: AtomicUsize = AtomicUsize::new(0); - let scheduler = Scheduler::new(); let q = Arc::new(WaitQueue::new()); - const TASKS: usize = 10; - - for _ in 0..TASKS { - let q = q.clone(); - scheduler.spawn(async move { - q.wait().await.unwrap(); - COMPLETED.fetch_add(1, Ordering::SeqCst); -
q.wake(); - }); + let mut tasks = (0..TASKS) + .map(|_| { + let q = q.clone(); + task::spawn(async move { + q.wait().await.unwrap(); + COMPLETED.fetch_add(1, Ordering::SeqCst); + q.wake(); + }) + }) + .collect::<Vec<_>>(); + + for task in &mut tasks { + assert_pending!(task.poll()); } - let tick = scheduler.tick(); - - assert_eq!(tick.completed, 0); assert_eq!(COMPLETED.load(Ordering::SeqCst), 0); - assert!(!tick.has_remaining); q.wake(); - let tick = scheduler.tick(); + for task in &mut tasks { + assert!(task.is_woken()); + assert_ready!(task.poll()); + } - assert_eq!(tick.completed, TASKS); assert_eq!(COMPLETED.load(Ordering::SeqCst), TASKS); - assert!(!tick.has_remaining); } #[test] fn wake_not_subscribed() { let _trace = crate::util::trace_init(); - let scheduler = Scheduler::new(); let q = Arc::new(WaitQueue::new()); - let task = scheduler.spawn({ + let mut task = task::spawn({ let q = q.clone(); async move { q.wait().await.unwrap() } }); + assert_pending!(task.poll()); + q.wake(); - let tick = scheduler.tick(); - assert_eq!(tick.completed, 1); - assert!(task.is_complete()); + assert!(task.is_woken()); + assert_ready!(task.poll()); } #[test] diff --git a/maitake/src/sync/wait_queue/tests/loom.rs b/maitake-sync/src/wait_queue/tests/loom.rs similarity index 100% rename from maitake/src/sync/wait_queue/tests/loom.rs rename to maitake-sync/src/wait_queue/tests/loom.rs diff --git a/maitake/Cargo.toml b/maitake/Cargo.toml index 06725ba6..097042c2 100644 --- a/maitake/Cargo.toml +++ b/maitake/Cargo.toml @@ -30,13 +30,14 @@ links = "maitake" [features] default = ["alloc"] -alloc = ["cordyceps/alloc"] -no-cache-pad = ["mycelium-util/no-cache-pad", "cordyceps/no-cache-pad"] -core-error = [] +alloc = ["cordyceps/alloc", "maitake-sync/alloc"] +no-cache-pad = ["mycelium-util/no-cache-pad", "cordyceps/no-cache-pad", "maitake-sync/no-cache-pad"] +core-error = ["maitake-sync/core-error"] [dependencies] mycelium-bitfield = { path = "../bitfield" } mycelium-util = { path = "../util" } +maitake-sync = { path = "../maitake-sync" } cordyceps = { path = "../cordyceps" } pin-project = "1" portable-atomic = "1.2" @@ -65,7 +66,7 @@ console-subscriber = "0.1.6" proptest = "1" [target.'cfg(loom)'.dev-dependencies] -loom = { version = "0.5.5", features = ["futures", "checkpoint"] } +loom = { version = "0.7", features = ["futures", "checkpoint"] } tracing_01 = { package = "tracing", version = "0.1", default_features = false } tracing_subscriber_03 = { package = "tracing-subscriber", version = "0.3.11", features = ["fmt"] } diff --git a/maitake/src/lib.rs b/maitake/src/lib.rs index 0a4a0f86..f5223eb0 100644 --- a/maitake/src/lib.rs +++ b/maitake/src/lib.rs @@ -16,6 +16,49 @@ pub(crate) mod loom; pub mod future; pub mod scheduler; -pub mod sync; pub mod task; pub mod time; + +pub mod sync { + //! Asynchronous synchronization primitives + //! + //! [_Synchronization primitives_][primitives] are tools for implementing + //! synchronization between [tasks]: to control which tasks can run at any given + //! time, and in what order, and to coordinate tasks' access to shared + //! resources. Typically, this synchronization involves some form of _waiting_. + //! In asynchronous systems, synchronization primitives allow tasks to wait by + //! yielding to the runtime scheduler, so that other tasks may run while they + //! are waiting. This module provides asynchronous implementations of common + //! synchronization primitives. + //! + //! The following synchronization primitives are provided: + //! + //!
- [`Mutex`]: a fairly queued, asynchronous [mutual exclusion lock], for + //! protecting shared data + //! - [`RwLock`]: a fairly queued, asynchronous [readers-writer lock], which + //! allows concurrent read access to shared data while ensuring write + //! access is exclusive + //! - [`Semaphore`]: an asynchronous [counting semaphore], for limiting the + //! number of tasks which may run concurrently + //! - [`WaitCell`], a cell that stores a *single* waiting task's [`Waker`], so + //! that the task can be woken by another task, + //! - [`WaitQueue`], a queue of waiting tasks, which are woken in first-in, + //! first-out order + //! - [`WaitMap`], a set of waiting tasks associated with keys, in which a task + //! can be woken by its key + //! + //! **Note**: `maitake`'s synchronization primitives *do not* require the + //! `maitake` runtime, and can be used with any async executor. Therefore, + //! they are provided by a separate [`maitake-sync`] crate, which can be + //! used without depending on the rest of the `maitake` runtime. This module + //! re-exports these APIs from [`maitake-sync`]. + //! + //! [primitives]: https://wiki.osdev.org/Synchronization_Primitives + //! [tasks]: crate::task + //! [mutual exclusion lock]: https://en.wikipedia.org/wiki/Mutual_exclusion + //! [readers-writer lock]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock + //! [counting semaphore]: https://en.wikipedia.org/wiki/Semaphore_(programming) + //! [`Waker`]: core::task::Waker + //! [`maitake-sync`]: https://crates.io/crates/maitake-sync + pub use maitake_sync::*; +} diff --git a/maitake/src/scheduler/tests.rs b/maitake/src/scheduler/tests.rs index 08bee715..028163e7 100644 --- a/maitake/src/scheduler/tests.rs +++ b/maitake/src/scheduler/tests.rs @@ -1,5 +1,11 @@ use super::*; -use crate::{future, sync::wait_cell::test_util::Chan}; +use crate::{ + future, + loom::sync::atomic::{AtomicUsize, Ordering::Relaxed}, + sync::WaitCell, +}; +use core::task::Poll; +use std::sync::Arc; #[cfg(all(feature = "alloc", not(loom)))] mod alloc_tests; @@ -9,3 +15,55 @@ mod custom_storage; #[cfg(any(loom, feature = "alloc"))] mod loom; + +#[derive(Debug)] +pub(crate) struct Chan { + num: AtomicUsize, + task: WaitCell, + num_notify: usize, +} + +impl Chan { + pub(crate) fn new(num_notify: usize) -> Arc<Self> { + Arc::new(Self { + num: AtomicUsize::new(0), + task: WaitCell::new(), + num_notify, + }) + } + + pub(crate) async fn wait(self: Arc<Self>) { + let this = Arc::downgrade(&self); + drop(self); + futures_util::future::poll_fn(move |cx| { + let Some(this) = this.upgrade() else {return Poll::Ready(()) }; + + let res = this.task.wait(); + futures_util::pin_mut!(res); + + if this.num_notify == this.num.load(Relaxed) { + return Poll::Ready(()); + } + + res.poll(cx).map(drop) + }) + .await + } + + pub(crate) fn wake(&self) { + self.num.fetch_add(1, Relaxed); + self.task.wake(); + } + + #[allow(dead_code)] + pub(crate) fn close(&self) { + self.num.fetch_add(1, Relaxed); + self.task.close(); + } +} + +impl Drop for Chan { + fn drop(&mut self) { + debug!(chan = ?fmt::alt(&self), "drop"); + } +} diff --git a/maitake/src/sync.rs b/maitake/src/sync.rs deleted file mode 100644 index 8081370f..00000000 --- a/maitake/src/sync.rs +++ /dev/null @@ -1,93 +0,0 @@ -//! Asynchronous synchronization primitives -//! -//! [_Synchronization primitives_][primitives] are tools for implementing -//! synchronization between [tasks]: to control which tasks can run at any given -//! time, and in what order, and to coordinate tasks' access to shared
resources. Typically, this synchronization involves some form of _waiting_. -//! In asynchronous systems, synchronization primitives allow tasks to wait by -//! yielding to the runtime scheduler, so that other tasks may run while they -//! are waiting. This module provides asynchronous implementations of common -//! synchronization primitives. -//! -//! The following synchronization primitives are provided: -//! -//! - [`Mutex`]: a fairly queued, asynchronous [mutual exclusion lock], for -//! protecting shared data -//! - [`RwLock`]: a fairly queued, asynchronous [readers-writer lock], which -//! allows concurrent read access to shared data while ensuring write -//! access is exclusive -//! - [`Semaphore`]: an asynchronous [counting semaphore], for limiting the -//! number of tasks which may run concurrently -//! - [`WaitCell`], a cell that stores a *single* waiting task's [`Waker`], so -//! that the task can be woken by another task, -//! - [`WaitQueue`], a queue of waiting tasks, which are woken in first-in, -//! first-out order -//! - [`WaitMap`], a set of waiting tasks associated with keys, in which a task -//! can be woken by its key -//! -//! [primitives]: https://wiki.osdev.org/Synchronization_Primitives -//! [tasks]: crate::task -//! [mutual exclusion lock]: https://en.wikipedia.org/wiki/Mutual_exclusion -//! [readers-writer lock]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock -//! [counting semaphore]: https://en.wikipedia.org/wiki/Semaphore_(programming) -//! [`Waker`]: core::task::Waker -#![warn(missing_docs, missing_debug_implementations)] -pub mod mutex; -pub mod rwlock; -pub mod semaphore; -pub mod wait_cell; -pub mod wait_map; -pub mod wait_queue; - -#[cfg(feature = "alloc")] -#[doc(inline)] -pub use self::mutex::OwnedMutexGuard; -#[doc(inline)] -pub use self::mutex::{Mutex, MutexGuard}; -#[cfg(feature = "alloc")] -#[doc(inline)] -pub use self::rwlock::{OwnedRwLockReadGuard, OwnedRwLockWriteGuard}; -#[doc(inline)] -pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; -#[doc(inline)] -pub use self::semaphore::Semaphore; -#[doc(inline)] -pub use self::wait_cell::WaitCell; -#[doc(inline)] -pub use self::wait_map::WaitMap; -#[doc(inline)] -pub use self::wait_queue::WaitQueue; - -use core::task::Poll; - -/// An error indicating that a [`WaitCell`], [`WaitQueue`] or [`Semaphore`] was -/// closed while attempting to register a waiting task. -/// -/// This error is returned by the [`WaitCell::wait`], [`WaitQueue::wait`] and -/// [`Semaphore::acquire`] methods. -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub struct Closed(()); - -/// The result of waiting on a [`WaitQueue`] or [`Semaphore`]. -pub type WaitResult = Result; - -pub(in crate::sync) const fn closed() -> Poll> { - Poll::Ready(Err(Closed::new())) -} - -impl Closed { - pub(in crate::sync) const fn new() -> Self { - Self(()) - } -} - -impl core::fmt::Display for Closed { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.pad("closed") - } -} - -feature! 
{ - #![feature = "core-error"] - impl core::error::Error for Closed {} -} diff --git a/maitake/src/task/state.rs b/maitake/src/task/state.rs index f8e2bd6e..dce2df4e 100644 --- a/maitake/src/task/state.rs +++ b/maitake/src/task/state.rs @@ -1,10 +1,14 @@ use super::PollResult; -use crate::loom::sync::atomic::{ - self, AtomicUsize, - Ordering::{self, *}, +use crate::{ + loom::sync::atomic::{ + self, AtomicUsize, + Ordering::{self, *}, + }, + sync::util::Backoff, }; + use core::fmt; -use mycelium_util::{sync::spin::Backoff, unreachable_unchecked}; +use mycelium_util::unreachable_unchecked; mycelium_bitfield::bitfield! { /// A snapshot of a task's current state. diff --git a/maitake/src/task/tests/alloc_tests.rs b/maitake/src/task/tests/alloc_tests.rs index 2a54f683..caa2e8ef 100644 --- a/maitake/src/task/tests/alloc_tests.rs +++ b/maitake/src/task/tests/alloc_tests.rs @@ -81,10 +81,14 @@ fn empty_task_size() { " state: {}B", size_of_val(&task.schedulable.header.state) ); - println!( - " vtable: {}B", - size_of_val(&task.schedulable.header.vtable) - ); + + // clippy warns us that the vtable field on `schedulable.header` is a + // reference, and that we probably want to dereference it to get the actual + // size of the vtable. but...we *don't*. we want the size of the reference, + // since we care about the actual header size here. + #[allow(clippy::size_of_ref)] + let vtable_ptr = size_of_val(&task.schedulable.header.vtable); + println!(" vtable (ptr): {vtable_ptr}B",); println!( " id: {}B", size_of_val(&task.schedulable.header.id) diff --git a/maitake/src/trace.rs b/maitake/src/trace.rs index f2a32271..a1a163a3 100644 --- a/maitake/src/trace.rs +++ b/maitake/src/trace.rs @@ -100,18 +100,6 @@ macro_rules! test_dbg { }; } -#[cfg(all(not(test), not(maitake_ultraverbose)))] -macro_rules! enter_test_debug_span { - ($($args:tt)+) => {}; -} - -#[cfg(any(test, maitake_ultraverbose))] -macro_rules! enter_test_debug_span { - ($($args:tt)+) => { - let _span = debug_span!($($args)+).entered(); - }; -} - #[cfg(all(not(test), not(maitake_ultraverbose)))] macro_rules! test_debug { ($($args:tt)+) => {}; diff --git a/maitake/src/util.rs b/maitake/src/util.rs index 025c95e7..bf193a05 100644 --- a/maitake/src/util.rs +++ b/maitake/src/util.rs @@ -1,23 +1,5 @@ use core::ptr::NonNull; -mod wake_batch; -pub(crate) use self::wake_batch::WakeBatch; - -macro_rules! fmt_bits { - ($self: expr, $f: expr, $has_states: ident, $($name: ident),+) => { - $( - if $self.contains(Self::$name) { - if $has_states { - $f.write_str(" | ")?; - } - $f.write_str(stringify!($name))?; - $has_states = true; - } - )+ - - }; -} - macro_rules! 
feature { ( #![$meta:meta] @@ -172,11 +154,4 @@ pub(crate) mod test { _x1: collector.set_default(), } } - - #[allow(dead_code)] - pub(crate) fn assert_send() {} - - #[allow(dead_code)] - pub(crate) fn assert_sync() {} - pub(crate) fn assert_send_sync() {} } diff --git a/util/Cargo.toml b/util/Cargo.toml index 3f3c9313..8d7c2769 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -10,14 +10,14 @@ readme = "README.md" # https://doc.rust-lang.org/cargo/reference/manifest.html [features] default = [] -alloc = ["cordyceps/alloc"] -no-cache-pad = ["cordyceps/no-cache-pad"] +alloc = ["cordyceps/alloc", "maitake-sync/alloc"] +no-cache-pad = ["cordyceps/no-cache-pad", "maitake-sync/no-cache-pad"] [dependencies] tracing = { git = "https://github.com/tokio-rs/tracing", default_features = false, features = ["attributes"] } cordyceps = { path = "../cordyceps", default-features = false } mycelium-bitfield = { path = "../bitfield", default-features = false } - +maitake-sync = { path = "../maitake-sync", default-features = false } [dev-dependencies] proptest = "1" @@ -25,7 +25,7 @@ tracing-subscriber = { git = "https://github.com/tokio-rs/tracing", features = [ tracing = { git = "https://github.com/tokio-rs/tracing", default_features = false, features = ["attributes", "std"] } [target.'cfg(loom)'.dependencies] -loom = "0.5.5" +loom = "0.7" [target.'cfg(loom)'.dev-dependencies] tracing_01 = { package = "tracing", version = "0.1.36" } diff --git a/util/src/macros.rs b/util/src/macros.rs index 0dfcdcc2..a416d91c 100644 --- a/util/src/macros.rs +++ b/util/src/macros.rs @@ -1,18 +1,3 @@ -macro_rules! loom_const_fn { - ( - $(#[$meta:meta])* - $vis:vis fn $name:ident($($arg:ident: $T:ty),*) -> $Ret:ty $body:block - ) => { - $(#[$meta])* - #[cfg(not(loom))] - $vis const fn $name($($arg: $T),*) -> $Ret $body - - $(#[$meta])* - #[cfg(loom)] - $vis fn $name($($arg: $T),*) -> $Ret $body - } -} - /// Indicates unreachable code that we are confident is *truly* unreachable. /// /// This is essentially a compromise between `core::unreachable!()` and @@ -67,43 +52,6 @@ macro_rules! unreachable_unchecked { }); } -#[cfg(test)] -macro_rules! test_dbg { - ($x:expr) => { - match $x { - x => { - test_trace!( - location = %core::panic::Location::caller(), - "{} = {x:?}", - stringify!($x) - ); - x - } - } - }; -} - -#[cfg(not(test))] -macro_rules! test_dbg { - ($x:expr) => { - $x - }; -} - -#[cfg(all(test, not(loom)))] -macro_rules! test_trace { - ($($arg:tt)+) => { - tracing::trace!($($arg)+); - }; -} - -#[cfg(all(test, loom))] -macro_rules! test_trace { - ($($arg:tt)+) => { - tracing_01::trace!($($arg)+); - }; -} - #[cfg(all(test, not(loom)))] macro_rules! test_info { ($($arg:tt)+) => { diff --git a/util/src/sync.rs b/util/src/sync.rs index c3e4b3cb..08c17c68 100644 --- a/util/src/sync.rs +++ b/util/src/sync.rs @@ -7,14 +7,17 @@ pub use loom::sync::atomic; pub use core::sync::atomic; pub mod cell; -pub mod once; -pub mod spin; -#[doc(inline)] -pub use self::once::{InitOnce, Lazy}; -mod cache_pad; -pub use self::cache_pad::CachePadded; +pub use self::once::{InitOnce, Lazy}; +pub use maitake_sync::{spin::once, util::CachePadded}; +/// Spinlocks and related synchronization primitives. +pub mod spin { + pub use maitake_sync::{ + spin::{Mutex, MutexGuard}, + util::Backoff, + }; +} /// A wrapper for the [`core::hint`] module that emits either [`loom`] spin loop /// hints (when `cfg(loom)` is enabled), or real spin loop hints when loom is /// not enabled.
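
The rewritten alloc tests above all follow the same `tokio-test` polling pattern in place of the old `Scheduler`-based harness. For reference, here is a minimal, self-contained sketch of that pattern against `maitake-sync`'s `WaitQueue` (assuming `maitake-sync` and `tokio-test` as dev-dependencies; the test name is illustrative and not part of this patch):

use std::sync::Arc;

use maitake_sync::WaitQueue;
use tokio_test::{assert_pending, assert_ready_ok, task};

#[test]
fn wait_then_wake() {
    let q = Arc::new(WaitQueue::new());

    // Wrap the waiting future in a mock task so it can be polled by hand.
    let mut waiter = task::spawn({
        let q = q.clone();
        async move { q.wait().await }
    });

    // The first poll registers the waiter with the queue and returns `Pending`.
    assert_pending!(waiter.poll());

    // Waking the queue should wake the mock task...
    q.wake();
    assert!(waiter.is_woken());

    // ...and the next poll completes with `Ok(())`.
    assert_ready_ok!(waiter.poll());
}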