diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1bbcde4..94383b8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -64,8 +64,12 @@ jobs: run: rustup toolchain install stable - name: Run raw example run: cargo run --example raw + - name: Run barging example + run: cargo run --example barging - name: Run thread_local example run: cargo run --example thread_local --features thread_local + - name: Run lock_api example + run: cargo run --example lock_api --features lock_api linter: name: Linter @@ -81,10 +85,12 @@ jobs: run: cargo clippy --features yield - name: Lint thread_local run: cargo clippy --features thread_local + - name: Lint lock_api + run: cargo clippy --features lock_api - name: Lint loom env: RUSTFLAGS: ${{ env.LOOM_RUSTFLAGS }} - run: cargo clippy --profile test --features thread_local + run: cargo clippy --profile test --all-features miri: name: Miri @@ -98,7 +104,7 @@ jobs: - name: Set Rust nightly as default run: rustup default nightly - name: Miri test - run: cargo miri test --features thread_local + run: cargo miri test --all-features loom: name: Loom @@ -111,4 +117,4 @@ jobs: - name: Loom test env: RUSTFLAGS: ${{ env.LOOM_RUSTFLAGS }} - run: cargo test --lib --release --features thread_local + run: cargo test --lib --release --all-features diff --git a/Cargo.toml b/Cargo.toml index 560f92d..7d2019b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,22 +6,31 @@ spin-lock for mutual exclusion, referred to as MCS lock. name = "mcslock" version = "0.1.0" edition = "2021" -rust-version = "1.60.0" +# NOTE: Rust 1.65 is required for GATs and let-else statements. 
+rust-version = "1.65.0" license = "MIT OR Apache-2.0" readme = "README.md" # documentation = "https://docs.rs/mcslock" # homepage = "https://crates.io/mcslock" repository = "https://github.com/pedromfedricci/mcslock" authors = ["Pedro de Matos Fedricci "] -categories = ["no-std", "no-std::no-alloc", "concurrency"] +categories = ["no-std", "concurrency"] keywords = ["no_std", "mutex", "spin-lock", "mcs-lock"] [workspace] members = [".", "benches"] [features] +# NOTE: Features `yield` and `thread_local` require std. yield = [] thread_local = [] +# NOTE: The `dep:` syntax requires Rust 1.60. +lock_api = ["dep:lock_api"] + +[dependencies.lock_api] +version = "0.4" +default-features = false +optional = true [target.'cfg(loom)'.dev-dependencies] loom = { version = "0.7" } @@ -30,6 +39,14 @@ loom = { version = "0.7" } all-features = true rustdoc-args = ["--cfg", "docsrs"] +[[test]] +name = "lock_api" +required-features = ["lock_api", "yield"] + [[example]] name = "thread_local" required-features = ["thread_local"] + +[[example]] +name = "lock_api" +required-features = ["lock_api"] diff --git a/Makefile.toml b/Makefile.toml index 89d7bbb..541870e 100644 --- a/Makefile.toml +++ b/Makefile.toml @@ -1,3 +1,6 @@ +[env] +CFG_LOOM = "--cfg loom" + # Don't run these tasks for all crates in the workspace. [config] default_to_workspace = false @@ -25,19 +28,19 @@ args = ["clippy", "--all-features", "--", "-D", "clippy::pedantic", "-D", "clipp toolchain = "nightly" install_crate = { rustup_component_name = "miri" } command = "cargo" -args = ["miri", "test", "--features", "thread_local"] +args = ["miri", "test", "--all-features"] # Run Loom tests. [tasks.loom-test] command = "cargo" -env = { "RUSTFLAGS" = "--cfg loom" } -args = ["test", "--lib", "--release", "--features", "thread_local"] +env = { "RUSTFLAGS" = "${CFG_LOOM}" } +args = ["test", "--lib", "--release", "--all-features"] # Lint Loom cfg. 
[tasks.loom-lint] command = "cargo" -env = { "RUSTFLAGS" = "--cfg loom" } -args = ["clippy", "--profile", "test", "--features", "thread_local"] +env = { "RUSTFLAGS" = "${CFG_LOOM}" } +args = ["clippy", "--profile", "test", "--all-features", "--", "-D", "clippy::pedantic", "-D", "clippy::nursery"] # Run busy loop bench. [tasks.bench-busy] diff --git a/README.md b/README.md index b4bf9e4..c6c2fac 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,22 @@ mechanism are: This algorithm and serveral others were introduced by [Mellor-Crummey and Scott] paper. And a simpler correctness proof of the MCS lock was proposed by [Johnson and Harathi]. +## Use cases + +It is noteworthy to mention that [spinlocks are usually not what you want]. The +majority of use cases are well covered by OS-based mutexes like +[`std::sync::Mutex`] or [`parking_lot::Mutex`]. These implementations will notify +the system that the waiting thread should be parked, freeing the processor to +work on something else. + +Spinlocks are only efficient in very few circunstances where the overhead +of context switching or process rescheduling are greater than busy waiting +for very short periods. Spinlocks can be useful inside operating-system kernels, +on embedded systems or even complement other locking designs. As a reference +use case, some [Linux kernel mutexes] run an customized MCS lock specifically +tailored for optimistic spinning during contention before actually sleeping. +This implementation is `no_std` by default, so it's useful in those environments. + ## Install Include the following under the `[dependencies]` section in your `Cargo.toml` file. @@ -21,16 +37,53 @@ Include the following under the `[dependencies]` section in your `Cargo.toml` fi # Cargo.toml [dependencies] -# Avaliable features: `yield`, `thread_local`. +# Avaliable features: `yield`, `thread_local` and `lock_api`. 
mcslock = { version = "0.1", git = "https://github.com/pedromfedricci/mcslock" } ``` -## Raw locking APIs +## Documentation + +Currently this project documentation is not hosted anywhere, you can render +the documentation by cloning this repository and then run: + +```bash +cargo doc --all-features --open +``` + +## Barging MCS lock + +This implementation will have non-waiting threads race for the lock against +the front of the waiting queue thread, which means this it is an unfair lock. +This implementation is suitable for `no_std` environments, and the locking +APIs are compatible with the [lock_api] crate. See `barging` and `lock_api` +modules for more information. + +```rust +use std::sync::Arc; +use std::thread; + +use mcslock::barging::spins::Mutex; + +fn main() { + let mutex = Arc::new(Mutex::new(0)); + let c_mutex = Arc::clone(&mutex); + + thread::spawn(move || { + *c_mutex.lock() = 10; + }) + .join().expect("thread::spawn failed"); + + assert_eq!(*mutex.try_lock().unwrap(), 10); +} +``` + +## Raw MCS lock -Raw locking APIs require exclusive access to a local queue node. This node is -represented by the `MutexNode` type. The `raw` module provides an implementation -that is `no_std` compatible, but also requires that queue nodes must be -instantiated by the callers. +This implementation operates under FIFO. Raw locking APIs require exclusive +access to a locally accessible queue node. This node is represented by the +`MutexNode` type. Callers are responsible for instantiating the queue nodes +themselves. This implementation is `no_std` compatible. See `raw` module for +more information. ```rust use std::sync::Arc; @@ -55,12 +108,14 @@ fn main() { } ``` -## Thread local locking APIs +## Thread local MCS lock -This crate also provides locking APIs that do not require user-side node -instantiation, by enabling the `thread_local` feature. 
These APIs require -that critical sections must be provided as closures, and are not compatible -with `no_std` environments as they require thread local storage. +This implementation also operates under FIFO. The locking APIs provided +by this module do not require user-side node allocation, critical +sections must be provided as closures and at most one lock can be held at +any time within a thread. It is not `no_std` compatible and can be enabled +through the `thread_local` feature. See `thread_local` module for more +information. ```rust use std::sync::Arc; @@ -74,57 +129,22 @@ fn main() { let c_mutex = Arc::clone(&mutex); thread::spawn(move || { - // Node instantiation is not required. // Critical section must be defined as closure. c_mutex.lock_with(|mut guard| *guard = 10); }) .join().expect("thread::spawn failed"); - // Node instantiation is not required. // Critical section must be defined as closure. assert_eq!(mutex.try_lock_with(|guard| *guard.unwrap()), 10); } ``` -## Documentation - -Currently this project documentation is not hosted anywhere, you can render -the documentation by cloning this repository and then run: - -```bash -cargo doc --all-features --open -``` - -## Use cases - -[Spinlocks are usually not what you want]. The majority of use cases are well -covered by OS-based mutexes like [`std::sync::Mutex`] or [`parking_lot::Mutex`]. -These implementations will notify the system that the waiting thread should -be parked, freeing the processor to work on something else. - -Spinlocks are only efficient in very few circunstances where the overhead -of context switching or process rescheduling are greater than busy waiting -for very short periods. Spinlocks can be useful inside operating-system kernels, -on embedded systems or even complement other locking designs. As a reference -use case, some [Linux kernel mutexes] run an customized MCS lock specifically -tailored for optimistic spinning during contention before actually sleeping. 
-This implementation is `no_std` by default, so it's useful in those environments. - -## API for `no_std` environments - -The `raw` locking interface of a MCS lock is not quite the same as other -mutexes. To acquire a raw MCS lock, a queue node must be exclusively borrowed for -the lifetime of the guard returned by `lock` or `try_lock`. This node is exposed -as the `MutexNode` type. See their documentation for more information. If you -are looking for spin-based primitives that implement the [lock_api] interface -and also compatible with `no_std`, consider using [spin-rs]. - ## Features This crate dos not provide any default features. Features that can be enabled are: -### `yield` +### yield The `yield` feature requires linking to the standard library, so it is not suitable for `no_std` environments. By enabling the `yield` feature, instead @@ -135,22 +155,28 @@ this feature if your intention is to to actually do optimistic spinning. The default implementation calls [`core::hint::spin_loop`], which does in fact just simply busy-waits. -### `thread_local` +### thread_local The `thread_local` feature provides locking APIs that do not require user-side -node instantiation, but critical sections must be provided as closures. This +node allocation, but critical sections must be provided as closures. This implementation handles the queue's nodes transparently, by storing them in the thread local storage of the waiting threads. These locking implementations will panic if recursively acquired. Not `no_std` compatible. +### lock_api + +This feature implements the [`RawMutex`] trait from the [lock_api] crate for +`barging::Mutex`. Aliases are provided by the `lock_api` module. This feature +is `no_std` compatible. 
+ ## Related projects These projects provide MCS lock implementations with slightly different APIs, implementation details or compiler requirements, you can check their repositories: -- `mcs-rs`: -- `libmcs`: +- mcs-rs: +- libmcs: ## License @@ -180,7 +206,7 @@ each of your dependencies, including this one. [spin-rs]: https://docs.rs/spin/latest/spin [lock_api]: https://docs.rs/lock_api/latest/lock_api [Linux kernel mutexes]: https://www.kernel.org/doc/html/latest/locking/mutex-design.html -[Spinlocks are usually not what you want]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html +[spinlocks are usually not what you want]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html [Mellor-Crummey and Scott]: https://www.cs.rochester.edu/~scott/papers/1991_TOCS_synch.pdf [Johnson and Harathi]: https://web.archive.org/web/20140411142823/http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf [cargo-crev]: https://github.com/crev-dev/cargo-crev diff --git a/examples/barging.rs b/examples/barging.rs new file mode 100644 index 0000000..b42b5ea --- /dev/null +++ b/examples/barging.rs @@ -0,0 +1,40 @@ +use std::sync::mpsc::channel; +use std::sync::Arc; +use std::thread; + +use mcslock::barging::spins::Mutex; + +fn main() { + const N: usize = 10; + + // Spawn a few threads to increment a shared variable (non-atomically), and + // let the main thread know once all increments are done. + // + // Here we're using an Arc to share memory among threads, and the data inside + // the Arc is protected with a mutex. + let data = Arc::new(Mutex::new(0)); + + let (tx, rx) = channel(); + for _ in 0..N { + let (data, tx) = (data.clone(), tx.clone()); + thread::spawn(move || { + // The shared state can only be accessed once the lock is held. + // Our non-atomic increment is safe because we're the only thread + // which can access the shared state when the lock is held. 
+ // + // We unwrap() the return value to assert that we are not expecting + // threads to ever fail while holding the lock. + let mut data = data.lock(); + *data += 1; + if *data == N { + tx.send(()).unwrap(); + } + // the lock is unlocked here when `data` goes out of scope. + }); + } + let _message = rx.recv(); + + let count = data.lock(); + assert_eq!(*count, N); + // lock is unlock here when `count` goes out of scope. +} diff --git a/examples/lock_api.rs b/examples/lock_api.rs new file mode 100644 index 0000000..4d0aaf3 --- /dev/null +++ b/examples/lock_api.rs @@ -0,0 +1,48 @@ +use std::sync::mpsc::channel; +use std::sync::Arc; +use std::thread; + +// Requires that the `lock_api` features is enabled. +// +// You may export these types to your callers, change the inner mutex type +// (as long as it implements the same raw mutex interfaces), without breaking +// their code. +// +// Maybe spin::lock_api::{Mutex, MutexGuard} is better for your use case? Switch it! +pub type Mutex = mcslock::lock_api::spins::Mutex; +pub type MutexGuard<'a, T> = mcslock::lock_api::spins::MutexGuard<'a, T>; + +fn main() { + const N: usize = 10; + + // Spawn a few threads to increment a shared variable (non-atomically), and + // let the main thread know once all increments are done. + // + // Here we're using an Arc to share memory among threads, and the data inside + // the Arc is protected with a mutex. + let data = Arc::new(Mutex::new(0)); + + let (tx, rx) = channel(); + for _ in 0..N { + let (data, tx) = (data.clone(), tx.clone()); + thread::spawn(move || { + // The shared state can only be accessed once the lock is held. + // Our non-atomic increment is safe because we're the only thread + // which can access the shared state when the lock is held. + // + // We unwrap() the return value to assert that we are not expecting + // threads to ever fail while holding the lock. 
+ let mut data = data.lock(); + *data += 1; + if *data == N { + tx.send(()).unwrap(); + } + // the lock is unlocked here when `data` goes out of scope. + }); + } + let _message = rx.recv(); + + let count = data.lock(); + assert_eq!(*count, N); + // lock is unlock here when `count` goes out of scope. +} diff --git a/examples/thread_local.rs b/examples/thread_local.rs index c460252..bdccced 100644 --- a/examples/thread_local.rs +++ b/examples/thread_local.rs @@ -2,6 +2,7 @@ use std::sync::mpsc::channel; use std::sync::Arc; use std::thread; +// Requires that the `thread_local` feature is enabled. use mcslock::thread_local::spins::Mutex; const N: usize = 10; diff --git a/src/barging/mod.rs b/src/barging/mod.rs new file mode 100644 index 0000000..e3e5792 --- /dev/null +++ b/src/barging/mod.rs @@ -0,0 +1,151 @@ +//! A barging MCS lock implementation that is compliant with the [lock_api] crate. +//! +//! This implementation will have non-waiting threads race for the lock against +//! the front of the waiting queue thread. If the front of the queue thread +//! looses the race, it will simply keep spinning, while holding its position +//! in the queue. By allowing barging instead of forcing FIFO, a higher throughput +//! can be achieved when the lock is heavily contended. This implementation is +//! suitable for `no_std` environments, and the locking APIs are compatible with +//! the [lock_api] crate (see `lock_api` feature). +//! +//! The lock is hold for as long as its associated RAII guard is in scope. Once +//! the guard is dropped, the mutex is freed. Mutex guards are returned by +//! [`lock`] and [`try_lock`]. Guards are also accessible as the closure argument +//! for [`lock_with`] and [`try_lock_with`] methods. +//! +//! The Mutex is generic over the relax strategy. User may choose a strategy +//! as long as it implements the [`Relax`] trait. There is a number of strategies +//! provided by the [`relax`] module. Each submodule provides type aliases for +//! 
[`Mutex`] and [`MutexGuard`] associated with one relax strategy. See their +//! documentation for more information. +//! +//! [lock_api]: https://crates.io/crates/lock_api +//! [`lock`]: Mutex::lock +//! [`try_lock`]: Mutex::try_lock +//! [`lock_with`]: Mutex::lock_with +//! [`try_lock_with`]: Mutex::try_lock_with +//! [`relax`]: crate::relax +//! [`Relax`]: crate::relax::Relax + +mod mutex; +pub use mutex::{Mutex, MutexGuard}; + +/// A `barging` MCS lock alias that signals the processor that it is running +/// a busy-wait spin-loop during lock contention. +pub mod spins { + use crate::relax::Spin; + + /// A `barging` MCS lock that implements the [`Spin`] relax strategy. + /// + /// # Example + /// + /// ``` + /// use mcslock::barging::spins::Mutex; + /// + /// let mutex = Mutex::new(0); + /// let guard = mutex.lock(); + /// assert_eq!(*guard, 0); + /// ``` + pub type Mutex = super::Mutex; + + /// A `barging` MCS guard that implements the [`Spin`] relax strategy. + pub type MutexGuard<'a, T> = super::MutexGuard<'a, T, Spin>; +} + +/// A `barging` MCS lock alias that yields the current time slice to the +/// OS scheduler during lock contention. +#[cfg(any(feature = "yield", loom, test))] +#[cfg_attr(docsrs, doc(cfg(feature = "yield")))] +pub mod yields { + use crate::relax::Yield; + + /// A `barging` MCS lock that implements the [`Yield`] relax strategy. + /// + /// # Example + /// + /// ``` + /// use mcslock::barging::yields::Mutex; + /// + /// let mutex = Mutex::new(0); + /// let guard = mutex.lock(); + /// assert_eq!(*guard, 0); + /// ``` + pub type Mutex = super::Mutex; + + /// A `barging` MCS guard that implements the [`Yield`] relax strategy. + pub type MutexGuard<'a, T> = super::MutexGuard<'a, T, Yield>; +} + +/// A `barging` MCS lock alias that rapidly spins without telling the CPU +/// to do any power down during lock contention. +pub mod loops { + use crate::relax::Loop; + + /// A `barging` MCS lock that implements the [`Loop`] relax strategy. 
+ /// + /// # Example + /// + /// ``` + /// use mcslock::barging::loops::Mutex; + /// + /// let mutex = Mutex::new(0); + /// let guard = mutex.lock(); + /// assert_eq!(*guard, 0); + /// ``` + pub type Mutex = super::Mutex; + + /// A `barging` MCS guard that implements the [`Loop`] relax strategy. + pub type MutexGuard<'a, T> = super::MutexGuard<'a, T, Loop>; +} + +/// A `barging` MCS lock alias that, during lock contention, will perform +/// exponential backoff while signaling the processor that it is running a +/// busy-wait spin-loop. +pub mod spins_backoff { + use crate::relax::SpinBackoff; + + /// A `barging` MCS lock that implements the [`SpinBackoff`] relax + /// strategy. + /// + /// # Example + /// + /// ``` + /// use mcslock::barging::spins_backoff::Mutex; + /// + /// let mutex = Mutex::new(0); + /// let guard = mutex.lock(); + /// assert_eq!(*guard, 0); + /// ``` + pub type Mutex = super::Mutex; + + /// A `barging` MCS guard that implements the [`SpinBackoff`] relax + /// strategy. + pub type MutexGuard<'a, T> = super::MutexGuard<'a, T, SpinBackoff>; +} + +/// A `barging` MCS lock alias that, during lock contention, will perform +/// exponential backoff while spinning up to a threshold, then yields back to +/// the OS scheduler. +#[cfg(feature = "yield")] +#[cfg_attr(docsrs, doc(cfg(feature = "yield")))] +pub mod yields_backoff { + use crate::relax::YieldBackoff; + + /// A `barging` MCS lock that implements the [`YieldBackoff`] relax + /// strategy. + /// + /// # Example + /// + /// ``` + /// use mcslock::barging::yields_backoff::Mutex; + /// + /// let mutex = Mutex::new(0); + /// let guard = mutex.lock(); + /// assert_eq!(*guard, 0); + /// ``` + pub type Mutex = super::Mutex; + + /// A `barging` MCS guard that implements the [`YieldBackoff`] relax + /// strategy. 
+ pub type MutexGuard<'a, T> = super::MutexGuard<'a, T, YieldBackoff>; +} diff --git a/src/barging/mutex.rs b/src/barging/mutex.rs new file mode 100644 index 0000000..ee7a750 --- /dev/null +++ b/src/barging/mutex.rs @@ -0,0 +1,771 @@ +use core::fmt; +use core::marker::PhantomData; +use core::sync::atomic::Ordering::{Acquire, Relaxed, Release}; + +#[cfg(not(all(loom, test)))] +use core::ops::{Deref, DerefMut}; +#[cfg(not(all(loom, test)))] +use core::sync::atomic::AtomicBool; + +#[cfg(all(loom, test))] +use loom::cell::{ConstPtr, MutPtr}; +#[cfg(all(loom, test))] +use loom::sync::atomic::AtomicBool; + +#[cfg(all(loom, test))] +use crate::loom::{Guard, GuardDeref, GuardDerefMut}; + +use crate::raw::{Mutex as RawMutex, MutexNode}; +use crate::relax::Relax; + +/// A mutual exclusion primitive useful for protecting shared data. +/// +/// This mutex will block threads waiting for the lock to become available. The +/// mutex can also be statically initialized or created via a [`new`] +/// constructor. Each mutex has a type parameter which represents the data that +/// it is protecting. The data can only be accessed through the RAII guards +/// returned from [`lock`] and [`try_lock`], which guarantees that the data is only +/// ever accessed when the mutex is locked. +/// +/// # Examples +/// +/// ``` +/// use std::sync::Arc; +/// use std::thread; +/// use std::sync::mpsc::channel; +/// +/// use mcslock::barging::Mutex; +/// use mcslock::relax::Spin; +/// +/// type SpinMutex = Mutex; +/// +/// const N: usize = 10; +/// +/// // Spawn a few threads to increment a shared variable (non-atomically), and +/// // let the main thread know once all increments are done. +/// // +/// // Here we're using an Arc to share memory among threads, and the data inside +/// // the Arc is protected with a mutex. 
+/// let data = Arc::new(SpinMutex::new(0)); +/// +/// let (tx, rx) = channel(); +/// for _ in 0..N { +/// let (data, tx) = (data.clone(), tx.clone()); +/// thread::spawn(move || { +/// // The shared state can only be accessed once the lock is held. +/// // Our non-atomic increment is safe because we're the only thread +/// // which can access the shared state when the lock is held. +/// // +/// // We unwrap() the return value to assert that we are not expecting +/// // threads to ever fail while holding the lock. +/// let mut data = data.lock(); +/// *data += 1; +/// if *data == N { +/// tx.send(()).unwrap(); +/// } +/// // the lock is unlocked here when `data` goes out of scope. +/// }); +/// } +/// +/// rx.recv().unwrap(); +/// ``` +/// [`new`]: Mutex::new +/// [`lock`]: Mutex::lock +/// [`try_lock`]: Mutex::try_lock +pub struct Mutex { + locked: AtomicBool, + marker: PhantomData, + inner: RawMutex, +} + +impl Mutex { + /// Creates a new mutex in an unlocked state ready for use. + /// + /// # Examples + /// + /// ``` + /// use mcslock::barging::Mutex; + /// use mcslock::relax::Spin; + /// + /// type SpinMutex = Mutex; + /// + /// const MUTEX: SpinMutex = SpinMutex::new(0); + /// let mutex = SpinMutex::new(0); + /// ``` + #[cfg(not(all(loom, test)))] + #[inline] + pub const fn new(value: T) -> Self { + let locked = AtomicBool::new(false); + let inner = RawMutex::new(value); + Self { locked, inner, marker: PhantomData } + } + + /// Creates a new unlocked mutex with Loom primitives (non-const). + #[cfg(all(loom, test))] + fn new(value: T) -> Self { + let locked = AtomicBool::new(false); + let inner = RawMutex::new(value); + Self { locked, inner, marker: PhantomData } + } + + /// Consumes this mutex, returning the underlying data. 
+ /// + /// # Examples + /// + /// ``` + /// use mcslock::barging::Mutex; + /// use mcslock::relax::Spin; + /// + /// type SpinMutex = Mutex; + /// + /// let mutex = SpinMutex::new(0); + /// assert_eq!(mutex.into_inner(), 0); + /// ``` + #[inline] + pub fn into_inner(self) -> T { + self.inner.into_inner() + } +} + +impl Mutex { + /// Acquires a mutex, blocking the current thread until it is able to do so. + /// + /// This function will block the local thread until it is available to acquire + /// the mutex. Upon returning, the thread is the only thread with the lock + /// held. An RAII guard is returned to allow scoped unlock of the lock. When + /// the guard goes out of scope, the mutex will be unlocked. To acquire a MCS + /// lock, it's also required a mutably borrowed queue node, which is a record + /// that keeps a link for forming the queue, see [`MutexNode`]. + /// + /// This function will block if the lock is unavailable. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use std::thread; + /// + /// use mcslock::barging::Mutex; + /// use mcslock::relax::Spin; + /// + /// type SpinMutex = Mutex; + /// + /// let mutex = Arc::new(SpinMutex::new(0)); + /// let c_mutex = Arc::clone(&mutex); + /// + /// thread::spawn(move || { + /// *c_mutex.lock() = 10; + /// }) + /// .join().expect("thread::spawn failed"); + /// + /// assert_eq!(*mutex.lock(), 10); + /// ``` + pub fn lock(&self) -> MutexGuard<'_, T, R> { + if self.try_lock_fast() { + return MutexGuard::new(self); + } + let mut node = MutexNode::new(); + let guard = self.inner.lock(&mut node); + while !self.try_lock_fast() { + let mut relax = R::default(); + while self.locked.load(Relaxed) { + relax.relax(); + } + } + drop(guard); + MutexGuard::new(self) + } + + /// Acquires a mutex and then runs the closure against its guard. + /// + /// This function will block the local thread until it is available to acquire + /// the mutex. 
Upon acquiring the mutex, the user provided closure will be + /// executed against the mutex guard. Once the guard goes out of scope, it + /// will unlock the mutex. + /// + /// This function will block if the lock is unavailable. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use std::thread; + /// + /// use mcslock::barging::Mutex; + /// use mcslock::relax::Spin; + /// + /// type SpinMutex = Mutex; + /// + /// let mutex = Arc::new(SpinMutex::new(0)); + /// let c_mutex = Arc::clone(&mutex); + /// + /// thread::spawn(move || { + /// c_mutex.lock_with(|mut guard| *guard = 10); + /// }) + /// .join().expect("thread::spawn failed"); + /// + /// assert_eq!(mutex.lock_with(|guard| *guard), 10); + /// ``` + /// + /// Borrows of the guard or its data cannot escape the given closure. + /// + /// ```compile_fail,E0515 + /// use mcslock::barging::spins::Mutex; + /// + /// let mutex = Mutex::new(1); + /// let data = mutex.lock_with(|guard| &*guard); + /// ``` + pub fn lock_with(&self, f: F) -> Ret + where + F: FnOnce(MutexGuard<'_, T, R>) -> Ret, + { + f(self.lock()) + } +} + +impl Mutex { + /// Attempts to acquire this lock. + /// + /// If the lock could not be acquired at this time, then [`None`] is returned. + /// Otherwise, an RAII guard is returned. The lock will be unlocked when the + /// guard is dropped. To acquire a MCS lock, it's also required a mutably + /// borrowed queue node, which is a record that keeps a link for forming the + /// queue, see [`MutexNode`]. + /// + /// This function does not block. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use std::thread; + /// + /// use mcslock::barging::Mutex; + /// use mcslock::relax::Spin; + /// + /// type SpinMutex = Mutex; + /// + /// let mutex = Arc::new(SpinMutex::new(0)); + /// let c_mutex = Arc::clone(&mutex); + /// + /// thread::spawn(move || { + /// let mut lock = c_mutex.try_lock(); + /// if let Some(ref mut mutex) = lock { + /// **mutex = 10; + /// } else { + /// println!("try_lock failed"); + /// } + /// }) + /// .join().expect("thread::spawn failed"); + /// + /// assert_eq!(*mutex.lock(), 10); + /// ``` + pub fn try_lock(&self) -> Option> { + self.locked + .compare_exchange(false, true, Acquire, Relaxed) + .map(|_| MutexGuard::new(self)) + .ok() + } + + /// Attempts to acquire this lock and then runs the closure against its guard. + /// + /// If the lock could not be acquired at this time, then a [`None`] value is + /// given to the user provided closure as the argument. If the lock has been + /// acquired, then a [`Some`] with the mutex guard is given instead. The lock + /// will be unlocked when the guard is dropped. + /// + /// This function does not block. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use std::thread; + /// + /// use mcslock::barging::Mutex; + /// use mcslock::relax::Spin; + /// + /// type SpinMutex = Mutex; + /// + /// let mutex = Arc::new(SpinMutex::new(0)); + /// let c_mutex = Arc::clone(&mutex); + /// + /// thread::spawn(move || { + /// c_mutex.try_lock_with(|mut guard| { + /// *guard.unwrap() = 10; + /// }); + /// }) + /// .join().expect("thread::spawn failed"); + /// + /// assert_eq!(mutex.lock_with(|guard| *guard), 10); + /// ``` + /// + /// Borrows of the guard or its data cannot escape the given closure. 
+ /// + /// ```compile_fail,E0515 + /// use mcslock::barging::spins::Mutex; + /// + /// let mutex = Mutex::new(1); + /// let data = mutex.try_lock_with(|guard| &*guard.unwrap()); + /// ``` + pub fn try_lock_with(&self, f: F) -> Ret + where + F: FnOnce(Option>) -> Ret, + { + f(self.try_lock()) + } + + /// Returns `true` if the lock is currently held. + /// + /// This method does not provide any synchronization guarantees, so its only + /// useful as a heuristic, and so must be considered not up to date. + /// + /// # Example + /// + /// ``` + /// use mcslock::barging::Mutex; + /// use mcslock::relax::Spin; + /// + /// type SpinMutex = Mutex; + /// + /// let mutex = SpinMutex::new(0); + /// let guard = mutex.lock(); + /// drop(guard); + /// + /// assert_eq!(mutex.is_locked(), false); + /// ``` + #[inline] + pub fn is_locked(&self) -> bool { + // Relaxed is sufficient because this method only guarantees atomicity. + self.locked.load(Relaxed) + } + + /// Returns a mutable reference to the underlying data. + /// + /// Since this call borrows the `Mutex` mutably, no actual locking needs to + /// take place - the mutable borrow statically guarantees no locks exist. + /// + /// # Examples + /// + /// ``` + /// use mcslock::barging::Mutex; + /// use mcslock::relax::Spin; + /// + /// type SpinMutex = Mutex; + /// + /// let mut mutex = SpinMutex::new(0); + /// *mutex.get_mut() = 10; + /// + /// assert_eq!(*mutex.lock(), 10); + /// ``` + #[cfg(not(all(loom, test)))] + #[inline] + pub fn get_mut(&mut self) -> &mut T { + self.inner.get_mut() + } + + /// Tries to lock this mutex with a weak exchange. + fn try_lock_fast(&self) -> bool { + self.locked.compare_exchange_weak(false, true, Acquire, Relaxed).is_ok() + } + + /// Unlocks this mutex. + fn unlock(&self) { + self.locked.store(false, Release); + } + + #[cfg(not(all(loom, test)))] + /// Returns a raw mutable pointer to the underlying data. 
+ const fn data_ptr(&self) -> *mut T { + self.inner.data_ptr() + } + + /// Get a Loom immutable raw pointer to the underlying data. + #[cfg(all(loom, test))] + fn data_get(&self) -> ConstPtr { + self.inner.data_get() + } + + /// Get a Loom mutable raw pointer to the underlying data. + #[cfg(all(loom, test))] + fn data_get_mut(&self) -> MutPtr { + self.inner.data_get_mut() + } +} + +impl Default for Mutex { + /// Creates a `Mutex`, with the `Default` value for `T`. + fn default() -> Self { + Self::new(Default::default()) + } +} + +impl From for Mutex { + /// Creates a `Mutex` from a instance of `T`. + fn from(data: T) -> Self { + Self::new(data) + } +} + +impl fmt::Debug for Mutex { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +#[cfg(all(feature = "lock_api", not(loom)))] +unsafe impl lock_api::RawMutex for Mutex<(), R> { + type GuardMarker = lock_api::GuardSend; + + // It is fine to const initialize `Mutex<(), R>` since the data is not going + // to be shared. And since it is a `Unit` type, copies will be optimized away. + #[allow(clippy::declare_interior_mutable_const)] + const INIT: Self = Self::new(()); + + fn lock(&self) { + core::mem::forget(Self::lock(self)); + } + + fn try_lock(&self) -> bool { + Self::try_lock(self).map(core::mem::forget).is_some() + } + + unsafe fn unlock(&self) { + self.unlock(); + } + + fn is_locked(&self) -> bool { + self.is_locked() + } +} + +/// An RAII implementation of a "scoped lock" of a mutex. When this structure is +/// dropped (falls out of scope), the lock will be unlocked. +/// +/// The data protected by the mutex can be access through this guard via its +/// [`Deref`] and [`DerefMut`] implementations. +/// +/// This structure is returned by [`lock`] and [`try_lock`] methods on [`Mutex`]. +/// It is also given as closure argument by [`lock_with`] and [`try_lock_with`] +/// methods. 
+///
+/// [`lock`]: Mutex::lock
+/// [`try_lock`]: Mutex::try_lock
+/// [`lock_with`]: Mutex::lock_with
+/// [`try_lock_with`]: Mutex::try_lock_with
+#[must_use = "if unused the Mutex will immediately unlock"]
+pub struct MutexGuard<'a, T: ?Sized, R> {
+    lock: &'a Mutex,
+}
+
+// Same unsafe impl as `std::sync::MutexGuard`.
+unsafe impl Sync for MutexGuard<'_, T, R> {}
+
+impl<'a, T: ?Sized, R> MutexGuard<'a, T, R> {
+    /// Creates a new `MutexGuard` instance.
+    const fn new(lock: &'a Mutex) -> Self {
+        Self { lock }
+    }
+
+    /// Runs `f` with an immutable reference to the wrapped value.
+    #[cfg(not(all(loom, test)))]
+    fn data_with(&self, f: F) -> Ret
+    where
+        F: FnOnce(&T) -> Ret,
+    {
+        // SAFETY: A guard instance holds the lock locked.
+        f(unsafe { &*self.lock.data_ptr() })
+    }
+
+    /// Runs `f` with an immutable reference to the wrapped value.
+    #[cfg(all(loom, test))]
+    pub(crate) fn data_with(&self, f: F) -> Ret
+    where
+        F: FnOnce(&T) -> Ret,
+    {
+        // SAFETY: A guard instance holds the lock locked.
+        f(unsafe { self.lock.data_get().deref() })
+    }
+}
+
+impl Drop for MutexGuard<'_, T, R> {
+    fn drop(&mut self) {
+        self.lock.unlock();
+    }
+}
+
+#[cfg(not(all(loom, test)))]
+impl<'a, T: ?Sized, R> Deref for MutexGuard<'a, T, R> {
+    type Target = T;
+
+    /// Dereferences the guard to access the underlying data.
+    fn deref(&self) -> &T {
+        // SAFETY: A guard instance holds the lock locked.
+        unsafe { &*self.lock.data_ptr() }
+    }
+}
+
+#[cfg(not(all(loom, test)))]
+impl<'a, T: ?Sized, R> DerefMut for MutexGuard<'a, T, R> {
+    /// Mutably dereferences the guard to access the underlying data.
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: A guard instance holds the lock locked.
+ unsafe { &mut *self.lock.data_ptr() } + } +} + +impl<'a, T: ?Sized + fmt::Debug, R> fmt::Debug for MutexGuard<'a, T, R> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.data_with(|data| fmt::Debug::fmt(data, f)) + } +} + +impl<'a, T: ?Sized + fmt::Display, R> fmt::Display for MutexGuard<'a, T, R> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.data_with(|data| fmt::Display::fmt(data, f)) + } +} + +/// SAFETY: A guard instance hold the lock locked, with exclusive access to the +/// underlying data. +#[cfg(all(loom, test))] +unsafe impl<'a, T: ?Sized, R> Guard<'a, T> for MutexGuard<'a, T, R> { + type Guard = Self; + + fn deref(&'a self) -> GuardDeref<'a, T, Self::Guard> { + GuardDeref::new(self.lock.data_get()) + } + + fn deref_mut(&'a self) -> GuardDerefMut<'a, T, Self::Guard> { + GuardDerefMut::new(self.lock.data_get_mut()) + } +} + +#[cfg(all(not(loom), test))] +mod test { + // Test suite from the Rust's Mutex implementation with minor modifications + // since the API is not compatible with this crate implementation and some + // new tests as well. + // + // Copyright 2014 The Rust Project Developers. + // + // Licensed under the Apache License, Version 2.0 or the MIT license + // , at your + // option. This file may not be copied, modified, or distributed + // except according to those terms. 
+ + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::mpsc::channel; + use std::sync::Arc; + use std::thread; + + use crate::barging::yields::Mutex; + + #[derive(Eq, PartialEq, Debug)] + struct NonCopy(i32); + + #[test] + fn smoke() { + let m = Mutex::new(()); + drop(m.lock()); + drop(m.lock()); + } + + #[test] + fn lots_and_lots() { + static LOCK: Mutex = Mutex::new(0); + + const ITERS: u32 = 1000; + const CONCURRENCY: u32 = 3; + + fn inc() { + for _ in 0..ITERS { + let mut g = LOCK.lock(); + *g += 1; + } + } + + let (tx, rx) = channel(); + for _ in 0..CONCURRENCY { + let tx2 = tx.clone(); + thread::spawn(move || { + inc(); + tx2.send(()).unwrap(); + }); + let tx2 = tx.clone(); + thread::spawn(move || { + inc(); + tx2.send(()).unwrap(); + }); + } + + drop(tx); + for _ in 0..2 * CONCURRENCY { + rx.recv().unwrap(); + } + assert_eq!(*LOCK.lock(), ITERS * CONCURRENCY * 2); + } + + #[test] + fn try_lock() { + let m = Mutex::new(()); + *m.try_lock().unwrap() = (); + } + + #[test] + fn test_into_inner() { + let m = Mutex::new(NonCopy(10)); + assert_eq!(m.into_inner(), NonCopy(10)); + } + + #[test] + fn test_into_inner_drop() { + struct Foo(Arc); + impl Drop for Foo { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::SeqCst); + } + } + let num_drops = Arc::new(AtomicUsize::new(0)); + let m = Mutex::new(Foo(num_drops.clone())); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + { + let _inner = m.into_inner(); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + } + assert_eq!(num_drops.load(Ordering::SeqCst), 1); + } + + #[test] + fn test_get_mut() { + let mut m = Mutex::new(NonCopy(10)); + *m.get_mut() = NonCopy(20); + assert_eq!(m.into_inner(), NonCopy(20)); + } + + #[test] + fn test_lock_arc_nested() { + // Tests nested locks and access + // to underlying data. 
+ let arc = Arc::new(Mutex::new(1)); + let arc2 = Arc::new(Mutex::new(arc)); + let (tx, rx) = channel(); + let _t = thread::spawn(move || { + let lock = arc2.lock(); + let lock2 = lock.lock(); + assert_eq!(*lock2, 1); + tx.send(()).unwrap(); + }); + rx.recv().unwrap(); + } + + #[test] + fn test_recursive_lock() { + let arc = Arc::new(Mutex::new(1)); + let (tx, rx) = channel(); + for _ in 0..4 { + let tx2 = tx.clone(); + let c_arc = Arc::clone(&arc); + let _t = thread::spawn(move || { + let mutex = Mutex::new(1); + let _lock = c_arc.lock(); + let lock2 = mutex.lock(); + assert_eq!(*lock2, 1); + tx2.send(()).unwrap(); + }); + } + drop(tx); + rx.recv().unwrap(); + } + + #[test] + fn test_lock_arc_access_in_unwind() { + let arc = Arc::new(Mutex::new(1)); + let arc2 = arc.clone(); + let _ = thread::spawn(move || -> () { + struct Unwinder { + i: Arc>, + } + impl Drop for Unwinder { + fn drop(&mut self) { + *self.i.lock() += 1; + } + } + let _u = Unwinder { i: arc2 }; + panic!(); + }) + .join(); + let lock = arc.lock(); + assert_eq!(*lock, 2); + } + + #[test] + fn test_lock_unsized() { + let lock: &Mutex<[i32]> = &Mutex::new([1, 2, 3]); + { + let b = &mut *lock.lock(); + b[0] = 4; + b[2] = 5; + } + let comp: &[i32] = &[4, 2, 5]; + assert_eq!(&*lock.lock(), comp); + } +} + +#[cfg(all(loom, test))] +mod test { + use loom::{model, thread}; + + use crate::barging::yields::Mutex; + use crate::loom::Guard; + + #[test] + fn threads_join() { + use core::ops::Range; + use loom::sync::Arc; + + fn inc(lock: Arc>) { + let guard = lock.lock(); + *guard.deref_mut() += 1; + } + + model(|| { + let data = Arc::new(Mutex::new(0)); + // 3 or more threads make this model run for too long. + let runs @ Range { end, .. 
} = 0..2;
+
+            let handles = runs
+                .into_iter()
+                .map(|_| Arc::clone(&data))
+                .map(|data| thread::spawn(move || inc(data)))
+                .collect::>();
+
+            for handle in handles {
+                handle.join().unwrap();
+            }
+
+            assert_eq!(end, *data.lock().deref());
+        });
+    }
+
+    #[test]
+    fn threads_fork() {
+        // Using std's Arc or else this model runs for too long.
+        use std::sync::Arc;
+
+        fn inc(lock: Arc>) {
+            let guard = lock.lock();
+            *guard.deref_mut() += 1;
+        }
+
+        model(|| {
+            let data = Arc::new(Mutex::new(0));
+            // 4 or more threads make this model run for too long.
+            for _ in 0..3 {
+                let data = Arc::clone(&data);
+                thread::spawn(move || inc(data));
+            }
+        });
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
index ffd6f41..b60f8bc 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -11,15 +11,58 @@
 //! - works equally well (requiring only O(1) network transactions per lock
 //! acquisition) on machines with and without coherent caches.
 //!
-//! This algorithm and serveral others were introduced by [Mellor-Crummey and Scott] paper.
-//! And a simpler correctness proof of the MCS lock was proposed by [Johnson and Harathi].
+//! This algorithm and several others were introduced by [Mellor-Crummey and Scott]
+//! paper. And a simpler correctness proof of the MCS lock was proposed by
+//! [Johnson and Harathi].
 //!
-//! ## Raw locking APIs
+//! ## Use cases
+//!
+//! It is noteworthy to mention that [spinlocks are usually not what you want].
+//! The majority of use cases are well covered by OS-based mutexes like
+//! [`std::sync::Mutex`] or [`parking_lot::Mutex`]. These implementations will
+//! notify the system that the waiting thread should be parked, freeing the
+//! processor to work on something else.
+//!
+//! Spinlocks are only efficient in very few circumstances where the overhead
+//! of context switching or process rescheduling is greater than busy waiting
+//! for very short periods. Spinlocks can be useful inside operating-system kernels,
+//!
on embedded systems or even complement other locking designs. As a reference +//! use case, some [Linux kernel mutexes] run an customized MCS lock specifically +//! tailored for optimistic spinning during contention before actually sleeping. +//! This implementation is `no_std` by default, so it's useful in those environments. +//! +//! ## Barging MCS lock //! -//! Raw locking APIs require exclusive access to a local queue node. This node is -//! represented by the `MutexNode` type. The `raw` module provides an implementation -//! that is `no_std` compatible, but also requires that queue nodes must be -//! instantiated by the callers. +//! This implementation will have non-waiting threads race for the lock against +//! the front of the waiting queue thread, which means this it is an unfair lock. +//! This implementation is suitable for `no_std` environments, and the locking +//! APIs are compatible with the `lock_api` crate. See [`mod@barging`] and +//! [`mod@lock_api`] modules for more information. +//! +//! ``` +//! use std::sync::Arc; +//! use std::thread; +//! +//! use mcslock::barging::spins::Mutex; +//! +//! let mutex = Arc::new(Mutex::new(0)); +//! let c_mutex = Arc::clone(&mutex); +//! +//! thread::spawn(move || { +//! *c_mutex.lock() = 10; +//! }) +//! .join().expect("thread::spawn failed"); +//! +//! assert_eq!(*mutex.try_lock().unwrap(), 10); +//! ``` +//! +//! ## Raw MCS lock +//! +//! This implementation operates under FIFO. Raw locking APIs require exclusive +//! access to a locally accessible queue node. This node is represented by the +//! [`MutexNode`] type. Callers are responsible for instantiating the queue nodes +//! themselves. This implementation is `no_std` compatible. See [`mod@raw`] +//! module for more information. //! //! ``` //! use std::sync::Arc; @@ -42,12 +85,14 @@ //! assert_eq!(*mutex.try_lock(&mut node).unwrap(), 10); //! ``` //! -//! ## Thread local locking APIs +//! ## Thread local MCS lock //! -//! 
This crate also provides locking APIs that do not require user-side node -//! instantiation, by enabling the `thread_local` feature. These APIs require -//! that critical sections must be provided as closures, and are not compatible -//! with `no_std` environments as they require thread local storage. +//! This implementation also operates under FIFO. The locking APIs provided +//! by this module do not require user-side node allocation, critical sections +//! must be provided as closures and at most one lock can be held at any time +//! within a thread. It is not `no_std` compatible and can be enabled through +//! the `thread_local` feature. See [`mod@thread_local`] module for more +//! information. //! //! ``` //! # #[cfg(feature = "thread_local")] @@ -62,13 +107,11 @@ //! let c_mutex = Arc::clone(&mutex); //! //! thread::spawn(move || { -//! // Node instantiation is not required. //! // Critical section must be defined as closure. //! c_mutex.lock_with(|mut guard| *guard = 10); //! }) //! .join().expect("thread::spawn failed"); //! -//! // Node instantiation is not required. //! // Critical section must be defined as closure. //! assert_eq!(mutex.try_lock_with(|guard| *guard.unwrap()), 10); //! # } @@ -76,36 +119,12 @@ //! # fn main() {} //! ``` //! -//! ## Use cases -//! -//! [Spinlocks are usually not what you want]. The majority of use cases are well -//! covered by OS-based mutexes like [`std::sync::Mutex`] or [`parking_lot::Mutex`]. -//! These implementations will notify the system that the waiting thread should -//! be parked, freeing the processor to work on something else. -//! -//! Spinlocks are only efficient in very few circunstances where the overhead -//! of context switching or process rescheduling are greater than busy waiting -//! for very short periods. Spinlocks can be useful inside operating-system kernels, -//! on embedded systems or even complement other locking designs. As a reference -//! 
use case, some [Linux kernel mutexes] run an customized MCS lock specifically -//! tailored for optimistic spinning during contention before actually sleeping. -//! This implementation is `no_std` by default, so it's useful in those environments. -//! -//! ## API for `no_std` environments -//! -//! The [`raw`] locking interface of a MCS lock is not quite the same as other -//! mutexes. To acquire a raw MCS lock, a queue node must be exclusively borrowed for -//! the lifetime of the guard returned by [`lock`] or [`try_lock`]. This node is exposed -//! as the [`MutexNode`] type. See their documentation for more information. If you -//! are looking for spin-based primitives that implement the [lock_api] interface -//! and also compatible with `no_std`, consider using [spin-rs]. -//! //! ## Features //! //! This crate dos not provide any default features. Features that can be enabled //! are: //! -//! ### `yield` +//! ### yield //! //! The `yield` feature requires linking to the standard library, so it is not //! suitable for `no_std` environments. By enabling the `yield` feature, instead @@ -116,29 +135,32 @@ //! default implementation calls [`core::hint::spin_loop`], which does in fact //! just simply busy-waits. //! -//! ### `thread_local` +//! ### thread_local //! //! The `thread_local` feature provides locking APIs that do not require user-side -//! node instantiation, but critical sections must be provided as closures. This +//! node allocation, but critical sections must be provided as closures. This //! implementation handles the queue's nodes transparently, by storing them in //! the thread local storage of the waiting threads. Thes locking implementations //! will panic if recursively acquired. Not `no_std` compatible. //! +//! ### lock_api +//! +//! This feature implements the [`RawMutex`] trait from the [lock_api] +//! crate for [`barging::Mutex`]. Aliases are provided by the +//! [`mod@lock_api`] module. This feature is `no_std` compatible. +//! //! 
## Related projects //! //! These projects provide MCS lock implementations with different APIs, //! implementation details or compiler requirements, you can check their //! repositories: //! -//! - `mcs-rs`: -//! - `libmcs`: +//! - mcs-rs: +//! - libmcs: //! //! [`MutexNode`]: raw::MutexNode -//! [`lock`]: raw::Mutex::lock -//! [`try_lock`]: raw::Mutex::try_lock //! [`std::sync::Mutex`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html //! [`parking_lot::Mutex`]: https://docs.rs/parking_lot/latest/parking_lot/type.Mutex.html -//! [`mcslock::Mutex`]: crate::Mutex //! [`RawMutex`]: https://docs.rs/lock_api/latest/lock_api/trait.RawMutex.html //! [`RawMutexFair`]: https://docs.rs/lock_api/latest/lock_api/trait.RawMutexFair.html //! [`std::thread::yield_now`]: https://doc.rust-lang.org/std/thread/fn.yield_now.html @@ -146,7 +168,7 @@ //! [spin-rs]: https://docs.rs/spin/latest/spin //! [lock_api]: https://docs.rs/lock_api/latest/lock_api //! [Linux kernel mutexes]: https://www.kernel.org/doc/html/latest/locking/mutex-design.html -//! [Spinlocks are usually not what you want]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html +//! [spinlocks are usually not what you want]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html //! [Mellor-Crummey and Scott]: https://www.cs.rochester.edu/~scott/papers/1991_TOCS_synch.pdf //! 
[Johnson and Harathi]: https://web.archive.org/web/20140411142823/http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf @@ -155,13 +177,20 @@ no_std )] #![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(test, allow(clippy::needless_pass_by_value))] #![allow(clippy::module_name_repetitions)] #![allow(clippy::inline_always)] +#![allow(clippy::doc_markdown)] #![warn(missing_docs)] +pub mod barging; pub mod raw; pub mod relax; +#[cfg(feature = "lock_api")] +#[cfg_attr(docsrs, doc(cfg(feature = "lock_api")))] +pub mod lock_api; + #[cfg(feature = "thread_local")] #[cfg_attr(docsrs, doc(cfg(feature = "thread_local")))] pub mod thread_local; diff --git a/src/lock_api.rs b/src/lock_api.rs new file mode 100644 index 0000000..a6914fe --- /dev/null +++ b/src/lock_api.rs @@ -0,0 +1,155 @@ +//! Locking interfaces for MCS lock that are compatible with [lock_api]. +//! +//! This module exports [`lock_api::Mutex`] and [`lock_api::MutexGuard`] type +//! aliases with a `barging` MCS lock and guard as their inner types. The +//! [`barging::Mutex`] type will implement the [`lock_api::RawMutex`] trait when +//! this feature is enabled. +//! +//! The Mutex is generic over the relax strategy. User may choose a strategy +//! as long as it implements the [`Relax`] trait. There is a number of strategies +//! provided by the [`relax`] module. The following modules provide type aliases +//! for [`lock_api::Mutex`] and [`lock_api::MutexGuard`] associated with one +//! relax strategy. See their documentation for more information. +//! +//! [`relax`]: crate::relax +//! [`Relax`]: crate::relax::Relax +//! [`barging::Mutex`]: crate::barging::Mutex +//! [lock_api]: https://crates.io/crates/lock_api +//! [`lock_api::Mutex`]: https://docs.rs/lock_api/latest/lock_api/struct.Mutex.html +//! [`lock_api::MutexGuard`]: https://docs.rs/lock_api/latest/lock_api/struct.MutexGuard.html +//! [`lock_api::RawMutex`]: https://docs.rs/lock_api/latest/lock_api/trait.RawMutex.html +//! 
[`RawMutexFair`]: https://docs.rs/lock_api/latest/lock_api/trait.RawMutexFair.html + +/// A lock that provides mutually exclusive data access that is compatible with +/// [`lock_api`](https://crates.io/crates/lock_api). +pub type Mutex = lock_api::Mutex, T>; + +/// A guard that provides mutable data access that is compatible with +/// [`lock_api`](https://crates.io/crates/lock_api). +pub type MutexGuard<'a, T, R> = lock_api::MutexGuard<'a, crate::barging::Mutex<(), R>, T>; + +/// A `barging` MCS lock alias that signals the processor that it is running +/// a busy-wait spin-loop during lock contention. +pub mod spins { + use crate::relax::Spin; + + /// A `barging` MCS lock that implements the [`Spin`] relax strategy + /// and compatible with the `lock_api` crate. + /// + /// # Example + /// + /// ``` + /// use mcslock::lock_api::spins::Mutex; + /// + /// let mutex = Mutex::new(0); + /// let guard = mutex.lock(); + /// assert_eq!(*guard, 0); + /// ``` + pub type Mutex = super::Mutex; + + /// A `barging` MCS guard that implements the [`Spin`] relax strategy + /// and compatible with the `lock_api` crate. + pub type MutexGuard<'a, T> = super::MutexGuard<'a, T, Spin>; +} + +/// A `barging` MCS lock alias that yields the current time slice to the +/// OS scheduler during lock contention. +#[cfg(any(feature = "yield", loom, test))] +#[cfg_attr(docsrs, doc(cfg(feature = "yield")))] +pub mod yields { + use crate::relax::Yield; + + /// A `barging` MCS lock that implements the [`Yield`] relax strategy + /// and compatible with the `lock_api` crate. + /// + /// # Example + /// + /// ``` + /// use mcslock::lock_api::yields::Mutex; + /// + /// let mutex = Mutex::new(0); + /// let guard = mutex.lock(); + /// assert_eq!(*guard, 0); + /// ``` + pub type Mutex = super::Mutex; + + /// A `barging` MCS guard that implements the [`Yield`] relax strategy + /// and compatible with the `lock_api` crate. 
+ pub type MutexGuard<'a, T> = super::MutexGuard<'a, T, Yield>; +} + +/// A `barging` MCS lock alias that rapidly spins without telling the CPU +/// to do any power down during lock contention. +pub mod loops { + use crate::relax::Loop; + + /// A `barging` MCS lock that implements the [`Loop`] relax strategy + /// and compatible with the `lock_api` crate. + /// + /// # Example + /// + /// ``` + /// use mcslock::lock_api::loops::Mutex; + /// + /// let mutex = Mutex::new(0); + /// let guard = mutex.lock(); + /// assert_eq!(*guard, 0); + /// ``` + pub type Mutex = super::Mutex; + + /// A `barging` MCS guard that implements the [`Loop`] relax strategy + /// and compatible with the `lock_api` crate. + pub type MutexGuard<'a, T> = super::MutexGuard<'a, T, Loop>; +} + +/// A `barging` MCS lock alias that, during lock contention, will perform +/// exponential backoff while signaling the processor that it is running a +/// busy-wait spin-loop. +pub mod spins_backoff { + use crate::relax::SpinBackoff; + + /// A `barging` MCS lock that implements the [`SpinBackoff`] relax + /// strategy and compatible with the `lock_api` crate. + /// + /// # Example + /// + /// ``` + /// use mcslock::lock_api::spins_backoff::Mutex; + /// + /// let mutex = Mutex::new(0); + /// let guard = mutex.lock(); + /// assert_eq!(*guard, 0); + /// ``` + pub type Mutex = super::Mutex; + + /// A `barging` MCS guard that implements the [`SpinBackoff`] relax + /// strategy and compatible with the `lock_api` crate. + pub type MutexGuard<'a, T> = super::MutexGuard<'a, T, SpinBackoff>; +} + +/// A `barging` MCS lock alias that, during lock contention, will perform +/// exponential backoff while spinning up to a threshold, then yields back to +/// the OS scheduler. 
+#[cfg(feature = "yield")] +#[cfg_attr(docsrs, doc(cfg(feature = "yield")))] +pub mod yields_backoff { + use crate::relax::YieldBackoff; + + /// A `barging` MCS lock that implements the [`YieldBackoff`] relax + /// strategy and compatible with the `lock_api` crate. + /// + /// # Example + /// + /// ``` + /// use mcslock::lock_api::yields_backoff::Mutex; + /// + /// let mutex = Mutex::new(0); + /// let guard = mutex.lock(); + /// assert_eq!(*guard, 0); + /// ``` + pub type Mutex = super::Mutex; + + /// A `barging` MCS guard that implements the [`YieldBackoff`] relax + /// strategy and compatible with the `lock_api` crate. + pub type MutexGuard<'a, T> = super::MutexGuard<'a, T, YieldBackoff>; +} diff --git a/src/loom.rs b/src/loom.rs index 25c50d9..7a97fe8 100644 --- a/src/loom.rs +++ b/src/loom.rs @@ -1,4 +1,3 @@ -#![allow(clippy::needless_pass_by_value)] #![allow(clippy::redundant_pub_crate)] use core::marker::PhantomData; diff --git a/src/raw/mod.rs b/src/raw/mod.rs index 4f68089..db3aeed 100644 --- a/src/raw/mod.rs +++ b/src/raw/mod.rs @@ -1,19 +1,25 @@ -//! A MCS lock implementation that requires instantiation and exclusive access -//! to a queue node. +//! A MCS lock implementation that requires exclusive access to a locally +//! accessible queue node. //! -//! The `raw` module provides an implementation that is `no_std` compatible, but -//! also requires that queue nodes must be instantiated by the callers. Queue -//! nodes are represented by the [`MutexNode`] type. The lock is hold for as -//! long as its associated RAII guard is in scope. Once the guard is dropped, -//! the mutex is freed. Mutex guards are returned by [`lock`] and [`try_lock`]. -//! Guards are also accessible as the closure argument for [`lock_with`] and -//! [`try_lock_with`] methods. +//! The `raw` implementation of MCS lock is fair, that is, it guarantees that +//! thread that have waited for longer will be scheduled first (FIFO). Each +//! 
waiting thread will spin against its own, locally-accessible atomic lock +//! state, which then avoids the network contention of the state access. +//! +//! This module provides an implementation that is `no_std` compatible, but +//! also requires that queue nodes must be allocated by the callers. Queue +//! nodes are represented by the [`MutexNode`] type. +//! +//! The lock is hold for as long as its associated RAII guard is in scope. Once +//! the guard is dropped, the mutex is freed. Mutex guards are returned by +//! [`lock`] and [`try_lock`]. Guards are also accessible as the closure argument +//! for [`lock_with`] and [`try_lock_with`] methods. //! //! The Mutex is generic over the relax strategy. User may choose a strategy //! as long as it implements the [`Relax`] trait. There is a number of strategies -//! provided by the [`relax`] module. The default relax strategy is [`Spin`]. -//! Each module in `raw` provides type aliases for [`Mutex`] and [`MutexGuard`] -//! associated with one relax strategy. See their documentation for more information. +//! provided by the [`relax`] module. Each module in `raw` provides type aliases +//! for [`Mutex`] and [`MutexGuard`] associated with one relax strategy. See +//! their documentation for more information. //! //! [`lock`]: Mutex::lock //! [`try_lock`]: Mutex::try_lock @@ -21,7 +27,6 @@ //! [`try_lock_with`]: Mutex::try_lock_with //! [`relax`]: crate::relax //! [`Relax`]: crate::relax::Relax -//! [`Spin`]: crate::relax::Spin mod mutex; pub use mutex::{Mutex, MutexGuard, MutexNode}; diff --git a/src/raw/mutex.rs b/src/raw/mutex.rs index c042f41..c45c898 100644 --- a/src/raw/mutex.rs +++ b/src/raw/mutex.rs @@ -48,7 +48,7 @@ impl MutexNodeInit { /// Returns a raw mutable pointer of this node. 
const fn as_ptr(&self) -> *mut Self { - self as *const _ as *mut _ + (self as *const Self).cast_mut() } } @@ -284,6 +284,15 @@ impl Mutex { /// /// assert_eq!(mutex.lock_with(|guard| *guard), 10); /// ``` + /// + /// Borrows of the guard or its data cannot escape the given closure. + /// + /// ```compile_fail,E0515 + /// use mcslock::raw::spins::Mutex; + /// + /// let mutex = Mutex::new(1); + /// let data = mutex.try_lock_with(|guard| &*guard.unwrap()); + /// ``` pub fn try_lock_with(&self, f: F) -> Ret where F: FnOnce(Option>) -> Ret, @@ -333,7 +342,7 @@ impl Mutex { if !pred.is_null() { // SAFETY: Already verified that predecessor is not null. unsafe { &*pred }.next.store(node.as_ptr(), Release); - let mut relax = R::new(); + let mut relax = R::default(); while node.locked.load(Relaxed) { relax.relax(); } @@ -372,6 +381,15 @@ impl Mutex { /// /// assert_eq!(mutex.lock_with(|guard| *guard), 10); /// ``` + /// + /// Borrows of the guard or its data cannot escape the given closure. + /// + /// ```compile_fail,E0515 + /// use mcslock::raw::spins::Mutex; + /// + /// let mutex = Mutex::new(1); + /// let data = mutex.lock_with(|guard| &*guard); + /// ``` pub fn lock_with(&self, f: F) -> Ret where F: FnOnce(MutexGuard<'_, T, R>) -> Ret, @@ -390,7 +408,7 @@ impl Mutex { let false = self.try_unlock(node.as_ptr()) else { return }; // But if we are not the tail, then we have a pending successor. We // must wait for them to finish linking with us. 
- let mut relax = R::new(); + let mut relax = R::default(); loop { next = node.next.load(Relaxed); let true = next.is_null() else { break }; @@ -649,19 +667,6 @@ mod test { drop(m.lock(&mut node)); } - // #[test] - // fn must_not_compile() { - // let m = Mutex::new(1); - // let guard = m.lock_with(|guard| guard); - // let _value = *guard; - - // let m = Mutex::new(1); - // let _val = m.lock_with(|guard| &mut *guard); - - // let m = Mutex::new(1); - // let _val = m.lock_with(|guard| &*guard); - // } - #[test] fn lots_and_lots() { static LOCK: Mutex = Mutex::new(0); diff --git a/src/relax.rs b/src/relax.rs index 9ec9bb7..369040f 100644 --- a/src/relax.rs +++ b/src/relax.rs @@ -15,10 +15,7 @@ //! Strategies that determine the behaviour of locks when encountering contention. /// A trait implemented by spinning relax strategies. -pub trait Relax { - /// Initialize the state for the relaxing operation, if any. - fn new() -> Self; - +pub trait Relax: Default { /// Perform the relaxing operation during a period of contention. fn relax(&mut self); } @@ -39,14 +36,10 @@ pub trait Relax { /// all possible. /// /// [priority inversion]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html +#[derive(Default)] pub struct Spin; impl Relax for Spin { - #[inline(always)] - fn new() -> Self { - Self - } - #[inline(always)] fn relax(&mut self) { core::hint::spin_loop(); @@ -62,15 +55,11 @@ impl Relax for Spin { /// and you should generally use these instead, except in rare circumstances. #[cfg(any(feature = "yield", loom, test))] #[cfg_attr(docsrs, doc(cfg(feature = "yield")))] +#[derive(Default)] pub struct Yield; -#[cfg(any(feature = "yield", all(test, not(loom))))] +#[cfg(any(all(feature = "yield", not(loom)), all(test, not(loom))))] impl Relax for Yield { - #[inline(always)] - fn new() -> Self { - Self - } - #[inline] fn relax(&mut self) { std::thread::yield_now(); @@ -82,11 +71,6 @@ impl Relax for Yield { /// progress. 
#[cfg(all(loom, test))] impl Relax for Yield { - #[inline(always)] - fn new() -> Self { - Self - } - #[inline(always)] fn relax(&mut self) { loom::thread::yield_now(); @@ -99,14 +83,10 @@ impl Relax for Yield { /// for completeness and for targets that, for some reason, miscompile or do not /// support spin hint intrinsics despite attempting to generate code for them /// (i.e: this is a workaround for possible compiler bugs). +#[derive(Default)] pub struct Loop; impl Relax for Loop { - #[inline(always)] - fn new() -> Self { - Self - } - #[inline(always)] fn relax(&mut self) {} } @@ -131,6 +111,7 @@ impl Relax for Loop { /// any significant improvement. As with [`Spin`], this implementation is /// subject to priority inversion problems, you may want to consider a yielding /// strategy or using a scheduler-aware lock. +#[derive(Default)] pub struct SpinBackoff { step: Step, } @@ -140,11 +121,6 @@ impl SpinBackoff { } impl Relax for SpinBackoff { - #[inline(always)] - fn new() -> Self { - Self { step: Step(0) } - } - #[inline(always)] fn relax(&mut self) { self.step.spin_to(Self::SPIN_LIMIT); @@ -163,6 +139,7 @@ impl Relax for SpinBackoff { /// locks if you have access to the standard library. #[cfg(feature = "yield")] #[cfg_attr(docsrs, doc(cfg(feature = "yield")))] +#[derive(Default)] pub struct YieldBackoff { step: Step, } @@ -175,11 +152,6 @@ impl YieldBackoff { #[cfg(feature = "yield")] impl Relax for YieldBackoff { - #[inline(always)] - fn new() -> Self { - Self { step: Step(0) } - } - #[inline(always)] fn relax(&mut self) { if self.step.0 <= Self::SPIN_LIMIT { @@ -192,6 +164,7 @@ impl Relax for YieldBackoff { } /// Keeps count of the number of steps taken. +#[derive(Default)] struct Step(u32); impl Step { diff --git a/src/thread_local/mod.rs b/src/thread_local/mod.rs index ce9b9d8..6d43ce2 100644 --- a/src/thread_local/mod.rs +++ b/src/thread_local/mod.rs @@ -1,8 +1,13 @@ //! A MCS lock implementation that stores queue nodes in the thread local //! 
storage of the waiting threads.
 //!
+//! The `thread_local` implementation of MCS lock is fair, that is, it
+//! guarantees that threads that have waited for longer will be scheduled first
+//! (FIFO). Each waiting thread will spin against its own, thread local atomic
+//! lock state, which then avoids the network contention of the state access.
+//!
 //! This module provide MCS locking APIs that do not require user-side node
-//! instantiation, by managing the queue's nodes allocations internally. Queue
+//! allocation, by managing the queue's node allocations internally. Queue
 //! nodes are stored in the thread local storage, therefore this implementation
 //! requires support from the standard library. Critical sections must be
 //! provided to [`lock_with`] and [`try_lock_with`] as closures. Closure arguments
@@ -11,22 +16,21 @@
 //!
 //! The Mutex is generic over the relax strategy. User may choose a strategy
 //! as long as it implements the [`Relax`] trait. There is a number of strategies
-//! provided by the [`relax`] module. The default relax strategy is [`Spin`].
-//! Each module in `thread_local` provides type aliases for [`Mutex`] and
-//! [`MutexGuard`] associated with one relax strategy. See their documentation
-//! for more information.
+//! provided by the [`relax`] module. Each module in `thread_local` provides type
+//! aliases for [`Mutex`] and [`MutexGuard`] associated with one relax strategy.
+//! See their documentation for more information.
 //!
 //! # Panics
 //!
-//! The `thread_local` [`Mutex`] implementation does not allow recursive locking,
-//! doing so will cause a panic. See [`lock_with`] and [`try_lock_with`] functions
-//! for more information.
+//! The `thread_local` [`Mutex`] implementation only allows at most one lock held
+//! within a single thread at any time. Trying to acquire a second lock while a
+//! guard is alive will cause a panic. See [`lock_with`] and [`try_lock_with`]
+//! functions for more information.
 //!
 //! 
[`lock_with`]: Mutex::lock_with //! [`try_lock_with`]: Mutex::try_lock_with //! [`relax`]: crate::relax //! [`Relax`]: crate::relax::Relax -//! [`Spin`]: crate::relax::Spin mod mutex; pub use mutex::{Mutex, MutexGuard}; diff --git a/src/thread_local/mutex.rs b/src/thread_local/mutex.rs index eb9f777..053f62e 100644 --- a/src/thread_local/mutex.rs +++ b/src/thread_local/mutex.rs @@ -20,12 +20,6 @@ use crate::relax::Relax; /// provided as closure arguments from [`lock_with`] and [`try_lock_with`], which /// guarantees that the data is only ever accessed when the mutex is locked. /// -/// # Panics -/// -/// The `thread_local` [`Mutex`] implementation does not allow recursive locking, -/// doing so will cause a panic. See [`lock_with`] and [`try_lock_with`] functions -/// for more information. -/// /// # Examples /// /// ``` @@ -133,9 +127,9 @@ impl Mutex { /// /// # Panics /// - /// This lock implementation cannot be recursively acquired, doing so it - /// result in a panic. That is the case for both `lock_with` and - /// `try_lock_with`. + /// At most one lock of this implementation might be held within a single + /// thread at any time. Trying to acquire a second lock while a guard is + /// still alive will cause a panic. /// /// # Examples /// @@ -161,6 +155,15 @@ impl Mutex { /// assert_eq!(mutex.lock_with(|guard| *guard), 10); /// ``` /// + /// Borrows of the guard or its data cannot escape the given closure. + /// + /// ```compile_fail,E0515 + /// use mcslock::thread_local::spins::Mutex; + /// + /// let mutex = Mutex::new(1); + /// let data = mutex.try_lock_with(|guard| &*guard.unwrap()); + /// ``` + /// /// An example of panic: /// /// ```should_panic @@ -196,9 +199,9 @@ impl Mutex { /// /// # Panics /// - /// This lock implementation cannot be recursively acquired, doing so it - /// result in a panic. That is the case for both `lock_with` and - /// `try_lock_with`. 
+ /// At most one lock of this implementation might be held within a single + /// thread at any time. Trying to acquire a second lock while a guard is + /// still alive will cause a panic. /// /// # Examples /// @@ -222,6 +225,15 @@ impl Mutex { /// assert_eq!(mutex.lock_with(|guard| *guard), 10); /// ``` /// + /// Borrows of the guard or its data cannot escape the given closure. + /// + /// ```compile_fail,E0515 + /// use mcslock::thread_local::spins::Mutex; + /// + /// let mutex = Mutex::new(1); + /// let data = mutex.lock_with(|guard| &*guard); + /// ``` + /// /// An example of panic: /// /// ```should_panic @@ -457,19 +469,6 @@ mod test { assert_eq!(data, 2); } - // #[test] - // fn must_not_compile() { - // let m = Mutex::new(1); - // let guard = m.lock_with(|guard| guard); - // let _value = *guard; - - // let m = Mutex::new(1); - // let _val = m.lock_with(|guard| &mut *guard); - - // let m = Mutex::new(1); - // let _val = m.lock_with(|guard| &*guard); - // } - #[test] fn lots_and_lots() { static LOCK: Mutex = Mutex::new(0); diff --git a/tests/lock_api.rs b/tests/lock_api.rs new file mode 100644 index 0000000..58034ec --- /dev/null +++ b/tests/lock_api.rs @@ -0,0 +1,168 @@ +// Test suite from the Rust's Mutex implementation with minor modifications +// since the API is not compatible with this crate implementation and some +// new tests as well. +// +// Copyright 2014 The Rust Project Developers. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::mpsc::channel; +use std::sync::Arc; +use std::thread; + +use mcslock::lock_api::yields::Mutex; + +#[derive(Eq, PartialEq, Debug)] +struct NonCopy(i32); + +#[test] +fn smoke() { + let m = Mutex::new(()); + drop(m.lock()); + drop(m.lock()); +} + +#[test] +fn lots_and_lots() { + static LOCK: Mutex = Mutex::new(0); + + const ITERS: u32 = 1000; + const CONCURRENCY: u32 = 3; + + fn inc() { + for _ in 0..ITERS { + let mut g = LOCK.lock(); + *g += 1; + } + } + + let (tx, rx) = channel(); + for _ in 0..CONCURRENCY { + let tx2 = tx.clone(); + thread::spawn(move || { + inc(); + tx2.send(()).unwrap(); + }); + let tx2 = tx.clone(); + thread::spawn(move || { + inc(); + tx2.send(()).unwrap(); + }); + } + + drop(tx); + for _ in 0..2 * CONCURRENCY { + rx.recv().unwrap(); + } + assert_eq!(*LOCK.lock(), ITERS * CONCURRENCY * 2); +} + +#[test] +fn try_lock() { + let m = Mutex::new(()); + *m.try_lock().unwrap() = (); +} + +#[test] +fn test_into_inner() { + let m = Mutex::new(NonCopy(10)); + assert_eq!(m.into_inner(), NonCopy(10)); +} + +#[test] +fn test_into_inner_drop() { + struct Foo(Arc); + impl Drop for Foo { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::SeqCst); + } + } + let num_drops = Arc::new(AtomicUsize::new(0)); + let m = Mutex::new(Foo(num_drops.clone())); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + { + let _inner = m.into_inner(); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + } + assert_eq!(num_drops.load(Ordering::SeqCst), 1); +} + +#[test] +fn test_get_mut() { + let mut m = Mutex::new(NonCopy(10)); + *m.get_mut() = NonCopy(20); + assert_eq!(m.into_inner(), NonCopy(20)); +} + +#[test] +fn test_lock_arc_nested() { + // Tests nested locks and access + // to underlying data. 
+ let arc = Arc::new(Mutex::new(1)); + let arc2 = Arc::new(Mutex::new(arc)); + let (tx, rx) = channel(); + let _t = thread::spawn(move || { + let lock = arc2.lock(); + let lock2 = lock.lock(); + assert_eq!(*lock2, 1); + tx.send(()).unwrap(); + }); + rx.recv().unwrap(); +} + +#[test] +fn test_recursive_lock() { + let arc = Arc::new(Mutex::new(1)); + let (tx, rx) = channel(); + for _ in 0..4 { + let tx2 = tx.clone(); + let c_arc = Arc::clone(&arc); + let _t = thread::spawn(move || { + let mutex = Mutex::new(1); + let _lock = c_arc.lock(); + let lock2 = mutex.lock(); + assert_eq!(*lock2, 1); + tx2.send(()).unwrap(); + }); + } + drop(tx); + rx.recv().unwrap(); +} + +#[test] +fn test_lock_arc_access_in_unwind() { + let arc = Arc::new(Mutex::new(1)); + let arc2 = arc.clone(); + let _ = thread::spawn(move || -> () { + struct Unwinder { + i: Arc>, + } + impl Drop for Unwinder { + fn drop(&mut self) { + *self.i.lock() += 1; + } + } + let _u = Unwinder { i: arc2 }; + panic!(); + }) + .join(); + let lock = arc.lock(); + assert_eq!(*lock, 2); +} + +#[test] +fn test_lock_unsized() { + let lock: &Mutex<[i32]> = &Mutex::new([1, 2, 3]); + { + let b = &mut *lock.lock(); + b[0] = 4; + b[2] = 5; + } + let comp: &[i32] = &[4, 2, 5]; + assert_eq!(&*lock.lock(), comp); +}