Add add/sub/and/or/xor methods that do not return previous value
taiki-e committed Dec 4, 2022
1 parent b1dfa1a commit 1dbed76
Showing 11 changed files with 937 additions and 1 deletion.
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -10,6 +10,10 @@ Note: In this file, do not use the hard wrap in the middle of a sentence for compatibility with GitHub comment style markdown rendering.

## [Unreleased]

- Add `Atomic{I,U}*::{add,sub,and,or,xor}` and `AtomicBool::{and,or,xor}` methods.

They are equivalent to the corresponding `fetch_*` methods, but do not return the previous value. They are intended for optimization on platforms that implement atomics using inline assembly, such as the MSP430.
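
  For illustration, a minimal usage sketch (the new methods take the same `Ordering` argument as their `fetch_*` counterparts):

  ```rust
  use portable_atomic::{AtomicU8, Ordering};

  static COUNT: AtomicU8 = AtomicU8::new(0);

  fn bump() {
      // Like `fetch_add`, but the previous value is discarded; on targets such
      // as MSP430 this can compile to a single in-place RMW instruction instead
      // of an interrupt-disabling critical section.
      COUNT.add(1, Ordering::SeqCst);
  }
  ```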

- Various improvements to `portable_atomic_unsafe_assume_single_core` cfg. ([#44](https://github.com/taiki-e/portable-atomic/pull/44))

- Support disabling FIQs on pre-v6 ARM under `portable_atomic_disable_fiq` cfg.
1 change: 1 addition & 0 deletions src/imp/atomic128/intrinsics.rs
@@ -415,6 +415,7 @@ macro_rules! atomic128 {
// SAFETY: any data races are prevented by atomic intrinsics.
unsafe impl Sync for $atomic_type {}

no_fetch_ops_impl!($atomic_type, $int_type);
impl $atomic_type {
#[inline]
pub(crate) const fn new(v: $int_type) -> Self {
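
The `no_fetch_ops_impl!` macro invoked above is defined elsewhere in the crate and is not part of this diff. As a rough sketch of the idea (details assumed, not taken from this commit), for integer atomics it presumably forwards each new method to the corresponding `fetch_*` method and discards the result, so backends without a dedicated fast path get the new methods for free:

```rust
// Hypothetical expansion sketch of `no_fetch_ops_impl!`; the real macro is
// defined outside this diff. For `AtomicBool` it would generate only
// `and`, `or`, and `xor`.
macro_rules! no_fetch_ops_impl {
    ($atomic_type:ident, $int_type:ident) => {
        impl $atomic_type {
            #[inline]
            pub(crate) fn add(&self, val: $int_type, order: Ordering) {
                self.fetch_add(val, order); // previous value discarded
            }
            #[inline]
            pub(crate) fn sub(&self, val: $int_type, order: Ordering) {
                self.fetch_sub(val, order);
            }
            #[inline]
            pub(crate) fn and(&self, val: $int_type, order: Ordering) {
                self.fetch_and(val, order);
            }
            #[inline]
            pub(crate) fn or(&self, val: $int_type, order: Ordering) {
                self.fetch_or(val, order);
            }
            #[inline]
            pub(crate) fn xor(&self, val: $int_type, order: Ordering) {
                self.fetch_xor(val, order);
            }
        }
    };
}
```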
2 changes: 2 additions & 0 deletions src/imp/atomic128/macros.rs
@@ -10,6 +10,7 @@ macro_rules! atomic128 {
// SAFETY: any data races are prevented by atomic intrinsics.
unsafe impl Sync for $atomic_type {}

no_fetch_ops_impl!($atomic_type, $int_type);
impl $atomic_type {
#[inline]
pub(crate) const fn new(v: $int_type) -> Self {
@@ -216,6 +217,7 @@ macro_rules! atomic128 {
// SAFETY: any data races are prevented by atomic intrinsics.
unsafe impl Sync for $atomic_type {}

no_fetch_ops_impl!($atomic_type, $int_type);
impl $atomic_type {
#[inline]
pub(crate) const fn new(v: $int_type) -> Self {
12 changes: 12 additions & 0 deletions src/imp/core_atomic.rs
@@ -41,6 +41,9 @@ impl AtomicBool {
}
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))]
#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
no_fetch_ops_impl!(AtomicBool, bool);
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))]
#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
impl AtomicBool {
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
@@ -171,6 +174,15 @@ macro_rules! atomic_int {
pub(crate) struct $atomic_type {
inner: core::sync::atomic::$atomic_type,
}
#[cfg_attr(
portable_atomic_no_cfg_target_has_atomic,
cfg(not(portable_atomic_no_atomic_cas))
)]
#[cfg_attr(
not(portable_atomic_no_cfg_target_has_atomic),
cfg(target_has_atomic = "ptr")
)]
no_fetch_ops_impl!($atomic_type, $int_type);
impl $atomic_type {
#[inline]
pub(crate) const fn new(v: $int_type) -> Self {
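
The paired `cfg_attr` attributes in this file select the gating mechanism by compiler vintage: toolchains that understand `cfg(target_has_atomic)` use the standard cfg, while on older toolchains the build script provides `portable_atomic_no_atomic_cas` as a stand-in. A simplified sketch of the same pattern on a concrete item (the cfg semantics described here are an assumption, not taken from this diff):

```rust
// Compiled only when the target supports pointer-width atomic CAS, whether or
// not the compiler is new enough to know cfg(target_has_atomic).
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))]
#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
fn requires_cas() {}
```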
2 changes: 2 additions & 0 deletions src/imp/fallback/imp.rs
@@ -130,6 +130,8 @@ macro_rules! atomic {
// SAFETY: any data races are prevented by the lock and atomic operation.
unsafe impl Sync for $atomic_type {}

#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
no_fetch_ops_impl!($atomic_type, $int_type);
impl $atomic_type {
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
72 changes: 71 additions & 1 deletion src/imp/interrupt/mod.rs
@@ -23,7 +23,8 @@
// [^avr2]: https://github.com/llvm/llvm-project/blob/llvmorg-15.0.0/llvm/test/CodeGen/AVR/atomics/load16.ll#L5

// On some platforms, atomic load/store can be implemented in a more efficient
// way than disabling interrupts.
// way than disabling interrupts. On MSP430, some RMWs that do not return the
// previous value can also be optimized.
//
// Note: On single-core systems, it is okay to use critical section-based
// CAS together with atomic load/store. The load/store will not be
@@ -246,6 +247,33 @@ impl AtomicBool {
}
}

#[cfg(not(target_arch = "msp430"))]
no_fetch_ops_impl!(AtomicBool, bool);
#[cfg(target_arch = "msp430")]
impl AtomicBool {
#[inline]
pub(crate) fn and(&self, val: bool, order: Ordering) {
// SAFETY: Self and atomic::AtomicBool have the same layout, so this cast is valid.
unsafe {
(*(self as *const Self as *const atomic::AtomicBool)).and(val, order);
}
}
#[inline]
pub(crate) fn or(&self, val: bool, order: Ordering) {
// SAFETY: Self and atomic::AtomicBool have the same layout, so this cast is valid.
unsafe {
(*(self as *const Self as *const atomic::AtomicBool)).or(val, order);
}
}
#[inline]
pub(crate) fn xor(&self, val: bool, order: Ordering) {
// SAFETY: Self and atomic::AtomicBool have the same layout, so this cast is valid.
unsafe {
(*(self as *const Self as *const atomic::AtomicBool)).xor(val, order);
}
}
}

#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
@@ -462,10 +490,52 @@ macro_rules! atomic_int {
}
}
}

#[cfg(not(target_arch = "msp430"))]
no_fetch_ops_impl!($atomic_type, $int_type);
#[cfg(target_arch = "msp430")]
impl $atomic_type {
#[inline]
pub(crate) fn add(&self, val: $int_type, order: Ordering) {
// SAFETY: Self and atomic::$atomic_type have the same layout, so this cast is valid.
unsafe {
(*(self as *const Self as *const atomic::$atomic_type)).add(val, order);
}
}
#[inline]
pub(crate) fn sub(&self, val: $int_type, order: Ordering) {
// SAFETY: Self and atomic::$atomic_type have the same layout, so this cast is valid.
unsafe {
(*(self as *const Self as *const atomic::$atomic_type)).sub(val, order);
}
}
#[inline]
pub(crate) fn and(&self, val: $int_type, order: Ordering) {
// SAFETY: Self and atomic::$atomic_type have the same layout, so this cast is valid.
unsafe {
(*(self as *const Self as *const atomic::$atomic_type)).and(val, order);
}
}
#[inline]
pub(crate) fn or(&self, val: $int_type, order: Ordering) {
// SAFETY: Self and atomic::$atomic_type have the same layout, so this cast is valid.
unsafe {
(*(self as *const Self as *const atomic::$atomic_type)).or(val, order);
}
}
#[inline]
pub(crate) fn xor(&self, val: $int_type, order: Ordering) {
// SAFETY: Self and atomic::$atomic_type have the same layout, so this cast is valid.
unsafe {
(*(self as *const Self as *const atomic::$atomic_type)).xor(val, order);
}
}
}
};
(load_store_critical_session, $atomic_type:ident, $int_type:ident, $align:expr) => {
atomic_int!(base, $atomic_type, $int_type, $align);
atomic_int!(cas, $atomic_type, $int_type);
no_fetch_ops_impl!($atomic_type, $int_type);
impl $atomic_type {
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
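
For contrast with the MSP430 fast path above, here is a minimal sketch of the critical-section pattern that fetch-style RMWs use under this strategy (`with_interrupts_disabled` is a hypothetical stand-in for this module's real interrupt guard, which is not shown in this diff):

```rust
use core::cell::UnsafeCell;

// Sketch of a critical-section-based fetch_add on a single-core system.
struct FallbackAtomicU8 {
    v: UnsafeCell<u8>,
}

impl FallbackAtomicU8 {
    fn fetch_add(&self, val: u8) -> u8 {
        with_interrupts_disabled(|| {
            // SAFETY: interrupts are disabled and the system is single-core,
            // so nothing else can access the value during the closure.
            let prev = unsafe { *self.v.get() };
            unsafe { *self.v.get() = prev.wrapping_add(val) };
            prev
        })
    }
}

// Stand-in: a real implementation would save the interrupt state, disable
// interrupts, run `f`, and then restore the saved state.
fn with_interrupts_disabled<T>(f: impl FnOnce() -> T) -> T {
    f()
}
```

The new non-fetch methods avoid this save/disable/restore dance entirely on MSP430 by delegating to the single-instruction asm implementations in src/imp/msp430.rs.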
2 changes: 2 additions & 0 deletions src/imp/interrupt/msp430.rs
@@ -1,4 +1,6 @@
// Adapted from https://github.com/rust-embedded/msp430.
//
// See also src/imp/msp430.rs.

#[cfg(not(portable_atomic_no_asm))]
use core::arch::asm;
Expand Down
191 changes: 191 additions & 0 deletions src/imp/msp430.rs
@@ -4,6 +4,9 @@
// Including https://github.com/pftbest/msp430-atomic/pull/4 for a compile error fix.
// Including https://github.com/pftbest/msp430-atomic/pull/5 for a soundness bug fix.
//
// Operations not supported here are provided by disabling interrupts.
// See also src/imp/interrupt/msp430.rs.
//
// Note: Ordering is always SeqCst.

#[cfg(not(portable_atomic_no_asm))]
@@ -106,6 +109,39 @@ impl AtomicBool {
u8::atomic_store(self.v.get(), val as u8);
}
}

#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn and(&self, val: bool, order: Ordering) {
crate::utils::assert_swap_ordering(order);
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
u8::atomic_and(self.v.get(), val as u8);
}
}

#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn or(&self, val: bool, order: Ordering) {
crate::utils::assert_swap_ordering(order);
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
u8::atomic_or(self.v.get(), val as u8);
}
}

#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn xor(&self, val: bool, order: Ordering) {
crate::utils::assert_swap_ordering(order);
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
u8::atomic_xor(self.v.get(), val as u8);
}
}
}

#[repr(transparent)]
@@ -235,6 +271,61 @@ macro_rules! atomic_int {
$int_type::atomic_store(self.v.get(), val);
}
}

#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn add(&self, val: $int_type, order: Ordering) {
crate::utils::assert_swap_ordering(order);
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
$int_type::atomic_add(self.v.get(), val);
}
}

#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn sub(&self, val: $int_type, order: Ordering) {
crate::utils::assert_swap_ordering(order);
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
$int_type::atomic_sub(self.v.get(), val);
}
}

#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn and(&self, val: $int_type, order: Ordering) {
crate::utils::assert_swap_ordering(order);
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
$int_type::atomic_and(self.v.get(), val);
}
}

#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn or(&self, val: $int_type, order: Ordering) {
crate::utils::assert_swap_ordering(order);
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
$int_type::atomic_or(self.v.get(), val);
}
}

#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn xor(&self, val: $int_type, order: Ordering) {
crate::utils::assert_swap_ordering(order);
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
$int_type::atomic_xor(self.v.get(), val);
}
}
}

impl AtomicOperations for $int_type {
@@ -277,6 +368,101 @@ macro_rules! atomic_int {
);
}
}

#[inline]
unsafe fn atomic_add(dst: *mut Self, val: Self) {
// SAFETY: the caller must uphold the safety contract for `atomic_add`.
unsafe {
#[cfg(not(portable_atomic_no_asm))]
asm!(
concat!("add", $asm_suffix, " {val}, 0({dst})"),
dst = in(reg) dst,
val = in(reg) val,
options(nostack),
);
#[cfg(portable_atomic_no_asm)]
llvm_asm!(
concat!("add", $asm_suffix, " $1, $0")
:: "*m"(dst), "ir"(val) : "memory" : "volatile"
);
}
}

#[inline]
unsafe fn atomic_sub(dst: *mut Self, val: Self) {
// SAFETY: the caller must uphold the safety contract for `atomic_sub`.
unsafe {
#[cfg(not(portable_atomic_no_asm))]
asm!(
concat!("sub", $asm_suffix, " {val}, 0({dst})"),
dst = in(reg) dst,
val = in(reg) val,
options(nostack),
);
#[cfg(portable_atomic_no_asm)]
llvm_asm!(
concat!("sub", $asm_suffix, " $1, $0")
:: "*m"(dst), "ir"(val) : "memory" : "volatile"
);
}
}

#[inline]
unsafe fn atomic_and(dst: *mut Self, val: Self) {
// SAFETY: the caller must uphold the safety contract for `atomic_and`.
unsafe {
#[cfg(not(portable_atomic_no_asm))]
asm!(
concat!("and", $asm_suffix, " {val}, 0({dst})"),
dst = in(reg) dst,
val = in(reg) val,
options(nostack),
);
#[cfg(portable_atomic_no_asm)]
llvm_asm!(
concat!("and", $asm_suffix, " $1, $0")
:: "*m"(dst), "ir"(val) : "memory" : "volatile"
);
}
}

#[inline]
unsafe fn atomic_or(dst: *mut Self, val: Self) {
// SAFETY: the caller must uphold the safety contract for `atomic_or`.
unsafe {
#[cfg(not(portable_atomic_no_asm))]
asm!(
concat!("bis", $asm_suffix, " {val}, 0({dst})"),
dst = in(reg) dst,
val = in(reg) val,
options(nostack),
);
#[cfg(portable_atomic_no_asm)]
llvm_asm!(
concat!("bis", $asm_suffix, " $1, $0")
:: "*m"(dst), "ir"(val) : "memory" : "volatile"
);
}
}

#[inline]
unsafe fn atomic_xor(dst: *mut Self, val: Self) {
// SAFETY: the caller must uphold the safety contract for `atomic_xor`.
unsafe {
#[cfg(not(portable_atomic_no_asm))]
asm!(
concat!("xor", $asm_suffix, " {val}, 0({dst})"),
dst = in(reg) dst,
val = in(reg) val,
options(nostack),
);
#[cfg(portable_atomic_no_asm)]
llvm_asm!(
concat!("xor", $asm_suffix, " $1, $0")
:: "*m"(dst), "ir"(val) : "memory" : "volatile"
);
}
}
}
}
}
@@ -291,4 +477,9 @@ atomic_int!(usize, AtomicUsize, ".w");
trait AtomicOperations: Sized {
unsafe fn atomic_load(src: *const Self) -> Self;
unsafe fn atomic_store(dst: *mut Self, val: Self);
unsafe fn atomic_add(dst: *mut Self, val: Self);
unsafe fn atomic_sub(dst: *mut Self, val: Self);
unsafe fn atomic_and(dst: *mut Self, val: Self);
unsafe fn atomic_or(dst: *mut Self, val: Self);
unsafe fn atomic_xor(dst: *mut Self, val: Self);
}
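
Because each helper above is a single in-place RMW instruction, and MSP430 services interrupts only on instruction boundaries, the new methods are interrupt-safe without any critical section. A hedged usage sketch (`SeqCst` is a safe choice here; the file header notes that ordering is always SeqCst in this backend):

```rust
use portable_atomic::{AtomicU16, Ordering};

static TICKS: AtomicU16 = AtomicU16::new(0);

// Safe to call from both main code and an interrupt handler: on MSP430,
// `add` should lower to a single `add.w` on the static's address, which an
// interrupt cannot observe half-done.
fn on_timer() {
    TICKS.add(1, Ordering::SeqCst);
}
```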