diff --git a/stl/inc/atomic b/stl/inc/atomic
index 8f78cb684d..a223a9a246 100644
--- a/stl/inc/atomic
+++ b/stl/inc/atomic
@@ -289,9 +289,24 @@ struct _Atomic_padded<_Ty, false> {
 };
 #endif // TRANSITION, ABI
 
+template <class _Ty>
+struct _Atomic_storage_types {
+    using _TVal = _Ty;
+    using _TStorage = _Atomic_padded<_Ty>;
+    using _TConstr = const _Ty;
+};
+
+template <class _Ty>
+struct _Atomic_storage_types<_Ty&> {
+    using _TVal = _Ty;
+    using _TStorage = _Ty&;
+    using _TConstr = _Ty&;
+};
+
+
 // STRUCT TEMPLATE _Atomic_storage
 #if 1 // TRANSITION, ABI
-template <class _Ty, size_t = sizeof(_Ty)>
+template <class _Ty, size_t = sizeof(typename _Atomic_storage_types<_Ty>::_TVal)>
 #else // ^^^ don't break ABI / break ABI vvv
 template <class _Ty, size_t = _Atomic_storage_traits<_Ty>::_Storage_size>
 #endif // TRANSITION, ABI
@@ -299,13 +314,15 @@ struct _Atomic_storage {
     // Provides operations common to all specializations of std::atomic, load, store, exchange, and CAS.
     // Locking version used when hardware has no atomic operations for sizeof(_Ty).
 
+    using _TVal = typename _Atomic_storage_types<_Ty>::_TVal;
+
     _Atomic_storage() = default;
 
-    /* implicit */ constexpr _Atomic_storage(const _Ty _Value) noexcept : _Storage(_Value) {
+    /* implicit */ constexpr _Atomic_storage(typename _Atomic_storage_types<_Ty>::_TConstr _Value) noexcept : _Storage(_Value) {
         // non-atomically initialize this atomic
     }
 
-    void store(const _Ty _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
+    void store(const _TVal _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
         // store with sequential consistency
         _Check_store_memory_order(_Order);
         _Lock();
@@ -313,37 +330,37 @@ struct _Atomic_storage {
         _Unlock();
     }
 
-    _NODISCARD _Ty load(const memory_order _Order = memory_order_seq_cst) const noexcept {
+    _NODISCARD _TVal load(const memory_order _Order = memory_order_seq_cst) const noexcept {
         // load with sequential consistency
         _Check_load_memory_order(_Order);
         _Lock();
-        _Ty _Local(_Storage);
+        _TVal _Local(_Storage);
         _Unlock();
         return _Local;
     }
 
-    _Ty exchange(const _Ty _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
+    _TVal exchange(const _TVal _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
         // exchange _Value with _Storage with sequential consistency
         _Check_memory_order(_Order);
         _Lock();
-        _Ty _Result(_Storage);
+        _TVal _Result(_Storage);
         _Storage = _Value;
         _Unlock();
         return _Result;
     }
 
-    bool compare_exchange_strong(_Ty& _Expected, const _Ty _Desired,
+    bool compare_exchange_strong(_TVal& _Expected, const _TVal _Desired,
         const memory_order _Order = memory_order_seq_cst) noexcept { // CAS with sequential consistency, plain
         _Check_memory_order(_Order);
         const auto _Storage_ptr = _STD addressof(_Storage);
         const auto _Expected_ptr = _STD addressof(_Expected);
         bool _Result;
         _Lock();
-        if (_CSTD memcmp(_Storage_ptr, _Expected_ptr, sizeof(_Ty)) == 0) {
-            _CSTD memcpy(_Storage_ptr, _STD addressof(_Desired), sizeof(_Ty));
+        if (_CSTD memcmp(_Storage_ptr, _Expected_ptr, sizeof(_TVal)) == 0) {
+            _CSTD memcpy(_Storage_ptr, _STD addressof(_Desired), sizeof(_TVal));
             _Result = true;
         } else {
-            _CSTD memcpy(_Expected_ptr, _Storage_ptr, sizeof(_Ty));
+            _CSTD memcpy(_Expected_ptr, _Storage_ptr, sizeof(_TVal));
             _Result = false;
         }
 
@@ -398,13 +415,17 @@ public:
 
 template <class _Ty>
 struct _Atomic_storage<_Ty, 1> { // lock-free using 1-byte intrinsics
+
+    using _TVal = typename _Atomic_storage_types<_Ty>::_TVal;
+
     _Atomic_storage() = default;
 
-    /* implicit */ constexpr _Atomic_storage(const _Ty _Value) noexcept : _Storage{_Value} {
+    /* implicit */ constexpr _Atomic_storage(typename _Atomic_storage_types<_Ty>::_TConstr _Value) noexcept
+        :
_Storage{_Value} { // non-atomically initialize this atomic } - void store(const _Ty _Value) noexcept { // store with sequential consistency + void store(const _TVal _Value) noexcept { // store with sequential consistency const auto _Mem = _Atomic_address_as(_Storage); const char _As_bytes = _Atomic_reinterpret_as(_Value); #if defined(_M_ARM) || defined(_M_ARM64) @@ -416,7 +437,7 @@ struct _Atomic_storage<_Ty, 1> { // lock-free using 1-byte intrinsics #endif // hardware } - void store(const _Ty _Value, const memory_order _Order) noexcept { // store with given memory order + void store(const _TVal _Value, const memory_order _Order) noexcept { // store with given memory order const char _As_bytes = _Atomic_reinterpret_as(_Value); switch (_Order) { case memory_order_relaxed: @@ -438,19 +459,19 @@ struct _Atomic_storage<_Ty, 1> { // lock-free using 1-byte intrinsics } } - _NODISCARD _Ty load() const noexcept { // load with sequential consistency + _NODISCARD _TVal load() const noexcept { // load with sequential consistency char _As_bytes = _ISO_VOLATILE_LOAD8(_Storage); _Compiler_or_memory_barrier(); - return reinterpret_cast<_Ty&>(_As_bytes); + return reinterpret_cast<_TVal&>(_As_bytes); } - _NODISCARD _Ty load(const memory_order _Order) const noexcept { // load with given memory order + _NODISCARD _TVal load(const memory_order _Order) const noexcept { // load with given memory order char _As_bytes = _ISO_VOLATILE_LOAD8(_Storage); _Load_barrier(_Order); - return reinterpret_cast<_Ty&>(_As_bytes); + return reinterpret_cast<_TVal&>(_As_bytes); } - _Ty exchange(const _Ty _Value, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal exchange(const _TVal _Value, const memory_order _Order = memory_order_seq_cst) noexcept { // exchange with given memory order char _As_bytes; _ATOMIC_CHOOSE_INTRINSIC(_Order, _As_bytes, _InterlockedExchange8, _Atomic_address_as(_Storage), @@ -458,7 +479,7 @@ struct _Atomic_storage<_Ty, 1> { // lock-free using 1-byte intrinsics return reinterpret_cast<_Ty&>(_As_bytes); } - bool compare_exchange_strong(_Ty& _Expected, const _Ty _Desired, + bool compare_exchange_strong(_TVal& _Expected, const _TVal _Desired, const memory_order _Order = memory_order_seq_cst) noexcept { // CAS with given memory order const char _Expected_bytes = _Atomic_reinterpret_as(_Expected); // read before atomic operation char _Prev_bytes; @@ -472,18 +493,21 @@ struct _Atomic_storage<_Ty, 1> { // lock-free using 1-byte intrinsics return false; } - _Atomic_padded<_Ty> _Storage; + typename _Atomic_storage_types<_Ty>::_TStorage _Storage; }; template struct _Atomic_storage<_Ty, 2> { // lock-free using 2-byte intrinsics + + using _TVal = typename _Atomic_storage_types<_Ty>::_TVal; + _Atomic_storage() = default; - /* implicit */ constexpr _Atomic_storage(const _Ty _Value) noexcept : _Storage{_Value} { + /* implicit */ constexpr _Atomic_storage(typename _Atomic_storage_types<_Ty>::_TConstr _Value) noexcept : _Storage{_Value} { // non-atomically initialize this atomic } - void store(const _Ty _Value) noexcept { // store with sequential consistency + void store(const _TVal _Value) noexcept { // store with sequential consistency const auto _Mem = _Atomic_address_as(_Storage); const short _As_bytes = _Atomic_reinterpret_as(_Value); #if defined(_M_ARM) || defined(_M_ARM64) @@ -495,7 +519,7 @@ struct _Atomic_storage<_Ty, 2> { // lock-free using 2-byte intrinsics #endif // hardware } - void store(const _Ty _Value, const memory_order _Order) noexcept { // store with given memory order + void 
store(const _TVal _Value, const memory_order _Order) noexcept { // store with given memory order const short _As_bytes = _Atomic_reinterpret_as(_Value); switch (_Order) { case memory_order_relaxed: @@ -517,27 +541,27 @@ struct _Atomic_storage<_Ty, 2> { // lock-free using 2-byte intrinsics } } - _NODISCARD _Ty load() const noexcept { // load with sequential consistency + _NODISCARD _TVal load() const noexcept { // load with sequential consistency short _As_bytes = _ISO_VOLATILE_LOAD16(_Storage); _Compiler_or_memory_barrier(); - return reinterpret_cast<_Ty&>(_As_bytes); + return reinterpret_cast<_TVal&>(_As_bytes); } - _NODISCARD _Ty load(const memory_order _Order) const noexcept { // load with given memory order + _NODISCARD _TVal load(const memory_order _Order) const noexcept { // load with given memory order short _As_bytes = _ISO_VOLATILE_LOAD16(_Storage); _Load_barrier(_Order); - return reinterpret_cast<_Ty&>(_As_bytes); + return reinterpret_cast<_TVal&>(_As_bytes); } - _Ty exchange(const _Ty _Value, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal exchange(const _TVal _Value, const memory_order _Order = memory_order_seq_cst) noexcept { // exchange with given memory order short _As_bytes; _ATOMIC_CHOOSE_INTRINSIC(_Order, _As_bytes, _InterlockedExchange16, _Atomic_address_as(_Storage), _Atomic_reinterpret_as(_Value)); - return reinterpret_cast<_Ty&>(_As_bytes); + return reinterpret_cast<_TVal&>(_As_bytes); } - bool compare_exchange_strong(_Ty& _Expected, const _Ty _Desired, + bool compare_exchange_strong(_TVal& _Expected, const _TVal _Desired, const memory_order _Order = memory_order_seq_cst) noexcept { // CAS with given memory order const short _Expected_bytes = _Atomic_reinterpret_as(_Expected); // read before atomic operation short _Prev_bytes; @@ -551,18 +575,22 @@ struct _Atomic_storage<_Ty, 2> { // lock-free using 2-byte intrinsics return false; } - _Atomic_padded<_Ty> _Storage; + typename _Atomic_storage_types<_Ty>::_TStorage _Storage; }; template struct _Atomic_storage<_Ty, 4> { // lock-free using 4-byte intrinsics + + using _TVal = typename _Atomic_storage_types<_Ty>::_TVal; + _Atomic_storage() = default; - /* implicit */ constexpr _Atomic_storage(const _Ty _Value) noexcept : _Storage{_Value} { + /* implicit */ constexpr _Atomic_storage(typename _Atomic_storage_types<_Ty>::_TConstr _Value) noexcept + : _Storage{_Value} { // non-atomically initialize this atomic } - void store(const _Ty _Value) noexcept { // store with sequential consistency + void store(const _TVal _Value) noexcept { // store with sequential consistency #if defined(_M_ARM) || defined(_M_ARM64) _Memory_barrier(); _ISO_VOLATILE_STORE32(_Storage, _Atomic_reinterpret_as(_Value)); @@ -572,7 +600,7 @@ struct _Atomic_storage<_Ty, 4> { // lock-free using 4-byte intrinsics #endif // hardware } - void store(const _Ty _Value, const memory_order _Order) noexcept { // store with given memory order + void store(const _TVal _Value, const memory_order _Order) noexcept { // store with given memory order const int _As_bytes = _Atomic_reinterpret_as(_Value); switch (_Order) { case memory_order_relaxed: @@ -594,27 +622,27 @@ struct _Atomic_storage<_Ty, 4> { // lock-free using 4-byte intrinsics } } - _NODISCARD _Ty load() const noexcept { // load with sequential consistency + _NODISCARD _TVal load() const noexcept { // load with sequential consistency auto _As_bytes = _ISO_VOLATILE_LOAD32(_Storage); _Compiler_or_memory_barrier(); - return reinterpret_cast<_Ty&>(_As_bytes); + return 
reinterpret_cast<_TVal&>(_As_bytes); } - _NODISCARD _Ty load(const memory_order _Order) const noexcept { // load with given memory order + _NODISCARD _TVal load(const memory_order _Order) const noexcept { // load with given memory order auto _As_bytes = _ISO_VOLATILE_LOAD32(_Storage); _Load_barrier(_Order); - return reinterpret_cast<_Ty&>(_As_bytes); + return reinterpret_cast<_TVal&>(_As_bytes); } - _Ty exchange(const _Ty _Value, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal exchange(const _TVal _Value, const memory_order _Order = memory_order_seq_cst) noexcept { // exchange with given memory order long _As_bytes; _ATOMIC_CHOOSE_INTRINSIC(_Order, _As_bytes, _InterlockedExchange, _Atomic_address_as(_Storage), _Atomic_reinterpret_as(_Value)); - return reinterpret_cast<_Ty&>(_As_bytes); + return reinterpret_cast<_TVal&>(_As_bytes); } - bool compare_exchange_strong(_Ty& _Expected, const _Ty _Desired, + bool compare_exchange_strong(_TVal& _Expected, const _TVal _Desired, const memory_order _Order = memory_order_seq_cst) noexcept { // CAS with given memory order const long _Expected_bytes = _Atomic_reinterpret_as(_Expected); // read before atomic operation long _Prev_bytes; @@ -624,30 +652,33 @@ struct _Atomic_storage<_Ty, 4> { // lock-free using 4-byte intrinsics return true; } - _CSTD memcpy(_STD addressof(_Expected), &_Prev_bytes, sizeof(_Ty)); + _CSTD memcpy(_STD addressof(_Expected), &_Prev_bytes, sizeof(_TVal)); return false; } - _Atomic_padded<_Ty> _Storage; + typename _Atomic_storage_types<_Ty>::_TStorage _Storage; }; template struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics + + using _TVal = typename _Atomic_storage_types<_Ty>::_TVal; + _Atomic_storage() = default; - /* implicit */ constexpr _Atomic_storage(const _Ty _Value) noexcept : _Storage{_Value} { + /* implicit */ constexpr _Atomic_storage(typename _Atomic_storage_types<_Ty>::_TConstr _Value) noexcept : _Storage{_Value} { // non-atomically initialize this atomic } #ifdef _M_IX86 - void store(const _Ty _Value, const memory_order _Order = memory_order_seq_cst) noexcept { + void store(const _TVal _Value, const memory_order _Order = memory_order_seq_cst) noexcept { // store with (effectively) sequential consistency _Check_store_memory_order(_Order); (void) exchange(_Value, _Order); } #else // ^^^ _M_IX86 / !_M_IX86 vvv - void store(const _Ty _Value) noexcept { // store with sequential consistency + void store(const _TVal _Value) noexcept { // store with sequential consistency const auto _Mem = _Atomic_address_as(_Storage); const long long _As_bytes = _Atomic_reinterpret_as(_Value); #ifdef _M_ARM64 @@ -659,7 +690,7 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics #endif // _M_ARM64 } - void store(const _Ty _Value, const memory_order _Order) noexcept { // store with given memory order + void store(const _TVal _Value, const memory_order _Order) noexcept { // store with given memory order const long long _As_bytes = _Atomic_reinterpret_as(_Value); switch (_Order) { case memory_order_relaxed: @@ -682,7 +713,7 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics } #endif // _M_IX86 - _NODISCARD _Ty load() const noexcept { // load with sequential consistency + _NODISCARD _TVal load() const noexcept { // load with sequential consistency const auto _Mem = _Atomic_address_as(_Storage); long long _As_bytes; #if defined(_M_ARM) @@ -695,10 +726,10 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics _As_bytes = *_Mem; _Compiler_barrier(); 
#endif // hardware - return reinterpret_cast<_Ty&>(_As_bytes); + return reinterpret_cast<_TVal&>(_As_bytes); } - _NODISCARD _Ty load(const memory_order _Order) const noexcept { // load with given memory order + _NODISCARD _TVal load(const memory_order _Order) const noexcept { // load with given memory order const auto _Mem = _Atomic_address_as(_Storage); #if defined(_M_ARM) long long _As_bytes = __ldrexd(_Mem); @@ -708,29 +739,29 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics long long _As_bytes = *_Mem; #endif // hardware _Load_barrier(_Order); - return reinterpret_cast<_Ty&>(_As_bytes); + return reinterpret_cast<_TVal&>(_As_bytes); } #ifdef _M_IX86 - _Ty exchange(const _Ty _Value, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal exchange(const _TVal _Value, const memory_order _Order = memory_order_seq_cst) noexcept { // exchange with (effectively) sequential consistency - _Ty _Temp{load()}; + _TVal _Temp{load()}; while (!compare_exchange_strong(_Temp, _Value, _Order)) { // keep trying } return _Temp; } #else // ^^^ _M_IX86 / !_M_IX86 vvv - _Ty exchange(const _Ty _Value, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal exchange(const _TVal _Value, const memory_order _Order = memory_order_seq_cst) noexcept { // exchange with given memory order long long _As_bytes; _ATOMIC_CHOOSE_INTRINSIC(_Order, _As_bytes, _InterlockedExchange64, _Atomic_address_as(_Storage), _Atomic_reinterpret_as(_Value)); - return reinterpret_cast<_Ty&>(_As_bytes); + return reinterpret_cast<_TVal&>(_As_bytes); } #endif // _M_IX86 - bool compare_exchange_strong(_Ty& _Expected, const _Ty _Desired, + bool compare_exchange_strong(_TVal& _Expected, const _TVal _Desired, const memory_order _Order = memory_order_seq_cst) noexcept { // CAS with given memory order const long long _Expected_bytes = _Atomic_reinterpret_as(_Expected); // read before atomic operation long long _Prev_bytes; @@ -740,39 +771,42 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics return true; } - _CSTD memcpy(_STD addressof(_Expected), &_Prev_bytes, sizeof(_Ty)); + _CSTD memcpy(_STD addressof(_Expected), &_Prev_bytes, sizeof(_TVal)); return false; } - _Atomic_padded<_Ty> _Storage; + typename _Atomic_storage_types<_Ty>::_TStorage _Storage; }; #if 0 // TRANSITION, ABI #if defined(_M_X64) || defined(_M_ARM64) template struct _Atomic_storage<_Ty, 16> { // lock-free using 16-byte intrinsics + + using _TVal = typename _Atomic_storage_types<_Ty>::_TVal; + _Atomic_storage() = default; - /* implicit */ constexpr _Atomic_storage(const _Ty _Value) noexcept + /* implicit */ constexpr _Atomic_storage(typename _Atomic_storage_types<_Ty>::_TConstr _Value) noexcept : _Storage{_Value} {} // non-atomically initialize this atomic - void store(const _Ty _Value) noexcept { // store with sequential consistency + void store(const _TVal _Value) noexcept { // store with sequential consistency (void) exchange(_Value); } - void store(const _Ty _Value, const memory_order _Order) noexcept { // store with given memory order + void store(const _TVal _Value, const memory_order _Order) noexcept { // store with given memory order _Check_store_memory_order(_Order); (void) exchange(_Value, _Order); } - _NODISCARD _Ty load() const noexcept { // load with sequential consistency + _NODISCARD _TVal load() const noexcept { // load with sequential consistency long long* const _Storage_ptr = const_cast(_Atomic_address_as(_Storage)); _Int128 _Result{}; // atomic CAS 0 with 0 (void) 
_STD_COMPARE_EXCHANGE_128(_Storage_ptr, 0, 0, &_Result._Low); - return reinterpret_cast<_Ty&>(_Result); + return reinterpret_cast<_TVal&>(_Result); } - _NODISCARD _Ty load(const memory_order _Order) const noexcept { // load with given memory order + _NODISCARD _TVal load(const memory_order _Order) const noexcept { // load with given memory order #ifdef _M_ARM64 long long* const _Storage_ptr = const_cast(_Atomic_address_as(_Storage)); _Int128 _Result{}; // atomic CAS 0 with 0 @@ -794,14 +828,14 @@ struct _Atomic_storage<_Ty, 16> { // lock-free using 16-byte intrinsics break; } - return reinterpret_cast<_Ty&>(_Result); + return reinterpret_cast<_TVal&>(_Result); #else // ^^^ _M_ARM64 / _M_X64 vvv _Check_load_memory_order(_Order); return load(); #endif // _M_ARM64 } - _Ty exchange(const _Ty _Value) noexcept { // exchange with sequential consistency + _TVal exchange(const _TVal _Value) noexcept { // exchange with sequential consistency _Ty _Result{_Value}; while (!compare_exchange_strong(_Result, _Value)) { // keep trying } @@ -809,7 +843,7 @@ struct _Atomic_storage<_Ty, 16> { // lock-free using 16-byte intrinsics return _Result; } - _Ty exchange(const _Ty _Value, const memory_order _Order) noexcept { // exchange with given memory order + _TVal exchange(const _TVal _Value, const memory_order _Order) noexcept { // exchange with given memory order _Ty _Result{_Value}; while (!compare_exchange_strong(_Result, _Value, _Order)) { // keep trying } @@ -817,12 +851,12 @@ struct _Atomic_storage<_Ty, 16> { // lock-free using 16-byte intrinsics return _Result; } - bool compare_exchange_strong(_Ty& _Expected, const _Ty _Desired, + bool compare_exchange_strong(_TVal& _Expected, const _TVal _Desired, const memory_order _Order = memory_order_seq_cst) noexcept { // CAS with given memory order _Int128 _Desired_bytes{}; - _CSTD memcpy(&_Desired_bytes, _STD addressof(_Desired), sizeof(_Ty)); + _CSTD memcpy(&_Desired_bytes, _STD addressof(_Desired), sizeof(_TVal)); _Int128 _Expected_temp{}; - _CSTD memcpy(&_Expected_temp, _STD addressof(_Expected), sizeof(_Ty)); + _CSTD memcpy(&_Expected_temp, _STD addressof(_Expected), sizeof(_TVal)); unsigned char _Result; #ifdef _M_ARM64 _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedCompareExchange128, @@ -833,7 +867,7 @@ struct _Atomic_storage<_Ty, 16> { // lock-free using 16-byte intrinsics &reinterpret_cast(_Storage), _Desired_bytes._High, _Desired_bytes._Low, &_Expected_temp._Low); #endif // _M_ARM64 if (_Result == 0) { - _CSTD memcpy(_STD addressof(_Expected), &_Expected_temp, sizeof(_Ty)); + _CSTD memcpy(_STD addressof(_Expected), &_Expected_temp, sizeof(_TVal)); } return _Result != 0; @@ -844,7 +878,7 @@ struct _Atomic_storage<_Ty, 16> { // lock-free using 16-byte intrinsics long long _High; }; - _Atomic_padded<_Ty> _Storage; + typename _Atomic_storage_types<_Ty>::_TStorage _Storage; }; #endif // defined(_M_X64) || defined(_M_ARM64) #endif // TRANSITION, ABI @@ -859,59 +893,62 @@ struct _Atomic_integral<_Ty, 1> : _Atomic_storage<_Ty> { // atomic integral oper #ifdef __cplusplus_winrt // TRANSITION, VSO-1083296 _Atomic_integral() = default; - /* implicit */ constexpr _Atomic_integral(const _Ty _Value) noexcept : _Base(_Value) {} + /* implicit */ constexpr _Atomic_integral(typename _Atomic_storage_types<_Ty>::_TConstr _Value) noexcept + : _Base(_Value) {} #else // ^^^ workaround / no workaround vvv using _Base::_Base; #endif // ^^^ no workaround ^^^ - _Ty fetch_add(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + using _TVal = 
typename _Base::_TVal; + + _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { char _Result; _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd8, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty fetch_and(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_and(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { char _Result; _ATOMIC_CHOOSE_INTRINSIC( _Order, _Result, _InterlockedAnd8, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty fetch_or(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_or(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { char _Result; _ATOMIC_CHOOSE_INTRINSIC( _Order, _Result, _InterlockedOr8, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty fetch_xor(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_xor(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { char _Result; _ATOMIC_CHOOSE_INTRINSIC( _Order, _Result, _InterlockedXor8, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty operator++(int) noexcept { - return static_cast<_Ty>(_InterlockedExchangeAdd8(_Atomic_address_as(this->_Storage), 1)); + _TVal operator++(int) noexcept { + return static_cast<_TVal>(_InterlockedExchangeAdd8(_Atomic_address_as(this->_Storage), 1)); } - _Ty operator++() noexcept { + _TVal operator++() noexcept { unsigned char _Before = static_cast(_InterlockedExchangeAdd8(_Atomic_address_as(this->_Storage), 1)); ++_Before; - return static_cast<_Ty>(_Before); + return static_cast<_TVal>(_Before); } - _Ty operator--(int) noexcept { + _TVal operator--(int) noexcept { return static_cast<_Ty>(_InterlockedExchangeAdd8(_Atomic_address_as(this->_Storage), -1)); } - _Ty operator--() noexcept { + _TVal operator--() noexcept { unsigned char _Before = static_cast(_InterlockedExchangeAdd8(_Atomic_address_as(this->_Storage), -1)); --_Before; - return static_cast<_Ty>(_Before); + return static_cast<_TVal>(_Before); } }; @@ -922,59 +959,62 @@ struct _Atomic_integral<_Ty, 2> : _Atomic_storage<_Ty> { // atomic integral oper #ifdef __cplusplus_winrt // TRANSITION, VSO-1083296 _Atomic_integral() = default; - /* implicit */ constexpr _Atomic_integral(const _Ty _Value) noexcept : _Base(_Value) {} + /* implicit */ constexpr _Atomic_integral(typename _Atomic_storage_types<_Ty>::_TConstr _Value) noexcept + : _Base(_Value) {} #else // ^^^ workaround / no workaround vvv using _Base::_Base; #endif // ^^^ no workaround ^^^ - _Ty fetch_add(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + using _TVal = typename _Base::_TVal; + + _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { short _Result; _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd16, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty fetch_and(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal 
fetch_and(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { short _Result; _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedAnd16, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty fetch_or(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_or(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { short _Result; _ATOMIC_CHOOSE_INTRINSIC( _Order, _Result, _InterlockedOr16, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty fetch_xor(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_xor(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { short _Result; _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedXor16, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty operator++(int) noexcept { + _TVal operator++(int) noexcept { unsigned short _After = static_cast(_InterlockedIncrement16(_Atomic_address_as(this->_Storage))); --_After; - return static_cast<_Ty>(_After); + return static_cast<_TVal>(_After); } - _Ty operator++() noexcept { - return static_cast<_Ty>(_InterlockedIncrement16(_Atomic_address_as(this->_Storage))); + _TVal operator++() noexcept { + return static_cast<_TVal>(_InterlockedIncrement16(_Atomic_address_as(this->_Storage))); } - _Ty operator--(int) noexcept { + _TVal operator--(int) noexcept { unsigned short _After = static_cast(_InterlockedDecrement16(_Atomic_address_as(this->_Storage))); ++_After; - return static_cast<_Ty>(_After); + return static_cast<_TVal>(_After); } - _Ty operator--() noexcept { - return static_cast<_Ty>(_InterlockedDecrement16(_Atomic_address_as(this->_Storage))); + _TVal operator--() noexcept { + return static_cast<_TVal>(_InterlockedDecrement16(_Atomic_address_as(this->_Storage))); } }; @@ -984,59 +1024,62 @@ struct _Atomic_integral<_Ty, 4> : _Atomic_storage<_Ty> { // atomic integral oper #ifdef __cplusplus_winrt // TRANSITION, VSO-1083296 _Atomic_integral() = default; - /* implicit */ constexpr _Atomic_integral(const _Ty _Value) noexcept : _Base(_Value) {} + /* implicit */ constexpr _Atomic_integral(typename _Atomic_storage_types<_Ty>::_TConstr _Value) noexcept + : _Base(_Value) {} #else // ^^^ workaround / no workaround vvv using _Base::_Base; #endif // ^^^ no workaround ^^^ - _Ty fetch_add(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + using _TVal = typename _Base::_TVal; + + _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { long _Result; _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty fetch_and(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_and(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { long _Result; _ATOMIC_CHOOSE_INTRINSIC( _Order, _Result, _InterlockedAnd, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty fetch_or(const _Ty _Operand, const memory_order _Order = 
memory_order_seq_cst) noexcept { + _TVal fetch_or(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { long _Result; _ATOMIC_CHOOSE_INTRINSIC( _Order, _Result, _InterlockedOr, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty fetch_xor(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_xor(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { long _Result; _ATOMIC_CHOOSE_INTRINSIC( _Order, _Result, _InterlockedXor, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty operator++(int) noexcept { + _TVal operator++(int) noexcept { unsigned long _After = static_cast(_InterlockedIncrement(_Atomic_address_as(this->_Storage))); --_After; - return static_cast<_Ty>(_After); + return static_cast<_TVal>(_After); } - _Ty operator++() noexcept { - return static_cast<_Ty>(_InterlockedIncrement(_Atomic_address_as(this->_Storage))); + _TVal operator++() noexcept { + return static_cast<_TVal>(_InterlockedIncrement(_Atomic_address_as(this->_Storage))); } - _Ty operator--(int) noexcept { + _TVal operator--(int) noexcept { unsigned long _After = static_cast(_InterlockedDecrement(_Atomic_address_as(this->_Storage))); ++_After; - return static_cast<_Ty>(_After); + return static_cast<_TVal>(_After); } - _Ty operator--() noexcept { - return static_cast<_Ty>(_InterlockedDecrement(_Atomic_address_as(this->_Storage))); + _TVal operator--() noexcept { + return static_cast<_TVal>(_InterlockedDecrement(_Atomic_address_as(this->_Storage))); } }; @@ -1046,113 +1089,115 @@ struct _Atomic_integral<_Ty, 8> : _Atomic_storage<_Ty> { // atomic integral oper #ifdef __cplusplus_winrt // TRANSITION, VSO-1083296 _Atomic_integral() = default; - /* implicit */ constexpr _Atomic_integral(const _Ty _Value) noexcept : _Base(_Value) {} + /* implicit */ constexpr _Atomic_integral(typename _Atomic_storage_types<_Ty>::_TConstr _Value) noexcept : _Base(_Value) {} #else // ^^^ workaround / no workaround vvv using _Base::_Base; #endif // ^^^ no workaround ^^^ + using _TVal = typename _Base::_TVal; + #ifdef _M_IX86 - _Ty fetch_add(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { // effectively sequential consistency - _Ty _Temp{this->load()}; + _TVal _Temp{this->load()}; while (!this->compare_exchange_strong(_Temp, _Temp + _Operand, _Order)) { // keep trying } return _Temp; } - _Ty fetch_and(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_and(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { // effectively sequential consistency - _Ty _Temp{this->load()}; + _TVal _Temp{this->load()}; while (!this->compare_exchange_strong(_Temp, _Temp & _Operand, _Order)) { // keep trying } return _Temp; } - _Ty fetch_or(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_or(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { // effectively sequential consistency - _Ty _Temp{this->load()}; + _TVal _Temp{this->load()}; while (!this->compare_exchange_strong(_Temp, _Temp | _Operand, _Order)) { // keep trying } return _Temp; } - _Ty fetch_xor(const _Ty _Operand, const memory_order _Order = 
memory_order_seq_cst) noexcept { + _TVal fetch_xor(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { // effectively sequential consistency - _Ty _Temp{this->load()}; + _TVal _Temp{this->load()}; while (!this->compare_exchange_strong(_Temp, _Temp ^ _Operand, _Order)) { // keep trying } return _Temp; } - _Ty operator++(int) noexcept { - return fetch_add(static_cast<_Ty>(1)); + _TVal operator++(int) noexcept { + return fetch_add(static_cast<_TVal>(1)); } - _Ty operator++() noexcept { - return fetch_add(static_cast<_Ty>(1)) + static_cast<_Ty>(1); + _TVal operator++() noexcept { + return fetch_add(static_cast<_TVal>(1)) + static_cast<_TVal>(1); } - _Ty operator--(int) noexcept { - return fetch_add(static_cast<_Ty>(-1)); + _TVal operator--(int) noexcept { + return fetch_add(static_cast<_TVal>(-1)); } - _Ty operator--() noexcept { - return fetch_add(static_cast<_Ty>(-1)) - static_cast<_Ty>(1); + _TVal operator--() noexcept { + return fetch_add(static_cast<_TVal>(-1)) - static_cast<_TVal>(1); } #else // ^^^ _M_IX86 / !_M_IX86 vvv - _Ty fetch_add(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { long long _Result; _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedExchangeAdd64, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty fetch_and(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_and(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { long long _Result; _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedAnd64, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty fetch_or(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_or(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { long long _Result; _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedOr64, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty fetch_xor(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_xor(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { long long _Result; _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _InterlockedXor64, _Atomic_address_as(this->_Storage), static_cast(_Operand)); - return static_cast<_Ty>(_Result); + return static_cast<_TVal>(_Result); } - _Ty operator++(int) noexcept { + _TVal operator++(int) noexcept { unsigned long long _After = static_cast(_InterlockedIncrement64(_Atomic_address_as(this->_Storage))); --_After; - return static_cast<_Ty>(_After); + return static_cast<_TVal>(_After); } - _Ty operator++() noexcept { - return static_cast<_Ty>(_InterlockedIncrement64(_Atomic_address_as(this->_Storage))); + _TVal operator++() noexcept { + return static_cast<_TVal>(_InterlockedIncrement64(_Atomic_address_as(this->_Storage))); } - _Ty operator--(int) noexcept { + _TVal operator--(int) noexcept { unsigned long long _After = static_cast(_InterlockedDecrement64(_Atomic_address_as(this->_Storage))); ++_After; - return static_cast<_Ty>(_After); + return static_cast<_TVal>(_After); } - _Ty operator--() noexcept { - return 
static_cast<_Ty>(_InterlockedDecrement64(_Atomic_address_as(this->_Storage))); + _TVal operator--() noexcept { + return static_cast<_TVal>(_InterlockedDecrement64(_Atomic_address_as(this->_Storage))); } #endif // _M_IX86 }; @@ -1185,128 +1230,131 @@ struct _Atomic_integral_facade : _Atomic_integral<_Ty> { #ifdef __cplusplus_winrt // TRANSITION, VSO-1083296 _Atomic_integral_facade() = default; - /* implicit */ constexpr _Atomic_integral_facade(const _Ty _Value) noexcept : _Base(_Value) {} + /* implicit */ constexpr _Atomic_integral_facade(typename _Atomic_storage_types<_Ty>::_TConstr _Value) noexcept + : _Base(_Value) {} #else // ^^^ workaround / no workaround vvv using _Base::_Base; #endif // ^^^ no workaround ^^^ + using _TVal = typename _Base::_TVal; + // _Deprecate_non_lock_free_volatile is unnecessary here. // note: const_cast-ing away volatile is safe because all our intrinsics add volatile back on. // We make the primary functions non-volatile for better debug codegen, as non-volatile atomics // are far more common than volatile ones. using _Base::fetch_add; - _Ty fetch_add(const _Ty _Operand) volatile noexcept { + _TVal fetch_add(const _TVal _Operand) volatile noexcept { return const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_add(_Operand); } - _Ty fetch_add(const _Ty _Operand, const memory_order _Order) volatile noexcept { + _TVal fetch_add(const _TVal _Operand, const memory_order _Order) volatile noexcept { return const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_add(_Operand, _Order); } - _NODISCARD static _Ty _Negate(const _Ty _Value) noexcept { // returns two's complement negated value of _Value - return static_cast<_Ty>(0U - static_cast>(_Value)); + _NODISCARD static _TVal _Negate(const _TVal _Value) noexcept { // returns two's complement negated value of _Value + return static_cast<_TVal>(0U - static_cast>(_Value)); } - _Ty fetch_sub(const _Ty _Operand) noexcept { + _TVal fetch_sub(const _TVal _Operand) noexcept { return fetch_add(_Negate(_Operand)); } - _Ty fetch_sub(const _Ty _Operand) volatile noexcept { + _TVal fetch_sub(const _TVal _Operand) volatile noexcept { return fetch_add(_Negate(_Operand)); } - _Ty fetch_sub(const _Ty _Operand, const memory_order _Order) noexcept { + _TVal fetch_sub(const _TVal _Operand, const memory_order _Order) noexcept { return fetch_add(_Negate(_Operand), _Order); } - _Ty fetch_sub(const _Ty _Operand, const memory_order _Order) volatile noexcept { + _TVal fetch_sub(const _TVal _Operand, const memory_order _Order) volatile noexcept { return fetch_add(_Negate(_Operand), _Order); } using _Base::fetch_and; - _Ty fetch_and(const _Ty _Operand) volatile noexcept { + _TVal fetch_and(const _TVal _Operand) volatile noexcept { return const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_and(_Operand); } - _Ty fetch_and(const _Ty _Operand, const memory_order _Order) volatile noexcept { + _TVal fetch_and(const _TVal _Operand, const memory_order _Order) volatile noexcept { return const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_and(_Operand, _Order); } using _Base::fetch_or; - _Ty fetch_or(const _Ty _Operand) volatile noexcept { + _TVal fetch_or(const _TVal _Operand) volatile noexcept { return const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_or(_Operand); } - _Ty fetch_or(const _Ty _Operand, const memory_order _Order) volatile noexcept { + _TVal fetch_or(const _TVal _Operand, const memory_order _Order) volatile noexcept { return const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_or(_Operand, _Order); } using 
_Base::fetch_xor; - _Ty fetch_xor(const _Ty _Operand) volatile noexcept { + _TVal fetch_xor(const _TVal _Operand) volatile noexcept { return const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_xor(_Operand); } - _Ty fetch_xor(const _Ty _Operand, const memory_order _Order) volatile noexcept { + _TVal fetch_xor(const _TVal _Operand, const memory_order _Order) volatile noexcept { return const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_xor(_Operand, _Order); } using _Base::operator++; - _Ty operator++(int) volatile noexcept { + _TVal operator++(int) volatile noexcept { return const_cast<_Atomic_integral_facade*>(this)->_Base::operator++(0); } - _Ty operator++() volatile noexcept { + _TVal operator++() volatile noexcept { return const_cast<_Atomic_integral_facade*>(this)->_Base::operator++(); } using _Base::operator--; - _Ty operator--(int) volatile noexcept { + _TVal operator--(int) volatile noexcept { return const_cast<_Atomic_integral_facade*>(this)->_Base::operator--(0); } - _Ty operator--() volatile noexcept { + _TVal operator--() volatile noexcept { return const_cast<_Atomic_integral_facade*>(this)->_Base::operator--(); } - _Ty operator+=(const _Ty _Operand) noexcept { - return static_cast<_Ty>(this->_Base::fetch_add(_Operand) + _Operand); + _TVal operator+=(const _TVal _Operand) noexcept { + return static_cast<_TVal>(this->_Base::fetch_add(_Operand) + _Operand); } - _Ty operator+=(const _Ty _Operand) volatile noexcept { - return static_cast<_Ty>(const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_add(_Operand) + _Operand); + _TVal operator+=(const _TVal _Operand) volatile noexcept { + return static_cast<_TVal>(const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_add(_Operand) + _Operand); } - _Ty operator-=(const _Ty _Operand) noexcept { - return static_cast<_Ty>(fetch_sub(_Operand) - _Operand); + _TVal operator-=(const _TVal _Operand) noexcept { + return static_cast<_TVal>(fetch_sub(_Operand) - _Operand); } - _Ty operator-=(const _Ty _Operand) volatile noexcept { - return static_cast<_Ty>(const_cast<_Atomic_integral_facade*>(this)->fetch_sub(_Operand) - _Operand); + _TVal operator-=(const _TVal _Operand) volatile noexcept { + return static_cast<_TVal>(const_cast<_Atomic_integral_facade*>(this)->fetch_sub(_Operand) - _Operand); } - _Ty operator&=(const _Ty _Operand) noexcept { - return static_cast<_Ty>(this->_Base::fetch_and(_Operand) & _Operand); + _TVal operator&=(const _TVal _Operand) noexcept { + return static_cast<_TVal>(this->_Base::fetch_and(_Operand) & _Operand); } - _Ty operator&=(const _Ty _Operand) volatile noexcept { - return static_cast<_Ty>(const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_and(_Operand) & _Operand); + _TVal operator&=(const _TVal _Operand) volatile noexcept { + return static_cast<_TVal>(const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_and(_Operand) & _Operand); } - _Ty operator|=(const _Ty _Operand) noexcept { - return static_cast<_Ty>(this->_Base::fetch_or(_Operand) | _Operand); + _TVal operator|=(const _TVal _Operand) noexcept { + return static_cast<_TVal>(this->_Base::fetch_or(_Operand) | _Operand); } - _Ty operator|=(const _Ty _Operand) volatile noexcept { - return static_cast<_Ty>(const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_or(_Operand) | _Operand); + _TVal operator|=(const _TVal _Operand) volatile noexcept { + return static_cast<_TVal>(const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_or(_Operand) | _Operand); } - _Ty operator^=(const _Ty _Operand) noexcept { - return 
static_cast<_Ty>(this->_Base::fetch_xor(_Operand) ^ _Operand); + _TVal operator^=(const _TVal _Operand) noexcept { + return static_cast<_TVal>(this->_Base::fetch_xor(_Operand) ^ _Operand); } - _Ty operator^=(const _Ty _Operand) volatile noexcept { - return static_cast<_Ty>(const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_xor(_Operand) ^ _Operand); + _TVal operator^=(const _TVal _Operand) volatile noexcept { + return static_cast<_TVal>(const_cast<_Atomic_integral_facade*>(this)->_Base::fetch_xor(_Operand) ^ _Operand); } }; @@ -1319,13 +1367,16 @@ struct _Atomic_floating : _Atomic_storage<_Ty> { #ifdef __cplusplus_winrt // TRANSITION, VSO-1083296 _Atomic_floating() = default; - /* implicit */ constexpr _Atomic_floating(const _Ty _Value) noexcept : _Base(_Value) {} + /* implicit */ constexpr _Atomic_floating(typename _Atomic_storage_types<_Ty>::_TConstr _Value) noexcept + : _Base(_Value) {} #else // ^^^ workaround / no workaround vvv using _Base::_Base; #endif // ^^^ no workaround ^^^ - _Ty fetch_add(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { - _Ty _Temp{this->load(memory_order_relaxed)}; + using _TVal = typename _Base::_TVal; + + _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal _Temp{this->load(memory_order_relaxed)}; while (!this->compare_exchange_strong(_Temp, _Temp + _Operand, _Order)) { // keep trying } @@ -1337,11 +1388,11 @@ struct _Atomic_floating : _Atomic_storage<_Ty> { // note: const_cast-ing away volatile is safe because all our intrinsics add volatile back on. // We make the primary functions non-volatile for better debug codegen, as non-volatile atomics // are far more common than volatile ones. - _Ty fetch_add(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) volatile noexcept { + _TVal fetch_add(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) volatile noexcept { return const_cast<_Atomic_floating*>(this)->fetch_add(_Operand, _Order); } - _Ty fetch_sub(const _Ty _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { + _TVal fetch_sub(const _TVal _Operand, const memory_order _Order = memory_order_seq_cst) noexcept { _Ty _Temp{this->load(memory_order_relaxed)}; while (!this->compare_exchange_strong(_Temp, _Temp - _Operand, _Order)) { // keep trying } @@ -1379,7 +1430,8 @@ struct _Atomic_pointer : _Atomic_storage<_Ty> { #ifdef __cplusplus_winrt // TRANSITION, VSO-1083296 _Atomic_pointer() = default; - /* implicit */ constexpr _Atomic_pointer(const _Ty _Value) noexcept : _Base(_Value) {} + /* implicit */ constexpr _Atomic_pointer(typename _Atomic_storage_types<_Ty>::_TConstr _Value) noexcept + : _Base(_Value) {} #else // ^^^ workaround / no workaround vvv using _Base::_Base; #endif // ^^^ no workaround ^^^ @@ -1659,6 +1711,163 @@ template atomic(_Ty) -> atomic<_Ty>; #endif // _HAS_CXX17 +#if _HAS_CXX20 +template +struct atomic_ref : _Choose_atomic_base_t<_Ty&> { // atomic value +private: + using _Base = _Choose_atomic_base_t<_Ty&>; + +public: + // clang-format off + static_assert(is_trivially_copyable_v<_Ty> && is_copy_constructible_v<_Ty> && is_move_constructible_v<_Ty> + && is_copy_assignable_v<_Ty> && is_move_assignable_v<_Ty>, + "atomic requires T to be trivially copyable, copy constructible, move constructible, copy assignable, " + "and move assignable."); + // clang-format on + + using value_type = _Ty; + + explicit atomic_ref(_Ty& _Value) noexcept : _Base(_Value) {} + + atomic_ref(const atomic_ref&) noexcept = 
default;
+
+    atomic_ref& operator=(const atomic_ref&) = delete;
+
+    static constexpr bool is_always_lock_free = _Is_always_lock_free;
+
+#if 1 // TRANSITION, ABI
+    _NODISCARD bool is_lock_free() const volatile noexcept {
+        constexpr bool _Result = sizeof(_Ty) <= 8 && (sizeof(_Ty) & sizeof(_Ty) - 1) == 0;
+        return _Result;
+    }
+
+#else // ^^^ don't break ABI / break ABI vvv
+
+    _NODISCARD bool is_lock_free() const volatile noexcept {
+#if _ATOMIC_HAS_DCAS
+        return sizeof(_Ty) <= 2 * sizeof(void*);
+#else // ^^^ _ATOMIC_HAS_DCAS / !_ATOMIC_HAS_DCAS vvv
+        return sizeof(_Ty) <= sizeof(void*) || (sizeof(_Ty) <= 2 * sizeof(void*) && __std_atomic_has_cmpxchg16b());
+#endif // _ATOMIC_HAS_DCAS
+    }
+#endif // TRANSITION, ABI
+
+    _NODISCARD bool is_lock_free() const noexcept {
+        return static_cast<const volatile atomic_ref*>(this)->is_lock_free();
+    }
+
+    _Ty operator=(const _Ty _Value) volatile noexcept {
+        static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
+        this->store(_Value);
+        return _Value;
+    }
+
+    _Ty operator=(const _Ty _Value) noexcept {
+        this->store(_Value);
+        return _Value;
+    }
+
+    // For the following, we do the real implementation in the non-volatile function, and const_cast
+    // to call the non-volatile function in the volatile one. This is safe because all of the
+    // non-volatile functions reapply volatile, as all our intrinsics accept only volatile T *.
+    // We expect most atomics to be non-volatile, so making the real implementations
+    // non-volatile should result in better debug codegen.
+    using _Base::store;
+    void store(const _Ty _Value) volatile noexcept {
+        static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
+        const_cast<atomic_ref*>(this)->_Base::store(_Value);
+    }
+
+    void store(const _Ty _Value, const memory_order _Order) volatile noexcept {
+        static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
+        const_cast<atomic_ref*>(this)->_Base::store(_Value, _Order);
+    }
+
+    using _Base::load;
+    _NODISCARD _Ty load() const volatile noexcept {
+        static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
+        return const_cast<const atomic_ref*>(this)->_Base::load();
+    }
+
+    _NODISCARD _Ty load(const memory_order _Order) const volatile noexcept {
+        static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
+        return const_cast<const atomic_ref*>(this)->_Base::load(_Order);
+    }
+
+    using _Base::exchange;
+    _Ty exchange(const _Ty _Value) volatile noexcept {
+        static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
+        return const_cast<atomic_ref*>(this)->_Base::exchange(_Value);
+    }
+
+    _Ty exchange(const _Ty _Value, const memory_order _Order) volatile noexcept {
+        static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
+        return const_cast<atomic_ref*>(this)->_Base::exchange(_Value, _Order);
+    }
+
+    using _Base::compare_exchange_strong;
+    bool compare_exchange_strong(_Ty& _Expected, const _Ty _Desired) volatile noexcept {
+        static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
+        return const_cast<atomic_ref*>(this)->_Base::compare_exchange_strong(_Expected, _Desired);
+    }
+
+    bool compare_exchange_strong(_Ty& _Expected, const _Ty _Desired, const memory_order _Order) volatile noexcept {
+        static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
+        return const_cast<atomic_ref*>(this)->_Base::compare_exchange_strong(_Expected, _Desired, _Order);
+    }
+
+    bool compare_exchange_strong(_Ty& _Expected, const _Ty _Desired, const memory_order _Success,
+        const memory_order _Failure) volatile noexcept {
+        static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
+        return this->compare_exchange_strong(_Expected, _Desired, _Combine_cas_memory_orders(_Success, _Failure));
+    }
+
+    bool compare_exchange_strong(
+        _Ty& _Expected, const _Ty _Desired, const memory_order _Success, const memory_order _Failure) noexcept {
+        return this->compare_exchange_strong(_Expected, _Desired, _Combine_cas_memory_orders(_Success, _Failure));
+    }
+
+    bool compare_exchange_weak(_Ty& _Expected, const _Ty _Desired) volatile noexcept {
+        // we have no weak CAS intrinsics, even on ARM32/ARM64, so fall back to strong
+        static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
+        return this->compare_exchange_strong(_Expected, _Desired);
+    }
+
+    bool compare_exchange_weak(_Ty& _Expected, const _Ty _Desired) noexcept {
+        return this->compare_exchange_strong(_Expected, _Desired);
+    }
+
+    bool compare_exchange_weak(_Ty& _Expected, const _Ty _Desired, const memory_order _Order) volatile noexcept {
+        static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
+        return this->compare_exchange_strong(_Expected, _Desired, _Order);
+    }
+
+    bool compare_exchange_weak(_Ty& _Expected, const _Ty _Desired, const memory_order _Order) noexcept {
+        return this->compare_exchange_strong(_Expected, _Desired, _Order);
+    }
+
+    bool compare_exchange_weak(_Ty& _Expected, const _Ty _Desired, const memory_order _Success,
+        const memory_order _Failure) volatile noexcept {
+        static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
+        return this->compare_exchange_strong(_Expected, _Desired, _Combine_cas_memory_orders(_Success, _Failure));
+    }
+
+    bool compare_exchange_weak(
+        _Ty& _Expected, const _Ty _Desired, const memory_order _Success, const memory_order _Failure) noexcept {
+        return this->compare_exchange_strong(_Expected, _Desired, _Combine_cas_memory_orders(_Success, _Failure));
+    }
+
+    operator _Ty() const volatile noexcept {
+        static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
+        return this->load();
+    }
+
+    operator _Ty() const noexcept {
+        return this->load();
+    }
+};
+#endif // _HAS_CXX20
+
 // NONMEMBER OPERATIONS ON ATOMIC TYPES
 template <class _Ty>
 _NODISCARD bool atomic_is_lock_free(const volatile atomic<_Ty>* _Mem) noexcept {
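
Note (not part of the patch): the new `_Atomic_storage_types` trait is the pivot that lets one `_Atomic_storage` hierarchy back both `atomic<_Ty>`, which owns padded storage and takes its initial value by copy, and `atomic_ref<_Ty>`, which stores a reference to a caller-owned object and binds to it in the constructor. A minimal standalone sketch of that dispatch, using illustrative non-reserved names rather than the STL's internals:

```cpp
#include <type_traits>

template <class T>
struct PaddedValue { T value; }; // stand-in for _Atomic_padded<_Ty>

template <class T>
struct StorageTypes { // primary template: an owning atomic keeps its own value
    using ValueType   = T;
    using StorageType = PaddedValue<T>;
    using ConstrType  = const T; // constructor parameter: taken by value
};

template <class T>
struct StorageTypes<T&> { // specialization selected for the atomic_ref case
    using ValueType   = T;
    using StorageType = T&; // storage member is a reference to the user's object
    using ConstrType  = T&; // constructor parameter: binds to that object
};

static_assert(std::is_same_v<StorageTypes<int>::StorageType, PaddedValue<int>>);
static_assert(std::is_same_v<StorageTypes<int&>::StorageType, int&>);

int main() {} // compiles as-is; the checks mirror how _Atomic_storage_types<_Ty&> differs
```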
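Note (not part of the patch): the long comment inside `atomic_ref` about doing "the real implementation in the non-volatile function" describes a delegation pattern used throughout this header. A small sketch of the idea with a made-up class, assuming (as the comment states for the STL) that the underlying operations re-add `volatile` themselves:

```cpp
// Illustrative only; Counter is not an STL type.
struct Counter {
    int value = 0;

    int get() noexcept { // the real implementation lives in the non-volatile overload
        return value;
    }

    int get() volatile noexcept { // the volatile overload forwards via const_cast
        return const_cast<Counter*>(this)->get();
    }
};

int main() {
    Counter c;
    volatile Counter& vc = c; // a volatile view of a non-volatile object, so the cast is safe
    return c.get() + vc.get() - 2 * c.value; // both overloads share one implementation; returns 0
}
```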
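Note (not part of the patch): from the consumer side, the `atomic_ref` added here implements the standard C++20 interface, which lets threads perform atomic operations on an ordinary object they do not own, provided no non-atomic access overlaps the atomic ones. A quick usage sketch against the standard API:

```cpp
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
    int counter = 0; // plain, non-atomic object owned by the caller

    {
        std::vector<std::thread> workers;
        for (int i = 0; i < 4; ++i) {
            workers.emplace_back([&counter] {
                std::atomic_ref<int> ref{counter}; // all concurrent access goes through atomic_ref
                for (int j = 0; j < 10000; ++j) {
                    ref.fetch_add(1, std::memory_order_relaxed);
                }
            });
        }
        for (auto& t : workers) {
            t.join();
        }
    } // all atomic_ref objects are gone; plain access to counter is safe again

    std::printf("%d\n", counter); // prints 40000
    return 0;
}
```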