diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index a7efe55cff0b2..755d2e11b667b 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -38,7 +38,6 @@ pub type LPWCH = *mut WCHAR;
 pub type LPWSTR = *mut WCHAR;
 pub type PLARGE_INTEGER = *mut c_longlong;
-pub type PSRWLOCK = *mut SRWLOCK;
 pub type socklen_t = c_int;
 pub type ADDRESS_FAMILY = USHORT;
 
@@ -355,6 +354,35 @@ compat_fn_optional! {
     pub fn WakeByAddressSingle(address: *const ::core::ffi::c_void);
 }
 
+compat_fn_optional! {
+    crate::sys::compat::load_try_enter_critical_section_function();
+    // >= NT 4
+    // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-tryentercriticalsection
+    pub fn TryEnterCriticalSection(lpcriticalsection: *mut CRITICAL_SECTION) -> BOOL;
+}
+
+compat_fn_optional! {
+    crate::sys::compat::load_srw_functions();
+    // >= Win7 / Server 2008 R2
+    // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-tryacquiresrwlockexclusive
+    pub fn TryAcquireSRWLockExclusive(srwlock: *mut SRWLOCK) -> BOOLEAN;
+    pub fn TryAcquireSRWLockShared(srwlock: *mut SRWLOCK) -> BOOLEAN;
+    // >= Vista / Server 2008
+    // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-acquiresrwlockexclusive
+    pub fn AcquireSRWLockExclusive(srwlock: *mut SRWLOCK) -> ();
+    pub fn AcquireSRWLockShared(srwlock: *mut SRWLOCK) -> ();
+    pub fn ReleaseSRWLockExclusive(srwlock: *mut SRWLOCK) -> ();
+    pub fn ReleaseSRWLockShared(srwlock: *mut SRWLOCK) -> ();
+    pub fn SleepConditionVariableSRW(
+        conditionvariable: *mut CONDITION_VARIABLE,
+        srwlock: *mut SRWLOCK,
+        dwmilliseconds: u32,
+        flags: u32,
+    ) -> BOOL;
+    pub fn WakeAllConditionVariable(conditionvariable: *mut CONDITION_VARIABLE) -> ();
+    pub fn WakeConditionVariable(conditionvariable: *mut CONDITION_VARIABLE) -> ();
+}
+
 compat_fn_with_fallback! {
     pub static NTDLL: &CStr = c"ntdll" => { load: true, unicows: false };
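The two `compat_fn_optional!` groups above are resolved at runtime rather than at link time, so the same std binary can run on systems where the SRW and `TryEnterCriticalSection` entry points do not exist. For readers unfamiliar with that mechanism, here is a minimal, self-contained sketch of the underlying idea: probing kernel32 with `GetModuleHandleA`/`GetProcAddress`. This is not the actual expansion of `compat_fn_optional!`, and the return types are simplified to raw pointers; it is Windows-only by nature.

```rust
use std::ffi::c_void;

#[link(name = "kernel32")]
extern "system" {
    // Simplified declarations for illustration; the real HMODULE/FARPROC types are
    // represented here as raw pointers.
    fn GetModuleHandleA(lpmodulename: *const u8) -> *mut c_void;
    fn GetProcAddress(hmodule: *mut c_void, lpprocname: *const u8) -> *mut c_void;
}

fn has_srw_try_apis() -> bool {
    unsafe {
        // kernel32 is always mapped into the process, so GetModuleHandleA is enough here;
        // no LoadLibrary call is needed.
        let kernel32 = GetModuleHandleA(b"kernel32\0".as_ptr());
        if kernel32.is_null() {
            return false;
        }
        !GetProcAddress(kernel32, b"TryAcquireSRWLockExclusive\0".as_ptr()).is_null()
    }
}

fn main() {
    println!("SRW `Try*` APIs available: {}", has_srw_try_apis());
}
```

In the patch itself the probe results are gathered once during startup and cached; see the `compat.rs` changes below.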
diff --git a/library/std/src/sys/windows/c/windows_sys.lst b/library/std/src/sys/windows/c/windows_sys.lst
index 9cf01d6f8434a..222c24c7e469d 100644
--- a/library/std/src/sys/windows/c/windows_sys.lst
+++ b/library/std/src/sys/windows/c/windows_sys.lst
@@ -2595,3 +2595,16 @@ Windows.Win32.UI.Shell.GetUserProfileDirectoryW
 // tidy-alphabetical-end
 
 Windows.Win32.System.LibraryLoader.LoadLibraryA
+
+// sync primitives
+Windows.Win32.System.Threading.CreateMutexA
+Windows.Win32.System.Threading.ReleaseMutex
+Windows.Win32.System.Threading.CreateEventA
+Windows.Win32.System.Threading.PulseEvent
+Windows.Win32.System.Threading.CRITICAL_SECTION
+Windows.Win32.System.Threading.InitializeCriticalSection
+Windows.Win32.System.Threading.EnterCriticalSection
+Windows.Win32.System.Threading.LeaveCriticalSection
+Windows.Win32.System.Threading.DeleteCriticalSection
+Windows.Win32.System.Threading.SRWLOCK
+Windows.Win32.System.Threading.CONDITION_VARIABLE
"system" { pub fn ReleaseSRWLockExclusive(srwlock: *mut SRWLOCK) -> (); } @@ -1191,6 +1232,39 @@ pub const CREATE_SUSPENDED: PROCESS_CREATION_FLAGS = 4u32; pub const CREATE_UNICODE_ENVIRONMENT: PROCESS_CREATION_FLAGS = 1024u32; pub const CREATE_WAITABLE_TIMER_HIGH_RESOLUTION: u32 = 2u32; pub const CREATE_WAITABLE_TIMER_MANUAL_RESET: u32 = 1u32; +#[repr(C)] +pub struct CRITICAL_SECTION { + pub DebugInfo: *mut CRITICAL_SECTION_DEBUG, + pub LockCount: i32, + pub RecursionCount: i32, + pub OwningThread: HANDLE, + pub LockSemaphore: HANDLE, + pub SpinCount: usize, +} +impl ::core::marker::Copy for CRITICAL_SECTION {} +impl ::core::clone::Clone for CRITICAL_SECTION { + fn clone(&self) -> Self { + *self + } +} +#[repr(C)] +pub struct CRITICAL_SECTION_DEBUG { + pub Type: u16, + pub CreatorBackTraceIndex: u16, + pub CriticalSection: *mut CRITICAL_SECTION, + pub ProcessLocksList: LIST_ENTRY, + pub EntryCount: u32, + pub ContentionCount: u32, + pub Flags: u32, + pub CreatorBackTraceIndexHigh: u16, + pub Identifier: u16, +} +impl ::core::marker::Copy for CRITICAL_SECTION_DEBUG {} +impl ::core::clone::Clone for CRITICAL_SECTION_DEBUG { + fn clone(&self) -> Self { + *self + } +} pub const CSTR_EQUAL: COMPARESTRING_RESULT = 2i32; pub const CSTR_GREATER_THAN: COMPARESTRING_RESULT = 3i32; pub const CSTR_LESS_THAN: COMPARESTRING_RESULT = 1i32; @@ -3653,6 +3727,17 @@ impl ::core::clone::Clone for LINGER { *self } } +#[repr(C)] +pub struct LIST_ENTRY { + pub Flink: *mut LIST_ENTRY, + pub Blink: *mut LIST_ENTRY, +} +impl ::core::marker::Copy for LIST_ENTRY {} +impl ::core::clone::Clone for LIST_ENTRY { + fn clone(&self) -> Self { + *self + } +} pub type LPOVERLAPPED_COMPLETION_ROUTINE = ::core::option::Option< unsafe extern "system" fn( dwerrorcode: u32, diff --git a/library/std/src/sys/windows/compat.rs b/library/std/src/sys/windows/compat.rs index b8f533bbb67ae..8aa6c772138c1 100644 --- a/library/std/src/sys/windows/compat.rs +++ b/library/std/src/sys/windows/compat.rs @@ -63,6 +63,12 @@ unsafe extern "C" fn init() { // because this function runs during global initialization. For example, DO NOT // do any dynamic allocation, don't call LoadLibrary, etc. + // check all the different synchronization primitives ... + load_try_enter_critical_section_function(); + load_srw_functions(); + // ... and init mutex downlevel compat based on it + super::locks::compat::init(); + // Attempt to preload the synch functions. load_synch_functions(); #[cfg(not(target_vendor = "uwp"))] @@ -405,3 +411,45 @@ pub(super) fn load_stack_overflow_functions() { try_load(); } + +pub(super) fn load_try_enter_critical_section_function() { + fn try_load() -> Option<()> { + const MODULE_NAME: &CStr = c"kernel32"; + + let library = unsafe { Module::new(MODULE_NAME) }?; + static_load!(library, [TryEnterCriticalSection]); + Some(()) + } + + try_load(); +} + +pub(super) fn load_srw_functions() { + fn try_load() -> Option<()> { + const MODULE_NAME: &CStr = c"kernel32"; + + // Try loading the library and all the required functions. + // If any step fails, then they all fail. 
diff --git a/library/std/src/sys/windows/locks/condvar.rs b/library/std/src/sys/windows/locks/condvar.rs
index 66fafa2c00b00..1a0f3553809ef 100644
--- a/library/std/src/sys/windows/locks/condvar.rs
+++ b/library/std/src/sys/windows/locks/condvar.rs
@@ -1,11 +1,36 @@
+use super::compat::{MutexKind, MUTEX_KIND};
 use crate::cell::UnsafeCell;
+use crate::io;
+use crate::mem::ManuallyDrop;
+use crate::ops::Deref;
+use crate::ptr;
 use crate::sys::c;
-use crate::sys::locks::{mutex, Mutex};
+use crate::sys::cvt;
+use crate::sys::locks::Mutex;
 use crate::sys::os;
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
 use crate::time::Duration;
 
 pub struct Condvar {
-    inner: UnsafeCell<c::CONDITION_VARIABLE>,
+    inner: LazyBox<CondvarImpl>,
+}
+
+union CondvarImpl {
+    srw: ManuallyDrop<UnsafeCell<c::CONDITION_VARIABLE>>,
+    event: c::HANDLE,
+}
+
+impl Drop for CondvarImpl {
+    fn drop(&mut self) {
+        unsafe {
+            match MUTEX_KIND {
+                MutexKind::SrwLock => {}
+                MutexKind::CriticalSection | MutexKind::Legacy => {
+                    cvt(c::CloseHandle(self.event)).unwrap();
+                }
+            }
+        }
+    }
 }
 
 unsafe impl Send for Condvar {}
@@ -14,37 +39,125 @@ unsafe impl Sync for Condvar {}
 impl Condvar {
     #[inline]
     pub const fn new() -> Condvar {
-        Condvar { inner: UnsafeCell::new(c::CONDITION_VARIABLE_INIT) }
+        Condvar { inner: LazyBox::new() }
     }
 
     #[inline]
     pub unsafe fn wait(&self, mutex: &Mutex) {
-        let r = c::SleepConditionVariableSRW(self.inner.get(), mutex::raw(mutex), c::INFINITE, 0);
-        debug_assert!(r != 0);
+        let inner = self.inner.deref();
+
+        match MUTEX_KIND {
+            MutexKind::SrwLock => {
+                let mutex = mutex.inner.deref();
+                let r = c::SleepConditionVariableSRW(
+                    inner.srw.get(),
+                    mutex.srwlock.inner.get(),
+                    c::INFINITE,
+                    0,
+                );
+                debug_assert!(r != 0);
+            }
+            MutexKind::CriticalSection | MutexKind::Legacy => {
+                mutex.unlock();
+                if (c::WaitForSingleObject(inner.event, c::INFINITE)) != c::WAIT_OBJECT_0 {
+                    panic!("event wait failed: {}", io::Error::last_os_error())
+                }
+                mutex.lock();
+            }
+        }
     }
 
     pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
-        let r = c::SleepConditionVariableSRW(
-            self.inner.get(),
-            mutex::raw(mutex),
-            crate::sys::windows::dur2timeout(dur),
-            0,
-        );
-        if r == 0 {
-            debug_assert_eq!(os::errno() as usize, c::ERROR_TIMEOUT as usize);
-            false
-        } else {
-            true
+        let inner = self.inner.deref();
+
+        match MUTEX_KIND {
+            MutexKind::SrwLock => {
+                let mutex = mutex.inner.deref();
+                let r = c::SleepConditionVariableSRW(
+                    inner.srw.get(),
+                    mutex.srwlock.inner.get(),
+                    crate::sys::windows::dur2timeout(dur),
+                    0,
+                );
+                if r == 0 {
+                    debug_assert_eq!(os::errno() as usize, c::ERROR_TIMEOUT as usize);
+                    false
+                } else {
+                    true
+                }
+            }
+            MutexKind::CriticalSection | MutexKind::Legacy => {
+                mutex.unlock();
+                let ret = match c::WaitForSingleObject(
+                    inner.event,
+                    crate::sys::windows::dur2timeout(dur),
+                ) {
+                    c::WAIT_OBJECT_0 => true,
+                    c::WAIT_TIMEOUT => false,
+                    _ => panic!("event wait failed: {}", io::Error::last_os_error()),
+                };
+                mutex.lock();
+                ret
+            }
         }
     }
 
     #[inline]
     pub fn notify_one(&self) {
-        unsafe { c::WakeConditionVariable(self.inner.get()) }
+        let inner = self.inner.deref();
+
+        unsafe {
+            match MUTEX_KIND {
+                MutexKind::SrwLock => c::WakeConditionVariable(inner.srw.get()),
+                MutexKind::CriticalSection | MutexKind::Legacy => {
+                    // This currently wakes up all threads, but spurious wakeups are allowed,
+                    // so it only costs some performance.
+                    cvt(c::PulseEvent(inner.event)).unwrap();
+                }
+            }
+        }
     }
 
     #[inline]
     pub fn notify_all(&self) {
-        unsafe { c::WakeAllConditionVariable(self.inner.get()) }
+        let inner = self.inner.deref();
+
+        unsafe {
+            match MUTEX_KIND {
+                MutexKind::SrwLock => c::WakeAllConditionVariable(inner.srw.get()),
+                MutexKind::CriticalSection | MutexKind::Legacy => {
+                    cvt(c::PulseEvent(inner.event)).unwrap();
+                }
+            }
+        }
     }
 }
+
+impl LazyInit for CondvarImpl {
+    fn init() -> Box<Self> {
+        Box::new(unsafe {
+            match MUTEX_KIND {
+                MutexKind::SrwLock => CondvarImpl {
+                    srw: ManuallyDrop::new(UnsafeCell::new(c::CONDITION_VARIABLE_INIT)),
+                },
+                MutexKind::CriticalSection | MutexKind::Legacy => {
+                    let event = c::CreateEventA(
+                        ptr::null_mut(),
+                        c::TRUE, // manual reset event
+                        c::FALSE,
+                        ptr::null(),
+                    );
+
+                    if event.is_null() {
+                        panic!("failed creating event: {}", io::Error::last_os_error());
+                    }
+
+                    CondvarImpl { event }
+                }
+            }
+        })
+    }
+
+    fn cancel_init(_: Box<Self>) {}
+    fn destroy(_: Box<Self>) {}
+}
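The event-based fallback above leans on the fact that `Condvar` users must already tolerate spurious wakeups: `PulseEvent` on a manual-reset event releases every waiter, so a `notify_one` may behave like a `notify_all`. The usual predicate-loop pattern absorbs that. The following sketch uses only the public std API and is unrelated to this patch; it just shows why the "wake too many" behavior is semantically acceptable.

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);

    thread::spawn(move || {
        let (lock, cvar) = &*pair2;
        *lock.lock().unwrap() = true;
        cvar.notify_one();
    });

    let (lock, cvar) = &*pair;
    let mut ready = lock.lock().unwrap();
    while !*ready {
        // Spurious wakeups, or a notify_one that woke every waiter, simply loop back here
        // and re-check the predicate.
        ready = cvar.wait(ready).unwrap();
    }
}
```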
diff --git a/library/std/src/sys/windows/locks/mod.rs b/library/std/src/sys/windows/locks/mod.rs
index 0e0f9eccb2137..e0e2aed44ca48 100644
--- a/library/std/src/sys/windows/locks/mod.rs
+++ b/library/std/src/sys/windows/locks/mod.rs
@@ -2,5 +2,5 @@ mod condvar;
 mod mutex;
 mod rwlock;
 pub use condvar::Condvar;
-pub use mutex::Mutex;
+pub use mutex::{compat, Mutex};
 pub use rwlock::RwLock;
diff --git a/library/std/src/sys/windows/locks/mutex.rs b/library/std/src/sys/windows/locks/mutex.rs
index ef2f84082cd5c..afdee5a93669c 100644
--- a/library/std/src/sys/windows/locks/mutex.rs
+++ b/library/std/src/sys/windows/locks/mutex.rs
@@ -14,41 +14,171 @@
 //! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
 //!    is that there are no guarantees of fairness.
 
+use self::compat::{MutexKind, MUTEX_KIND};
 use crate::cell::UnsafeCell;
-use crate::sys::c;
+use crate::mem::ManuallyDrop;
+use crate::ops::Deref;
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+
+pub mod compat;
+mod critical_section_mutex;
+mod legacy_mutex;
+mod srwlock;
+
+pub union InnerMutex {
+    pub(super) srwlock: ManuallyDrop<srwlock::SrwLock>,
+    critical_section: ManuallyDrop<critical_section_mutex::CriticalSectionMutex>,
+    legacy: ManuallyDrop<legacy_mutex::LegacyMutex>,
+}
+
+impl Drop for InnerMutex {
+    #[inline]
+    fn drop(&mut self) {
+        unsafe {
+            match MUTEX_KIND {
+                MutexKind::SrwLock => ManuallyDrop::drop(&mut self.srwlock),
+                MutexKind::CriticalSection => {
+                    if self.critical_section.destroy() {
+                        ManuallyDrop::drop(&mut self.critical_section);
+                    } else {
+                        // The mutex is locked. This happens if a MutexGuard is leaked.
+                        // In this case, we just leak the Mutex too.
+                    }
+                }
+                MutexKind::Legacy => ManuallyDrop::drop(&mut self.legacy),
+            }
+        }
+    }
+}
 
 pub struct Mutex {
-    srwlock: UnsafeCell<c::SRWLOCK>,
+    pub inner: LazyBox<InnerMutex>,
+    // Used to prevent reentrancy for critical sections and legacy mutexes:
+    //
+    // > The exact behavior on locking a mutex in the thread which already holds the lock is left
+    // > unspecified. However, this function will not return on the second call (it might panic or
+    // > deadlock, for example).
+    held: UnsafeCell<bool>,
 }
 
 unsafe impl Send for Mutex {}
 unsafe impl Sync for Mutex {}
 
-#[inline]
-pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK {
-    m.srwlock.get()
-}
-
 impl Mutex {
     #[inline]
     pub const fn new() -> Mutex {
-        Mutex { srwlock: UnsafeCell::new(c::SRWLOCK_INIT) }
+        Mutex { inner: LazyBox::new(), held: UnsafeCell::new(false) }
     }
 
     #[inline]
     pub fn lock(&self) {
+        let m = self.inner.deref();
+
         unsafe {
-            c::AcquireSRWLockExclusive(raw(self));
+            match MUTEX_KIND {
+                MutexKind::SrwLock => m.srwlock.write(),
+                MutexKind::CriticalSection => {
+                    m.critical_section.lock();
+                    if !self.flag_locked() {
+                        self.unlock();
+                        panic!("cannot recursively lock a mutex");
+                    }
+                }
+                MutexKind::Legacy => {
+                    m.legacy.lock();
+                    if !self.flag_locked() {
+                        self.unlock();
+                        panic!("cannot recursively lock a mutex");
+                    }
+                }
+            }
         }
     }
 
     #[inline]
     pub fn try_lock(&self) -> bool {
-        unsafe { c::TryAcquireSRWLockExclusive(raw(self)) != 0 }
+        let m = self.inner.deref();
+
+        unsafe {
+            match MUTEX_KIND {
+                MutexKind::SrwLock => m.srwlock.try_write(),
+                MutexKind::CriticalSection => {
+                    if !m.critical_section.try_lock() {
+                        false
+                    } else if self.flag_locked() {
+                        true
+                    } else {
+                        self.unlock();
+                        false
+                    }
+                }
+                MutexKind::Legacy => {
+                    if !m.legacy.try_lock() {
+                        false
+                    } else if self.flag_locked() {
+                        true
+                    } else {
+                        self.unlock();
+                        false
+                    }
+                }
+            }
+        }
     }
 
     #[inline]
     pub unsafe fn unlock(&self) {
-        c::ReleaseSRWLockExclusive(raw(self));
+        let m = self.inner.deref();
+
+        unsafe {
+            match MUTEX_KIND {
+                MutexKind::SrwLock => m.srwlock.write_unlock(),
+                MutexKind::CriticalSection => {
+                    *self.held.get() = false;
+                    m.critical_section.unlock();
+                }
+                MutexKind::Legacy => {
+                    *self.held.get() = false;
+                    m.legacy.unlock();
+                }
+            }
+        }
+    }
+
+    unsafe fn flag_locked(&self) -> bool {
+        if *self.held.get() {
+            false
+        } else {
+            *self.held.get() = true;
+            true
+        }
     }
 }
+
+impl LazyInit for InnerMutex {
+    fn init() -> Box<Self> {
+        unsafe {
+            match MUTEX_KIND {
+                MutexKind::SrwLock => {
+                    Box::new(InnerMutex { srwlock: ManuallyDrop::new(srwlock::SrwLock::new()) })
+                }
+                MutexKind::CriticalSection => {
+                    let boxed = Box::new(InnerMutex {
+                        critical_section: ManuallyDrop::new(
+                            critical_section_mutex::CriticalSectionMutex::new(),
+                        ),
+                    });
+                    boxed.critical_section.init();
+
+                    boxed
+                }
+                MutexKind::Legacy => Box::new(InnerMutex {
+                    legacy: ManuallyDrop::new(legacy_mutex::LegacyMutex::new()),
+                }),
+            }
+        }
+    }
+
+    fn cancel_init(_: Box<Self>) {}
+    fn destroy(_: Box<Self>) {}
+}
diff --git a/library/std/src/sys/windows/locks/mutex/compat.rs b/library/std/src/sys/windows/locks/mutex/compat.rs
new file mode 100644
index 0000000000000..64eb79c7ef259
--- /dev/null
+++ b/library/std/src/sys/windows/locks/mutex/compat.rs
@@ -0,0 +1,27 @@
+use crate::sys::c;
+
+#[derive(Debug, PartialEq)]
+pub enum MutexKind {
+    /// Win 7+ (Vista doesn't support the `Try*` APIs)
+    SrwLock,
+    /// NT 4+ (9x/ME/NT 3.x support critical sections, but not `TryEnterCriticalSection`)
+    CriticalSection,
+    /// Good ol' `CreateMutex`, available everywhere
+    Legacy,
+}
+
+pub static mut MUTEX_KIND: MutexKind = MutexKind::SrwLock;
+
+pub fn init() {
+    let kind = if c::TryAcquireSRWLockExclusive::option().is_some() {
+        MutexKind::SrwLock
+    } else if c::TryEnterCriticalSection::option().is_some() {
+        MutexKind::CriticalSection
+    } else {
+        MutexKind::Legacy
+    };
+
+    unsafe {
+        MUTEX_KIND = kind;
+    }
+}
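The `held` flag exists because critical sections and Win32 mutexes are recursive, while SRW locks (and std's `Mutex` contract) are not; without it, a re-lock on the owning thread would silently succeed on the fallback paths instead of panicking. From safe user code the observable behavior is the same on every backend. A small illustration (not a test from this patch):

```rust
use std::sync::Mutex;

fn main() {
    let m = Mutex::new(0);
    let _guard = m.lock().unwrap();

    // A second, re-entrant lock() on this thread is not allowed by the std contract (it may
    // deadlock or panic). try_lock() reports the situation as WouldBlock on every backend,
    // including the CriticalSection/legacy fallbacks, thanks to the `held` flag.
    assert!(m.try_lock().is_err());
}
```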
diff --git a/library/std/src/sys/windows/locks/mutex/critical_section_mutex.rs b/library/std/src/sys/windows/locks/mutex/critical_section_mutex.rs
new file mode 100644
index 0000000000000..2cc7d0a851389
--- /dev/null
+++ b/library/std/src/sys/windows/locks/mutex/critical_section_mutex.rs
@@ -0,0 +1,56 @@
+use crate::cell::UnsafeCell;
+use crate::mem::MaybeUninit;
+use crate::sys::c;
+
+/// Mutex based on critical sections.
+///
+/// Critical sections are available on all Windows versions, but `TryEnterCriticalSection` was
+/// only added with NT 4, and never to the 9x range.
+///
+/// It cannot be directly `const`-created as it needs to be initialized, and cannot be moved after
+/// initialization. The top-level `Mutex` type handles the boxing.
+pub struct CriticalSectionMutex {
+    inner: MaybeUninit<UnsafeCell<c::CRITICAL_SECTION>>,
+}
+
+unsafe impl Send for CriticalSectionMutex {}
+unsafe impl Sync for CriticalSectionMutex {}
+
+impl CriticalSectionMutex {
+    #[inline]
+    pub const fn new() -> Self {
+        Self { inner: MaybeUninit::uninit() }
+    }
+
+    #[inline]
+    pub unsafe fn init(&self) {
+        c::InitializeCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
+    }
+
+    #[inline]
+    pub unsafe fn lock(&self) {
+        c::EnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
+    }
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        c::TryEnterCriticalSection::call(UnsafeCell::raw_get(self.inner.as_ptr())) != 0
+    }
+
+    #[inline]
+    pub unsafe fn unlock(&self) {
+        c::LeaveCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
+    }
+
+    #[inline]
+    pub unsafe fn destroy(&self) -> bool {
+        if self.try_lock() {
+            self.unlock();
+            c::DeleteCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
+            true
+        } else {
+            // The mutex is still locked, so it cannot be destroyed; the caller needs to leak it.
+            false
+        }
+    }
+}
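Because the critical section is initialized in place and, per the doc comment above, must not move afterwards, the top-level `Mutex` routes it through `LazyBox` so it ends up behind a stable heap address. A rough approximation of that "allocate lazily, never move" idea using only public std APIs (`LazyBox` itself is internal to std's sys layer; the type below is a hypothetical stand-in, not a real OS object):

```rust
use std::sync::OnceLock;

// Stand-in for an OS object that must be initialized in place and never moved afterwards.
struct OsObject {
    _raw: [u8; 40],
}

static SLOT: OnceLock<Box<OsObject>> = OnceLock::new();

fn stable_ptr() -> *const OsObject {
    // The Box is allocated exactly once; its heap address never changes afterwards, even
    // though the OnceLock itself lives in static storage.
    let boxed: &Box<OsObject> = SLOT.get_or_init(|| Box::new(OsObject { _raw: [0; 40] }));
    &**boxed as *const OsObject
}

fn main() {
    assert_eq!(stable_ptr(), stable_ptr());
}
```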
diff --git a/library/std/src/sys/windows/locks/mutex/legacy_mutex.rs b/library/std/src/sys/windows/locks/mutex/legacy_mutex.rs
new file mode 100644
index 0000000000000..29388407fd5ee
--- /dev/null
+++ b/library/std/src/sys/windows/locks/mutex/legacy_mutex.rs
@@ -0,0 +1,54 @@
+use crate::io;
+use crate::ptr;
+use crate::sys::{c, cvt};
+
+/// Mutex based on `CreateMutex`. Slow, but available everywhere.
+///
+/// Doesn't need to stay fixed in place, so it doesn't need to be boxed.
+#[repr(transparent)]
+pub struct LegacyMutex(c::HANDLE);
+
+unsafe impl Send for LegacyMutex {}
+unsafe impl Sync for LegacyMutex {}
+
+impl LegacyMutex {
+    #[inline]
+    pub unsafe fn new() -> Self {
+        let handle = c::CreateMutexA(ptr::null_mut(), c::FALSE, ptr::null());
+
+        if handle.is_null() {
+            panic!("failed creating mutex: {}", io::Error::last_os_error());
+        }
+        Self(handle)
+    }
+
+    #[inline]
+    pub unsafe fn lock(&self) {
+        if c::WaitForSingleObject(self.0, c::INFINITE) != c::WAIT_OBJECT_0 {
+            panic!("mutex lock failed: {}", io::Error::last_os_error())
+        }
+    }
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        match c::WaitForSingleObject(self.0, 0) {
+            c::WAIT_OBJECT_0 => true,
+            c::WAIT_TIMEOUT => false,
+            _ => panic!("try lock error: {}", io::Error::last_os_error()),
+        }
+    }
+
+    #[inline]
+    pub unsafe fn unlock(&self) {
+        cvt(c::ReleaseMutex(self.0)).unwrap();
+    }
+}
+
+impl Drop for LegacyMutex {
+    #[inline]
+    fn drop(&mut self) {
+        unsafe {
+            cvt(c::CloseHandle(self.0)).unwrap();
+        }
+    }
+}
diff --git a/library/std/src/sys/windows/locks/mutex/srwlock.rs b/library/std/src/sys/windows/locks/mutex/srwlock.rs
new file mode 100644
index 0000000000000..6b2bac6628da6
--- /dev/null
+++ b/library/std/src/sys/windows/locks/mutex/srwlock.rs
@@ -0,0 +1,40 @@
+use crate::cell::UnsafeCell;
+use crate::sys::c;
+
+pub struct SrwLock {
+    pub inner: UnsafeCell<c::SRWLOCK>,
+}
+
+unsafe impl Send for SrwLock {}
+unsafe impl Sync for SrwLock {}
+
+impl SrwLock {
+    #[inline]
+    pub const fn new() -> SrwLock {
+        SrwLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) }
+    }
+    #[inline]
+    pub fn read(&self) {
+        unsafe { c::AcquireSRWLockShared(self.inner.get()) }
+    }
+    #[inline]
+    pub fn try_read(&self) -> bool {
+        unsafe { c::TryAcquireSRWLockShared(self.inner.get()) != 0 }
+    }
+    #[inline]
+    pub fn write(&self) {
+        unsafe { c::AcquireSRWLockExclusive(self.inner.get()) }
+    }
+    #[inline]
+    pub fn try_write(&self) -> bool {
+        unsafe { c::TryAcquireSRWLockExclusive(self.inner.get()) != 0 }
+    }
+    #[inline]
+    pub unsafe fn read_unlock(&self) {
+        c::ReleaseSRWLockShared(self.inner.get())
+    }
+    #[inline]
+    pub unsafe fn write_unlock(&self) {
+        c::ReleaseSRWLockExclusive(self.inner.get())
+    }
+}
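One subtlety of the `WaitForSingleObject`-based paths above: a Win32 mutex wait can also return `WAIT_ABANDONED` (the previous owner exited while holding the mutex) or `WAIT_FAILED`, and the code treats both like any other unexpected result and panics. For reference, a small illustrative helper, not part of the patch; the constant values follow the Win32 documentation:

```rust
// Possible WaitForSingleObject results for a mutex wait.
const WAIT_OBJECT_0: u32 = 0x0000_0000;
const WAIT_ABANDONED: u32 = 0x0000_0080;
const WAIT_TIMEOUT: u32 = 0x0000_0102;
const WAIT_FAILED: u32 = 0xFFFF_FFFF;

fn describe_wait(result: u32) -> &'static str {
    match result {
        WAIT_OBJECT_0 => "acquired",
        WAIT_ABANDONED => "acquired, but the previous owner exited while holding the mutex",
        WAIT_TIMEOUT => "timed out, not acquired",
        WAIT_FAILED => "call failed, consult GetLastError",
        _ => "unexpected result",
    }
}

fn main() {
    assert_eq!(describe_wait(WAIT_TIMEOUT), "timed out, not acquired");
}
```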
diff --git a/library/std/src/sys/windows/locks/rwlock.rs b/library/std/src/sys/windows/locks/rwlock.rs
index e69415baac42b..98bf3789e4775 100644
--- a/library/std/src/sys/windows/locks/rwlock.rs
+++ b/library/std/src/sys/windows/locks/rwlock.rs
@@ -1,8 +1,11 @@
-use crate::cell::UnsafeCell;
-use crate::sys::c;
+use super::{
+    compat::{MutexKind, MUTEX_KIND},
+    Mutex,
+};
+use crate::ops::Deref;
 
 pub struct RwLock {
-    inner: UnsafeCell<c::SRWLOCK>,
+    pub(super) inner: Mutex,
 }
 
 unsafe impl Send for RwLock {}
@@ -11,30 +14,48 @@ unsafe impl Sync for RwLock {}
 impl RwLock {
     #[inline]
     pub const fn new() -> RwLock {
-        RwLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) }
+        RwLock { inner: Mutex::new() }
     }
     #[inline]
-    pub fn read(&self) {
-        unsafe { c::AcquireSRWLockShared(self.inner.get()) }
+    pub unsafe fn read(&self) {
+        match MUTEX_KIND {
+            MutexKind::SrwLock => self.inner.inner.deref().srwlock.read(),
+            MutexKind::CriticalSection | MutexKind::Legacy => self.inner.lock(),
+        }
    }
     #[inline]
-    pub fn try_read(&self) -> bool {
-        unsafe { c::TryAcquireSRWLockShared(self.inner.get()) != 0 }
+    pub unsafe fn try_read(&self) -> bool {
+        match MUTEX_KIND {
+            MutexKind::SrwLock => self.inner.inner.deref().srwlock.try_read(),
+            MutexKind::CriticalSection | MutexKind::Legacy => self.inner.try_lock(),
+        }
     }
     #[inline]
-    pub fn write(&self) {
-        unsafe { c::AcquireSRWLockExclusive(self.inner.get()) }
+    pub unsafe fn write(&self) {
+        match MUTEX_KIND {
+            MutexKind::SrwLock => self.inner.inner.deref().srwlock.write(),
+            MutexKind::CriticalSection | MutexKind::Legacy => self.inner.lock(),
+        }
     }
     #[inline]
-    pub fn try_write(&self) -> bool {
-        unsafe { c::TryAcquireSRWLockExclusive(self.inner.get()) != 0 }
+    pub unsafe fn try_write(&self) -> bool {
+        match MUTEX_KIND {
+            MutexKind::SrwLock => self.inner.inner.deref().srwlock.try_write(),
+            MutexKind::CriticalSection | MutexKind::Legacy => self.inner.try_lock(),
+        }
     }
     #[inline]
     pub unsafe fn read_unlock(&self) {
-        c::ReleaseSRWLockShared(self.inner.get())
+        match MUTEX_KIND {
+            MutexKind::SrwLock => self.inner.inner.deref().srwlock.read_unlock(),
+            MutexKind::CriticalSection | MutexKind::Legacy => self.inner.unlock(),
+        }
     }
     #[inline]
     pub unsafe fn write_unlock(&self) {
-        c::ReleaseSRWLockExclusive(self.inner.get())
+        match MUTEX_KIND {
+            MutexKind::SrwLock => self.inner.inner.deref().srwlock.write_unlock(),
+            MutexKind::CriticalSection | MutexKind::Legacy => self.inner.unlock(),
+        }
     }
 }
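On the fallback kinds, `RwLock` degrades to an exclusive lock: shared (`read`) acquisitions go through the same mutex as writers, so concurrent readers serialize. On systems without the SRW `Try*` APIs that is a throughput regression, not a semantic one; ordinary std usage keeps working unchanged, as in this illustrative snippet:

```rust
use std::sync::RwLock;

fn main() {
    let lock = RwLock::new(vec![1, 2, 3]);
    {
        let r = lock.read().unwrap();
        assert_eq!(r.len(), 3);
    } // the read guard is dropped before writing
    lock.write().unwrap().push(4);
    assert_eq!(lock.read().unwrap().len(), 4);
}
```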