//! System Mutexes
//!
//! The Windows implementation of mutexes is a little odd and it may not be
//! immediately obvious what's going on. The primary oddness is that SRWLock is
//! used instead of CriticalSection, and this is done because:
//!
//! 1. SRWLock is several times faster than CriticalSection according to
//!    benchmarks performed on both Windows 8 and Windows 7.
//!
//! 2. CriticalSection allows recursive locking while SRWLock deadlocks. The
//!    Unix implementation deadlocks so consistency is preferred. See #19962 for
//!    more details.
//!
//! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
//!    is that there are no guarantees of fairness.
//!
//! The downside of this approach, however, is that SRWLock is not available on
//! Windows XP, so we continue to have a fallback implementation where
//! CriticalSection is used and we keep track of who's holding the mutex to
//! detect recursive locks.
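//!
//! A minimal usage sketch of this internal type (illustrative only; in
//! practice the platform-independent wrappers elsewhere in `std` drive it
//! roughly like this):
//!
//! ```ignore
//! let mut m = Mutex::new();
//! unsafe {
//!     m.init();
//!     m.lock();
//!     // ...critical section...
//!     m.unlock();
//!     m.destroy();
//! }
//! ```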

use crate::cell::{Cell, UnsafeCell};
use crate::mem::{self, MaybeUninit};
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sys::c;

pub struct Mutex {
    // This is either directly an SRWLOCK (if supported), or a Box<Inner> otherwise.
    lock: AtomicUsize,
}

unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}

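// Heap-allocated state for the fallback implementation: a reentrant critical
// section plus a flag tracking whether the mutex is currently held, used to
// detect and reject recursive locking.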
struct Inner {
    remutex: ReentrantMutex,
    held: Cell<bool>,
}

#[derive(Clone, Copy)]
enum Kind {
    SRWLock,
    CriticalSection,
}

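// Returns the storage of `m.lock` reinterpreted as a raw SRWLOCK pointer.
// This is only meaningful on the `Kind::SRWLock` path; other SRW-based
// primitives in this module's siblings (e.g. the condition variable) can use
// it to operate on the same lock.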
#[inline]
pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK {
    debug_assert!(mem::size_of::<c::SRWLOCK>() <= mem::size_of_val(&m.lock));
    &m.lock as *const _ as *mut _
}

impl Mutex {
    pub const fn new() -> Mutex {
        Mutex {
            // This works because SRWLOCK_INIT is 0 (wrapped in a struct), so we are also properly
            // initializing an SRWLOCK here.
            lock: AtomicUsize::new(0),
        }
    }
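    // No runtime initialization is needed: the zero-initialized `AtomicUsize`
    // above is already a valid `SRWLOCK` (see `new`), and the fallback `Inner`
    // state is allocated lazily in `inner()`.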
    #[inline]
    pub unsafe fn init(&mut self) {}
    pub unsafe fn lock(&self) {
        match kind() {
            Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),
            Kind::CriticalSection => {
                let inner = &*self.inner();
                inner.remutex.lock();
                if inner.held.replace(true) {
                    // It was already locked, so we got a recursive lock which we do not want.
                    inner.remutex.unlock();
                    panic!("cannot recursively lock a mutex");
                }
            }
        }
    }
    pub unsafe fn try_lock(&self) -> bool {
        match kind() {
            Kind::SRWLock => c::TryAcquireSRWLockExclusive(raw(self)) != 0,
            Kind::CriticalSection => {
                let inner = &*self.inner();
                if !inner.remutex.try_lock() {
                    false
                } else if inner.held.replace(true) {
                    // It was already locked, so we got a recursive lock which we do not want.
                    inner.remutex.unlock();
                    false
                } else {
                    true
                }
            }
        }
    }
    pub unsafe fn unlock(&self) {
        match kind() {
            Kind::SRWLock => c::ReleaseSRWLockExclusive(raw(self)),
            Kind::CriticalSection => {
                let inner = &*(self.lock.load(Ordering::SeqCst) as *const Inner);
                inner.held.set(false);
                inner.remutex.unlock();
            }
        }
    }
    pub unsafe fn destroy(&self) {
        match kind() {
            Kind::SRWLock => {}
            Kind::CriticalSection => match self.lock.load(Ordering::SeqCst) {
                0 => {}
                n => Box::from_raw(n as *mut Inner).remutex.destroy(),
            },
        }
    }

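    // Lazily allocates the fallback `Inner` state on first use. Several
    // threads may race to create it: the loser of the `compare_and_swap`
    // destroys its own freshly-made allocation and returns the pointer that
    // the winner published in `self.lock`.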
    unsafe fn inner(&self) -> *const Inner {
        match self.lock.load(Ordering::SeqCst) {
            0 => {}
            n => return n as *const _,
        }
        let inner = box Inner { remutex: ReentrantMutex::uninitialized(), held: Cell::new(false) };
        inner.remutex.init();
        let inner = Box::into_raw(inner);
        match self.lock.compare_and_swap(0, inner as usize, Ordering::SeqCst) {
            0 => inner,
            n => {
                Box::from_raw(inner).remutex.destroy();
                n as *const _
            }
        }
    }
}

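// Runtime check for whether the SRW lock APIs are available (they are missing
// on Windows XP); otherwise we fall back to the CriticalSection implementation.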
fn kind() -> Kind {
    if c::AcquireSRWLockExclusive::is_available() { Kind::SRWLock } else { Kind::CriticalSection }
}

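// CRITICAL_SECTION-based reentrant mutex backing the `Kind::CriticalSection`
// fallback path above.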
pub struct ReentrantMutex {
    inner: MaybeUninit<UnsafeCell<c::CRITICAL_SECTION>>,
}

unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}

impl ReentrantMutex {
    pub const fn uninitialized() -> ReentrantMutex {
        ReentrantMutex { inner: MaybeUninit::uninit() }
    }

    pub unsafe fn init(&self) {
        c::InitializeCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
    }

    pub unsafe fn lock(&self) {
        c::EnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
    }

    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        c::TryEnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr())) != 0
    }

    pub unsafe fn unlock(&self) {
        c::LeaveCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
    }

    pub unsafe fn destroy(&self) {
        c::DeleteCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
    }
}