//! System Mutexes
//!
//! The Windows implementation of mutexes is a little odd and it may not be
//! immediately obvious what's going on. The primary oddness is that SRWLock is
//! used instead of CriticalSection, and this is done because:
//!
//! 1. SRWLock is several times faster than CriticalSection according to
//!    benchmarks performed on both Windows 8 and Windows 7.
//!
//! 2. CriticalSection allows recursive locking while SRWLock deadlocks. The
//!    Unix implementation deadlocks so consistency is preferred. See #19962 for
//!    more details.
//!
//! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
//!    is that there are no guarantees of fairness.
//!
//! The downside of this approach, however, is that SRWLock is not available on
//! Windows XP, so we continue to have a fallback implementation where
//! CriticalSection is used and we keep track of who's holding the mutex to
//! detect recursive locks.
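//!
//! As a rough usage sketch (assuming a caller that upholds the usual pairing
//! of these raw operations; the real call sites live elsewhere in `std`):
//!
//! ```ignore
//! let mut m = Mutex::new();
//! unsafe {
//!     m.init();
//!     m.lock();
//!     // ... critical section ...
//!     m.unlock();
//!     m.destroy();
//! }
//! ```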

use crate::cell::{Cell, UnsafeCell};
use crate::mem::{self, MaybeUninit};
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sys::c;

pub struct Mutex {
    // This is either directly an SRWLOCK (if supported), or a Box<Inner> otherwise.
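    //
    // Under the SRWLock path the word is used as the SRWLOCK itself (0 is
    // SRWLOCK_INIT). Under the CriticalSection fallback, 0 means the `Inner`
    // has not been allocated yet and any other value is the pointer to the
    // lazily boxed `Inner` (see `inner()` and `destroy()` below).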
    lock: AtomicUsize,
}

unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}

struct Inner {
    remutex: ReentrantMutex,
    held: Cell<bool>,
}

#[derive(Clone, Copy)]
enum Kind {
    SRWLock,
    CriticalSection,
}

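// Reinterpret the mutex's `AtomicUsize` word as a pointer to an SRWLOCK; the
// `debug_assert!` below checks that an SRWLOCK actually fits in that word.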
#[inline]
pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK {
    debug_assert!(mem::size_of::<c::SRWLOCK>() <= mem::size_of_val(&m.lock));
    &m.lock as *const _ as *mut _
}

impl Mutex {
    pub const fn new() -> Mutex {
        Mutex {
            // This works because SRWLOCK_INIT is 0 (wrapped in a struct), so we are also properly
            // initializing an SRWLOCK here.
            lock: AtomicUsize::new(0),
        }
    }
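    // `init` is a no-op: `new` already produced a zero word, which is both a
    // valid SRWLOCK (SRWLOCK_INIT) and the "not yet allocated" state of the
    // CriticalSection fallback.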
    #[inline]
    pub unsafe fn init(&mut self) {}
    pub unsafe fn lock(&self) {
        match kind() {
            Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),
            Kind::CriticalSection => {
                let inner = &*self.inner();
                inner.remutex.lock();
                if inner.held.replace(true) {
                    // It was already locked, so we got a recursive lock which we do not want.
                    inner.remutex.unlock();
                    panic!("cannot recursively lock a mutex");
                }
            }
        }
    }
    pub unsafe fn try_lock(&self) -> bool {
        match kind() {
            Kind::SRWLock => c::TryAcquireSRWLockExclusive(raw(self)) != 0,
            Kind::CriticalSection => {
                let inner = &*self.inner();
                if !inner.remutex.try_lock() {
                    false
                } else if inner.held.replace(true) {
                    // It was already locked, so we got a recursive lock which we do not want.
                    inner.remutex.unlock();
                    false
                } else {
                    true
                }
            }
        }
    }
    pub unsafe fn unlock(&self) {
        match kind() {
            Kind::SRWLock => c::ReleaseSRWLockExclusive(raw(self)),
            Kind::CriticalSection => {
                let inner = &*(self.lock.load(Ordering::SeqCst) as *const Inner);
                inner.held.set(false);
                inner.remutex.unlock();
            }
        }
    }
    pub unsafe fn destroy(&self) {
        match kind() {
            Kind::SRWLock => {}
            Kind::CriticalSection => match self.lock.load(Ordering::SeqCst) {
                0 => {}
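                // A nonzero word is the pointer to the lazily boxed `Inner`
                // published by `inner()`; reconstruct the Box so it is freed.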
                n => Box::from_raw(n as *mut Inner).remutex.destroy(),
            },
        }
    }

    unsafe fn inner(&self) -> *const Inner {
        match self.lock.load(Ordering::SeqCst) {
            0 => {}
            n => return n as *const _,
        }
        let inner = box Inner { remutex: ReentrantMutex::uninitialized(), held: Cell::new(false) };
        inner.remutex.init();
        let inner = Box::into_raw(inner);
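        // Publish the freshly allocated `Inner`. If another thread won the race
        // and already installed one (the swap observes a nonzero old value),
        // free ours and return theirs instead.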
        match self.lock.compare_and_swap(0, inner as usize, Ordering::SeqCst) {
            0 => inner,
            n => {
                Box::from_raw(inner).remutex.destroy();
                n as *const _
            }
        }
    }
}

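// Probe at runtime whether the SRWLock API is available; on systems without it
// (Windows XP) fall back to the CriticalSection-based implementation.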
fn kind() -> Kind {
    if c::AcquireSRWLockExclusive::is_available() { Kind::SRWLock } else { Kind::CriticalSection }
}

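// A reentrant mutex based on a Windows CRITICAL_SECTION. Unlike SRWLock it can
// be locked recursively by the same thread, which is why the fallback `Inner`
// pairs it with the `held` flag above to reject recursive locking.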
pub struct ReentrantMutex {
    inner: MaybeUninit<UnsafeCell<c::CRITICAL_SECTION>>,
}

unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}

impl ReentrantMutex {
    pub const fn uninitialized() -> ReentrantMutex {
        ReentrantMutex { inner: MaybeUninit::uninit() }
    }

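    // The CRITICAL_SECTION starts out uninitialized; `init` must be called
    // (as `inner()` does above) before `lock`/`try_lock`/`unlock` are used.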
    pub unsafe fn init(&self) {
        c::InitializeCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
    }

    pub unsafe fn lock(&self) {
        c::EnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
    }

    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        c::TryEnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr())) != 0
    }

    pub unsafe fn unlock(&self) {
        c::LeaveCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
    }

    pub unsafe fn destroy(&self) {
        c::DeleteCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
    }
}