// src/libstd/sys/windows/mutex.rs
//! System Mutexes
//!
//! The Windows implementation of mutexes is a little odd and it may not be
//! immediately obvious what's going on. The primary oddness is that SRWLock is
//! used instead of CriticalSection, and this is done because:
//!
//! 1. SRWLock is several times faster than CriticalSection according to
//!    benchmarks performed on both Windows 8 and Windows 7.
//!
//! 2. CriticalSection allows recursive locking while SRWLock deadlocks. The
//!    Unix implementation deadlocks so consistency is preferred. See #19962 for
//!    more details.
//!
//! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
//!    is that there are no guarantees of fairness.
//!
//! The downside of this approach, however, is that SRWLock is not available on
//! Windows XP, so we continue to have a fallback implementation where
//! CriticalSection is used and we keep track of who's holding the mutex to
//! detect recursive locks.
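//!
//! As a rough sketch, callers in libstd drive this type through the
//! `sys_common` wrappers (and ultimately `std::sync::Mutex`) rather than
//! using it directly, along these lines:
//!
//! ```ignore
//! let mut m = Mutex::new();
//! unsafe {
//!     m.init();    // currently a no-op, kept for parity with other platforms
//!     m.lock();    // AcquireSRWLockExclusive, or EnterCriticalSection on the fallback
//!     // ... critical region ...
//!     m.unlock();  // ReleaseSRWLockExclusive, or LeaveCriticalSection on the fallback
//!     m.destroy(); // frees the lazily allocated ReentrantMutex on the fallback
//! }
//! ```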
use crate::cell::UnsafeCell;
use crate::mem::{self, MaybeUninit};
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sys::c;
use crate::sys::compat;

pub struct Mutex {
    // On the SRWLock path this word is the SRWLOCK itself; on the
    // CriticalSection fallback it holds a pointer to the lazily allocated
    // `ReentrantMutex`.
    lock: AtomicUsize,
    // Used on the CriticalSection fallback to detect recursive locking.
    held: UnsafeCell<bool>,
}

unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}

#[derive(Clone, Copy)]
enum Kind {
    SRWLock = 1,
    CriticalSection = 2,
}
#[inline]
pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK {
    debug_assert!(mem::size_of::<c::SRWLOCK>() <= mem::size_of_val(&m.lock));
    &m.lock as *const _ as *mut _
}
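// For context: `raw` is exposed so the condition-variable implementation can
// pass the underlying SRWLOCK to the OS. Roughly (a sketch of the call shape
// used by condvar.rs, not code from this file):
//
//     c::SleepConditionVariableSRW(cvar_handle, raw(mutex), c::INFINITE, 0);
//
// where `cvar_handle` is a hypothetical name for the condvar's PCONDITION_VARIABLE.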
impl Mutex {
    pub const fn new() -> Mutex {
        Mutex {
            // This works because SRWLOCK_INIT is 0 (wrapped in a struct), so we are also properly
            // initializing an SRWLOCK here.
            lock: AtomicUsize::new(0),
            held: UnsafeCell::new(false),
        }
    }
    #[inline]
    pub unsafe fn init(&mut self) {}
    pub unsafe fn lock(&self) {
        match kind() {
            Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),
            Kind::CriticalSection => {
                let re = self.remutex();
                (*re).lock();
                if !self.flag_locked() {
                    (*re).unlock();
                    panic!("cannot recursively lock a mutex");
                }
            }
        }
    }
    pub unsafe fn try_lock(&self) -> bool {
        match kind() {
            Kind::SRWLock => c::TryAcquireSRWLockExclusive(raw(self)) != 0,
            Kind::CriticalSection => {
                let re = self.remutex();
                if !(*re).try_lock() {
                    false
                } else if self.flag_locked() {
                    true
                } else {
                    (*re).unlock();
                    false
                }
            }
        }
    }
    pub unsafe fn unlock(&self) {
        *self.held.get() = false;
        match kind() {
            Kind::SRWLock => c::ReleaseSRWLockExclusive(raw(self)),
            Kind::CriticalSection => (*self.remutex()).unlock(),
        }
    }
    pub unsafe fn destroy(&self) {
        match kind() {
            Kind::SRWLock => {}
            Kind::CriticalSection => match self.lock.load(Ordering::SeqCst) {
                0 => {}
                n => {
                    Box::from_raw(n as *mut ReentrantMutex).destroy();
                }
            },
        }
    }
    // Lazily allocates the fallback `ReentrantMutex`, racing with other
    // threads via compare-and-swap; the loser frees its allocation and uses
    // the winner's.
    unsafe fn remutex(&self) -> *mut ReentrantMutex {
        match self.lock.load(Ordering::SeqCst) {
            0 => {}
            n => return n as *mut _,
        }
        let re = box ReentrantMutex::uninitialized();
        re.init();
        let re = Box::into_raw(re);
        match self.lock.compare_and_swap(0, re as usize, Ordering::SeqCst) {
            0 => re,
            n => {
                Box::from_raw(re).destroy();
                n as *mut _
            }
        }
    }

    // Marks the mutex as held; returns `false` if it was already flagged as
    // held, which signals a recursive lock attempt.
    unsafe fn flag_locked(&self) -> bool {
        if *self.held.get() {
            false
        } else {
            *self.held.get() = true;
            true
        }
    }
}
// Runtime detection of SRWLock support: probe kernel32 once for
// `AcquireSRWLockExclusive` and cache the answer in `KIND` (0 means the probe
// hasn't run yet).
fn kind() -> Kind {
    static KIND: AtomicUsize = AtomicUsize::new(0);

    let val = KIND.load(Ordering::SeqCst);
    if val == Kind::SRWLock as usize {
        return Kind::SRWLock;
    } else if val == Kind::CriticalSection as usize {
        return Kind::CriticalSection;
    }

    let ret = match compat::lookup("kernel32", "AcquireSRWLockExclusive") {
        None => Kind::CriticalSection,
        Some(..) => Kind::SRWLock,
    };
    KIND.store(ret as usize, Ordering::SeqCst);
    ret
}
pub struct ReentrantMutex {
    inner: UnsafeCell<MaybeUninit<c::CRITICAL_SECTION>>,
}

unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}

impl ReentrantMutex {
    pub const fn uninitialized() -> ReentrantMutex {
        ReentrantMutex { inner: UnsafeCell::new(MaybeUninit::uninit()) }
    }

    pub unsafe fn init(&self) {
        c::InitializeCriticalSection((&mut *self.inner.get()).as_mut_ptr());
    }

    pub unsafe fn lock(&self) {
        c::EnterCriticalSection((&mut *self.inner.get()).as_mut_ptr());
    }

    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        c::TryEnterCriticalSection((&mut *self.inner.get()).as_mut_ptr()) != 0
    }

    pub unsafe fn unlock(&self) {
        c::LeaveCriticalSection((&mut *self.inner.get()).as_mut_ptr());
    }

    pub unsafe fn destroy(&self) {
        c::DeleteCriticalSection((&mut *self.inner.get()).as_mut_ptr());
    }
}
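// An illustrative lifecycle sketch for `ReentrantMutex` (assumed usage; within
// libstd it is driven by `Mutex` above): `init` must run exactly once before
// the first `lock`, every `lock` needs a matching `unlock`, and `destroy`
// comes last.
//
//     let re = ReentrantMutex::uninitialized();
//     unsafe {
//         re.init();    // InitializeCriticalSection
//         re.lock();
//         re.lock();    // a CRITICAL_SECTION may be acquired recursively
//         re.unlock();
//         re.unlock();  // one unlock per lock
//         re.destroy(); // DeleteCriticalSection
//     }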