// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! System Mutexes
//!
//! The Windows implementation of mutexes is a little odd and it may not be
//! immediately obvious what's going on. The primary oddness is that SRWLock is
//! used instead of CriticalSection, and this is done because:
//!
//! 1. SRWLock is several times faster than CriticalSection according to
//!    benchmarks performed on both Windows 8 and Windows 7.
//!
//! 2. CriticalSection allows recursive locking while SRWLock deadlocks. The
//!    Unix implementation deadlocks so consistency is preferred. See #19962 for
//!    more details.
//!
//! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
//!    is that there are no guarantees of fairness.
//!
//! The downside of this approach, however, is that SRWLock is not available on
//! Windows XP, so we continue to have a fallback implementation where
//! CriticalSection is used and we keep track of who's holding the mutex to
//! detect recursive locks.

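// Illustrative usage (a minimal sketch only; std normally drives this type
// through its platform-independent wrappers rather than calling it directly):
//
//     let m = Mutex::new();
//     unsafe {
//         m.lock();
//         // ... exclusive access ...
//         m.unlock();
//         m.destroy();
//     }
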
use prelude::v1::*;

use cell::UnsafeCell;
use mem;
use sync::atomic::{AtomicUsize, Ordering};
use sys::c;
use sys::compat;

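// A mutex backed either by an SRWLock or by a lazily-allocated critical
// section. `lock` doubles as the storage: on the SRWLock path it holds the
// SRWLOCK itself (which fits in a usize), while on the fallback path it holds
// a pointer to a heap-allocated `ReentrantMutex`. `held` is only consulted by
// the fallback, to detect (and panic on) recursive locking.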
pub struct Mutex {
    lock: AtomicUsize,
    held: UnsafeCell<bool>,
}

unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}

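// Which underlying primitive this process uses, chosen once at runtime by
// `kind()` below.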
#[derive(Clone, Copy)]
enum Kind {
    SRWLock = 1,
    CriticalSection = 2,
}

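// Reinterprets the mutex's storage as a pointer to the underlying SRWLOCK.
// This is only meaningful while the SRWLock implementation is in use; the
// debug_assert checks that an SRWLOCK actually fits in the AtomicUsize.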
#[inline]
pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK {
    debug_assert!(mem::size_of::<c::SRWLOCK>() <= mem::size_of_val(&m.lock));
    &m.lock as *const _ as *mut _
}

impl Mutex {
    pub const fn new() -> Mutex {
        Mutex {
            lock: AtomicUsize::new(0),
            held: UnsafeCell::new(false),
        }
    }
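    // Blocks until the lock is acquired. On the fallback path a recursive
    // acquisition is detected via `flag_locked`, undone, and turned into a
    // panic rather than being allowed to silently succeed.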
    pub unsafe fn lock(&self) {
        match kind() {
            Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),
            Kind::CriticalSection => {
                let re = self.remutex();
                (*re).lock();
                if !self.flag_locked() {
                    (*re).unlock();
                    panic!("cannot recursively lock a mutex");
                }
            }
        }
    }
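    // Attempts to acquire the lock without blocking, returning `true` on
    // success. A recursive acquisition on the fallback path is immediately
    // released again and reported as failure.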
    pub unsafe fn try_lock(&self) -> bool {
        match kind() {
            Kind::SRWLock => c::TryAcquireSRWLockExclusive(raw(self)) != 0,
            Kind::CriticalSection => {
                let re = self.remutex();
                if !(*re).try_lock() {
                    false
                } else if self.flag_locked() {
                    true
                } else {
                    (*re).unlock();
                    false
                }
            }
        }
    }
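    // Releases the lock. The `held` flag is cleared unconditionally; the
    // SRWLock path simply never reads it.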
    pub unsafe fn unlock(&self) {
        *self.held.get() = false;
        match kind() {
            Kind::SRWLock => c::ReleaseSRWLockExclusive(raw(self)),
            Kind::CriticalSection => (*self.remutex()).unlock(),
        }
    }
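    // Frees any resources owned by the mutex. An SRWLock needs no cleanup;
    // the fallback frees the lazily-allocated `ReentrantMutex`, if any.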
    pub unsafe fn destroy(&self) {
        match kind() {
            Kind::SRWLock => {}
            Kind::CriticalSection => {
                match self.lock.load(Ordering::SeqCst) {
                    0 => {}
                    n => { Box::from_raw(n as *mut ReentrantMutex).destroy(); }
                }
            }
        }
    }

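    // Lazily allocates the fallback `ReentrantMutex`, stashing its address in
    // `lock`. If another thread wins the compare_and_swap race the fresh
    // allocation is destroyed and the winner's pointer is used instead.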
    unsafe fn remutex(&self) -> *mut ReentrantMutex {
        match self.lock.load(Ordering::SeqCst) {
            0 => {}
            n => return n as *mut _,
        }
        let mut re = Box::new(ReentrantMutex::uninitialized());
        re.init();
        let re = Box::into_raw(re);
        match self.lock.compare_and_swap(0, re as usize, Ordering::SeqCst) {
            0 => re,
            n => { Box::from_raw(re).destroy(); n as *mut _ }
        }
    }

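    // Marks the mutex as held by the current critical-section acquisition,
    // returning `false` if it was already held (i.e. a recursive lock).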
    unsafe fn flag_locked(&self) -> bool {
        if *self.held.get() {
            false
        } else {
            *self.held.get() = true;
            true
        }
    }
}

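// Determines, and caches in `KIND`, which implementation to use by looking up
// AcquireSRWLockExclusive in kernel32 at runtime; Windows XP lacks SRWLock and
// so falls back to critical sections.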
fn kind() -> Kind {
    static KIND: AtomicUsize = AtomicUsize::new(0);

    let val = KIND.load(Ordering::SeqCst);
    if val == Kind::SRWLock as usize {
        return Kind::SRWLock
    } else if val == Kind::CriticalSection as usize {
        return Kind::CriticalSection
    }

    let ret = match compat::lookup("kernel32", "AcquireSRWLockExclusive") {
        None => Kind::CriticalSection,
        Some(..) => Kind::SRWLock,
    };
    KIND.store(ret as usize, Ordering::SeqCst);
    return ret;
}

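// The critical-section fallback used on platforms without SRWLock.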
pub struct ReentrantMutex { inner: UnsafeCell<c::CRITICAL_SECTION> }

unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}

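// Thin wrappers around the Win32 CRITICAL_SECTION API; callers must `init`
// before first use and `destroy` when finished.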
impl ReentrantMutex {
    pub unsafe fn uninitialized() -> ReentrantMutex {
        mem::uninitialized()
    }

    pub unsafe fn init(&mut self) {
        c::InitializeCriticalSection(self.inner.get());
    }

    pub unsafe fn lock(&self) {
        c::EnterCriticalSection(self.inner.get());
    }

    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        c::TryEnterCriticalSection(self.inner.get()) != 0
    }

    pub unsafe fn unlock(&self) {
        c::LeaveCriticalSection(self.inner.get());
    }

    pub unsafe fn destroy(&self) {
        c::DeleteCriticalSection(self.inner.get());
    }
}