// library/std/src/sys_common/remutex.rs

#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;

use crate::cell::UnsafeCell;
use crate::marker::PhantomPinned;
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
use crate::pin::Pin;
use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use crate::sys::locks as sys;

/// A re-entrant mutual exclusion lock.
///
/// This mutex will block *other* threads waiting for the lock to become
/// available. The thread which has already locked the mutex can lock it
/// multiple times without blocking, preventing a common source of deadlocks.
///
/// This is used by `stdout().lock()` and friends.
///
/// ## Implementation details
///
/// The `owner` field tracks which thread has locked the mutex.
///
/// We use `current_thread_unique_ptr()` as the thread identifier,
/// which is just the address of a thread local variable.
///
/// If `owner` is set to the identifier of the current thread,
/// we assume the mutex is already locked and instead of locking it again,
/// we increment `lock_count`.
///
/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
/// it reaches zero.
///
/// `lock_count` is protected by the mutex and only accessed by the thread that has
/// locked the mutex, so it needs no synchronization.
///
/// `owner` can be checked by other threads that want to see if they already
/// hold the lock, so it needs to be atomic. If it compares equal, we're on the
/// same thread that holds the mutex and memory access can use relaxed ordering
/// since we're not dealing with multiple threads. If it compares unequal,
/// synchronization is left to the mutex, making relaxed memory ordering for
/// the `owner` field fine in all cases.
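///
/// ## Usage sketch
///
/// A minimal, illustrative sketch of how the type is driven (not a doctest;
/// this is an internal `sys_common` type, and the pinning shown here is just
/// one way to satisfy the `new`/`init` contract):
///
/// ```ignore (internal API, illustrative only)
/// use std::pin::Pin;
///
/// // Construct, pin to a stable address, then initialize exactly once.
/// let mut m = unsafe { ReentrantMutex::new(0i32) };
/// let mut m = unsafe { Pin::new_unchecked(&mut m) };
/// unsafe { m.as_mut().init() };
///
/// // The same thread may lock the mutex repeatedly without deadlocking.
/// let first = m.as_ref().lock();
/// let second = m.as_ref().lock();
/// assert_eq!(*first, *second);
/// ```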
pub struct ReentrantMutex<T> {
    mutex: sys::Mutex,
    owner: AtomicUsize,
    lock_count: UnsafeCell<u32>,
    data: T,
    _pinned: PhantomPinned,
}

unsafe impl<T: Send> Send for ReentrantMutex<T> {}
unsafe impl<T: Send> Sync for ReentrantMutex<T> {}

impl<T> UnwindSafe for ReentrantMutex<T> {}
impl<T> RefUnwindSafe for ReentrantMutex<T> {}

/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` implementation.
///
/// # Mutability
///
/// Unlike `MutexGuard`, `ReentrantMutexGuard` does not implement `DerefMut`,
/// because implementing the trait would violate Rust's reference aliasing
/// rules. Use interior mutability (usually `RefCell`) to mutate the
/// guarded data.
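///
/// For example, with `RefCell` as the guarded data (an illustrative sketch,
/// not a doctest; `mutex` is assumed to be a pinned, initialized
/// `ReentrantMutex<RefCell<u32>>`):
///
/// ```ignore (internal API, illustrative only)
/// let guard = mutex.lock();
/// // Mutation goes through the RefCell, not through the guard itself.
/// *guard.borrow_mut() += 1;
/// ```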
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, T: 'a> {
    lock: Pin<&'a ReentrantMutex<T>>,
}

impl<T> !Send for ReentrantMutexGuard<'_, T> {}

impl<T> ReentrantMutex<T> {
    /// Creates a new reentrant mutex in an unlocked state.
    ///
    /// # Unsafety
    ///
    /// This function is unsafe because it is required that `init` is called
    /// once this mutex is in its final resting place, and only then are the
    /// lock/unlock methods safe.
    pub const unsafe fn new(t: T) -> ReentrantMutex<T> {
        ReentrantMutex {
            mutex: sys::Mutex::new(),
            owner: AtomicUsize::new(0),
            lock_count: UnsafeCell::new(0),
            data: t,
            _pinned: PhantomPinned,
        }
    }

    /// Initializes this mutex so it's ready for use.
    ///
    /// # Unsafety
    ///
    /// Unsafe to call more than once, and it must be called only after this
    /// mutex will no longer move in memory.
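    ///
    /// A sketch of the intended call sequence (illustrative only; the pinning
    /// shown is just one way to guarantee a stable address):
    ///
    /// ```ignore (internal API, illustrative only)
    /// let mut m = unsafe { ReentrantMutex::new(()) };
    /// let mut m = unsafe { Pin::new_unchecked(&mut m) }; // final resting place
    /// unsafe { m.as_mut().init() };                      // exactly once, after pinning
    /// ```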
    pub unsafe fn init(self: Pin<&mut Self>) {
        self.get_unchecked_mut().mutex.init()
    }

    /// Acquires the mutex, blocking the current thread until it is able to do so.
    ///
    /// This function will block the caller until the mutex becomes available.
    /// Upon returning, the thread is the only thread with the mutex held. When the thread
    /// calling this method already holds the lock, the call succeeds without
    /// blocking.
    ///
    /// Unlike the public `Mutex`, this type does not implement poisoning, so
    /// this call cannot fail and always returns a guard.
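    ///
    /// A reentrant-acquisition sketch (illustrative only; `mutex` is assumed
    /// to be a pinned, initialized `ReentrantMutex<()>`):
    ///
    /// ```ignore (internal API, illustrative only)
    /// let outer = mutex.lock(); // acquires the underlying sys mutex
    /// let inner = mutex.lock(); // same thread: only bumps lock_count
    /// drop(inner);              // lock_count 2 -> 1, mutex stays held
    /// drop(outer);              // lock_count 1 -> 0, mutex is released
    /// ```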
    pub fn lock(self: Pin<&Self>) -> ReentrantMutexGuard<'_, T> {
        let this_thread = current_thread_unique_ptr();
        // Safety: We only touch lock_count when we own the lock,
        // and since self is pinned we can safely call lock() on the mutex.
        unsafe {
            if self.owner.load(Relaxed) == this_thread {
                self.increment_lock_count();
            } else {
                self.mutex.lock();
                self.owner.store(this_thread, Relaxed);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
            }
        }
        ReentrantMutexGuard { lock: self }
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned.
    ///
    /// This function does not block.
    ///
    /// Unlike the public `Mutex`, this type does not implement poisoning, so
    /// a return of `None` only ever means that another thread currently holds
    /// the lock.
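    ///
    /// Sketch (illustrative only; `mutex` is assumed to be a pinned,
    /// initialized `ReentrantMutex<()>`):
    ///
    /// ```ignore (internal API, illustrative only)
    /// if let Some(_guard) = mutex.try_lock() {
    ///     // Acquired (or re-acquired by the current thread).
    /// } else {
    ///     // Another thread currently holds the lock.
    /// }
    /// ```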
    pub fn try_lock(self: Pin<&Self>) -> Option<ReentrantMutexGuard<'_, T>> {
        let this_thread = current_thread_unique_ptr();
        // Safety: We only touch lock_count when we own the lock,
        // and since self is pinned we can safely call try_lock() on the mutex.
        unsafe {
            if self.owner.load(Relaxed) == this_thread {
                self.increment_lock_count();
                Some(ReentrantMutexGuard { lock: self })
            } else if self.mutex.try_lock() {
                self.owner.store(this_thread, Relaxed);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
                Some(ReentrantMutexGuard { lock: self })
            } else {
                None
            }
        }
    }

    unsafe fn increment_lock_count(&self) {
        *self.lock_count.get() = (*self.lock_count.get())
            .checked_add(1)
            .expect("lock count overflow in reentrant mutex");
    }
}

impl<T> Drop for ReentrantMutex<T> {
    fn drop(&mut self) {
        // Safety: We're the unique owner of this mutex and not going to use it afterwards.
        unsafe { self.mutex.destroy() }
    }
}

impl<T> Deref for ReentrantMutexGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.lock.data
    }
}

impl<T> Drop for ReentrantMutexGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: We own the lock, and the lock is pinned.
        unsafe {
            *self.lock.lock_count.get() -= 1;
            if *self.lock.lock_count.get() == 0 {
                self.lock.owner.store(0, Relaxed);
                self.lock.mutex.unlock();
            }
        }
    }
}

/// Get an address that is unique per running thread.
///
/// This can be used as a non-null `usize`-sized ID.
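///
/// A sketch of the intended guarantee (illustrative only):
///
/// ```ignore (internal API, illustrative only)
/// let a = current_thread_unique_ptr();
/// let b = current_thread_unique_ptr();
/// assert_eq!(a, b); // stable within one thread
/// assert_ne!(a, 0); // the address of a thread local is never null
/// ```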
pub fn current_thread_unique_ptr() -> usize {
    // Use a non-drop type to make sure it's still available during thread destruction.
    thread_local! { static X: u8 = const { 0 } }
    X.with(|x| <*const _>::addr(x))
}