1 #[cfg(all(test, not(target_os = "emscripten")))]
4 use crate::cell
::UnsafeCell
;
6 use crate::panic
::{RefUnwindSafe, UnwindSafe}
;
7 use crate::sync
::atomic
::{AtomicUsize, Ordering::Relaxed}
;
8 use crate::sys
::locks
as sys
;
/// A re-entrant mutual exclusion lock.
///
/// This mutex will block *other* threads waiting for the lock to become
/// available. The thread which has already locked the mutex can lock it
/// multiple times without blocking, preventing a common source of deadlocks.
///
/// This is used by stdout().lock() and friends.
///
/// ## Implementation details
///
/// The 'owner' field tracks which thread has locked the mutex.
///
/// We use current_thread_unique_ptr() as the thread identifier,
/// which is just the address of a thread local variable.
///
/// If `owner` is set to the identifier of the current thread,
/// we assume the mutex is already locked and instead of locking it again,
/// we increment `lock_count`.
///
/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
/// it reaches zero.
///
/// `lock_count` is protected by the mutex and only accessed by the thread that has
/// locked the mutex, so needs no synchronization.
///
/// `owner` can be checked by other threads that want to see if they already
/// hold the lock, so needs to be atomic. If it compares equal, we're on the
/// same thread that holds the mutex and memory access can use relaxed ordering
/// since we're not dealing with multiple threads. If it compares unequal,
/// synchronization is left to the mutex, making relaxed memory ordering for
/// the `owner` field fine in all cases.
pub struct ReentrantMutex<T> {
    // NOTE(review): this view of the file appears truncated — the methods
    // below also reference `self.mutex` (a `sys::Mutex`), `self.owner`
    // (an `AtomicUsize`), and a field holding the protected `T`; those
    // declarations and the struct's closing brace are not visible here.
    //
    // Number of times the owning thread has locked this mutex. Only read or
    // written by the thread that holds the underlying mutex (see the
    // implementation notes above), so `UnsafeCell` needs no synchronization.
    lock_count: UnsafeCell<u32>,
// SAFETY: sending the mutex to another thread moves the protected `T` along
// with it, which is sound because of the `T: Send` bound.
unsafe impl<T: Send> Send for ReentrantMutex<T> {}
// SAFETY: only the thread that currently holds the lock can reach the
// protected data, so sharing the mutex between threads effectively hands the
// `T` from one thread to the next rather than aliasing it — hence the bound
// is `T: Send`, not `T: Sync`. NOTE(review): this relies on the guard being
// `!Send` (declared further down) — confirm that reasoning holds file-wide.
unsafe impl<T: Send> Sync for ReentrantMutex<T> {}
// The mutex is treated as an unwind-safety boundary for the data it protects,
// so the impl holds for any `T` (mirrors `std::sync::Mutex`) —
// NOTE(review): assumption based on the std Mutex convention; confirm.
impl<T> UnwindSafe for ReentrantMutex<T> {}
// Same reasoning as `UnwindSafe` above: the lock itself is the boundary, so
// a shared reference to it is unwind safe for any `T`.
impl<T> RefUnwindSafe for ReentrantMutex<T> {}
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// Deref implementation.
///
/// Unlike `MutexGuard`, `ReentrantMutexGuard` does not implement `DerefMut`,
/// because implementation of the trait would violate Rust’s reference aliasing
/// rules. Use interior mutability (usually `RefCell`) in order to mutate the
/// contained value.
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, T: 'a> {
    // The mutex this guard will unlock (by decrementing its `lock_count`)
    // when dropped. NOTE(review): the struct's closing brace is not visible
    // in this view of the file.
    lock: &'a ReentrantMutex<T>,
// The guard must stay on the thread that acquired the lock: dropping it
// touches `lock_count`, which is only sound on the owning thread (see the
// implementation notes on `ReentrantMutex`), so the guard is explicitly
// `!Send`.
impl<T> !Send for ReentrantMutexGuard<'_, T> {}
impl<T> ReentrantMutex<T> {
    /// Creates a new reentrant mutex in an unlocked state.
    pub const fn new(t: T) -> ReentrantMutex<T> {
        // NOTE(review): this view of the file appears truncated — the
        // `ReentrantMutex { ... }` struct-literal opener, the field holding
        // `t`, and the closing braces are not visible around these
        // initializers.
            mutex: sys::Mutex::new(),
            // 0 marks "unowned": thread ids come from
            // current_thread_unique_ptr(), which is documented below to be
            // non-null, so 0 can never collide with a real owner.
            owner: AtomicUsize::new(0),
            lock_count: UnsafeCell::new(0),
    /// Acquires a mutex, blocking the current thread until it is able to do so.
    ///
    /// This function will block the caller until it is available to acquire the mutex.
    /// Upon returning, the thread is the only thread with the mutex held. When the thread
    /// calling this method already holds the lock, the call shall succeed without
    /// blocking.
    ///
    /// If another user of this mutex panicked while holding the mutex, then
    /// this call will return failure if the mutex would otherwise be
    /// acquired.
    pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
        let this_thread = current_thread_unique_ptr();
        // Safety: We only touch lock_count when we own the lock.
            // Re-entrant fast path: this thread already owns the mutex, so a
            // relaxed load suffices (see the type-level implementation notes)
            // and we only bump the counter.
            if self.owner.load(Relaxed) == this_thread {
                self.increment_lock_count();
                // NOTE(review): an `else` arm that actually acquires the
                // underlying mutex (presumably `self.mutex.lock()`) and the
                // surrounding `unsafe` block appear to be elided from this
                // view — as written, the first-acquisition bookkeeping below
                // would also run on the re-entrant path.
                self.owner.store(this_thread, Relaxed);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
        ReentrantMutexGuard { lock: self }
    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `Err` is returned.
    /// Otherwise, an RAII guard is returned.
    ///
    /// This function does not block.
    ///
    /// If another user of this mutex panicked while holding the mutex, then
    /// this call will return failure if the mutex would otherwise be
    /// acquired.
    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, T>> {
        let this_thread = current_thread_unique_ptr();
        // Safety: We only touch lock_count when we own the lock.
            // Re-entrant path: already owned by this thread — just bump the
            // count; no synchronization needed beyond the relaxed load.
            if self.owner.load(Relaxed) == this_thread {
                self.increment_lock_count();
                Some(ReentrantMutexGuard { lock: self })
            } else if self.mutex.try_lock() {
                // First acquisition by this thread: record ownership and
                // initialize the count (which must have been reset to 0 by
                // the previous owner's final unlock).
                self.owner.store(this_thread, Relaxed);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
                Some(ReentrantMutexGuard { lock: self })
                // NOTE(review): the final `else` arm (presumably returning
                // `None` when `try_lock` fails) and the closing braces are
                // not visible in this view of the file.
    /// Bumps `lock_count` for a re-entrant acquisition, panicking on overflow.
    ///
    /// # Safety
    ///
    /// Must only be called while the current thread owns the mutex, since
    /// `lock_count` is accessed without synchronization.
    unsafe fn increment_lock_count(&self) {
        // NOTE(review): a `.checked_add(1)` call appears to be elided between
        // these two lines — `.expect(...)` requires an `Option`, and the
        // message names overflow.
        *self.lock_count.get() = (*self.lock_count.get())
            .expect("lock count overflow in reentrant mutex");
// Shared (read-only) access to the protected data while the guard is alive.
impl<T> Deref for ReentrantMutexGuard<'_, T> {
    // NOTE(review): the `type Target = T;` associated-type declaration is not
    // visible in this view of the file.
    fn deref(&self) -> &T {
        // NOTE(review): the body is elided from this view — presumably it
        // returns a reference into `self.lock`'s data field; confirm against
        // the full file.
// Unlocking happens when the guard is dropped: decrement `lock_count` and
// only release the underlying mutex once it reaches zero.
impl<T> Drop for ReentrantMutexGuard<'_, T> {
    // NOTE(review): the `fn drop(&mut self)` header and an enclosing
    // `unsafe { ... }` block appear to be elided from this view of the file.
        // Safety: We own the lock.
            *self.lock.lock_count.get() -= 1;
            // Final unlock by the owning thread: clear ownership first (0 is
            // the "unowned" sentinel), then release the underlying mutex.
            // Relaxed is fine — synchronization is the mutex's job (see the
            // implementation notes on `ReentrantMutex`).
            if *self.lock.lock_count.get() == 0 {
                self.lock.owner.store(0, Relaxed);
                self.lock.mutex.unlock();
/// Get an address that is unique per running thread.
///
/// This can be used as a non-null usize-sized ID.
pub fn current_thread_unique_ptr() -> usize {
    // Use a non-drop type to make sure it's still available during thread destruction.
    thread_local! { static X: u8 = const { 0 } }
    // Each thread gets its own copy of `X`, so the address of that copy is
    // unique among live threads (it may be reused after a thread exits).
    // NOTE(review): the function's closing brace is past the end of this view.
    X.with(|x| <*const _>::addr(x))