1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
use prelude::v1::*;

use cell::UnsafeCell;
use fmt;
use marker;
use mem;
use ops::{Deref, DerefMut};
use ptr;
use sys_common::mutex as sys;
use sys_common::poison::{self, TryLockError, TryLockResult, LockResult};
22 /// A mutual exclusion primitive useful for protecting shared data
24 /// This mutex will block threads waiting for the lock to become available. The
25 /// mutex can also be statically initialized or created via a `new`
26 /// constructor. Each mutex has a type parameter which represents the data that
27 /// it is protecting. The data can only be accessed through the RAII guards
28 /// returned from `lock` and `try_lock`, which guarantees that the data is only
29 /// ever accessed when the mutex is locked.
33 /// The mutexes in this module implement a strategy called "poisoning" where a
34 /// mutex is considered poisoned whenever a thread panics while holding the
35 /// lock. Once a mutex is poisoned, all other threads are unable to access the
36 /// data by default as it is likely tainted (some invariant is not being
39 /// For a mutex, this means that the `lock` and `try_lock` methods return a
40 /// `Result` which indicates whether a mutex has been poisoned or not. Most
41 /// usage of a mutex will simply `unwrap()` these results, propagating panics
42 /// among threads to ensure that a possibly invalid invariant is not witnessed.
44 /// A poisoned mutex, however, does not prevent all access to the underlying
45 /// data. The `PoisonError` type has an `into_inner` method which will return
46 /// the guard that would have otherwise been returned on a successful lock. This
47 /// allows access to the data, despite the lock being poisoned.
52 /// use std::sync::{Arc, Mutex};
54 /// use std::sync::mpsc::channel;
56 /// const N: usize = 10;
58 /// // Spawn a few threads to increment a shared variable (non-atomically), and
59 /// // let the main thread know once all increments are done.
61 /// // Here we're using an Arc to share memory among threads, and the data inside
62 /// // the Arc is protected with a mutex.
63 /// let data = Arc::new(Mutex::new(0));
65 /// let (tx, rx) = channel();
67 /// let (data, tx) = (data.clone(), tx.clone());
68 /// thread::spawn(move || {
69 /// // The shared state can only be accessed once the lock is held.
70 /// // Our non-atomic increment is safe because we're the only thread
71 /// // which can access the shared state when the lock is held.
73 /// // We unwrap() the return value to assert that we are not expecting
74 /// // threads to ever fail while holding the lock.
75 /// let mut data = data.lock().unwrap();
78 /// tx.send(()).unwrap();
80 /// // the lock is unlocked here when `data` goes out of scope.
84 /// rx.recv().unwrap();
87 /// To recover from a poisoned mutex:
90 /// use std::sync::{Arc, Mutex};
93 /// let lock = Arc::new(Mutex::new(0_u32));
94 /// let lock2 = lock.clone();
96 /// let _ = thread::spawn(move || -> () {
97 /// // This thread will acquire the mutex first, unwrapping the result of
98 /// // `lock` because the lock has not been poisoned.
99 /// let _guard = lock2.lock().unwrap();
101 /// // This panic while holding the lock (`_guard` is in scope) will poison
106 /// // The lock is poisoned by this point, but the returned result can be
107 /// // pattern matched on to return the underlying guard on both branches.
108 /// let mut guard = match lock.lock() {
109 /// Ok(guard) => guard,
110 /// Err(poisoned) => poisoned.into_inner(),
115 #[stable(feature = "rust1", since = "1.0.0")]
116 pub struct Mutex
<T
: ?Sized
> {
117 // Note that this static mutex is in a *box*, not inlined into the struct
118 // itself. Once a native mutex has been used once, its address can never
119 // change (it can't be moved). This mutex type can be safely moved at any
120 // time, so to ensure that the native mutex is used correctly we box the
121 // inner lock to give it a constant address.
122 inner
: Box
<StaticMutex
>,
126 // these are the only places where `T: Send` matters; all other
127 // functionality works fine on a single thread.
128 #[stable(feature = "rust1", since = "1.0.0")]
129 unsafe impl<T
: ?Sized
+ Send
> Send
for Mutex
<T
> { }
130 #[stable(feature = "rust1", since = "1.0.0")]
131 unsafe impl<T
: ?Sized
+ Send
> Sync
for Mutex
<T
> { }
133 /// The static mutex type is provided to allow for static allocation of mutexes.
135 /// Note that this is a separate type because using a Mutex correctly means that
136 /// it needs to have a destructor run. In Rust, statics are not allowed to have
137 /// destructors. As a result, a `StaticMutex` has one extra method when compared
138 /// to a `Mutex`, a `destroy` method. This method is unsafe to call, and
139 /// documentation can be found directly on the method.
144 /// #![feature(static_mutex)]
146 /// use std::sync::{StaticMutex, MUTEX_INIT};
148 /// static LOCK: StaticMutex = MUTEX_INIT;
151 /// let _g = LOCK.lock().unwrap();
152 /// // do some productive work
154 /// // lock is unlocked here.
156 #[unstable(feature = "static_mutex",
157 reason
= "may be merged with Mutex in the future",
159 pub struct StaticMutex
{
161 poison
: poison
::Flag
,
164 /// An RAII implementation of a "scoped lock" of a mutex. When this structure is
165 /// dropped (falls out of scope), the lock will be unlocked.
167 /// The data protected by the mutex can be access through this guard via its
168 /// `Deref` and `DerefMut` implementations
170 #[stable(feature = "rust1", since = "1.0.0")]
171 pub struct MutexGuard
<'a
, T
: ?Sized
+ 'a
> {
172 // funny underscores due to how Deref/DerefMut currently work (they
173 // disregard field privacy).
174 __lock
: &'a StaticMutex
,
176 __poison
: poison
::Guard
,
179 #[stable(feature = "rust1", since = "1.0.0")]
180 impl<'a
, T
: ?Sized
> !marker
::Send
for MutexGuard
<'a
, T
> {}
182 /// Static initialization of a mutex. This constant can be used to initialize
183 /// other mutex constants.
184 #[unstable(feature = "static_mutex",
185 reason
= "may be merged with Mutex in the future",
187 pub const MUTEX_INIT
: StaticMutex
= StaticMutex
::new();
190 /// Creates a new mutex in an unlocked state ready for use.
191 #[stable(feature = "rust1", since = "1.0.0")]
192 pub fn new(t
: T
) -> Mutex
<T
> {
194 inner
: box StaticMutex
::new(),
195 data
: UnsafeCell
::new(t
),
200 impl<T
: ?Sized
> Mutex
<T
> {
201 /// Acquires a mutex, blocking the current thread until it is able to do so.
203 /// This function will block the local thread until it is available to acquire
204 /// the mutex. Upon returning, the thread is the only thread with the mutex
205 /// held. An RAII guard is returned to allow scoped unlock of the lock. When
206 /// the guard goes out of scope, the mutex will be unlocked.
210 /// If another user of this mutex panicked while holding the mutex, then
211 /// this call will return an error once the mutex is acquired.
212 #[stable(feature = "rust1", since = "1.0.0")]
213 pub fn lock(&self) -> LockResult
<MutexGuard
<T
>> {
215 self.inner
.lock
.lock();
216 MutexGuard
::new(&*self.inner
, &self.data
)
220 /// Attempts to acquire this lock.
222 /// If the lock could not be acquired at this time, then `Err` is returned.
223 /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
224 /// guard is dropped.
226 /// This function does not block.
230 /// If another user of this mutex panicked while holding the mutex, then
231 /// this call will return failure if the mutex would otherwise be
233 #[stable(feature = "rust1", since = "1.0.0")]
234 pub fn try_lock(&self) -> TryLockResult
<MutexGuard
<T
>> {
236 if self.inner
.lock
.try_lock() {
237 Ok(MutexGuard
::new(&*self.inner
, &self.data
)?
)
239 Err(TryLockError
::WouldBlock
)
244 /// Determines whether the lock is poisoned.
246 /// If another thread is active, the lock can still become poisoned at any
247 /// time. You should not trust a `false` value for program correctness
248 /// without additional synchronization.
250 #[stable(feature = "sync_poison", since = "1.2.0")]
251 pub fn is_poisoned(&self) -> bool
{
252 self.inner
.poison
.get()
255 /// Consumes this mutex, returning the underlying data.
259 /// If another user of this mutex panicked while holding the mutex, then
260 /// this call will return an error instead.
261 #[stable(feature = "mutex_into_inner", since = "1.6.0")]
262 pub fn into_inner(self) -> LockResult
<T
> where T
: Sized
{
263 // We know statically that there are no outstanding references to
264 // `self` so there's no need to lock the inner StaticMutex.
266 // To get the inner value, we'd like to call `data.into_inner()`,
267 // but because `Mutex` impl-s `Drop`, we can't move out of it, so
268 // we'll have to destructure it manually instead.
270 // Like `let Mutex { inner, data } = self`.
271 let (inner
, data
) = {
272 let Mutex { ref inner, ref data }
= self;
273 (ptr
::read(inner
), ptr
::read(data
))
276 inner
.lock
.destroy(); // Keep in sync with the `Drop` impl.
278 poison
::map_result(inner
.poison
.borrow(), |_
| data
.into_inner())
282 /// Returns a mutable reference to the underlying data.
284 /// Since this call borrows the `Mutex` mutably, no actual locking needs to
285 /// take place---the mutable borrow statically guarantees no locks exist.
289 /// If another user of this mutex panicked while holding the mutex, then
290 /// this call will return an error instead.
291 #[stable(feature = "mutex_get_mut", since = "1.6.0")]
292 pub fn get_mut(&mut self) -> LockResult
<&mut T
> {
293 // We know statically that there are no other references to `self`, so
294 // there's no need to lock the inner StaticMutex.
295 let data
= unsafe { &mut *self.data.get() }
;
296 poison
::map_result(self.inner
.poison
.borrow(), |_
| data
)
300 #[stable(feature = "rust1", since = "1.0.0")]
301 impl<T
: ?Sized
> Drop
for Mutex
<T
> {
302 #[unsafe_destructor_blind_to_params]
304 // This is actually safe b/c we know that there is no further usage of
305 // this mutex (it's up to the user to arrange for a mutex to get
306 // dropped, that's not our job)
308 // IMPORTANT: This code must be kept in sync with `Mutex::into_inner`.
309 unsafe { self.inner.lock.destroy() }
313 #[stable(feature = "rust1", since = "1.0.0")]
314 impl<T
: ?Sized
+ fmt
::Debug
> fmt
::Debug
for Mutex
<T
> {
315 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
316 match self.try_lock() {
317 Ok(guard
) => write
!(f
, "Mutex {{ data: {:?} }}", &*guard
),
318 Err(TryLockError
::Poisoned(err
)) => {
319 write
!(f
, "Mutex {{ data: Poisoned({:?}) }}", &**err
.get_ref())
321 Err(TryLockError
::WouldBlock
) => write
!(f
, "Mutex {{ <locked> }}")
// Dummy unit of data handed to `MutexGuard::new` by `StaticMutex`'s locking
// methods, which have no real payload of their own to guard.
struct Dummy(UnsafeCell<()>);
unsafe impl Sync for Dummy {}
static DUMMY: Dummy = Dummy(UnsafeCell::new(()));
330 #[unstable(feature = "static_mutex",
331 reason
= "may be merged with Mutex in the future",
334 /// Creates a new mutex in an unlocked state ready for use.
335 pub const fn new() -> StaticMutex
{
337 lock
: sys
::Mutex
::new(),
338 poison
: poison
::Flag
::new(),
342 /// Acquires this lock, see `Mutex::lock`
344 pub fn lock(&'
static self) -> LockResult
<MutexGuard
<()>> {
347 MutexGuard
::new(self, &DUMMY
.0)
351 /// Attempts to grab this lock, see `Mutex::try_lock`
353 pub fn try_lock(&'
static self) -> TryLockResult
<MutexGuard
<()>> {
355 if self.lock
.try_lock() {
356 Ok(MutexGuard
::new(self, &DUMMY
.0)?
)
358 Err(TryLockError
::WouldBlock
)
363 /// Deallocates resources associated with this static mutex.
365 /// This method is unsafe because it provides no guarantees that there are
366 /// no active users of this mutex, and safety is not guaranteed if there are
367 /// active users of this mutex.
369 /// This method is required to ensure that there are no memory leaks on
370 /// *all* platforms. It may be the case that some platforms do not leak
371 /// memory if this method is not called, but this is not guaranteed to be
372 /// true on all platforms.
373 pub unsafe fn destroy(&'
static self) {
378 impl<'mutex
, T
: ?Sized
> MutexGuard
<'mutex
, T
> {
380 unsafe fn new(lock
: &'mutex StaticMutex
, data
: &'mutex UnsafeCell
<T
>)
381 -> LockResult
<MutexGuard
<'mutex
, T
>> {
382 poison
::map_result(lock
.poison
.borrow(), |guard
| {
385 __data
: &mut *data
.get(),
392 #[stable(feature = "rust1", since = "1.0.0")]
393 impl<'mutex
, T
: ?Sized
> Deref
for MutexGuard
<'mutex
, T
> {
396 fn deref(&self) -> &T {self.__data }
399 #[stable(feature = "rust1", since = "1.0.0")]
400 impl<'mutex
, T
: ?Sized
> DerefMut
for MutexGuard
<'mutex
, T
> {
401 fn deref_mut(&mut self) -> &mut T { self.__data }
404 #[stable(feature = "rust1", since = "1.0.0")]
405 impl<'a
, T
: ?Sized
> Drop
for MutexGuard
<'a
, T
> {
409 self.__lock
.poison
.done(&self.__poison
);
410 self.__lock
.lock
.unlock();
415 pub fn guard_lock
<'a
, T
: ?Sized
>(guard
: &MutexGuard
<'a
, T
>) -> &'a sys
::Mutex
{
419 pub fn guard_poison
<'a
, T
: ?Sized
>(guard
: &MutexGuard
<'a
, T
>) -> &'a poison
::Flag
{
427 use sync
::mpsc
::channel
;
428 use sync
::{Arc, Mutex, StaticMutex, Condvar}
;
429 use sync
::atomic
::{AtomicUsize, Ordering}
;
// Shared mutex + condvar pair used by the condvar-oriented tests below.
struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
// Simple non-`Copy` payload used to exercise `into_inner`/`get_mut` moves.
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
437 unsafe impl<T
: Send
> Send
for Packet
<T
> {}
438 unsafe impl<T
> Sync
for Packet
<T
> {}
#[test]
fn smoke() {
    let m = Mutex::new(());
    drop(m.lock().unwrap());
    drop(m.lock().unwrap());
}
#[test]
fn smoke_static() {
    static M: StaticMutex = StaticMutex::new();
    unsafe {
        drop(M.lock().unwrap());
        drop(M.lock().unwrap());
        M.destroy();
    }
}
#[test]
fn lots_and_lots() {
    static M: StaticMutex = StaticMutex::new();
    static mut CNT: u32 = 0;
    const J: u32 = 1000;
    const K: u32 = 3;

    fn inc() {
        for _ in 0..J {
            unsafe {
                let _g = M.lock().unwrap();
                CNT += 1;
            }
        }
    }

    let (tx, rx) = channel();
    for _ in 0..K {
        let tx2 = tx.clone();
        thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
        let tx2 = tx.clone();
        thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
    }

    drop(tx);
    for _ in 0..2 * K {
        rx.recv().unwrap();
    }
    assert_eq!(unsafe {CNT}, J * K * 2);
    unsafe {
        M.destroy();
    }
}
#[test]
fn try_lock() {
    let m = Mutex::new(());
    *m.try_lock().unwrap() = ();
}
#[test]
fn test_into_inner() {
    let m = Mutex::new(NonCopy(10));
    assert_eq!(m.into_inner().unwrap(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
    // The payload must be dropped exactly once, and only when the value
    // returned by `into_inner` is dropped — not by `into_inner` itself.
    struct Foo(Arc<AtomicUsize>);
    impl Drop for Foo {
        fn drop(&mut self) {
            self.0.fetch_add(1, Ordering::SeqCst);
        }
    }
    let num_drops = Arc::new(AtomicUsize::new(0));
    let m = Mutex::new(Foo(num_drops.clone()));
    assert_eq!(num_drops.load(Ordering::SeqCst), 0);
    {
        let _inner = m.into_inner().unwrap();
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
    }
    assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn test_into_inner_poison() {
    let m = Arc::new(Mutex::new(NonCopy(10)));
    let m2 = m.clone();
    let _ = thread::spawn(move || {
        let _lock = m2.lock().unwrap();
        panic!("test panic in inner thread to poison mutex");
    }).join();

    assert!(m.is_poisoned());
    match Arc::try_unwrap(m).unwrap().into_inner() {
        Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
        Ok(x) => panic!("into_inner of poisoned Mutex is Ok: {:?}", x),
    }
}
#[test]
fn test_get_mut() {
    let mut m = Mutex::new(NonCopy(10));
    *m.get_mut().unwrap() = NonCopy(20);
    assert_eq!(m.into_inner().unwrap(), NonCopy(20));
}
#[test]
fn test_get_mut_poison() {
    let m = Arc::new(Mutex::new(NonCopy(10)));
    let m2 = m.clone();
    let _ = thread::spawn(move || {
        let _lock = m2.lock().unwrap();
        panic!("test panic in inner thread to poison mutex");
    }).join();

    assert!(m.is_poisoned());
    match Arc::try_unwrap(m).unwrap().get_mut() {
        Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
        Ok(x) => panic!("get_mut of poisoned Mutex is Ok: {:?}", x),
    }
}
#[test]
fn test_mutex_arc_condvar() {
    let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
    let packet2 = Packet(packet.0.clone());
    let (tx, rx) = channel();
    let _t = thread::spawn(move|| {
        // wait until parent gets in
        rx.recv().unwrap();
        let &(ref lock, ref cvar) = &*packet2.0;
        let mut lock = lock.lock().unwrap();
        *lock = true;
        cvar.notify_one();
    });

    let &(ref lock, ref cvar) = &*packet.0;
    let mut lock = lock.lock().unwrap();
    tx.send(()).unwrap();
    assert!(!*lock);
    while !*lock {
        lock = cvar.wait(lock).unwrap();
    }
}
#[test]
fn test_arc_condvar_poison() {
    let packet = Packet(Arc::new((Mutex::new(1), Condvar::new())));
    let packet2 = Packet(packet.0.clone());
    let (tx, rx) = channel();

    let _t = thread::spawn(move || -> () {
        rx.recv().unwrap();
        let &(ref lock, ref cvar) = &*packet2.0;
        let _g = lock.lock().unwrap();
        cvar.notify_one();
        // Parent should fail when it wakes up.
        panic!();
    });

    let &(ref lock, ref cvar) = &*packet.0;
    let mut lock = lock.lock().unwrap();
    tx.send(()).unwrap();
    while *lock == 1 {
        match cvar.wait(lock) {
            Ok(l) => {
                lock = l;
                assert_eq!(*lock, 1);
            }
            Err(..) => break,
        }
    }
}
#[test]
fn test_mutex_arc_poison() {
    let arc = Arc::new(Mutex::new(1));
    assert!(!arc.is_poisoned());
    let arc2 = arc.clone();
    let _ = thread::spawn(move|| {
        let lock = arc2.lock().unwrap();
        // Deliberately-failing assertion: panicking while holding the lock
        // poisons the mutex for the parent thread below.
        assert_eq!(*lock, 2);
    }).join();
    assert!(arc.lock().is_err());
    assert!(arc.is_poisoned());
}
#[test]
fn test_mutex_arc_nested() {
    // Tests nested mutexes and access
    // to underlying data.
    let arc = Arc::new(Mutex::new(1));
    let arc2 = Arc::new(Mutex::new(arc));
    let (tx, rx) = channel();
    let _t = thread::spawn(move|| {
        let lock = arc2.lock().unwrap();
        let lock2 = lock.lock().unwrap();
        assert_eq!(*lock2, 1);
        tx.send(()).unwrap();
    });
    rx.recv().unwrap();
}
#[test]
fn test_mutex_arc_access_in_unwind() {
    let arc = Arc::new(Mutex::new(1));
    let arc2 = arc.clone();
    let _ = thread::spawn(move|| -> () {
        // A destructor that runs during unwinding must still be able to
        // lock an unpoisoned-at-that-point mutex.
        struct Unwinder {
            i: Arc<Mutex<i32>>,
        }
        impl Drop for Unwinder {
            fn drop(&mut self) {
                *self.i.lock().unwrap() += 1;
            }
        }
        let _u = Unwinder { i: arc2 };
        panic!();
    }).join();
    let lock = arc.lock().unwrap();
    assert_eq!(*lock, 2);
}
#[test]
fn test_mutex_unsized() {
    let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
    {
        let b = &mut *mutex.lock().unwrap();
        b[0] = 4;
        b[2] = 5;
    }
    let comp: &[i32] = &[4, 2, 5];
    assert_eq!(&*mutex.lock().unwrap(), comp);
}