1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
use cell::UnsafeCell;
use fmt;
use marker;
use mem;
use ops::{Deref, DerefMut};
use ptr;
use sys_common::mutex as sys;
use sys_common::poison::{self, TryLockError, TryLockResult, LockResult};
20 /// A mutual exclusion primitive useful for protecting shared data
22 /// This mutex will block threads waiting for the lock to become available. The
23 /// mutex can also be statically initialized or created via a `new`
24 /// constructor. Each mutex has a type parameter which represents the data that
25 /// it is protecting. The data can only be accessed through the RAII guards
26 /// returned from `lock` and `try_lock`, which guarantees that the data is only
27 /// ever accessed when the mutex is locked.
31 /// The mutexes in this module implement a strategy called "poisoning" where a
32 /// mutex is considered poisoned whenever a thread panics while holding the
33 /// lock. Once a mutex is poisoned, all other threads are unable to access the
/// data by default as it is likely tainted (some invariant is not being upheld).
37 /// For a mutex, this means that the `lock` and `try_lock` methods return a
38 /// `Result` which indicates whether a mutex has been poisoned or not. Most
39 /// usage of a mutex will simply `unwrap()` these results, propagating panics
40 /// among threads to ensure that a possibly invalid invariant is not witnessed.
42 /// A poisoned mutex, however, does not prevent all access to the underlying
43 /// data. The `PoisonError` type has an `into_inner` method which will return
44 /// the guard that would have otherwise been returned on a successful lock. This
45 /// allows access to the data, despite the lock being poisoned.
50 /// use std::sync::{Arc, Mutex};
52 /// use std::sync::mpsc::channel;
54 /// const N: usize = 10;
56 /// // Spawn a few threads to increment a shared variable (non-atomically), and
57 /// // let the main thread know once all increments are done.
59 /// // Here we're using an Arc to share memory among threads, and the data inside
60 /// // the Arc is protected with a mutex.
61 /// let data = Arc::new(Mutex::new(0));
63 /// let (tx, rx) = channel();
65 /// let (data, tx) = (data.clone(), tx.clone());
66 /// thread::spawn(move || {
67 /// // The shared state can only be accessed once the lock is held.
68 /// // Our non-atomic increment is safe because we're the only thread
69 /// // which can access the shared state when the lock is held.
71 /// // We unwrap() the return value to assert that we are not expecting
72 /// // threads to ever fail while holding the lock.
73 /// let mut data = data.lock().unwrap();
76 /// tx.send(()).unwrap();
78 /// // the lock is unlocked here when `data` goes out of scope.
82 /// rx.recv().unwrap();
85 /// To recover from a poisoned mutex:
88 /// use std::sync::{Arc, Mutex};
91 /// let lock = Arc::new(Mutex::new(0_u32));
92 /// let lock2 = lock.clone();
94 /// let _ = thread::spawn(move || -> () {
95 /// // This thread will acquire the mutex first, unwrapping the result of
96 /// // `lock` because the lock has not been poisoned.
97 /// let _guard = lock2.lock().unwrap();
99 /// // This panic while holding the lock (`_guard` is in scope) will poison
104 /// // The lock is poisoned by this point, but the returned result can be
105 /// // pattern matched on to return the underlying guard on both branches.
106 /// let mut guard = match lock.lock() {
107 /// Ok(guard) => guard,
108 /// Err(poisoned) => poisoned.into_inner(),
113 #[stable(feature = "rust1", since = "1.0.0")]
114 pub struct Mutex
<T
: ?Sized
> {
115 // Note that this mutex is in a *box*, not inlined into the struct itself.
116 // Once a native mutex has been used once, its address can never change (it
117 // can't be moved). This mutex type can be safely moved at any time, so to
118 // ensure that the native mutex is used correctly we box the inner lock to
119 // give it a constant address.
120 inner
: Box
<sys
::Mutex
>,
121 poison
: poison
::Flag
,
125 // these are the only places where `T: Send` matters; all other
126 // functionality works fine on a single thread.
127 #[stable(feature = "rust1", since = "1.0.0")]
128 unsafe impl<T
: ?Sized
+ Send
> Send
for Mutex
<T
> { }
129 #[stable(feature = "rust1", since = "1.0.0")]
130 unsafe impl<T
: ?Sized
+ Send
> Sync
for Mutex
<T
> { }
132 /// An RAII implementation of a "scoped lock" of a mutex. When this structure is
133 /// dropped (falls out of scope), the lock will be unlocked.
/// The data protected by the mutex can be accessed through this guard via its
136 /// `Deref` and `DerefMut` implementations
138 #[stable(feature = "rust1", since = "1.0.0")]
139 pub struct MutexGuard
<'a
, T
: ?Sized
+ 'a
> {
140 // funny underscores due to how Deref/DerefMut currently work (they
141 // disregard field privacy).
142 __lock
: &'a Mutex
<T
>,
143 __poison
: poison
::Guard
,
146 #[stable(feature = "rust1", since = "1.0.0")]
147 impl<'a
, T
: ?Sized
> !marker
::Send
for MutexGuard
<'a
, T
> {}
150 /// Creates a new mutex in an unlocked state ready for use.
151 #[stable(feature = "rust1", since = "1.0.0")]
152 pub fn new(t
: T
) -> Mutex
<T
> {
154 inner
: box sys
::Mutex
::new(),
155 poison
: poison
::Flag
::new(),
156 data
: UnsafeCell
::new(t
),
165 impl<T
: ?Sized
> Mutex
<T
> {
166 /// Acquires a mutex, blocking the current thread until it is able to do so.
168 /// This function will block the local thread until it is available to acquire
169 /// the mutex. Upon returning, the thread is the only thread with the mutex
170 /// held. An RAII guard is returned to allow scoped unlock of the lock. When
171 /// the guard goes out of scope, the mutex will be unlocked.
173 /// The exact behavior on locking a mutex in the thread which already holds
174 /// the lock is left unspecified. However, this function will not return on
175 /// the second call (it might panic or deadlock, for example).
179 /// If another user of this mutex panicked while holding the mutex, then
180 /// this call will return an error once the mutex is acquired.
184 /// This function might panic when called if the lock is already held by
185 /// the current thread.
186 #[stable(feature = "rust1", since = "1.0.0")]
187 pub fn lock(&self) -> LockResult
<MutexGuard
<T
>> {
190 MutexGuard
::new(self)
194 /// Attempts to acquire this lock.
196 /// If the lock could not be acquired at this time, then `Err` is returned.
197 /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
198 /// guard is dropped.
200 /// This function does not block.
204 /// If another user of this mutex panicked while holding the mutex, then
205 /// this call will return failure if the mutex would otherwise be
207 #[stable(feature = "rust1", since = "1.0.0")]
208 pub fn try_lock(&self) -> TryLockResult
<MutexGuard
<T
>> {
210 if self.inner
.try_lock() {
211 Ok(MutexGuard
::new(self)?
)
213 Err(TryLockError
::WouldBlock
)
218 /// Determines whether the lock is poisoned.
220 /// If another thread is active, the lock can still become poisoned at any
221 /// time. You should not trust a `false` value for program correctness
222 /// without additional synchronization.
224 #[stable(feature = "sync_poison", since = "1.2.0")]
225 pub fn is_poisoned(&self) -> bool
{
229 /// Consumes this mutex, returning the underlying data.
233 /// If another user of this mutex panicked while holding the mutex, then
234 /// this call will return an error instead.
235 #[stable(feature = "mutex_into_inner", since = "1.6.0")]
236 pub fn into_inner(self) -> LockResult
<T
> where T
: Sized
{
237 // We know statically that there are no outstanding references to
238 // `self` so there's no need to lock the inner lock.
240 // To get the inner value, we'd like to call `data.into_inner()`,
241 // but because `Mutex` impl-s `Drop`, we can't move out of it, so
242 // we'll have to destructure it manually instead.
244 // Like `let Mutex { inner, poison, data } = self`.
245 let (inner
, poison
, data
) = {
246 let Mutex { ref inner, ref poison, ref data }
= self;
247 (ptr
::read(inner
), ptr
::read(poison
), ptr
::read(data
))
250 inner
.destroy(); // Keep in sync with the `Drop` impl.
253 poison
::map_result(poison
.borrow(), |_
| data
.into_inner())
257 /// Returns a mutable reference to the underlying data.
259 /// Since this call borrows the `Mutex` mutably, no actual locking needs to
260 /// take place---the mutable borrow statically guarantees no locks exist.
264 /// If another user of this mutex panicked while holding the mutex, then
265 /// this call will return an error instead.
266 #[stable(feature = "mutex_get_mut", since = "1.6.0")]
267 pub fn get_mut(&mut self) -> LockResult
<&mut T
> {
268 // We know statically that there are no other references to `self`, so
269 // there's no need to lock the inner lock.
270 let data
= unsafe { &mut *self.data.get() }
;
271 poison
::map_result(self.poison
.borrow(), |_
| data
)
275 #[stable(feature = "rust1", since = "1.0.0")]
276 impl<T
: ?Sized
> Drop
for Mutex
<T
> {
277 #[unsafe_destructor_blind_to_params]
279 // This is actually safe b/c we know that there is no further usage of
280 // this mutex (it's up to the user to arrange for a mutex to get
281 // dropped, that's not our job)
283 // IMPORTANT: This code must be kept in sync with `Mutex::into_inner`.
284 unsafe { self.inner.destroy() }
288 #[stable(feature = "mutex_default", since = "1.9.0")]
289 impl<T
: ?Sized
+ Default
> Default
for Mutex
<T
> {
290 /// Creates a `Mutex<T>`, with the `Default` value for T.
291 fn default() -> Mutex
<T
> {
292 Mutex
::new(Default
::default())
296 #[stable(feature = "rust1", since = "1.0.0")]
297 impl<T
: ?Sized
+ fmt
::Debug
> fmt
::Debug
for Mutex
<T
> {
298 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
299 match self.try_lock() {
300 Ok(guard
) => write
!(f
, "Mutex {{ data: {:?} }}", &*guard
),
301 Err(TryLockError
::Poisoned(err
)) => {
302 write
!(f
, "Mutex {{ data: Poisoned({:?}) }}", &**err
.get_ref())
304 Err(TryLockError
::WouldBlock
) => write
!(f
, "Mutex {{ <locked> }}")
309 impl<'mutex
, T
: ?Sized
> MutexGuard
<'mutex
, T
> {
310 unsafe fn new(lock
: &'mutex Mutex
<T
>) -> LockResult
<MutexGuard
<'mutex
, T
>> {
311 poison
::map_result(lock
.poison
.borrow(), |guard
| {
320 #[stable(feature = "rust1", since = "1.0.0")]
321 impl<'mutex
, T
: ?Sized
> Deref
for MutexGuard
<'mutex
, T
> {
324 fn deref(&self) -> &T
{
325 unsafe { &*self.__lock.data.get() }
329 #[stable(feature = "rust1", since = "1.0.0")]
330 impl<'mutex
, T
: ?Sized
> DerefMut
for MutexGuard
<'mutex
, T
> {
331 fn deref_mut(&mut self) -> &mut T
{
332 unsafe { &mut *self.__lock.data.get() }
336 #[stable(feature = "rust1", since = "1.0.0")]
337 impl<'a
, T
: ?Sized
> Drop
for MutexGuard
<'a
, T
> {
341 self.__lock
.poison
.done(&self.__poison
);
342 self.__lock
.inner
.unlock();
347 pub fn guard_lock
<'a
, T
: ?Sized
>(guard
: &MutexGuard
<'a
, T
>) -> &'a sys
::Mutex
{
351 pub fn guard_poison
<'a
, T
: ?Sized
>(guard
: &MutexGuard
<'a
, T
>) -> &'a poison
::Flag
{
355 #[cfg(all(test, not(target_os = "emscripten")))]
357 use sync
::mpsc
::channel
;
358 use sync
::{Arc, Mutex, Condvar}
;
359 use sync
::atomic
::{AtomicUsize, Ordering}
;
362 struct Packet
<T
>(Arc
<(Mutex
<T
>, Condvar
)>);
364 #[derive(Eq, PartialEq, Debug)]
367 unsafe impl<T
: Send
> Send
for Packet
<T
> {}
368 unsafe impl<T
> Sync
for Packet
<T
> {}
372 let m
= Mutex
::new(());
373 drop(m
.lock().unwrap());
374 drop(m
.lock().unwrap());
382 let m
= Arc
::new(Mutex
::new(0));
384 fn inc(m
: &Mutex
<u32>) {
386 *m
.lock().unwrap() += 1;
390 let (tx
, rx
) = channel();
392 let tx2
= tx
.clone();
394 thread
::spawn(move|| { inc(&m2); tx2.send(()).unwrap(); }
);
395 let tx2
= tx
.clone();
397 thread
::spawn(move|| { inc(&m2); tx2.send(()).unwrap(); }
);
404 assert_eq
!(*m
.lock().unwrap(), J
* K
* 2);
409 let m
= Mutex
::new(());
410 *m
.try_lock().unwrap() = ();
414 fn test_into_inner() {
415 let m
= Mutex
::new(NonCopy(10));
416 assert_eq
!(m
.into_inner().unwrap(), NonCopy(10));
420 fn test_into_inner_drop() {
421 struct Foo(Arc
<AtomicUsize
>);
424 self.0.fetch_add
(1, Ordering
::SeqCst
);
427 let num_drops
= Arc
::new(AtomicUsize
::new(0));
428 let m
= Mutex
::new(Foo(num_drops
.clone()));
429 assert_eq
!(num_drops
.load(Ordering
::SeqCst
), 0);
431 let _inner
= m
.into_inner().unwrap();
432 assert_eq
!(num_drops
.load(Ordering
::SeqCst
), 0);
434 assert_eq
!(num_drops
.load(Ordering
::SeqCst
), 1);
438 fn test_into_inner_poison() {
439 let m
= Arc
::new(Mutex
::new(NonCopy(10)));
441 let _
= thread
::spawn(move || {
442 let _lock
= m2
.lock().unwrap();
443 panic
!("test panic in inner thread to poison mutex");
446 assert
!(m
.is_poisoned());
447 match Arc
::try_unwrap(m
).unwrap().into_inner() {
448 Err(e
) => assert_eq
!(e
.into_inner(), NonCopy(10)),
449 Ok(x
) => panic
!("into_inner of poisoned Mutex is Ok: {:?}", x
),
455 let mut m
= Mutex
::new(NonCopy(10));
456 *m
.get_mut().unwrap() = NonCopy(20);
457 assert_eq
!(m
.into_inner().unwrap(), NonCopy(20));
461 fn test_get_mut_poison() {
462 let m
= Arc
::new(Mutex
::new(NonCopy(10)));
464 let _
= thread
::spawn(move || {
465 let _lock
= m2
.lock().unwrap();
466 panic
!("test panic in inner thread to poison mutex");
469 assert
!(m
.is_poisoned());
470 match Arc
::try_unwrap(m
).unwrap().get_mut() {
471 Err(e
) => assert_eq
!(*e
.into_inner(), NonCopy(10)),
472 Ok(x
) => panic
!("get_mut of poisoned Mutex is Ok: {:?}", x
),
477 fn test_mutex_arc_condvar() {
478 let packet
= Packet(Arc
::new((Mutex
::new(false), Condvar
::new())));
479 let packet2
= Packet(packet
.0.clone());
480 let (tx
, rx
) = channel();
481 let _t
= thread
::spawn(move|| {
482 // wait until parent gets in
484 let &(ref lock
, ref cvar
) = &*packet2
.0
;
485 let mut lock
= lock
.lock().unwrap();
490 let &(ref lock
, ref cvar
) = &*packet
.0;
491 let mut lock
= lock
.lock().unwrap();
492 tx
.send(()).unwrap();
495 lock
= cvar
.wait(lock
).unwrap();
500 fn test_arc_condvar_poison() {
501 let packet
= Packet(Arc
::new((Mutex
::new(1), Condvar
::new())));
502 let packet2
= Packet(packet
.0.clone());
503 let (tx
, rx
) = channel();
505 let _t
= thread
::spawn(move || -> () {
507 let &(ref lock
, ref cvar
) = &*packet2
.0
;
508 let _g
= lock
.lock().unwrap();
510 // Parent should fail when it wakes up.
514 let &(ref lock
, ref cvar
) = &*packet
.0;
515 let mut lock
= lock
.lock().unwrap();
516 tx
.send(()).unwrap();
518 match cvar
.wait(lock
) {
521 assert_eq
!(*lock
, 1);
529 fn test_mutex_arc_poison() {
530 let arc
= Arc
::new(Mutex
::new(1));
531 assert
!(!arc
.is_poisoned());
532 let arc2
= arc
.clone();
533 let _
= thread
::spawn(move|| {
534 let lock
= arc2
.lock().unwrap();
535 assert_eq
!(*lock
, 2);
537 assert
!(arc
.lock().is_err());
538 assert
!(arc
.is_poisoned());
542 fn test_mutex_arc_nested() {
543 // Tests nested mutexes and access
544 // to underlying data.
545 let arc
= Arc
::new(Mutex
::new(1));
546 let arc2
= Arc
::new(Mutex
::new(arc
));
547 let (tx
, rx
) = channel();
548 let _t
= thread
::spawn(move|| {
549 let lock
= arc2
.lock().unwrap();
550 let lock2
= lock
.lock().unwrap();
551 assert_eq
!(*lock2
, 1);
552 tx
.send(()).unwrap();
558 fn test_mutex_arc_access_in_unwind() {
559 let arc
= Arc
::new(Mutex
::new(1));
560 let arc2
= arc
.clone();
561 let _
= thread
::spawn(move|| -> () {
565 impl Drop
for Unwinder
{
567 *self.i
.lock().unwrap() += 1;
570 let _u
= Unwinder { i: arc2 }
;
573 let lock
= arc
.lock().unwrap();
574 assert_eq
!(*lock
, 2);
578 fn test_mutex_unsized() {
579 let mutex
: &Mutex
<[i32]> = &Mutex
::new([1, 2, 3]);
581 let b
= &mut *mutex
.lock().unwrap();
585 let comp
: &[i32] = &[4, 2, 5];
586 assert_eq
!(&*mutex
.lock().unwrap(), comp
);