1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
use prelude::v1::*;

use cell::UnsafeCell;
use fmt;
use marker;
use mem;
use ops::{Deref, DerefMut};
use ptr;
use sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
use sys_common::rwlock as sys;
22 /// A reader-writer lock
24 /// This type of lock allows a number of readers or at most one writer at any
25 /// point in time. The write portion of this lock typically allows modification
26 /// of the underlying data (exclusive access) and the read portion of this lock
27 /// typically allows for read-only access (shared access).
29 /// The priority policy of the lock is dependent on the underlying operating
30 /// system's implementation, and this type does not guarantee that any
31 /// particular policy will be used.
33 /// The type parameter `T` represents the data that this lock protects. It is
34 /// required that `T` satisfies `Send` to be shared across threads and `Sync` to
35 /// allow concurrent access through readers. The RAII guards returned from the
36 /// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
37 /// to allow access to the contained of the lock.
41 /// RwLocks, like Mutexes, will become poisoned on panics. Note, however, that
42 /// an RwLock may only be poisoned if a panic occurs while it is locked
43 /// exclusively (write mode). If a panic occurs in any reader, then the lock
44 /// will not be poisoned.
49 /// use std::sync::RwLock;
51 /// let lock = RwLock::new(5);
53 /// // many reader locks can be held at once
55 /// let r1 = lock.read().unwrap();
56 /// let r2 = lock.read().unwrap();
57 /// assert_eq!(*r1, 5);
58 /// assert_eq!(*r2, 5);
59 /// } // read locks are dropped at this point
61 /// // only one write lock may be held, however
63 /// let mut w = lock.write().unwrap();
65 /// assert_eq!(*w, 6);
66 /// } // write lock is dropped here
68 #[stable(feature = "rust1", since = "1.0.0")]
69 pub struct RwLock
<T
: ?Sized
> {
70 inner
: Box
<StaticRwLock
>,
74 unsafe impl<T
: ?Sized
+ Send
+ Sync
> Send
for RwLock
<T
> {}
75 unsafe impl<T
: ?Sized
+ Send
+ Sync
> Sync
for RwLock
<T
> {}
77 /// Structure representing a statically allocated RwLock.
79 /// This structure is intended to be used inside of a `static` and will provide
80 /// automatic global access as well as lazy initialization. The internal
81 /// resources of this RwLock, however, must be manually deallocated.
86 /// #![feature(static_rwlock)]
88 /// use std::sync::{StaticRwLock, RW_LOCK_INIT};
90 /// static LOCK: StaticRwLock = RW_LOCK_INIT;
93 /// let _g = LOCK.read().unwrap();
94 /// // ... shared read access
97 /// let _g = LOCK.write().unwrap();
98 /// // ... exclusive write access
100 /// unsafe { LOCK.destroy() } // free all resources
102 #[unstable(feature = "static_rwlock",
103 reason
= "may be merged with RwLock in the future",
105 pub struct StaticRwLock
{
107 poison
: poison
::Flag
,
110 /// Constant initialization for a statically-initialized rwlock.
111 #[unstable(feature = "static_rwlock",
112 reason
= "may be merged with RwLock in the future",
114 pub const RW_LOCK_INIT
: StaticRwLock
= StaticRwLock
::new();
116 /// RAII structure used to release the shared read access of a lock when
119 #[stable(feature = "rust1", since = "1.0.0")]
120 pub struct RwLockReadGuard
<'a
, T
: ?Sized
+ 'a
> {
121 __lock
: &'a StaticRwLock
,
122 __data
: &'a UnsafeCell
<T
>,
125 impl<'a
, T
: ?Sized
> !marker
::Send
for RwLockReadGuard
<'a
, T
> {}
127 /// RAII structure used to release the exclusive write access of a lock when
130 #[stable(feature = "rust1", since = "1.0.0")]
131 pub struct RwLockWriteGuard
<'a
, T
: ?Sized
+ 'a
> {
132 __lock
: &'a StaticRwLock
,
133 __data
: &'a UnsafeCell
<T
>,
134 __poison
: poison
::Guard
,
137 impl<'a
, T
: ?Sized
> !marker
::Send
for RwLockWriteGuard
<'a
, T
> {}
140 /// Creates a new instance of an `RwLock<T>` which is unlocked.
145 /// use std::sync::RwLock;
147 /// let lock = RwLock::new(5);
149 #[stable(feature = "rust1", since = "1.0.0")]
150 pub fn new(t
: T
) -> RwLock
<T
> {
151 RwLock { inner: box StaticRwLock::new(), data: UnsafeCell::new(t) }
155 impl<T
: ?Sized
> RwLock
<T
> {
156 /// Locks this rwlock with shared read access, blocking the current thread
157 /// until it can be acquired.
159 /// The calling thread will be blocked until there are no more writers which
160 /// hold the lock. There may be other readers currently inside the lock when
161 /// this method returns. This method does not provide any guarantees with
162 /// respect to the ordering of whether contentious readers or writers will
163 /// acquire the lock first.
165 /// Returns an RAII guard which will release this thread's shared access
166 /// once it is dropped.
170 /// This function will return an error if the RwLock is poisoned. An RwLock
171 /// is poisoned whenever a writer panics while holding an exclusive lock.
172 /// The failure will occur immediately after the lock has been acquired.
174 #[stable(feature = "rust1", since = "1.0.0")]
175 pub fn read(&self) -> LockResult
<RwLockReadGuard
<T
>> {
176 unsafe { self.inner.lock.read() }
177 RwLockReadGuard
::new(&*self.inner
, &self.data
)
180 /// Attempts to acquire this rwlock with shared read access.
182 /// If the access could not be granted at this time, then `Err` is returned.
183 /// Otherwise, an RAII guard is returned which will release the shared access
184 /// when it is dropped.
186 /// This function does not block.
188 /// This function does not provide any guarantees with respect to the ordering
189 /// of whether contentious readers or writers will acquire the lock first.
193 /// This function will return an error if the RwLock is poisoned. An RwLock
194 /// is poisoned whenever a writer panics while holding an exclusive lock. An
195 /// error will only be returned if the lock would have otherwise been
198 #[stable(feature = "rust1", since = "1.0.0")]
199 pub fn try_read(&self) -> TryLockResult
<RwLockReadGuard
<T
>> {
200 if unsafe { self.inner.lock.try_read() }
{
201 Ok(try
!(RwLockReadGuard
::new(&*self.inner
, &self.data
)))
203 Err(TryLockError
::WouldBlock
)
207 /// Locks this rwlock with exclusive write access, blocking the current
208 /// thread until it can be acquired.
210 /// This function will not return while other writers or other readers
211 /// currently have access to the lock.
213 /// Returns an RAII guard which will drop the write access of this rwlock
218 /// This function will return an error if the RwLock is poisoned. An RwLock
219 /// is poisoned whenever a writer panics while holding an exclusive lock.
220 /// An error will be returned when the lock is acquired.
222 #[stable(feature = "rust1", since = "1.0.0")]
223 pub fn write(&self) -> LockResult
<RwLockWriteGuard
<T
>> {
224 unsafe { self.inner.lock.write() }
225 RwLockWriteGuard
::new(&*self.inner
, &self.data
)
228 /// Attempts to lock this rwlock with exclusive write access.
230 /// If the lock could not be acquired at this time, then `Err` is returned.
231 /// Otherwise, an RAII guard is returned which will release the lock when
234 /// This function does not block.
236 /// This function does not provide any guarantees with respect to the ordering
237 /// of whether contentious readers or writers will acquire the lock first.
241 /// This function will return an error if the RwLock is poisoned. An RwLock
242 /// is poisoned whenever a writer panics while holding an exclusive lock. An
243 /// error will only be returned if the lock would have otherwise been
246 #[stable(feature = "rust1", since = "1.0.0")]
247 pub fn try_write(&self) -> TryLockResult
<RwLockWriteGuard
<T
>> {
248 if unsafe { self.inner.lock.try_write() }
{
249 Ok(try
!(RwLockWriteGuard
::new(&*self.inner
, &self.data
)))
251 Err(TryLockError
::WouldBlock
)
255 /// Determines whether the lock is poisoned.
257 /// If another thread is active, the lock can still become poisoned at any
258 /// time. You should not trust a `false` value for program correctness
259 /// without additional synchronization.
261 #[stable(feature = "sync_poison", since = "1.2.0")]
262 pub fn is_poisoned(&self) -> bool
{
263 self.inner
.poison
.get()
266 /// Consumes this `RwLock`, returning the underlying data.
270 /// This function will return an error if the RwLock is poisoned. An RwLock
271 /// is poisoned whenever a writer panics while holding an exclusive lock. An
272 /// error will only be returned if the lock would have otherwise been
274 #[unstable(feature = "rwlock_into_inner", reason = "recently added", issue = "28968")]
275 pub fn into_inner(self) -> LockResult
<T
> where T
: Sized
{
276 // We know statically that there are no outstanding references to
277 // `self` so there's no need to lock the inner StaticRwLock.
279 // To get the inner value, we'd like to call `data.into_inner()`,
280 // but because `RwLock` impl-s `Drop`, we can't move out of it, so
281 // we'll have to destructure it manually instead.
283 // Like `let RwLock { inner, data } = self`.
284 let (inner
, data
) = {
285 let RwLock { ref inner, ref data }
= self;
286 (ptr
::read(inner
), ptr
::read(data
))
289 inner
.lock
.destroy(); // Keep in sync with the `Drop` impl.
291 poison
::map_result(inner
.poison
.borrow(), |_
| data
.into_inner())
295 /// Returns a mutable reference to the underlying data.
297 /// Since this call borrows the `RwLock` mutably, no actual locking needs to
298 /// take place---the mutable borrow statically guarantees no locks exist.
302 /// This function will return an error if the RwLock is poisoned. An RwLock
303 /// is poisoned whenever a writer panics while holding an exclusive lock. An
304 /// error will only be returned if the lock would have otherwise been
306 #[unstable(feature = "rwlock_get_mut", reason = "recently added", issue = "28968")]
307 pub fn get_mut(&mut self) -> LockResult
<&mut T
> {
308 // We know statically that there are no other references to `self`, so
309 // there's no need to lock the inner StaticRwLock.
310 let data
= unsafe { &mut *self.data.get() }
;
311 poison
::map_result(self.inner
.poison
.borrow(), |_
| data
)
315 #[stable(feature = "rust1", since = "1.0.0")]
316 impl<T
: ?Sized
> Drop
for RwLock
<T
> {
317 #[unsafe_destructor_blind_to_params]
319 // IMPORTANT: This code needs to be kept in sync with `RwLock::into_inner`.
320 unsafe { self.inner.lock.destroy() }
324 #[stable(feature = "rust1", since = "1.0.0")]
325 impl<T
: ?Sized
+ fmt
::Debug
> fmt
::Debug
for RwLock
<T
> {
326 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
327 match self.try_read() {
328 Ok(guard
) => write
!(f
, "RwLock {{ data: {:?} }}", &*guard
),
329 Err(TryLockError
::Poisoned(err
)) => {
330 write
!(f
, "RwLock {{ data: Poisoned({:?}) }}", &**err
.get_ref())
332 Err(TryLockError
::WouldBlock
) => write
!(f
, "RwLock {{ <locked> }}")
// Zero-sized placeholder handed to `StaticRwLock`'s guards as their data
// pointer (see `RwLockReadGuard::new(self, &DUMMY.0)` below).
struct Dummy(UnsafeCell<()>);

// SAFETY: the wrapped `()` carries no data, so sharing `DUMMY` across threads
// cannot cause a data race.
unsafe impl Sync for Dummy {}

static DUMMY: Dummy = Dummy(UnsafeCell::new(()));
341 #[unstable(feature = "static_rwlock",
342 reason
= "may be merged with RwLock in the future",
345 /// Creates a new rwlock.
346 pub const fn new() -> StaticRwLock
{
348 lock
: sys
::RWLock
::new(),
349 poison
: poison
::Flag
::new(),
353 /// Locks this rwlock with shared read access, blocking the current thread
354 /// until it can be acquired.
356 /// See `RwLock::read`.
358 pub fn read(&'
static self) -> LockResult
<RwLockReadGuard
<'
static, ()>> {
359 unsafe { self.lock.read() }
360 RwLockReadGuard
::new(self, &DUMMY
.0)
363 /// Attempts to acquire this lock with shared read access.
365 /// See `RwLock::try_read`.
367 pub fn try_read(&'
static self)
368 -> TryLockResult
<RwLockReadGuard
<'
static, ()>> {
369 if unsafe { self.lock.try_read() }
{
370 Ok(try
!(RwLockReadGuard
::new(self, &DUMMY
.0)))
372 Err(TryLockError
::WouldBlock
)
376 /// Locks this rwlock with exclusive write access, blocking the current
377 /// thread until it can be acquired.
379 /// See `RwLock::write`.
381 pub fn write(&'
static self) -> LockResult
<RwLockWriteGuard
<'
static, ()>> {
382 unsafe { self.lock.write() }
383 RwLockWriteGuard
::new(self, &DUMMY
.0)
386 /// Attempts to lock this rwlock with exclusive write access.
388 /// See `RwLock::try_write`.
390 pub fn try_write(&'
static self)
391 -> TryLockResult
<RwLockWriteGuard
<'
static, ()>> {
392 if unsafe { self.lock.try_write() }
{
393 Ok(try
!(RwLockWriteGuard
::new(self, &DUMMY
.0)))
395 Err(TryLockError
::WouldBlock
)
399 /// Deallocates all resources associated with this static lock.
401 /// This method is unsafe to call as there is no guarantee that there are no
402 /// active users of the lock, and this also doesn't prevent any future users
403 /// of this lock. This method is required to be called to not leak memory on
405 pub unsafe fn destroy(&'
static self) {
410 impl<'rwlock
, T
: ?Sized
> RwLockReadGuard
<'rwlock
, T
> {
411 fn new(lock
: &'rwlock StaticRwLock
, data
: &'rwlock UnsafeCell
<T
>)
412 -> LockResult
<RwLockReadGuard
<'rwlock
, T
>> {
413 poison
::map_result(lock
.poison
.borrow(), |_
| {
422 impl<'rwlock
, T
: ?Sized
> RwLockWriteGuard
<'rwlock
, T
> {
423 fn new(lock
: &'rwlock StaticRwLock
, data
: &'rwlock UnsafeCell
<T
>)
424 -> LockResult
<RwLockWriteGuard
<'rwlock
, T
>> {
425 poison
::map_result(lock
.poison
.borrow(), |guard
| {
435 #[stable(feature = "rust1", since = "1.0.0")]
436 impl<'rwlock
, T
: ?Sized
> Deref
for RwLockReadGuard
<'rwlock
, T
> {
439 fn deref(&self) -> &T { unsafe { &*self.__data.get() }
}
442 #[stable(feature = "rust1", since = "1.0.0")]
443 impl<'rwlock
, T
: ?Sized
> Deref
for RwLockWriteGuard
<'rwlock
, T
> {
446 fn deref(&self) -> &T { unsafe { &*self.__data.get() }
}
449 #[stable(feature = "rust1", since = "1.0.0")]
450 impl<'rwlock
, T
: ?Sized
> DerefMut
for RwLockWriteGuard
<'rwlock
, T
> {
451 fn deref_mut(&mut self) -> &mut T
{
452 unsafe { &mut *self.__data.get() }
456 #[stable(feature = "rust1", since = "1.0.0")]
457 impl<'a
, T
: ?Sized
> Drop
for RwLockReadGuard
<'a
, T
> {
459 unsafe { self.__lock.lock.read_unlock(); }
463 #[stable(feature = "rust1", since = "1.0.0")]
464 impl<'a
, T
: ?Sized
> Drop
for RwLockWriteGuard
<'a
, T
> {
466 self.__lock
.poison
.done(&self.__poison
);
467 unsafe { self.__lock.lock.write_unlock(); }
#[cfg(test)]
mod tests {
    #![allow(deprecated)] // rand

    use prelude::v1::*;

    use rand::{self, Rng};
    use sync::mpsc::channel;
    use thread;
    use sync::{Arc, RwLock, StaticRwLock, TryLockError};
    use sync::atomic::{AtomicUsize, Ordering};

    // Non-`Copy` payload used to exercise `into_inner`/`get_mut` move semantics.
    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read().unwrap());
        drop(l.write().unwrap());
        drop((l.read().unwrap(), l.read().unwrap()));
        drop(l.write().unwrap());
    }

    #[test]
    fn static_smoke() {
        static R: StaticRwLock = StaticRwLock::new();
        drop(R.read().unwrap());
        drop(R.write().unwrap());
        drop((R.read().unwrap(), R.read().unwrap()));
        drop(R.write().unwrap());
        unsafe { R.destroy(); }
    }

    #[test]
    fn frob() {
        static R: StaticRwLock = StaticRwLock::new();
        const N: usize = 10;
        const M: usize = 1000;

        let (tx, rx) = channel::<()>();
        for _ in 0..N {
            let tx = tx.clone();
            thread::spawn(move|| {
                let mut rng = rand::thread_rng();
                for _ in 0..M {
                    if rng.gen_weighted_bool(N) {
                        drop(R.write().unwrap());
                    } else {
                        drop(R.read().unwrap());
                    }
                }
                drop(tx);
            });
        }
        drop(tx);
        // Wait for every sender to hang up.
        let _ = rx.recv();
        unsafe { R.destroy(); }
    }

    #[test]
    fn test_rw_arc_poison_wr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.write().unwrap();
            panic!();
        }).join();
        assert!(arc.read().is_err());
    }

    #[test]
    fn test_rw_arc_poison_ww() {
        let arc = Arc::new(RwLock::new(1));
        assert!(!arc.is_poisoned());
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.write().unwrap();
            panic!();
        }).join();
        assert!(arc.write().is_err());
        assert!(arc.is_poisoned());
    }

    #[test]
    fn test_rw_arc_no_poison_rr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.read().unwrap();
            panic!();
        }).join();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_rw() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.read().unwrap();
            panic!();
        }).join();
        let lock = arc.write().unwrap();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move|| {
            let mut lock = arc2.write().unwrap();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move|| {
                let lock = arc3.read().unwrap();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 10);
    }

    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move|| -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write().unwrap();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        }).join();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write().unwrap();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read().unwrap(), comp);
    }

    #[test]
    fn test_rwlock_try_write() {
        use mem::drop;

        let lock = RwLock::new(0isize);
        let read_guard = lock.read().unwrap();

        let write_result = lock.try_write();
        match write_result {
            Err(TryLockError::WouldBlock) => (),
            Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
            Err(_) => assert!(false, "unexpected error"),
        }

        drop(read_guard);
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner().unwrap(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner().unwrap();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_into_inner_poison() {
        let m = Arc::new(RwLock::new(NonCopy(10)));
        let m2 = m.clone();
        let _ = thread::spawn(move || {
            let _lock = m2.write().unwrap();
            panic!("test panic in inner thread to poison RwLock");
        }).join();

        assert!(m.is_poisoned());
        match Arc::try_unwrap(m).unwrap().into_inner() {
            Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
            Ok(x) => panic!("into_inner of poisoned RwLock is Ok: {:?}", x),
        }
    }

    #[test]
    fn test_get_mut() {
        let mut m = RwLock::new(NonCopy(10));
        *m.get_mut().unwrap() = NonCopy(20);
        assert_eq!(m.into_inner().unwrap(), NonCopy(20));
    }

    #[test]
    fn test_get_mut_poison() {
        let m = Arc::new(RwLock::new(NonCopy(10)));
        let m2 = m.clone();
        let _ = thread::spawn(move || {
            let _lock = m2.write().unwrap();
            panic!("test panic in inner thread to poison RwLock");
        }).join();

        assert!(m.is_poisoned());
        match Arc::try_unwrap(m).unwrap().get_mut() {
            Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
            Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {:?}", x),
        }
    }
}