1 // Copyright 2016 Amanieu d'Antras
3 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5 // http://opensource.org/licenses/MIT>, at your option. This file may not be
6 // copied, modified, or distributed except according to those terms.
8 use crate::raw_rwlock
::RawRwLock
;
11 /// A reader-writer lock
13 /// This type of lock allows a number of readers or at most one writer at any
14 /// point in time. The write portion of this lock typically allows modification
15 /// of the underlying data (exclusive access) and the read portion of this lock
16 /// typically allows for read-only access (shared access).
18 /// This lock uses a task-fair locking policy which avoids both reader and
19 /// writer starvation. This means that readers trying to acquire the lock will
20 /// block even if the lock is unlocked when there are writers waiting to acquire
21 /// the lock. Because of this, attempts to recursively acquire a read lock
22 /// within a single thread may result in a deadlock.
24 /// The type parameter `T` represents the data that this lock protects. It is
25 /// required that `T` satisfies `Send` to be shared across threads and `Sync` to
26 /// allow concurrent access through readers. The RAII guards returned from the
27 /// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
28 /// to allow access to the contained of the lock.
32 /// A typical unfair lock can often end up in a situation where a single thread
33 /// quickly acquires and releases the same lock in succession, which can starve
34 /// other threads waiting to acquire the rwlock. While this improves performance
35 /// because it doesn't force a context switch when a thread tries to re-acquire
36 /// a rwlock it has just released, this can starve other threads.
38 /// This rwlock uses [eventual fairness](https://trac.webkit.org/changeset/203350)
39 /// to ensure that the lock will be fair on average without sacrificing
40 /// performance. This is done by forcing a fair unlock on average every 0.5ms,
41 /// which will force the lock to go to the next thread waiting for the rwlock.
43 /// Additionally, any critical section longer than 1ms will always use a fair
44 /// unlock, which has a negligible performance impact compared to the length of
45 /// the critical section.
47 /// You can also force a fair unlock by calling `RwLockReadGuard::unlock_fair`
48 /// or `RwLockWriteGuard::unlock_fair` when unlocking a mutex instead of simply
49 /// dropping the guard.
51 /// # Differences from the standard library `RwLock`
53 /// - Supports atomically downgrading a write lock into a read lock.
54 /// - Task-fair locking policy instead of an unspecified platform default.
55 /// - No poisoning, the lock is released normally on panic.
56 /// - Only requires 1 word of space, whereas the standard library boxes the
57 /// `RwLock` due to platform limitations.
58 /// - Can be statically constructed (requires the `const_fn` nightly feature).
59 /// - Does not require any drop glue when dropped.
60 /// - Inline fast path for the uncontended case.
61 /// - Efficient handling of micro-contention using adaptive spinning.
62 /// - Allows raw locking & unlocking without a guard.
63 /// - Supports eventual fairness so that the rwlock is fair on average.
64 /// - Optionally allows making the rwlock fair by calling
65 /// `RwLockReadGuard::unlock_fair` and `RwLockWriteGuard::unlock_fair`.
70 /// use parking_lot::RwLock;
72 /// let lock = RwLock::new(5);
74 /// // many reader locks can be held at once
76 /// let r1 = lock.read();
77 /// let r2 = lock.read();
78 /// assert_eq!(*r1, 5);
79 /// assert_eq!(*r2, 5);
80 /// } // read locks are dropped at this point
82 /// // only one write lock may be held, however
84 /// let mut w = lock.write();
86 /// assert_eq!(*w, 6);
87 /// } // write lock is dropped here
89 pub type RwLock
<T
> = lock_api
::RwLock
<RawRwLock
, T
>;
91 /// Creates a new instance of an `RwLock<T>` which is unlocked.
93 /// This allows creating a `RwLock<T>` in a constant context on stable Rust.
94 pub const fn const_rwlock
<T
>(val
: T
) -> RwLock
<T
> {
95 RwLock
::const_new(<RawRwLock
as lock_api
::RawRwLock
>::INIT
, val
)
98 /// RAII structure used to release the shared read access of a lock when
100 pub type RwLockReadGuard
<'a
, T
> = lock_api
::RwLockReadGuard
<'a
, RawRwLock
, T
>;
102 /// RAII structure used to release the exclusive write access of a lock when
104 pub type RwLockWriteGuard
<'a
, T
> = lock_api
::RwLockWriteGuard
<'a
, RawRwLock
, T
>;
106 /// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
107 /// subfield of the protected data.
109 /// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
110 /// former doesn't support temporarily unlocking and re-locking, since that
111 /// could introduce soundness issues if the locked object is modified by another
113 pub type MappedRwLockReadGuard
<'a
, T
> = lock_api
::MappedRwLockReadGuard
<'a
, RawRwLock
, T
>;
115 /// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
116 /// subfield of the protected data.
118 /// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
119 /// former doesn't support temporarily unlocking and re-locking, since that
120 /// could introduce soundness issues if the locked object is modified by another
122 pub type MappedRwLockWriteGuard
<'a
, T
> = lock_api
::MappedRwLockWriteGuard
<'a
, RawRwLock
, T
>;
124 /// RAII structure used to release the upgradable read access of a lock when
126 pub type RwLockUpgradableReadGuard
<'a
, T
> = lock_api
::RwLockUpgradableReadGuard
<'a
, RawRwLock
, T
>;
use crate::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use std::time::Duration;

#[cfg(feature = "serde")]
use bincode::{deserialize, serialize};
// Small non-Copy value used by the into_inner/get_mut tests to detect moves.
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);

/// Basic sanity check: every guard kind can be acquired and dropped, and
/// the legal combinations of shared guards can coexist.
#[test]
fn smoke() {
    let l = RwLock::new(());
    drop(l.read());
    drop(l.write());
    drop(l.upgradable_read());
    // Multiple readers may be held at once.
    drop((l.read(), l.read()));
    // A reader and an upgradable reader may coexist.
    drop((l.read(), l.upgradable_read()));
    drop(l.write());
}
160 let r
= Arc
::new(RwLock
::new(()));
162 let (tx
, rx
) = channel
::<()>();
166 thread
::spawn(move || {
167 let mut rng
= rand
::thread_rng();
169 if rng
.gen_bool(1.0 / N
as f64) {
/// A panic while holding the write lock must not poison the lock:
/// a subsequent read still succeeds and sees the original value.
#[test]
fn test_rw_arc_no_poison_wr() {
    let arc = Arc::new(RwLock::new(1));
    let arc2 = arc.clone();
    let _: Result<(), _> = thread::spawn(move || {
        let _lock = arc2.write();
        panic!();
    })
    .join();
    let lock = arc.read();
    assert_eq!(*lock, 1);
}
/// A panic while holding the write lock must not poison the lock:
/// a subsequent write still succeeds and sees the original value.
#[test]
fn test_rw_arc_no_poison_ww() {
    let arc = Arc::new(RwLock::new(1));
    let arc2 = arc.clone();
    let _: Result<(), _> = thread::spawn(move || {
        let _lock = arc2.write();
        panic!();
    })
    .join();
    let lock = arc.write();
    assert_eq!(*lock, 1);
}
/// A panic while holding a read lock must not poison the lock:
/// a subsequent read still succeeds and sees the original value.
#[test]
fn test_rw_arc_no_poison_rr() {
    let arc = Arc::new(RwLock::new(1));
    let arc2 = arc.clone();
    let _: Result<(), _> = thread::spawn(move || {
        let _lock = arc2.read();
        panic!();
    })
    .join();
    let lock = arc.read();
    assert_eq!(*lock, 1);
}
/// A panic while holding a read lock must not poison the lock:
/// a subsequent write still succeeds and sees the original value.
#[test]
fn test_rw_arc_no_poison_rw() {
    let arc = Arc::new(RwLock::new(1));
    let arc2 = arc.clone();
    let _: Result<(), _> = thread::spawn(move || {
        let _lock = arc2.read();
        panic!();
    })
    .join();
    let lock = arc.write();
    assert_eq!(*lock, 1);
}
236 let arc
= Arc
::new(RwLock
::new(0));
237 let arc2
= arc
.clone();
238 let (tx
, rx
) = channel();
240 thread
::spawn(move || {
242 let mut lock
= arc2
.write();
248 tx
.send(()).unwrap();
251 let mut children
= Vec
::new();
253 // Upgradable readers try to catch the writer in the act and also
254 // try to touch the value
256 let arc3
= arc
.clone();
257 children
.push(thread
::spawn(move || {
258 let lock
= arc3
.upgradable_read();
262 let mut lock
= RwLockUpgradableReadGuard
::upgrade(lock
);
263 assert_eq
!(tmp
, *lock
);
270 // Readers try to catch the writers in the act
272 let arc4
= arc
.clone();
273 children
.push(thread
::spawn(move || {
274 let lock
= arc4
.read();
279 // Wait for children to pass their asserts
281 assert
!(r
.join().is_ok());
284 // Wait for writer to finish
286 let lock
= arc
.read();
287 assert_eq
!(*lock
, 15);
292 let arc
= Arc
::new(RwLock
::new(0));
293 let arc2
= arc
.clone();
294 let (tx
, rx
) = channel();
296 thread
::spawn(move || {
297 let mut lock
= arc2
.write();
304 tx
.send(()).unwrap();
307 // Readers try to catch the writer in the act
308 let mut children
= Vec
::new();
310 let arc3
= arc
.clone();
311 children
.push(thread
::spawn(move || {
312 let lock
= arc3
.read();
317 // Wait for children to pass their asserts
319 assert
!(r
.join().is_ok());
322 // Wait for writer to finish
324 let lock
= arc
.read();
325 assert_eq
!(*lock
, 10);
/// The lock must remain usable from a `Drop` impl that runs during unwinding:
/// the unwinding thread's destructor takes the write lock and increments the
/// value, which the main thread then observes.
#[test]
fn test_rw_arc_access_in_unwind() {
    let arc = Arc::new(RwLock::new(1));
    let arc2 = arc.clone();
    let _ = thread::spawn(move || {
        struct Unwinder {
            i: Arc<RwLock<isize>>,
        }
        impl Drop for Unwinder {
            fn drop(&mut self) {
                let mut lock = self.i.write();
                *lock += 1;
            }
        }
        let _u = Unwinder { i: arc2 };
        panic!();
    })
    .join();
    let lock = arc.read();
    assert_eq!(*lock, 2);
}
/// `RwLock` works with unsized contents (`RwLock<[i32]>` behind a reference):
/// writes through the guard are visible to later reads.
#[test]
fn test_rwlock_unsized() {
    let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
    {
        let b = &mut *rw.write();
        b[0] = 4;
        b[2] = 5;
    }
    let comp: &[i32] = &[4, 2, 5];
    assert_eq!(&*rw.read(), comp);
}
/// `try_read` succeeds alongside other shared guards (read, upgradable read)
/// and fails only while a write guard is held.
#[test]
fn test_rwlock_try_read() {
    let lock = RwLock::new(0isize);
    {
        let read_guard = lock.read();

        let read_result = lock.try_read();
        assert!(
            read_result.is_some(),
            "try_read should succeed while read_guard is in scope"
        );

        drop(read_guard);
    }
    {
        let upgrade_guard = lock.upgradable_read();

        let read_result = lock.try_read();
        assert!(
            read_result.is_some(),
            "try_read should succeed while upgrade_guard is in scope"
        );

        drop(upgrade_guard);
    }
    {
        let write_guard = lock.write();

        let read_result = lock.try_read();
        assert!(
            read_result.is_none(),
            "try_read should fail while write_guard is in scope"
        );

        drop(write_guard);
    }
}
/// `try_write` must fail while any other guard (read, upgradable read,
/// or write) is held — write access is fully exclusive.
#[test]
fn test_rwlock_try_write() {
    let lock = RwLock::new(0isize);
    {
        let read_guard = lock.read();

        let write_result = lock.try_write();
        assert!(
            write_result.is_none(),
            "try_write should fail while read_guard is in scope"
        );

        drop(read_guard);
    }
    {
        let upgrade_guard = lock.upgradable_read();

        let write_result = lock.try_write();
        assert!(
            write_result.is_none(),
            "try_write should fail while upgrade_guard is in scope"
        );

        drop(upgrade_guard);
    }
    {
        let write_guard = lock.write();

        let write_result = lock.try_write();
        assert!(
            write_result.is_none(),
            "try_write should fail while write_guard is in scope"
        );

        drop(write_guard);
    }
}
/// `try_upgradable_read` coexists with plain readers, but at most one
/// upgradable guard may exist at a time, and none while a writer is active.
#[test]
fn test_rwlock_try_upgrade() {
    let lock = RwLock::new(0isize);
    {
        let read_guard = lock.read();

        let upgrade_result = lock.try_upgradable_read();
        assert!(
            upgrade_result.is_some(),
            "try_upgradable_read should succeed while read_guard is in scope"
        );

        drop(read_guard);
    }
    {
        let upgrade_guard = lock.upgradable_read();

        let upgrade_result = lock.try_upgradable_read();
        assert!(
            upgrade_result.is_none(),
            "try_upgradable_read should fail while upgrade_guard is in scope"
        );

        drop(upgrade_guard);
    }
    {
        let write_guard = lock.write();

        let upgrade_result = lock.try_upgradable_read();
        assert!(
            upgrade_result.is_none(),
            "try_upgradable should fail while write_guard is in scope"
        );

        drop(write_guard);
    }
}
/// `into_inner` consumes the lock and returns the protected value by move.
#[test]
fn test_into_inner() {
    let m = RwLock::new(NonCopy(10));
    assert_eq!(m.into_inner(), NonCopy(10));
}
/// `into_inner` moves the value out without dropping it; the value is
/// dropped exactly once, when the extracted inner value goes out of scope.
#[test]
fn test_into_inner_drop() {
    struct Foo(Arc<AtomicUsize>);
    impl Drop for Foo {
        fn drop(&mut self) {
            // Count each drop so the test can assert it happens exactly once.
            self.0.fetch_add(1, Ordering::SeqCst);
        }
    }
    let num_drops = Arc::new(AtomicUsize::new(0));
    let m = RwLock::new(Foo(num_drops.clone()));
    assert_eq!(num_drops.load(Ordering::SeqCst), 0);
    {
        let _inner = m.into_inner();
        // Moving out of the lock must not drop the value.
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
    }
    // The value is dropped once _inner leaves scope.
    assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
/// `get_mut` gives direct mutable access without locking when the lock is
/// owned exclusively (`&mut self`).
#[test]
fn test_get_mut() {
    let mut m = RwLock::new(NonCopy(10));
    *m.get_mut() = NonCopy(20);
    assert_eq!(m.into_inner(), NonCopy(20));
}
/// Compile-time check that both guard types are `Sync`.
#[test]
fn test_rwlockguard_sync() {
    fn sync<T: Sync>(_: T) {}

    let rwlock = RwLock::new(());
    sync(rwlock.read());
    sync(rwlock.write());
}
/// Atomic downgrade: after `RwLockWriteGuard::downgrade`, the resulting read
/// guard must still see the value written — no other writer can sneak in
/// between the write and the downgraded read. 8 threads x 100 increments.
#[test]
fn test_rwlock_downgrade() {
    let x = Arc::new(RwLock::new(0));
    let mut handles = Vec::new();
    for _ in 0..8 {
        let x = x.clone();
        handles.push(thread::spawn(move || {
            for _ in 0..100 {
                let mut writer = x.write();
                *writer += 1;
                let cur_val = *writer;
                let reader = RwLockWriteGuard::downgrade(writer);
                // The downgraded reader must observe our own write.
                assert_eq!(cur_val, *reader);
            }
        }));
    }
    for handle in handles {
        handle.join().unwrap()
    }
    assert_eq!(*x.read(), 800);
}
/// `read_recursive` must not block even when a writer is queued, unlike a
/// normal `read` under the task-fair policy (which would wait behind the
/// pending writer and deadlock here).
#[test]
fn test_rwlock_recursive() {
    let arc = Arc::new(RwLock::new(1));
    let arc2 = arc.clone();
    let _lock1 = arc.read();
    thread::spawn(move || {
        let _lock = arc2.write();
    });

    if cfg!(not(all(target_env = "sgx", target_vendor = "fortanix"))) {
        // Give the writer time to enqueue itself.
        thread::sleep(Duration::from_millis(100));
    } else {
        // FIXME: https://github.com/fortanix/rust-sgx/issues/31
        for _ in 0..100 {
            thread::yield_now();
        }
    }

    // A normal read would block here since there is a pending writer
    let _lock2 = arc.read_recursive();
}
/// `Debug` output shows the data when unlocked and `<locked>` while a
/// write guard is held (Debug must not block on the lock).
#[test]
fn test_rwlock_debug() {
    let x = RwLock::new(vec![0u8, 10]);

    assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }");
    let _lock = x.write();
    assert_eq!(format!("{:?}", x), "RwLock { data: <locked> }");
}
/// Cloning a read guard produces a second, independent guard on the same
/// lock (observed via the Arc strong count of the shared value).
#[test]
fn test_clone() {
    let rwlock = RwLock::new(Arc::new(1));
    let a = rwlock.read_recursive();
    let b = a.clone();
    assert_eq!(Arc::strong_count(&b), 2);
}
/// Serde round-trip: serializing a lock serializes the protected data, and
/// deserializing yields a lock protecting an equal value.
#[cfg(feature = "serde")]
#[test]
fn test_serde() {
    let contents: Vec<u8> = vec![0, 1, 2];
    let mutex = RwLock::new(contents.clone());

    let serialized = serialize(&mutex).unwrap();
    let deserialized: RwLock<Vec<u8>> = deserialize(&serialized).unwrap();

    assert_eq!(*(mutex.read()), *(deserialized.read()));
    assert_eq!(contents, *(deserialized.read()));
}
591 fn test_issue_203() {
592 struct Bar(RwLock
<()>);
596 let _n
= self.0.write();
601 static B
: Bar
= Bar(RwLock
::new(()));
607 let a
= RwLock
::new(());