// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use lock_api;
use raw_rwlock::RawRwLock;

/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// This lock uses a task-fair locking policy which avoids both reader and
/// writer starvation. This means that readers trying to acquire the lock will
/// block even if the lock is unlocked when there are writers waiting to acquire
/// the lock. Because of this, attempts to recursively acquire a read lock
/// within a single thread may result in a deadlock.
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
///
/// # Fairness
///
/// A typical unfair lock can often end up in a situation where a single thread
/// quickly acquires and releases the same lock in succession. This improves
/// performance because it avoids a context switch when a thread tries to
/// re-acquire a rwlock it has just released, but it can starve other threads
/// waiting to acquire the rwlock.
///
/// This rwlock uses [eventual fairness](https://trac.webkit.org/changeset/203350)
/// to ensure that the lock will be fair on average without sacrificing
/// performance. This is done by forcing a fair unlock on average every 0.5ms,
/// which will force the lock to go to the next thread waiting for the rwlock.
///
/// Additionally, any critical section longer than 1ms will always use a fair
/// unlock, which has a negligible performance impact compared to the length of
/// the critical section.
///
/// You can also force a fair unlock by calling `RwLockReadGuard::unlock_fair`
/// or `RwLockWriteGuard::unlock_fair` when unlocking a rwlock instead of simply
/// dropping the guard.
///
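/// For example (a minimal sketch; `unlock_fair` is an associated function
/// that consumes the guard):
///
/// ```
/// use parking_lot::{RwLock, RwLockReadGuard};
///
/// let lock = RwLock::new(0);
/// let guard = lock.read();
/// // Hand the lock directly to the next waiting thread instead of taking
/// // the normal (possibly unfair) unlock fast path.
/// RwLockReadGuard::unlock_fair(guard);
/// ```
///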
/// # Differences from the standard library `RwLock`
///
/// - Supports atomically downgrading a write lock into a read lock (see the
///   downgrade example below).
/// - Task-fair locking policy instead of an unspecified platform default.
/// - No poisoning, the lock is released normally on panic.
/// - Only requires 1 word of space, whereas the standard library boxes the
///   `RwLock` due to platform limitations.
/// - Can be statically constructed (requires the `const_fn` nightly feature).
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard.
/// - Supports eventual fairness so that the rwlock is fair on average.
/// - Optionally allows making the rwlock fair by calling
///   `RwLockReadGuard::unlock_fair` and `RwLockWriteGuard::unlock_fair`.
///
/// # Examples
///
/// ```
/// use parking_lot::RwLock;
///
/// let lock = RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
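///
/// A write lock can also be atomically downgraded to a read lock (a minimal
/// sketch using `RwLockWriteGuard::downgrade`; no other writer can acquire
/// the lock in between):
///
/// ```
/// use parking_lot::{RwLock, RwLockWriteGuard};
///
/// let lock = RwLock::new(1);
/// let mut writer = lock.write();
/// *writer += 1;
/// // Atomically convert the exclusive guard into a shared one.
/// let reader = RwLockWriteGuard::downgrade(writer);
/// assert_eq!(*reader, 2);
/// ```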
pub type RwLock<T> = lock_api::RwLock<RawRwLock, T>;

/// RAII structure used to release the shared read access of a lock when
/// dropped.
pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>;

/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawRwLock, T>;

/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
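///
/// A minimal sketch of obtaining one via `RwLockReadGuard::map` (the tuple
/// payload here is purely illustrative):
///
/// ```
/// use parking_lot::{RwLock, RwLockReadGuard};
///
/// let lock = RwLock::new((1, 2));
/// let guard = lock.read();
/// // Narrow the guard so it only exposes the first field.
/// let first = RwLockReadGuard::map(guard, |pair| &pair.0);
/// assert_eq!(*first, 1);
/// ```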
pub type MappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, RawRwLock, T>;

/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>;

/// RAII structure used to release the upgradable read access of a lock when
/// dropped.
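///
/// A minimal sketch of the upgrade path (`upgradable_read` coexists with
/// plain readers, but only one upgradable reader is allowed at a time):
///
/// ```
/// use parking_lot::{RwLock, RwLockUpgradableReadGuard};
///
/// let lock = RwLock::new(1);
/// let upgradable = lock.upgradable_read();
/// assert_eq!(*upgradable, 1);
/// // Atomically upgrade to exclusive access when modification is needed.
/// let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
/// *writer += 1;
/// assert_eq!(*writer, 2);
/// ```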
pub type RwLockUpgradableReadGuard<'a, T> = lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;

#[cfg(test)]
mod tests {
    extern crate rand;
    use self::rand::Rng;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;
    use {RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read());
        drop(l.write());
        drop(l.upgradable_read());
        drop((l.read(), l.read()));
        drop((l.read(), l.upgradable_read()));
        drop(l.write());
    }

    #[test]
    fn frob() {
        const N: u32 = 10;
        const M: u32 = 1000;

        let r = Arc::new(RwLock::new(()));

        let (tx, rx) = channel::<()>();
        for _ in 0..N {
            let tx = tx.clone();
            let r = r.clone();
            thread::spawn(move || {
                let mut rng = rand::thread_rng();
                for _ in 0..M {
                    if rng.gen_bool(1.0 / N as f64) {
                        drop(r.write());
                    } else {
                        drop(r.read());
                    }
                }
                drop(tx);
            });
        }
        drop(tx);
        let _ = rx.recv();
    }

    #[test]
    fn test_rw_arc_no_poison_wr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.write();
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_ww() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.write();
            panic!();
        })
        .join();
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_rr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.read();
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_rw() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.read();
            panic!()
        })
        .join();
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_ruw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            for _ in 0..10 {
                let mut lock = arc2.write();
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        let mut children = Vec::new();

        // Upgradable readers try to catch the writer in the act and also
        // try to touch the value
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.upgradable_read();
                let tmp = *lock;
                assert!(tmp >= 0);
                thread::yield_now();
                let mut lock = RwLockUpgradableReadGuard::upgrade(lock);
                assert_eq!(tmp, *lock);
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }));
        }

        // Readers try to catch the writers in the act
        for _ in 0..5 {
            let arc4 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc4.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 15);
    }

    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            let mut lock = arc2.write();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 10);
    }

    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), comp);
    }

    #[test]
    fn test_rwlock_try_read() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let read_result = lock.try_read();
            assert!(
                read_result.is_some(),
                "try_read should succeed while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let read_result = lock.try_read();
            assert!(
                read_result.is_some(),
                "try_read should succeed while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let read_result = lock.try_read();
            assert!(
                read_result.is_none(),
                "try_read should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    #[test]
    fn test_rwlock_try_write() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    #[test]
    fn test_rwlock_try_upgrade() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_some(),
                "try_upgradable_read should succeed while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_none(),
                "try_upgradable_read should fail while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_none(),
                "try_upgradable_read should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_get_mut() {
        let mut m = RwLock::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }

    #[test]
    fn test_rwlockguard_sync() {
        fn sync<T: Sync>(_: T) {}

        let rwlock = RwLock::new(());
        sync(rwlock.read());
        sync(rwlock.write());
    }

    #[test]
    fn test_rwlock_downgrade() {
        let x = Arc::new(RwLock::new(0));
        let mut handles = Vec::new();
        for _ in 0..8 {
            let x = x.clone();
            handles.push(thread::spawn(move || {
                for _ in 0..100 {
                    let mut writer = x.write();
                    *writer += 1;
                    let cur_val = *writer;
                    let reader = RwLockWriteGuard::downgrade(writer);
                    assert_eq!(cur_val, *reader);
                }
            }));
        }
        for handle in handles {
            handle.join().unwrap()
        }
        assert_eq!(*x.read(), 800);
    }

    #[test]
    fn test_rwlock_recursive() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _lock1 = arc.read();
        thread::spawn(move || {
            let _lock = arc2.write();
        });
        thread::sleep(Duration::from_millis(100));

        // A normal read would block here since there is a pending writer
        let _lock2 = arc.read_recursive();
    }

    #[test]
    fn test_rwlock_debug() {
        let x = RwLock::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }");
        assert_eq!(
            format!("{:#?}", x),
            "RwLock {
    data: [
        0,
        10
    ]
}"
        );
        let _lock = x.write();
        assert_eq!(format!("{:?}", x), "RwLock { <locked> }");
    }

    #[test]
    fn test_clone() {
        let rwlock = RwLock::new(Arc::new(1));
        let a = rwlock.read_recursive();
        let b = a.clone();
        assert_eq!(Arc::strong_count(&b), 2);
    }
}