]>
Commit | Line | Data |
---|---|---|
83c7162d XL |
1 | // Copyright 2016 Amanieu d'Antras |
2 | // | |
3 | // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or | |
4 | // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or | |
5 | // http://opensource.org/licenses/MIT>, at your option. This file may not be | |
6 | // copied, modified, or distributed except according to those terms. | |
7 | ||
e1599b0c | 8 | use crate::raw_rwlock::RawRwLock; |
b7449926 | 9 | use lock_api; |
83c7162d | 10 | |
/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// This lock uses a task-fair locking policy which avoids both reader and
/// writer starvation. This means that readers trying to acquire the lock will
/// block even if the lock is unlocked when there are writers waiting to acquire
/// the lock. Because of this, attempts to recursively acquire a read lock
/// within a single thread may result in a deadlock.
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
///
/// # Fairness
///
/// A typical unfair lock can often end up in a situation where a single thread
/// quickly acquires and releases the same lock in succession, which can starve
/// other threads waiting to acquire the rwlock. While this improves performance
/// because it doesn't force a context switch when a thread tries to re-acquire
/// a rwlock it has just released, this can starve other threads.
///
/// This rwlock uses [eventual fairness](https://trac.webkit.org/changeset/203350)
/// to ensure that the lock will be fair on average without sacrificing
/// performance. This is done by forcing a fair unlock on average every 0.5ms,
/// which will force the lock to go to the next thread waiting for the rwlock.
///
/// Additionally, any critical section longer than 1ms will always use a fair
/// unlock, which has a negligible performance impact compared to the length of
/// the critical section.
///
/// You can also force a fair unlock by calling `RwLockReadGuard::unlock_fair`
/// or `RwLockWriteGuard::unlock_fair` when unlocking a rwlock instead of simply
/// dropping the guard.
///
/// # Differences from the standard library `RwLock`
///
/// - Supports atomically downgrading a write lock into a read lock.
/// - Task-fair locking policy instead of an unspecified platform default.
/// - No poisoning, the lock is released normally on panic.
/// - Only requires 1 word of space, whereas the standard library boxes the
///   `RwLock` due to platform limitations.
/// - Can be statically constructed (requires the `const_fn` nightly feature).
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard.
/// - Supports eventual fairness so that the rwlock is fair on average.
/// - Optionally allows making the rwlock fair by calling
///   `RwLockReadGuard::unlock_fair` and `RwLockWriteGuard::unlock_fair`.
///
/// # Examples
///
/// ```
/// use parking_lot::RwLock;
///
/// let lock = RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
pub type RwLock<T> = lock_api::RwLock<RawRwLock, T>;
/// RAII structure used to release the shared read access of a lock when
/// dropped.
///
/// Returned by the `read`-family methods on [`RwLock`] (see the `RwLock`
/// examples above, e.g. `lock.read()`).
pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>;
/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
///
/// Returned by the `write`-family methods on [`RwLock`] (see the `RwLock`
/// examples above, e.g. `lock.write()`).
pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawRwLock, T>;
83c7162d | 98 | |
/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
pub type MappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, RawRwLock, T>;
107 | ||
/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>;
/// RAII structure used to release the upgradable read access of a lock when
/// dropped.
///
/// An upgradable read guard can be atomically upgraded to a write guard via
/// `RwLockUpgradableReadGuard::upgrade` (see the `test_ruw_arc` test below).
pub type RwLockUpgradableReadGuard<'a, T> = lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;
83c7162d XL |
120 | |
#[cfg(test)]
mod tests {
    use crate::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
    use rand::Rng;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;

    #[cfg(feature = "serde")]
    use bincode::{deserialize, serialize};

    // Non-`Copy` payload used to verify that values move in and out of the
    // lock by ownership (see `test_into_inner` / `test_get_mut`).
    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    // Basic sanity: every guard kind can be acquired and released on an
    // uncontended lock, and compatible guards (read + read, read +
    // upgradable read) can be held simultaneously.
    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read());
        drop(l.write());
        drop(l.upgradable_read());
        drop((l.read(), l.read()));
        drop((l.read(), l.upgradable_read()));
        drop(l.write());
    }

    // Stress test: N threads each perform M randomly-chosen read or write
    // acquisitions. The channel is only used for lifetime tracking — once
    // every thread has finished, all `tx` clones are dropped and `recv`
    // returns an error, unblocking the main thread.
    #[test]
    fn frob() {
        const N: u32 = 10;
        const M: u32 = 1000;

        let r = Arc::new(RwLock::new(()));

        let (tx, rx) = channel::<()>();
        for _ in 0..N {
            let tx = tx.clone();
            let r = r.clone();
            thread::spawn(move || {
                let mut rng = rand::thread_rng();
                for _ in 0..M {
                    // On average one write for every N operations.
                    if rng.gen_bool(1.0 / N as f64) {
                        drop(r.write());
                    } else {
                        drop(r.read());
                    }
                }
                drop(tx);
            });
        }
        drop(tx);
        let _ = rx.recv();
    }

    // Unlike std's RwLock, a panic while holding a write guard must not
    // poison the lock: a subsequent read still succeeds and sees the data.
    #[test]
    fn test_rw_arc_no_poison_wr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.write();
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 1);
    }

    // No poisoning: panic under a write guard, then write again.
    #[test]
    fn test_rw_arc_no_poison_ww() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.write();
            panic!();
        })
        .join();
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }

    // No poisoning: panic under a read guard, then read again.
    #[test]
    fn test_rw_arc_no_poison_rr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.read();
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 1);
    }

    // No poisoning: panic under a read guard, then write.
    #[test]
    fn test_rw_arc_no_poison_rw() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.read();
            panic!()
        })
        .join();
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }

    // Exercises upgradable reads racing with a writer. Each writer-like
    // actor temporarily sets the value to -1 inside its critical section
    // and restores `tmp + 1` before unlocking, so observing -1 from any
    // guard would indicate a broken exclusion invariant. One plain-writer
    // thread does 10 increments and 5 upgrading threads do one each:
    // final value must be 15.
    #[test]
    fn test_ruw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            for _ in 0..10 {
                let mut lock = arc2.write();
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        let mut children = Vec::new();

        // Upgradable readers try to catch the writer in the act and also
        // try to touch the value
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.upgradable_read();
                let tmp = *lock;
                assert!(tmp >= 0);
                thread::yield_now();
                // Upgrade atomically: no other writer may run in between,
                // so the value read before the upgrade must still hold.
                let mut lock = RwLockUpgradableReadGuard::upgrade(lock);
                assert_eq!(tmp, *lock);
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }));
        }

        // Readers try to catch the writers in the act
        for _ in 0..5 {
            let arc4 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc4.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 15);
    }

    // Same -1 sentinel trick as `test_ruw_arc`, but with a single writer
    // holding the write guard across all 10 increments while plain readers
    // try to observe an intermediate (negative) state.
    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            let mut lock = arc2.write();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 10);
    }

    // The lock must be acquirable from a destructor running during panic
    // unwinding; the increment performed in `Drop` must be visible after
    // the panicking thread is joined.
    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

    // RwLock works with unsized contents (here, a slice) through unsize
    // coercion of the reference.
    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), comp);
    }

    // try_read succeeds alongside read and upgradable-read guards, and
    // fails (returns None) while a write guard is held.
    #[test]
    fn test_rwlock_try_read() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let read_result = lock.try_read();
            assert!(read_result.is_some(), "try_read should succeed while read_guard is in scope");

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let read_result = lock.try_read();
            assert!(
                read_result.is_some(),
                "try_read should succeed while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let read_result = lock.try_read();
            assert!(read_result.is_none(), "try_read should fail while write_guard is in scope");

            drop(write_guard);
        }
    }

    // try_write fails while any other guard kind (read, upgradable read,
    // or write) is held.
    #[test]
    fn test_rwlock_try_write() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let write_result = lock.try_write();
            assert!(write_result.is_none(), "try_write should fail while read_guard is in scope");

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let write_result = lock.try_write();
            assert!(write_result.is_none(), "try_write should fail while write_guard is in scope");

            drop(write_guard);
        }
    }

    // try_upgradable_read coexists with plain readers, but at most one
    // upgradable guard may exist at a time and none while a writer holds
    // the lock.
    #[test]
    fn test_rwlock_try_upgrade() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_some(),
                "try_upgradable_read should succeed while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_none(),
                "try_upgradable_read should fail while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_none(),
                "try_upgradable should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    // into_inner moves the protected value out of a consumed lock.
    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    // into_inner transfers ownership: the contained value is dropped
    // exactly once, when the extracted value goes out of scope — not when
    // the lock itself is consumed.
    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    // get_mut gives direct mutable access when the lock is held by value
    // (no guard needed, statically exclusive).
    #[test]
    fn test_get_mut() {
        let mut m = RwLock::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }

    // Compile-time check that both guard types are Sync (for Sync
    // contents).
    #[test]
    fn test_rwlockguard_sync() {
        fn sync<T: Sync>(_: T) {}

        let rwlock = RwLock::new(());
        sync(rwlock.read());
        sync(rwlock.write());
    }

    // Atomic downgrade: the value observed through the downgraded read
    // guard must equal the value written under the preceding write guard,
    // i.e. no other writer can sneak in during the downgrade. 8 threads x
    // 100 increments = 800.
    #[test]
    fn test_rwlock_downgrade() {
        let x = Arc::new(RwLock::new(0));
        let mut handles = Vec::new();
        for _ in 0..8 {
            let x = x.clone();
            handles.push(thread::spawn(move || {
                for _ in 0..100 {
                    let mut writer = x.write();
                    *writer += 1;
                    let cur_val = *writer;
                    let reader = RwLockWriteGuard::downgrade(writer);
                    assert_eq!(cur_val, *reader);
                }
            }));
        }
        for handle in handles {
            handle.join().unwrap()
        }
        assert_eq!(*x.read(), 800);
    }

    // read_recursive must succeed even though a writer is queued (a plain
    // read would block behind it under the task-fair policy, deadlocking
    // this thread which already holds a read guard).
    #[test]
    fn test_rwlock_recursive() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _lock1 = arc.read();
        thread::spawn(move || {
            let _lock = arc2.write();
        });

        // Give the spawned writer time to enqueue itself.
        if cfg!(not(all(target_env = "sgx", target_vendor = "fortanix"))) {
            thread::sleep(Duration::from_millis(100));
        } else {
            // FIXME: https://github.com/fortanix/rust-sgx/issues/31
            for _ in 0..100 {
                thread::yield_now();
            }
        }

        // A normal read would block here since there is a pending writer
        let _lock2 = arc.read_recursive();
    }

    // Debug output shows the data when unlocked and "<locked>" while a
    // write guard is outstanding.
    #[test]
    fn test_rwlock_debug() {
        let x = RwLock::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }");
        let _lock = x.write();
        assert_eq!(format!("{:?}", x), "RwLock { data: <locked> }");
    }

    // Read guards implement Clone; cloning here clones the guard's
    // referent access, and the cloned Arc bumps the strong count.
    // NOTE(review): assumes `a.clone()` clones the guard (second reader),
    // not the inner Arc — confirm against lock_api's guard Clone impl.
    #[test]
    fn test_clone() {
        let rwlock = RwLock::new(Arc::new(1));
        let a = rwlock.read_recursive();
        let b = a.clone();
        assert_eq!(Arc::strong_count(&b), 2);
    }

    // serde round-trip (only with the "serde" feature): serializing a lock
    // and deserializing yields a lock protecting an equal value.
    #[cfg(feature = "serde")]
    #[test]
    fn test_serde() {
        let contents: Vec<u8> = vec![0, 1, 2];
        let mutex = RwLock::new(contents.clone());

        let serialized = serialize(&mutex).unwrap();
        let deserialized: RwLock<Vec<u8>> = deserialize(&serialized).unwrap();

        assert_eq!(*(mutex.read()), *(deserialized.read()));
        assert_eq!(contents, *(deserialized.read()));
    }
}