// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use cell::UnsafeCell;
use fmt;
use marker;
use mem;
use ops::{Deref, DerefMut};
use ptr;
use sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
use sys_common::rwlock as sys;

/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The priority policy of the lock is dependent on the underlying operating
/// system's implementation, and this type does not guarantee that any
/// particular policy will be used.
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
///
/// # Poisoning
///
/// An `RwLock`, like `Mutex`, will become poisoned on a panic. Note, however,
/// that an `RwLock` may only be poisoned if a panic occurs while it is locked
/// exclusively (write mode). If a panic occurs in any reader, then the lock
/// will not be poisoned.
///
/// # Examples
///
/// ```
/// use std::sync::RwLock;
///
/// let lock = RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read().unwrap();
///     let r2 = lock.read().unwrap();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write().unwrap();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RwLock<T: ?Sized> {
    inner: Box<sys::RWLock>,
    poison: poison::Flag,
    data: UnsafeCell<T>,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Send + Sync> Send for RwLock<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}

/// RAII structure used to release the shared read access of a lock when
/// dropped.
///
/// This structure is created by the [`read()`] and [`try_read()`] methods on
/// [`RwLock`].
///
/// [`read()`]: struct.RwLock.html#method.read
/// [`try_read()`]: struct.RwLock.html#method.try_read
/// [`RwLock`]: struct.RwLock.html
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
    __lock: &'a RwLock<T>,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> !marker::Send for RwLockReadGuard<'a, T> {}

/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
///
/// This structure is created by the [`write()`] and [`try_write()`] methods
/// on [`RwLock`].
///
/// [`write()`]: struct.RwLock.html#method.write
/// [`try_write()`]: struct.RwLock.html#method.try_write
/// [`RwLock`]: struct.RwLock.html
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
    __lock: &'a RwLock<T>,
    __poison: poison::Guard,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> !marker::Send for RwLockWriteGuard<'a, T> {}

impl<T> RwLock<T> {
    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(5);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(t: T) -> RwLock<T> {
        RwLock {
            inner: box sys::RWLock::new(),
            poison: poison::Flag::new(),
            data: UnsafeCell::new(t),
        }
    }
}

impl<T: ?Sized> RwLock<T> {
    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns. This method does not provide any guarantees as to
    /// whether contending readers or writers will acquire the lock first.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    ///
    /// # Errors
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock.
    /// The failure will occur immediately after the lock has been acquired.
    ///
    /// # Panics
    ///
    /// This function might panic when called if the lock is already held by the current thread.
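    ///
    /// # Examples
    ///
    /// A minimal sketch of acquiring shared read access; `Arc` and `thread`
    /// from `std` are used only to illustrate sharing across threads:
    ///
    /// ```
    /// use std::sync::{Arc, RwLock};
    /// use std::thread;
    ///
    /// let lock = Arc::new(RwLock::new(1));
    /// let c_lock = lock.clone();
    ///
    /// let n = lock.read().unwrap();
    /// assert_eq!(*n, 1);
    ///
    /// thread::spawn(move || {
    ///     // Multiple readers may hold the lock at the same time.
    ///     let r = c_lock.read();
    ///     assert!(r.is_ok());
    /// }).join().unwrap();
    /// ```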
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
        unsafe {
            self.inner.read();
            RwLockReadGuard::new(self)
        }
    }

    /// Attempts to acquire this rwlock with shared read access.
    ///
    /// If the access could not be granted at this time, then `Err` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This function does not block.
    ///
    /// This function does not provide any guarantees as to whether contending
    /// readers or writers will acquire the lock first.
    ///
    /// # Errors
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock. An
    /// error will only be returned if the lock would have otherwise been
    /// acquired.
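    ///
    /// # Examples
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(1);
    ///
    /// match lock.try_read() {
    ///     Ok(n) => assert_eq!(*n, 1),
    ///     Err(_) => unreachable!(),
    /// };
    /// ```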
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<T>> {
        unsafe {
            if self.inner.try_read() {
                Ok(RwLockReadGuard::new(self)?)
            } else {
                Err(TryLockError::WouldBlock)
            }
        }
    }

    /// Locks this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this rwlock
    /// when dropped.
    ///
    /// # Errors
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock.
    /// The failure will occur immediately after the lock has been acquired.
    ///
    /// # Panics
    ///
    /// This function might panic when called if the lock is already held by the current thread.
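    ///
    /// # Examples
    ///
    /// A minimal sketch; while the write guard is alive, `try_read` reports
    /// that the lock would block:
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(1);
    ///
    /// let mut n = lock.write().unwrap();
    /// *n = 2;
    ///
    /// assert!(lock.try_read().is_err());
    /// ```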
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
        unsafe {
            self.inner.write();
            RwLockWriteGuard::new(self)
        }
    }

    /// Attempts to lock this rwlock with exclusive write access.
    ///
    /// If the lock could not be acquired at this time, then `Err` is returned.
    /// Otherwise, an RAII guard is returned which will release the lock when
    /// it is dropped.
    ///
    /// This function does not block.
    ///
    /// This function does not provide any guarantees as to whether contending
    /// readers or writers will acquire the lock first.
    ///
    /// # Errors
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock. An
    /// error will only be returned if the lock would have otherwise been
    /// acquired.
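    ///
    /// # Examples
    ///
    /// A minimal sketch; `try_write` fails while a read guard is held:
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(1);
    ///
    /// let n = lock.read().unwrap();
    /// assert_eq!(*n, 1);
    ///
    /// assert!(lock.try_write().is_err());
    /// ```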
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<T>> {
        unsafe {
            if self.inner.try_write() {
                Ok(RwLockWriteGuard::new(self)?)
            } else {
                Err(TryLockError::WouldBlock)
            }
        }
    }

    /// Determines whether the lock is poisoned.
    ///
    /// If another thread is active, the lock can still become poisoned at any
    /// time. You should not trust a `false` value for program correctness
    /// without additional synchronization.
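    ///
    /// # Examples
    ///
    /// A minimal sketch of observing poisoning after a writer panics; `Arc`
    /// and `thread` are used only to get a panicking writer on another thread:
    ///
    /// ```
    /// use std::sync::{Arc, RwLock};
    /// use std::thread;
    ///
    /// let lock = Arc::new(RwLock::new(0));
    /// let c_lock = lock.clone();
    ///
    /// let _ = thread::spawn(move || {
    ///     let _lock = c_lock.write().unwrap();
    ///     panic!(); // the lock gets poisoned
    /// }).join();
    /// assert_eq!(lock.is_poisoned(), true);
    /// ```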
    #[inline]
    #[stable(feature = "sync_poison", since = "1.2.0")]
    pub fn is_poisoned(&self) -> bool {
        self.poison.get()
    }

    /// Consumes this `RwLock`, returning the underlying data.
    ///
    /// # Errors
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock. An
    /// error will only be returned if the lock would have otherwise been
    /// acquired.
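    ///
    /// # Examples
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(String::new());
    /// {
    ///     let mut contents = lock.write().unwrap();
    ///     contents.push_str("abcd");
    /// }
    /// assert_eq!(lock.into_inner().unwrap(), "abcd");
    /// ```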
    #[stable(feature = "rwlock_into_inner", since = "1.6.0")]
    pub fn into_inner(self) -> LockResult<T> where T: Sized {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock the inner lock.
        //
        // To get the inner value, we'd like to call `data.into_inner()`,
        // but because `RwLock` impl-s `Drop`, we can't move out of it, so
        // we'll have to destructure it manually instead.
        unsafe {
            // Like `let RwLock { inner, poison, data } = self`.
            let (inner, poison, data) = {
                let RwLock { ref inner, ref poison, ref data } = self;
                (ptr::read(inner), ptr::read(poison), ptr::read(data))
            };
            mem::forget(self);
            inner.destroy(); // Keep in sync with the `Drop` impl.
            drop(inner);

            poison::map_result(poison.borrow(), |_| data.into_inner())
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
    /// take place---the mutable borrow statically guarantees no locks exist.
    ///
    /// # Errors
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock. An
    /// error will only be returned if the lock would have otherwise been
    /// acquired.
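    ///
    /// # Examples
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let mut lock = RwLock::new(0);
    /// *lock.get_mut().unwrap() = 10;
    /// assert_eq!(*lock.read().unwrap(), 10);
    /// ```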
    #[stable(feature = "rwlock_get_mut", since = "1.6.0")]
    pub fn get_mut(&mut self) -> LockResult<&mut T> {
        // We know statically that there are no other references to `self`, so
        // there's no need to lock the inner lock.
        let data = unsafe { &mut *self.data.get() };
        poison::map_result(self.poison.borrow(), |_| data)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for RwLock<T> {
    #[unsafe_destructor_blind_to_params]
    fn drop(&mut self) {
        // IMPORTANT: This code needs to be kept in sync with `RwLock::into_inner`.
        unsafe { self.inner.destroy() }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.try_read() {
            Ok(guard) => write!(f, "RwLock {{ data: {:?} }}", &*guard),
            Err(TryLockError::Poisoned(err)) => {
                write!(f, "RwLock {{ data: Poisoned({:?}) }}", &**err.get_ref())
            },
            Err(TryLockError::WouldBlock) => write!(f, "RwLock {{ <locked> }}")
        }
    }
}

#[stable(feature = "rw_lock_default", since = "1.9.0")]
impl<T: Default> Default for RwLock<T> {
    /// Creates a new `RwLock<T>`, with the `Default` value for `T`.
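    ///
    /// # Examples
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock: RwLock<i32> = Default::default();
    /// assert_eq!(*lock.read().unwrap(), 0);
    /// ```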
    fn default() -> RwLock<T> {
        RwLock::new(Default::default())
    }
}

impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
    unsafe fn new(lock: &'rwlock RwLock<T>)
                  -> LockResult<RwLockReadGuard<'rwlock, T>> {
        poison::map_result(lock.poison.borrow(), |_| {
            RwLockReadGuard {
                __lock: lock,
            }
        })
    }
}

impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
    unsafe fn new(lock: &'rwlock RwLock<T>)
                  -> LockResult<RwLockWriteGuard<'rwlock, T>> {
        poison::map_result(lock.poison.borrow(), |guard| {
            RwLockWriteGuard {
                __lock: lock,
                __poison: guard,
            }
        })
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &*self.__lock.data.get() }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &*self.__lock.data.get() }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.__lock.data.get() }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> {
    fn drop(&mut self) {
        unsafe { self.__lock.inner.read_unlock(); }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> {
    fn drop(&mut self) {
        self.__lock.poison.done(&self.__poison);
        unsafe { self.__lock.inner.write_unlock(); }
    }
}

#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
    #![allow(deprecated)] // rand

    use rand::{self, Rng};
    use sync::mpsc::channel;
    use thread;
    use sync::{Arc, RwLock, TryLockError};
    use sync::atomic::{AtomicUsize, Ordering};

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read().unwrap());
        drop(l.write().unwrap());
        drop((l.read().unwrap(), l.read().unwrap()));
        drop(l.write().unwrap());
    }

    #[test]
    fn frob() {
        const N: usize = 10;
        const M: usize = 1000;

        let r = Arc::new(RwLock::new(()));

        let (tx, rx) = channel::<()>();
        for _ in 0..N {
            let tx = tx.clone();
            let r = r.clone();
            thread::spawn(move || {
                let mut rng = rand::thread_rng();
                for _ in 0..M {
                    if rng.gen_weighted_bool(N) {
                        drop(r.write().unwrap());
                    } else {
                        drop(r.read().unwrap());
                    }
                }
                drop(tx);
            });
        }
        drop(tx);
        let _ = rx.recv();
    }

    #[test]
    fn test_rw_arc_poison_wr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.write().unwrap();
            panic!();
        }).join();
        assert!(arc.read().is_err());
    }

    #[test]
    fn test_rw_arc_poison_ww() {
        let arc = Arc::new(RwLock::new(1));
        assert!(!arc.is_poisoned());
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.write().unwrap();
            panic!();
        }).join();
        assert!(arc.write().is_err());
        assert!(arc.is_poisoned());
    }

    #[test]
    fn test_rw_arc_no_poison_rr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.read().unwrap();
            panic!();
        }).join();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_rw() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.read().unwrap();
            panic!()
        }).join();
        let lock = arc.write().unwrap();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move|| {
            let mut lock = arc2.write().unwrap();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move|| {
                let lock = arc3.read().unwrap();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 10);
    }

    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move|| -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write().unwrap();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        }).join();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write().unwrap();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read().unwrap(), comp);
    }

    #[test]
    fn test_rwlock_try_write() {
        let lock = RwLock::new(0isize);
        let read_guard = lock.read().unwrap();

        let write_result = lock.try_write();
        match write_result {
            Err(TryLockError::WouldBlock) => (),
            Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
            Err(_) => assert!(false, "unexpected error"),
        }

        drop(read_guard);
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner().unwrap(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner().unwrap();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_into_inner_poison() {
        let m = Arc::new(RwLock::new(NonCopy(10)));
        let m2 = m.clone();
        let _ = thread::spawn(move || {
            let _lock = m2.write().unwrap();
            panic!("test panic in inner thread to poison RwLock");
        }).join();

        assert!(m.is_poisoned());
        match Arc::try_unwrap(m).unwrap().into_inner() {
            Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
            Ok(x) => panic!("into_inner of poisoned RwLock is Ok: {:?}", x),
        }
    }

    #[test]
    fn test_get_mut() {
        let mut m = RwLock::new(NonCopy(10));
        *m.get_mut().unwrap() = NonCopy(20);
        assert_eq!(m.into_inner().unwrap(), NonCopy(20));
    }

    #[test]
    fn test_get_mut_poison() {
        let m = Arc::new(RwLock::new(NonCopy(10)));
        let m2 = m.clone();
        let _ = thread::spawn(move || {
            let _lock = m2.write().unwrap();
            panic!("test panic in inner thread to poison RwLock");
        }).join();

        assert!(m.is_poisoned());
        match Arc::try_unwrap(m).unwrap().get_mut() {
            Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
            Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {:?}", x),
        }
    }
}