// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use prelude::v1::*;

use cell::UnsafeCell;
use fmt;
use marker;
use mem;
use ops::{Deref, DerefMut};
use ptr;
use sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
use sys_common::rwlock as sys;

/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The priority policy of the lock is dependent on the underlying operating
/// system's implementation, and this type does not guarantee that any
/// particular policy will be used.
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the content of the lock.
///
/// # Poisoning
///
/// RwLocks, like Mutexes, will become poisoned on panics. Note, however, that
/// an RwLock may only be poisoned if a panic occurs while it is locked
/// exclusively (write mode). If a panic occurs in any reader, then the lock
/// will not be poisoned.
///
/// # Examples
///
/// ```
/// use std::sync::RwLock;
///
/// let lock = RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read().unwrap();
///     let r2 = lock.read().unwrap();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write().unwrap();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RwLock<T: ?Sized> {
    inner: Box<StaticRwLock>,
    data: UnsafeCell<T>,
}

unsafe impl<T: ?Sized + Send + Sync> Send for RwLock<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}

/// Structure representing a statically allocated RwLock.
///
/// This structure is intended to be used inside of a `static` and will provide
/// automatic global access as well as lazy initialization. The internal
/// resources of this RwLock, however, must be manually deallocated.
///
/// # Examples
///
/// ```
/// #![feature(static_rwlock)]
///
/// use std::sync::{StaticRwLock, RW_LOCK_INIT};
///
/// static LOCK: StaticRwLock = RW_LOCK_INIT;
///
/// {
///     let _g = LOCK.read().unwrap();
///     // ... shared read access
/// }
/// {
///     let _g = LOCK.write().unwrap();
///     // ... exclusive write access
/// }
/// unsafe { LOCK.destroy() } // free all resources
/// ```
#[unstable(feature = "static_rwlock",
           reason = "may be merged with RwLock in the future",
           issue = "27717")]
pub struct StaticRwLock {
    lock: sys::RWLock,
    poison: poison::Flag,
}

/// Constant initialization for a statically-initialized rwlock.
#[unstable(feature = "static_rwlock",
           reason = "may be merged with RwLock in the future",
           issue = "27717")]
pub const RW_LOCK_INIT: StaticRwLock = StaticRwLock::new();

/// RAII structure used to release the shared read access of a lock when
/// dropped.
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
    __lock: &'a StaticRwLock,
    __data: &'a UnsafeCell<T>,
}

impl<'a, T: ?Sized> !marker::Send for RwLockReadGuard<'a, T> {}

/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
    __lock: &'a StaticRwLock,
    __data: &'a UnsafeCell<T>,
    __poison: poison::Guard,
}

impl<'a, T: ?Sized> !marker::Send for RwLockWriteGuard<'a, T> {}

impl<T> RwLock<T> {
    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(5);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(t: T) -> RwLock<T> {
        RwLock { inner: box StaticRwLock::new(), data: UnsafeCell::new(t) }
    }
}

impl<T: ?Sized> RwLock<T> {
    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no writers holding
    /// the lock. There may be other readers currently inside the lock when
    /// this method returns. This method does not provide any guarantees with
    /// respect to the ordering of whether contending readers or writers will
    /// acquire the lock first.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    ///
    /// # Failure
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock.
    /// The failure will occur immediately after the lock has been acquired.
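    ///
    /// # Examples
    ///
    /// A minimal usage sketch: take shared access and read the protected value.
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(1);
    ///
    /// let n = lock.read().unwrap();
    /// assert_eq!(*n, 1);
    /// ```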
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
        unsafe { self.inner.lock.read() }
        RwLockReadGuard::new(&*self.inner, &self.data)
    }

    /// Attempts to acquire this rwlock with shared read access.
    ///
    /// If the access could not be granted at this time, then `Err` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This function does not block.
    ///
    /// This function does not provide any guarantees with respect to the ordering
    /// of whether contending readers or writers will acquire the lock first.
    ///
    /// # Failure
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock. An
    /// error will only be returned if the lock would have otherwise been
    /// acquired.
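    ///
    /// # Examples
    ///
    /// A minimal sketch: with no writer holding the lock, the attempt succeeds.
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(1);
    ///
    /// let n = lock.try_read();
    /// assert!(n.is_ok());
    /// ```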
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<T>> {
        if unsafe { self.inner.lock.try_read() } {
            Ok(try!(RwLockReadGuard::new(&*self.inner, &self.data)))
        } else {
            Err(TryLockError::WouldBlock)
        }
    }

    /// Locks this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this rwlock
    /// when dropped.
    ///
    /// # Failure
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock.
    /// The failure will occur immediately after the lock has been acquired.
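    ///
    /// # Examples
    ///
    /// A minimal sketch: take exclusive access and mutate the protected value.
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(1);
    ///
    /// let mut n = lock.write().unwrap();
    /// *n = 2;
    /// assert_eq!(*n, 2);
    /// ```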
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
        unsafe { self.inner.lock.write() }
        RwLockWriteGuard::new(&*self.inner, &self.data)
    }

    /// Attempts to lock this rwlock with exclusive write access.
    ///
    /// If the lock could not be acquired at this time, then `Err` is returned.
    /// Otherwise, an RAII guard is returned which will release the lock when
    /// it is dropped.
    ///
    /// This function does not block.
    ///
    /// This function does not provide any guarantees with respect to the ordering
    /// of whether contending readers or writers will acquire the lock first.
    ///
    /// # Failure
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock. An
    /// error will only be returned if the lock would have otherwise been
    /// acquired.
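    ///
    /// # Examples
    ///
    /// A minimal sketch: the attempt fails while a read guard is still alive,
    /// and succeeds once that guard is dropped.
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(1);
    ///
    /// let n = lock.read().unwrap();
    /// assert!(lock.try_write().is_err());
    /// drop(n);
    ///
    /// assert!(lock.try_write().is_ok());
    /// ```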
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<T>> {
        if unsafe { self.inner.lock.try_write() } {
            Ok(try!(RwLockWriteGuard::new(&*self.inner, &self.data)))
        } else {
            Err(TryLockError::WouldBlock)
        }
    }

    /// Determines whether the lock is poisoned.
    ///
    /// If another thread is active, the lock can still become poisoned at any
    /// time. You should not trust a `false` value for program correctness
    /// without additional synchronization.
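    ///
    /// # Examples
    ///
    /// A minimal sketch: a panic while holding the write lock poisons it.
    ///
    /// ```
    /// use std::sync::{Arc, RwLock};
    /// use std::thread;
    ///
    /// let lock = Arc::new(RwLock::new(0));
    /// let c_lock = lock.clone();
    ///
    /// let _ = thread::spawn(move || {
    ///     let _guard = c_lock.write().unwrap();
    ///     panic!(); // the lock is poisoned here
    /// }).join();
    /// assert_eq!(lock.is_poisoned(), true);
    /// ```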
    #[inline]
    #[stable(feature = "sync_poison", since = "1.2.0")]
    pub fn is_poisoned(&self) -> bool {
        self.inner.poison.get()
    }

    /// Consumes this `RwLock`, returning the underlying data.
    ///
    /// # Failure
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock. An
    /// error will only be returned if the lock would have otherwise been
    /// acquired.
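    ///
    /// # Examples
    ///
    /// A minimal sketch: consume the lock and recover the protected value.
    ///
    /// ```
    /// #![feature(rwlock_into_inner)]
    ///
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(5);
    /// assert_eq!(lock.into_inner().unwrap(), 5);
    /// ```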
    #[unstable(feature = "rwlock_into_inner", reason = "recently added", issue = "28968")]
    pub fn into_inner(self) -> LockResult<T> where T: Sized {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock the inner StaticRwLock.
        //
        // To get the inner value, we'd like to call `data.into_inner()`,
        // but because `RwLock` impl-s `Drop`, we can't move out of it, so
        // we'll have to destructure it manually instead.
        unsafe {
            // Like `let RwLock { inner, data } = self`.
            let (inner, data) = {
                let RwLock { ref inner, ref data } = self;
                (ptr::read(inner), ptr::read(data))
            };
            mem::forget(self);
            inner.lock.destroy(); // Keep in sync with the `Drop` impl.

            poison::map_result(inner.poison.borrow(), |_| data.into_inner())
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
    /// take place---the mutable borrow statically guarantees no locks exist.
    ///
    /// # Failure
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock. An
    /// error will only be returned if the lock would have otherwise been
    /// acquired.
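    ///
    /// # Examples
    ///
    /// A minimal sketch: mutate the data through the exclusive borrow.
    ///
    /// ```
    /// #![feature(rwlock_get_mut)]
    ///
    /// use std::sync::RwLock;
    ///
    /// let mut lock = RwLock::new(0);
    /// *lock.get_mut().unwrap() = 10;
    /// assert_eq!(*lock.read().unwrap(), 10);
    /// ```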
    #[unstable(feature = "rwlock_get_mut", reason = "recently added", issue = "28968")]
    pub fn get_mut(&mut self) -> LockResult<&mut T> {
        // We know statically that there are no other references to `self`, so
        // there's no need to lock the inner StaticRwLock.
        let data = unsafe { &mut *self.data.get() };
        poison::map_result(self.inner.poison.borrow(), |_| data )
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for RwLock<T> {
    #[unsafe_destructor_blind_to_params]
    fn drop(&mut self) {
        // IMPORTANT: This code needs to be kept in sync with `RwLock::into_inner`.
        unsafe { self.inner.lock.destroy() }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.try_read() {
            Ok(guard) => write!(f, "RwLock {{ data: {:?} }}", &*guard),
            Err(TryLockError::Poisoned(err)) => {
                write!(f, "RwLock {{ data: Poisoned({:?}) }}", &**err.get_ref())
            },
            Err(TryLockError::WouldBlock) => write!(f, "RwLock {{ <locked> }}")
        }
    }
}

struct Dummy(UnsafeCell<()>);
unsafe impl Sync for Dummy {}
static DUMMY: Dummy = Dummy(UnsafeCell::new(()));

#[unstable(feature = "static_rwlock",
           reason = "may be merged with RwLock in the future",
           issue = "27717")]
impl StaticRwLock {
    /// Creates a new rwlock.
    pub const fn new() -> StaticRwLock {
        StaticRwLock {
            lock: sys::RWLock::new(),
            poison: poison::Flag::new(),
        }
    }

    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// See `RwLock::read`.
    #[inline]
    pub fn read(&'static self) -> LockResult<RwLockReadGuard<'static, ()>> {
        unsafe { self.lock.read() }
        RwLockReadGuard::new(self, &DUMMY.0)
    }

    /// Attempts to acquire this lock with shared read access.
    ///
    /// See `RwLock::try_read`.
    #[inline]
    pub fn try_read(&'static self)
                    -> TryLockResult<RwLockReadGuard<'static, ()>> {
        if unsafe { self.lock.try_read() } {
            Ok(try!(RwLockReadGuard::new(self, &DUMMY.0)))
        } else {
            Err(TryLockError::WouldBlock)
        }
    }

    /// Locks this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// See `RwLock::write`.
    #[inline]
    pub fn write(&'static self) -> LockResult<RwLockWriteGuard<'static, ()>> {
        unsafe { self.lock.write() }
        RwLockWriteGuard::new(self, &DUMMY.0)
    }

    /// Attempts to lock this rwlock with exclusive write access.
    ///
    /// See `RwLock::try_write`.
    #[inline]
    pub fn try_write(&'static self)
                     -> TryLockResult<RwLockWriteGuard<'static, ()>> {
        if unsafe { self.lock.try_write() } {
            Ok(try!(RwLockWriteGuard::new(self, &DUMMY.0)))
        } else {
            Err(TryLockError::WouldBlock)
        }
    }

    /// Deallocates all resources associated with this static lock.
    ///
    /// This method is unsafe to call as there is no guarantee that there are no
    /// active users of the lock, and it also does not prevent any future users
    /// of this lock. This method must be called to avoid leaking memory on all
    /// platforms.
    pub unsafe fn destroy(&'static self) {
        self.lock.destroy()
    }
}

impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
    fn new(lock: &'rwlock StaticRwLock, data: &'rwlock UnsafeCell<T>)
           -> LockResult<RwLockReadGuard<'rwlock, T>> {
        poison::map_result(lock.poison.borrow(), |_| {
            RwLockReadGuard {
                __lock: lock,
                __data: data,
            }
        })
    }
}

impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
    fn new(lock: &'rwlock StaticRwLock, data: &'rwlock UnsafeCell<T>)
           -> LockResult<RwLockWriteGuard<'rwlock, T>> {
        poison::map_result(lock.poison.borrow(), |guard| {
            RwLockWriteGuard {
                __lock: lock,
                __data: data,
                __poison: guard,
            }
        })
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T { unsafe { &*self.__data.get() } }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T { unsafe { &*self.__data.get() } }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.__data.get() }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> {
    fn drop(&mut self) {
        unsafe { self.__lock.lock.read_unlock(); }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> {
    fn drop(&mut self) {
        self.__lock.poison.done(&self.__poison);
        unsafe { self.__lock.lock.write_unlock(); }
    }
}

#[cfg(test)]
mod tests {
    #![allow(deprecated)] // rand

    use prelude::v1::*;

    use rand::{self, Rng};
    use sync::mpsc::channel;
    use thread;
    use sync::{Arc, RwLock, StaticRwLock, TryLockError};
    use sync::atomic::{AtomicUsize, Ordering};

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read().unwrap());
        drop(l.write().unwrap());
        drop((l.read().unwrap(), l.read().unwrap()));
        drop(l.write().unwrap());
    }

    #[test]
    fn static_smoke() {
        static R: StaticRwLock = StaticRwLock::new();
        drop(R.read().unwrap());
        drop(R.write().unwrap());
        drop((R.read().unwrap(), R.read().unwrap()));
        drop(R.write().unwrap());
        unsafe { R.destroy(); }
    }

    #[test]
    fn frob() {
        static R: StaticRwLock = StaticRwLock::new();
        const N: usize = 10;
        const M: usize = 1000;

        let (tx, rx) = channel::<()>();
        for _ in 0..N {
            let tx = tx.clone();
            thread::spawn(move|| {
                let mut rng = rand::thread_rng();
                for _ in 0..M {
                    if rng.gen_weighted_bool(N) {
                        drop(R.write().unwrap());
                    } else {
                        drop(R.read().unwrap());
                    }
                }
                drop(tx);
            });
        }
        drop(tx);
        let _ = rx.recv();
        unsafe { R.destroy(); }
    }

    #[test]
    fn test_rw_arc_poison_wr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.write().unwrap();
            panic!();
        }).join();
        assert!(arc.read().is_err());
    }

    #[test]
    fn test_rw_arc_poison_ww() {
        let arc = Arc::new(RwLock::new(1));
        assert!(!arc.is_poisoned());
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.write().unwrap();
            panic!();
        }).join();
        assert!(arc.write().is_err());
        assert!(arc.is_poisoned());
    }

    #[test]
    fn test_rw_arc_no_poison_rr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.read().unwrap();
            panic!();
        }).join();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_rw() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.read().unwrap();
            panic!()
        }).join();
        let lock = arc.write().unwrap();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move|| {
            let mut lock = arc2.write().unwrap();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move|| {
                let lock = arc3.read().unwrap();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 10);
    }

    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move|| -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write().unwrap();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        }).join();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write().unwrap();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read().unwrap(), comp);
    }

    #[test]
    fn test_rwlock_try_write() {
        use mem::drop;

        let lock = RwLock::new(0isize);
        let read_guard = lock.read().unwrap();

        let write_result = lock.try_write();
        match write_result {
            Err(TryLockError::WouldBlock) => (),
            Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
            Err(_) => assert!(false, "unexpected error"),
        }

        drop(read_guard);
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner().unwrap(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner().unwrap();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_into_inner_poison() {
        let m = Arc::new(RwLock::new(NonCopy(10)));
        let m2 = m.clone();
        let _ = thread::spawn(move || {
            let _lock = m2.write().unwrap();
            panic!("test panic in inner thread to poison RwLock");
        }).join();

        assert!(m.is_poisoned());
        match Arc::try_unwrap(m).unwrap().into_inner() {
            Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
            Ok(x) => panic!("into_inner of poisoned RwLock is Ok: {:?}", x),
        }
    }

    #[test]
    fn test_get_mut() {
        let mut m = RwLock::new(NonCopy(10));
        *m.get_mut().unwrap() = NonCopy(20);
        assert_eq!(m.into_inner().unwrap(), NonCopy(20));
    }

    #[test]
    fn test_get_mut_poison() {
        let m = Arc::new(RwLock::new(NonCopy(10)));
        let m2 = m.clone();
        let _ = thread::spawn(move || {
            let _lock = m2.write().unwrap();
            panic!("test panic in inner thread to poison RwLock");
        }).join();

        assert!(m.is_poisoned());
        match Arc::try_unwrap(m).unwrap().get_mut() {
            Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
            Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {:?}", x),
        }
    }
}