// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use prelude::v1::*;

use cell::UnsafeCell;
use fmt;
use marker;
use ops::{Deref, DerefMut};
use sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
use sys_common::rwlock as sys;

/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The priority policy of the lock is dependent on the underlying operating
/// system's implementation, and this type does not guarantee that any
/// particular policy will be used.
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
///
/// # Poisoning
///
/// RwLocks, like Mutexes, will become poisoned on panics. Note, however, that
/// an RwLock may only be poisoned if a panic occurs while it is locked
/// exclusively (write mode). If a panic occurs in any reader, then the lock
/// will not be poisoned.
///
/// # Examples
///
/// ```
/// use std::sync::RwLock;
///
/// let lock = RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read().unwrap();
///     let r2 = lock.read().unwrap();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write().unwrap();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
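///
/// Poisoning in practice: the sketch below (an illustrative addition, not
/// part of the original documentation) shows a writer panicking and the lock
/// subsequently reporting the poisoned state.
///
/// ```
/// use std::sync::{Arc, RwLock};
/// use std::thread;
///
/// let lock = Arc::new(RwLock::new(0));
/// let lock2 = lock.clone();
///
/// // The spawned thread panics while holding the write guard, which
/// // poisons the lock; `join` surfaces the panic as an `Err`.
/// let _ = thread::spawn(move || {
///     let _guard = lock2.write().unwrap();
///     panic!("poisoning the lock");
/// }).join();
///
/// assert!(lock.is_poisoned());
/// assert!(lock.read().is_err());
/// ```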
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RwLock<T: ?Sized> {
    inner: Box<StaticRwLock>,
    data: UnsafeCell<T>,
}

unsafe impl<T: ?Sized + Send + Sync> Send for RwLock<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}

/// Structure representing a statically allocated RwLock.
///
/// This structure is intended to be used inside of a `static` and will provide
/// automatic global access as well as lazy initialization. The internal
/// resources of this RwLock, however, must be manually deallocated.
///
/// # Examples
///
/// ```
/// # #![feature(static_rwlock)]
/// use std::sync::{StaticRwLock, RW_LOCK_INIT};
///
/// static LOCK: StaticRwLock = RW_LOCK_INIT;
///
/// {
///     let _g = LOCK.read().unwrap();
///     // ... shared read access
/// }
/// {
///     let _g = LOCK.write().unwrap();
///     // ... exclusive write access
/// }
/// unsafe { LOCK.destroy() } // free all resources
/// ```
#[unstable(feature = "static_rwlock",
           reason = "may be merged with RwLock in the future")]
pub struct StaticRwLock {
    lock: sys::RWLock,
    poison: poison::Flag,
}

/// Constant initialization for a statically-initialized rwlock.
#[unstable(feature = "static_rwlock",
           reason = "may be merged with RwLock in the future")]
pub const RW_LOCK_INIT: StaticRwLock = StaticRwLock::new();

/// RAII structure used to release the shared read access of a lock when
/// dropped.
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
    __lock: &'a StaticRwLock,
    __data: &'a UnsafeCell<T>,
}

impl<'a, T: ?Sized> !marker::Send for RwLockReadGuard<'a, T> {}

/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
    __lock: &'a StaticRwLock,
    __data: &'a UnsafeCell<T>,
    __poison: poison::Guard,
}

impl<'a, T: ?Sized> !marker::Send for RwLockWriteGuard<'a, T> {}

impl<T> RwLock<T> {
    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(5);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(t: T) -> RwLock<T> {
        RwLock { inner: box StaticRwLock::new(), data: UnsafeCell::new(t) }
    }
}

impl<T: ?Sized> RwLock<T> {
    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers
    /// which hold the lock. There may be other readers currently inside the
    /// lock when this method returns. This method makes no guarantees about
    /// the order in which contending readers or writers will acquire the
    /// lock.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    ///
    /// # Failure
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock.
    /// The failure will occur immediately after the lock has been acquired.
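    ///
    /// # Examples
    ///
    /// A minimal usage sketch (an illustrative addition, not part of the
    /// original documentation): several readers may hold the lock at once.
    ///
    /// ```
    /// use std::sync::{Arc, RwLock};
    /// use std::thread;
    ///
    /// let lock = Arc::new(RwLock::new(1));
    /// let lock2 = lock.clone();
    ///
    /// // The main thread already holds a read guard...
    /// let n = lock.read().unwrap();
    /// assert_eq!(*n, 1);
    ///
    /// // ...and another thread may acquire a second one concurrently.
    /// thread::spawn(move || {
    ///     let r = lock2.read();
    ///     assert!(r.is_ok());
    /// }).join().unwrap();
    /// ```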
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
        unsafe { self.inner.lock.read() }
        RwLockReadGuard::new(&*self.inner, &self.data)
    }

    /// Attempts to acquire this rwlock with shared read access.
    ///
    /// If the access could not be granted at this time, then `Err` is
    /// returned. Otherwise, an RAII guard is returned which will release the
    /// shared access when it is dropped.
    ///
    /// This function does not block.
    ///
    /// This function makes no guarantees about the order in which contending
    /// readers or writers will acquire the lock.
    ///
    /// # Failure
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock.
    /// An error will only be returned if the lock would have otherwise been
    /// acquired.
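    ///
    /// # Examples
    ///
    /// A minimal usage sketch (an illustrative addition, not part of the
    /// original documentation): a non-blocking attempt to acquire shared
    /// read access.
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(1);
    ///
    /// // Nothing else holds the lock, so the attempt succeeds immediately.
    /// match lock.try_read() {
    ///     Ok(n) => assert_eq!(*n, 1),
    ///     Err(_) => unreachable!(),
    /// }
    /// ```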
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<T>> {
        if unsafe { self.inner.lock.try_read() } {
            Ok(try!(RwLockReadGuard::new(&*self.inner, &self.data)))
        } else {
            Err(TryLockError::WouldBlock)
        }
    }

    /// Locks this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this rwlock
    /// when dropped.
    ///
    /// # Failure
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock.
    /// The failure will occur immediately after the lock has been acquired.
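    ///
    /// # Examples
    ///
    /// A minimal usage sketch (an illustrative addition, not part of the
    /// original documentation): exclusive access allows mutation through the
    /// guard.
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(1);
    ///
    /// let mut n = lock.write().unwrap();
    /// *n = 2;
    /// assert_eq!(*n, 2);
    /// ```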
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
        unsafe { self.inner.lock.write() }
        RwLockWriteGuard::new(&*self.inner, &self.data)
    }

    /// Attempts to lock this rwlock with exclusive write access.
    ///
    /// If the lock could not be acquired at this time, then `Err` is returned.
    /// Otherwise, an RAII guard is returned which will release the lock when
    /// it is dropped.
    ///
    /// This function does not block.
    ///
    /// This function makes no guarantees about the order in which contending
    /// readers or writers will acquire the lock.
    ///
    /// # Failure
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock.
    /// An error will only be returned if the lock would have otherwise been
    /// acquired.
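    ///
    /// # Examples
    ///
    /// A minimal usage sketch (an illustrative addition, not part of the
    /// original documentation): while a read guard is live, `try_write`
    /// reports that it would block.
    ///
    /// ```
    /// use std::sync::{RwLock, TryLockError};
    ///
    /// let lock = RwLock::new(1);
    ///
    /// let r = lock.read().unwrap();
    /// match lock.try_write() {
    ///     // While the read guard is alive, exclusive access is refused.
    ///     Err(TryLockError::WouldBlock) => (),
    ///     _ => unreachable!(),
    /// }
    /// drop(r);
    ///
    /// // With the reader gone, exclusive access can be acquired.
    /// assert!(lock.try_write().is_ok());
    /// ```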
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<T>> {
        if unsafe { self.inner.lock.try_write() } {
            Ok(try!(RwLockWriteGuard::new(&*self.inner, &self.data)))
        } else {
            Err(TryLockError::WouldBlock)
        }
    }

    /// Determines whether the lock is poisoned.
    ///
    /// If another thread is active, the lock can still become poisoned at any
    /// time. You should not trust a `false` value for program correctness
    /// without additional synchronization.
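    ///
    /// # Examples
    ///
    /// A minimal usage sketch (an illustrative addition, not part of the
    /// original documentation): a panicking writer flips the poison flag.
    ///
    /// ```
    /// use std::sync::{Arc, RwLock};
    /// use std::thread;
    ///
    /// let lock = Arc::new(RwLock::new(0));
    /// let lock2 = lock.clone();
    ///
    /// let _ = thread::spawn(move || {
    ///     let _guard = lock2.write().unwrap();
    ///     panic!(); // the lock is poisoned while write-locked
    /// }).join();
    ///
    /// assert!(lock.is_poisoned());
    /// ```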
    #[inline]
    #[stable(feature = "sync_poison", since = "1.2.0")]
    pub fn is_poisoned(&self) -> bool {
        self.inner.poison.get()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for RwLock<T> {
    fn drop(&mut self) {
        unsafe { self.inner.lock.destroy() }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.try_read() {
            Ok(guard) => write!(f, "RwLock {{ data: {:?} }}", &*guard),
            Err(TryLockError::Poisoned(err)) => {
                write!(f, "RwLock {{ data: Poisoned({:?}) }}", &**err.get_ref())
            },
            Err(TryLockError::WouldBlock) => write!(f, "RwLock {{ <locked> }}")
        }
    }
}

struct Dummy(UnsafeCell<()>);
unsafe impl Sync for Dummy {}
static DUMMY: Dummy = Dummy(UnsafeCell::new(()));

#[unstable(feature = "static_rwlock",
           reason = "may be merged with RwLock in the future")]
impl StaticRwLock {
    /// Creates a new rwlock.
    pub const fn new() -> StaticRwLock {
        StaticRwLock {
            lock: sys::RWLock::new(),
            poison: poison::Flag::new(),
        }
    }

    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// See `RwLock::read`.
    #[inline]
    pub fn read(&'static self) -> LockResult<RwLockReadGuard<'static, ()>> {
        unsafe { self.lock.read() }
        RwLockReadGuard::new(self, &DUMMY.0)
    }

    /// Attempts to acquire this lock with shared read access.
    ///
    /// See `RwLock::try_read`.
    #[inline]
    pub fn try_read(&'static self)
                    -> TryLockResult<RwLockReadGuard<'static, ()>> {
        if unsafe { self.lock.try_read() } {
            Ok(try!(RwLockReadGuard::new(self, &DUMMY.0)))
        } else {
            Err(TryLockError::WouldBlock)
        }
    }

    /// Locks this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// See `RwLock::write`.
    #[inline]
    pub fn write(&'static self) -> LockResult<RwLockWriteGuard<'static, ()>> {
        unsafe { self.lock.write() }
        RwLockWriteGuard::new(self, &DUMMY.0)
    }

    /// Attempts to lock this rwlock with exclusive write access.
    ///
    /// See `RwLock::try_write`.
    #[inline]
    pub fn try_write(&'static self)
                     -> TryLockResult<RwLockWriteGuard<'static, ()>> {
        if unsafe { self.lock.try_write() } {
            Ok(try!(RwLockWriteGuard::new(self, &DUMMY.0)))
        } else {
            Err(TryLockError::WouldBlock)
        }
    }

    /// Deallocates all resources associated with this static lock.
    ///
    /// This method is unsafe to call because there is no guarantee that there
    /// are no active users of the lock, and nothing prevents future users of
    /// this lock. On all platforms this method must be called to avoid
    /// leaking memory.
    pub unsafe fn destroy(&'static self) {
        self.lock.destroy()
    }
}

impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
    fn new(lock: &'rwlock StaticRwLock, data: &'rwlock UnsafeCell<T>)
           -> LockResult<RwLockReadGuard<'rwlock, T>> {
        poison::map_result(lock.poison.borrow(), |_| {
            RwLockReadGuard {
                __lock: lock,
                __data: data,
            }
        })
    }
}

impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
    fn new(lock: &'rwlock StaticRwLock, data: &'rwlock UnsafeCell<T>)
           -> LockResult<RwLockWriteGuard<'rwlock, T>> {
        poison::map_result(lock.poison.borrow(), |guard| {
            RwLockWriteGuard {
                __lock: lock,
                __data: data,
                __poison: guard,
            }
        })
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T { unsafe { &*self.__data.get() } }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T { unsafe { &*self.__data.get() } }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.__data.get() }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> {
    fn drop(&mut self) {
        unsafe { self.__lock.lock.read_unlock(); }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> {
    fn drop(&mut self) {
        self.__lock.poison.done(&self.__poison);
        unsafe { self.__lock.lock.write_unlock(); }
    }
}

#[cfg(test)]
mod tests {
    #![allow(deprecated)] // rand

    use prelude::v1::*;

    use rand::{self, Rng};
    use sync::mpsc::channel;
    use thread;
    use sync::{Arc, RwLock, StaticRwLock, TryLockError};

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read().unwrap());
        drop(l.write().unwrap());
        drop((l.read().unwrap(), l.read().unwrap()));
        drop(l.write().unwrap());
    }

    #[test]
    fn static_smoke() {
        static R: StaticRwLock = StaticRwLock::new();
        drop(R.read().unwrap());
        drop(R.write().unwrap());
        drop((R.read().unwrap(), R.read().unwrap()));
        drop(R.write().unwrap());
        unsafe { R.destroy(); }
    }

    #[test]
    fn frob() {
        static R: StaticRwLock = StaticRwLock::new();
        const N: usize = 10;
        const M: usize = 1000;

        let (tx, rx) = channel::<()>();
        for _ in 0..N {
            let tx = tx.clone();
            thread::spawn(move|| {
                let mut rng = rand::thread_rng();
                for _ in 0..M {
                    if rng.gen_weighted_bool(N) {
                        drop(R.write().unwrap());
                    } else {
                        drop(R.read().unwrap());
                    }
                }
                drop(tx);
            });
        }
        drop(tx);
        let _ = rx.recv();
        unsafe { R.destroy(); }
    }

    #[test]
    fn test_rw_arc_poison_wr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.write().unwrap();
            panic!();
        }).join();
        assert!(arc.read().is_err());
    }

    #[test]
    fn test_rw_arc_poison_ww() {
        let arc = Arc::new(RwLock::new(1));
        assert!(!arc.is_poisoned());
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.write().unwrap();
            panic!();
        }).join();
        assert!(arc.write().is_err());
        assert!(arc.is_poisoned());
    }

    #[test]
    fn test_rw_arc_no_poison_rr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.read().unwrap();
            panic!();
        }).join();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_rw() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move|| {
            let _lock = arc2.read().unwrap();
            panic!()
        }).join();
        let lock = arc.write().unwrap();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move|| {
            let mut lock = arc2.write().unwrap();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move|| {
                let lock = arc3.read().unwrap();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 10);
    }

    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move|| -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write().unwrap();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        }).join();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 2);
    }

    // FIXME(#25351) needs deeply nested coercions of DST structs.
    // #[test]
    // fn test_rwlock_unsized() {
    //     let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
    //     {
    //         let b = &mut *rw.write().unwrap();
    //         b[0] = 4;
    //         b[2] = 5;
    //     }
    //     let comp: &[i32] = &[4, 2, 5];
    //     assert_eq!(&*rw.read().unwrap(), comp);
    // }

    #[test]
    fn test_rwlock_try_write() {
        use mem::drop;

        let lock = RwLock::new(0isize);
        let read_guard = lock.read().unwrap();

        let write_result = lock.try_write();
        match write_result {
            Err(TryLockError::WouldBlock) => (),
            Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
            Err(_) => assert!(false, "unexpected error"),
        }

        drop(read_guard);
    }
}