// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use prelude::v1::*;

use cell::UnsafeCell;
use marker;
use ops::{Deref, DerefMut};
use sync::poison::{self, LockResult, TryLockError, TryLockResult};
use sys_common::rwlock as sys;

/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
///
/// # Poisoning
///
/// RwLocks, like Mutexes, will become poisoned on panics. Note, however, that
/// an RwLock may only be poisoned if a panic occurs while it is locked
/// exclusively (write mode). If a panic occurs in any reader, then the lock
/// will not be poisoned. The second example below sketches how poisoning
/// surfaces to callers.
///
/// # Examples
///
/// ```
/// use std::sync::RwLock;
///
/// let lock = RwLock::new(5i);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read().unwrap();
///     let r2 = lock.read().unwrap();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write().unwrap();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
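///
/// A sketch of how write-side poisoning surfaces, mirroring the poisoning
/// tests at the bottom of this file (the `lock`/`lock2` bindings are
/// illustrative, not part of this API):
///
/// ```
/// use std::sync::{Arc, RwLock};
/// use std::thread::Thread;
///
/// let lock = Arc::new(RwLock::new(0i));
/// let lock2 = lock.clone();
///
/// // A panic while the write lock is held poisons the RwLock...
/// let _ = Thread::scoped(move|| {
///     let _guard = lock2.write().unwrap();
///     panic!();
/// }).join();
///
/// // ...so later users observe an error instead of silently reading
/// // possibly-inconsistent data.
/// assert!(lock.read().is_err());
/// ```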
#[stable]
pub struct RwLock<T> {
    inner: Box<StaticRwLock>,
    data: UnsafeCell<T>,
}

unsafe impl<T:'static+Send> Send for RwLock<T> {}
unsafe impl<T> Sync for RwLock<T> {}

/// Structure representing a statically allocated RwLock.
///
/// This structure is intended to be used inside of a `static` and will provide
/// automatic global access as well as lazy initialization. The internal
/// resources of this RwLock, however, must be manually deallocated.
///
/// # Example
///
/// ```
/// use std::sync::{StaticRwLock, RW_LOCK_INIT};
///
/// static LOCK: StaticRwLock = RW_LOCK_INIT;
///
/// {
///     let _g = LOCK.read().unwrap();
///     // ... shared read access
/// }
/// {
///     let _g = LOCK.write().unwrap();
///     // ... exclusive write access
/// }
/// unsafe { LOCK.destroy() } // free all resources
/// ```
#[unstable = "may be merged with RwLock in the future"]
pub struct StaticRwLock {
    lock: sys::RWLock,
    poison: poison::Flag,
}

unsafe impl Send for StaticRwLock {}
unsafe impl Sync for StaticRwLock {}

/// Constant initialization for a statically-initialized rwlock.
#[unstable = "may be merged with RwLock in the future"]
pub const RW_LOCK_INIT: StaticRwLock = StaticRwLock {
    lock: sys::RWLOCK_INIT,
    poison: poison::FLAG_INIT,
};

/// RAII structure used to release the shared read access of a lock when
/// dropped.
#[must_use]
#[stable]
pub struct RwLockReadGuard<'a, T: 'a> {
    __lock: &'a StaticRwLock,
    __data: &'a UnsafeCell<T>,
    __marker: marker::NoSend,
}

/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
#[must_use]
#[stable]
pub struct RwLockWriteGuard<'a, T: 'a> {
    __lock: &'a StaticRwLock,
    __data: &'a UnsafeCell<T>,
    __poison: poison::Guard,
    __marker: marker::NoSend,
}

impl<T: Send + Sync> RwLock<T> {
    /// Creates a new instance of an RwLock which is unlocked and ready to go.
    #[stable]
    pub fn new(t: T) -> RwLock<T> {
        RwLock { inner: box RW_LOCK_INIT, data: UnsafeCell::new(t) }
    }

    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns. This method does not provide any guarantees with
    /// respect to the ordering of whether contending readers or writers will
    /// acquire the lock first.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    ///
    /// # Failure
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock.
    /// The failure will occur immediately after the lock has been acquired.
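    ///
    /// # Example
    ///
    /// A minimal sketch of shared read access (the `lock` and `guard` bindings
    /// are illustrative, not part of this API):
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(5i);
    ///
    /// // The guard dereferences to the protected data and releases the
    /// // shared access when it goes out of scope.
    /// let guard = lock.read().unwrap();
    /// assert_eq!(*guard, 5);
    /// ```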
    #[inline]
    #[stable]
    pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
        unsafe { self.inner.lock.read() }
        RwLockReadGuard::new(&*self.inner, &self.data)
    }

    /// Attempt to acquire this lock with shared read access.
    ///
    /// This function will never block. If `read` would succeed immediately,
    /// `Ok` is returned with an RAII guard which will release the shared
    /// access of this thread when dropped; otherwise
    /// `Err(TryLockError::WouldBlock)` is returned. This method does not
    /// provide any guarantees with respect to the ordering of whether
    /// contending readers or writers will acquire the lock first.
    ///
    /// # Failure
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock. An
    /// error will only be returned if the lock would have otherwise been
    /// acquired.
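    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only): on an uncontended lock the call
    /// is expected to succeed, while under write contention it reports
    /// `WouldBlock` instead of waiting.
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(5i);
    ///
    /// match lock.try_read() {
    ///     Ok(guard) => assert_eq!(*guard, 5),
    ///     Err(_) => println!("read access could not be granted right now"),
    /// }
    /// ```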
    #[inline]
    #[stable]
    pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<T>> {
        if unsafe { self.inner.lock.try_read() } {
            Ok(try!(RwLockReadGuard::new(&*self.inner, &self.data)))
        } else {
            Err(TryLockError::WouldBlock)
        }
    }

    /// Lock this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this rwlock
    /// when dropped.
    ///
    /// # Failure
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock.
    /// The failure will occur immediately after the lock has been acquired.
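    ///
    /// # Example
    ///
    /// A minimal sketch of exclusive write access (the extra scope is only
    /// there so the write guard is dropped before reading):
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(5i);
    ///
    /// {
    ///     let mut guard = lock.write().unwrap();
    ///     *guard += 1;
    /// } // write lock released here
    /// assert_eq!(*lock.read().unwrap(), 6);
    /// ```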
    #[inline]
    #[stable]
    pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
        unsafe { self.inner.lock.write() }
        RwLockWriteGuard::new(&*self.inner, &self.data)
    }

    /// Attempt to lock this rwlock with exclusive write access.
    ///
    /// This function does not ever block, and it will return
    /// `Err(TryLockError::WouldBlock)` if a call to `write` would otherwise
    /// block. If successful, an RAII guard is returned.
    ///
    /// # Failure
    ///
    /// This function will return an error if the RwLock is poisoned. An RwLock
    /// is poisoned whenever a writer panics while holding an exclusive lock. An
    /// error will only be returned if the lock would have otherwise been
    /// acquired.
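    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only): the write attempt either yields a
    /// mutable guard or reports `WouldBlock` without waiting.
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(5i);
    ///
    /// match lock.try_write() {
    ///     Ok(mut guard) => *guard += 1,
    ///     Err(_) => println!("write access could not be granted right now"),
    /// }
    /// ```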
    #[inline]
    #[stable]
    pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<T>> {
        // This must attempt the *write* half of the underlying lock; taking
        // the read half here would hand out a write guard while other readers
        // are still active.
        if unsafe { self.inner.lock.try_write() } {
            Ok(try!(RwLockWriteGuard::new(&*self.inner, &self.data)))
        } else {
            Err(TryLockError::WouldBlock)
        }
    }
}

#[unsafe_destructor]
#[stable]
impl<T> Drop for RwLock<T> {
    fn drop(&mut self) {
        unsafe { self.inner.lock.destroy() }
    }
}

struct Dummy(UnsafeCell<()>);
unsafe impl Sync for Dummy {}
static DUMMY: Dummy = Dummy(UnsafeCell { value: () });

impl StaticRwLock {
    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// See `RwLock::read`.
    #[inline]
    #[unstable = "may be merged with RwLock in the future"]
    pub fn read(&'static self) -> LockResult<RwLockReadGuard<'static, ()>> {
        unsafe { self.lock.read() }
        RwLockReadGuard::new(self, &DUMMY.0)
    }

    /// Attempt to acquire this lock with shared read access.
    ///
    /// See `RwLock::try_read`.
    #[inline]
    #[unstable = "may be merged with RwLock in the future"]
    pub fn try_read(&'static self)
                    -> TryLockResult<RwLockReadGuard<'static, ()>> {
        if unsafe { self.lock.try_read() } {
            Ok(try!(RwLockReadGuard::new(self, &DUMMY.0)))
        } else {
            Err(TryLockError::WouldBlock)
        }
    }

    /// Lock this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// See `RwLock::write`.
    #[inline]
    #[unstable = "may be merged with RwLock in the future"]
    pub fn write(&'static self) -> LockResult<RwLockWriteGuard<'static, ()>> {
        unsafe { self.lock.write() }
        RwLockWriteGuard::new(self, &DUMMY.0)
    }

    /// Attempt to lock this rwlock with exclusive write access.
    ///
    /// See `RwLock::try_write`.
    #[inline]
    #[unstable = "may be merged with RwLock in the future"]
    pub fn try_write(&'static self)
                     -> TryLockResult<RwLockWriteGuard<'static, ()>> {
        if unsafe { self.lock.try_write() } {
            Ok(try!(RwLockWriteGuard::new(self, &DUMMY.0)))
        } else {
            Err(TryLockError::WouldBlock)
        }
    }

    /// Deallocate all resources associated with this static lock.
    ///
    /// This method is unsafe to call as there is no guarantee that there are
    /// no active users of the lock, and it does not prevent any future users
    /// of this lock either. This method must be called to avoid leaking memory
    /// on all platforms.
    #[unstable = "may be merged with RwLock in the future"]
    pub unsafe fn destroy(&'static self) {
        self.lock.destroy()
    }
}

impl<'rwlock, T> RwLockReadGuard<'rwlock, T> {
    fn new(lock: &'rwlock StaticRwLock, data: &'rwlock UnsafeCell<T>)
           -> LockResult<RwLockReadGuard<'rwlock, T>> {
        poison::map_result(lock.poison.borrow(), |_| {
            RwLockReadGuard {
                __lock: lock,
                __data: data,
                __marker: marker::NoSend,
            }
        })
    }
}
impl<'rwlock, T> RwLockWriteGuard<'rwlock, T> {
    fn new(lock: &'rwlock StaticRwLock, data: &'rwlock UnsafeCell<T>)
           -> LockResult<RwLockWriteGuard<'rwlock, T>> {
        poison::map_result(lock.poison.borrow(), |guard| {
            RwLockWriteGuard {
                __lock: lock,
                __data: data,
                __poison: guard,
                __marker: marker::NoSend,
            }
        })
    }
}

#[stable]
impl<'rwlock, T> Deref for RwLockReadGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T { unsafe { &*self.__data.get() } }
}
#[stable]
impl<'rwlock, T> Deref for RwLockWriteGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T { unsafe { &*self.__data.get() } }
}
#[stable]
impl<'rwlock, T> DerefMut for RwLockWriteGuard<'rwlock, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.__data.get() }
    }
}

#[unsafe_destructor]
#[stable]
impl<'a, T> Drop for RwLockReadGuard<'a, T> {
    fn drop(&mut self) {
        unsafe { self.__lock.lock.read_unlock(); }
    }
}

#[unsafe_destructor]
#[stable]
impl<'a, T> Drop for RwLockWriteGuard<'a, T> {
    fn drop(&mut self) {
        self.__lock.poison.done(&self.__poison);
        unsafe { self.__lock.lock.write_unlock(); }
    }
}

#[cfg(test)]
mod tests {
    use prelude::v1::*;

    use rand::{self, Rng};
    use sync::mpsc::channel;
    use thread::Thread;
    use sync::{Arc, RwLock, StaticRwLock, RW_LOCK_INIT};

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read().unwrap());
        drop(l.write().unwrap());
        drop((l.read().unwrap(), l.read().unwrap()));
        drop(l.write().unwrap());
    }

    #[test]
    fn static_smoke() {
        static R: StaticRwLock = RW_LOCK_INIT;
        drop(R.read().unwrap());
        drop(R.write().unwrap());
        drop((R.read().unwrap(), R.read().unwrap()));
        drop(R.write().unwrap());
        unsafe { R.destroy(); }
    }

    #[test]
    fn frob() {
        static R: StaticRwLock = RW_LOCK_INIT;
        static N: uint = 10;
        static M: uint = 1000;

        let (tx, rx) = channel::<()>();
        for _ in range(0, N) {
            let tx = tx.clone();
            Thread::spawn(move|| {
                let mut rng = rand::thread_rng();
                for _ in range(0, M) {
                    if rng.gen_weighted_bool(N) {
                        drop(R.write().unwrap());
                    } else {
                        drop(R.read().unwrap());
                    }
                }
                drop(tx);
            });
        }
        drop(tx);
        let _ = rx.recv();
        unsafe { R.destroy(); }
    }

    #[test]
    fn test_rw_arc_poison_wr() {
        let arc = Arc::new(RwLock::new(1i));
        let arc2 = arc.clone();
        let _: Result<uint, _> = Thread::scoped(move|| {
            let _lock = arc2.write().unwrap();
            panic!();
        }).join();
        assert!(arc.read().is_err());
    }

    #[test]
    fn test_rw_arc_poison_ww() {
        let arc = Arc::new(RwLock::new(1i));
        let arc2 = arc.clone();
        let _: Result<uint, _> = Thread::scoped(move|| {
            let _lock = arc2.write().unwrap();
            panic!();
        }).join();
        assert!(arc.write().is_err());
    }

    #[test]
    fn test_rw_arc_no_poison_rr() {
        let arc = Arc::new(RwLock::new(1i));
        let arc2 = arc.clone();
        let _: Result<uint, _> = Thread::scoped(move|| {
            let _lock = arc2.read().unwrap();
            panic!();
        }).join();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_rw() {
        let arc = Arc::new(RwLock::new(1i));
        let arc2 = arc.clone();
        let _: Result<uint, _> = Thread::scoped(move|| {
            let _lock = arc2.read().unwrap();
            panic!()
        }).join();
        let lock = arc.write().unwrap();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0i));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        Thread::spawn(move|| {
            let mut lock = arc2.write().unwrap();
            for _ in range(0u, 10) {
                let tmp = *lock;
                *lock = -1;
                Thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in range(0u, 5) {
            let arc3 = arc.clone();
            children.push(Thread::scoped(move|| {
                let lock = arc3.read().unwrap();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children.into_iter() {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 10);
    }

    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1i));
        let arc2 = arc.clone();
        let _ = Thread::scoped(move|| -> () {
            struct Unwinder {
                i: Arc<RwLock<int>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write().unwrap();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        }).join();
        let lock = arc.read().unwrap();
        assert_eq!(*lock, 2);
    }
}