// src/libstd/sys/unix/rwlock.rs (rustc 1.13.0+dfsg1)
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use libc;
use cell::UnsafeCell;
use sync::atomic::{AtomicUsize, Ordering};

pub struct RWLock {
    inner: UnsafeCell<libc::pthread_rwlock_t>,
    // Whether a write lock is currently held. Tracked so that recursive
    // locking, which some pthread implementations permit, can be detected
    // and turned into a panic (see `read` and `write` below).
    write_locked: UnsafeCell<bool>,
    // Number of read locks currently outstanding, tracked for the same
    // reason.
    num_readers: AtomicUsize,
}

unsafe impl Send for RWLock {}
unsafe impl Sync for RWLock {}

impl RWLock {
    pub const fn new() -> RWLock {
        RWLock {
            inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
            write_locked: UnsafeCell::new(false),
            num_readers: AtomicUsize::new(0),
        }
    }
    #[inline]
    pub unsafe fn read(&self) {
        let r = libc::pthread_rwlock_rdlock(self.inner.get());

        // According to the pthread_rwlock_rdlock spec, this function **may**
        // fail with EDEADLK if a deadlock is detected. On the other hand,
        // pthread mutexes will *never* return EDEADLK if they are initialized
        // as the "fast" kind (which ours always are). As a result, a deadlock
        // situation may actually return from the call to pthread_rwlock_rdlock
        // instead of blocking forever (as mutexes and Windows rwlocks do).
        // Note, however, that not all unix implementations return EDEADLK for
        // their rwlocks.
        //
        // We roughly maintain the deadlocking behavior by panicking to ensure
        // that this lock acquisition does not succeed.
        //
        // We also check whether this lock is already write-locked. That is
        // only possible if it was write-locked by the current thread and
        // the implementation allows recursive locking. The POSIX standard
        // doesn't require recursively locking a rwlock to deadlock, but we
        // can't allow that because it could lead to aliasing issues.
        if r == libc::EAGAIN {
            panic!("rwlock maximum reader count exceeded");
        } else if r == libc::EDEADLK || *self.write_locked.get() {
            if r == 0 {
                self.raw_unlock();
            }
            panic!("rwlock read lock would result in deadlock");
        } else {
            debug_assert_eq!(r, 0);
            self.num_readers.fetch_add(1, Ordering::Relaxed);
        }
    }
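    // A hypothetical sequence (not in the original source) showing what the
    // checks above buy us: a recursive acquisition that might deadlock or
    // alias an active write lock panics instead.
    //
    //     let lock = RWLock::new();
    //     unsafe {
    //         lock.write();  // succeeds; write_locked becomes true
    //         lock.read();   // panics with "rwlock read lock would result
    //                        // in deadlock", whether the pthread call
    //                        // returns EDEADLK or recursively grants the
    //                        // lock (in which case r == 0 and the lock is
    //                        // released before panicking)
    //     }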
    #[inline]
    pub unsafe fn try_read(&self) -> bool {
        let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
        if r == 0 {
            if *self.write_locked.get() {
                self.raw_unlock();
                false
            } else {
                self.num_readers.fetch_add(1, Ordering::Relaxed);
                true
            }
        } else {
            false
        }
    }
    #[inline]
    pub unsafe fn write(&self) {
        let r = libc::pthread_rwlock_wrlock(self.inner.get());
        // See the comments above for why we check for EDEADLK and
        // write_locked. We also need to check that num_readers is 0.
        if r == libc::EDEADLK || *self.write_locked.get() ||
           self.num_readers.load(Ordering::Relaxed) != 0 {
            if r == 0 {
                self.raw_unlock();
            }
            panic!("rwlock write lock would result in deadlock");
        } else {
            debug_assert_eq!(r, 0);
        }
        *self.write_locked.get() = true;
    }
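    // A hypothetical sequence (not in the original source) showing why the
    // num_readers check matters: an implementation may recursively grant a
    // write lock to a thread that already holds a read lock.
    //
    //     unsafe {
    //         lock.read();   // num_readers becomes 1
    //         lock.write();  // panics instead of aliasing the read lock,
    //                        // even if pthread_rwlock_wrlock returned 0
    //     }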
    #[inline]
    pub unsafe fn try_write(&self) -> bool {
        let r = libc::pthread_rwlock_trywrlock(self.inner.get());
        if r == 0 {
            if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
                self.raw_unlock();
                false
            } else {
                *self.write_locked.get() = true;
                true
            }
        } else {
            false
        }
    }
    #[inline]
    unsafe fn raw_unlock(&self) {
        let r = libc::pthread_rwlock_unlock(self.inner.get());
        debug_assert_eq!(r, 0);
    }
    #[inline]
    pub unsafe fn read_unlock(&self) {
        debug_assert!(!*self.write_locked.get());
        self.num_readers.fetch_sub(1, Ordering::Relaxed);
        self.raw_unlock();
    }
    #[inline]
    pub unsafe fn write_unlock(&self) {
        debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
        debug_assert!(*self.write_locked.get());
        *self.write_locked.get() = false;
        self.raw_unlock();
    }
    #[inline]
    pub unsafe fn destroy(&self) {
        let r = libc::pthread_rwlock_destroy(self.inner.get());
        // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
        // rwlock that was just initialized with
        // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
        // or pthread_rwlock_init() is called, this behaviour no longer occurs.
        if cfg!(target_os = "dragonfly") {
            debug_assert!(r == 0 || r == libc::EINVAL);
        } else {
            debug_assert_eq!(r, 0);
        }
    }
}
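
// A minimal usage sketch (not in the original file; the test name is
// illustrative). The raw lock requires callers to pair every lock with the
// matching unlock; the safe std::sync::RwLock wrapper enforces that pairing
// with RAII guards.
#[cfg(test)]
mod tests {
    use super::RWLock;

    #[test]
    fn readers_exclude_writers_and_vice_versa() {
        unsafe {
            let lock = RWLock::new();

            lock.read();
            assert!(lock.try_read());   // multiple readers may coexist
            assert!(!lock.try_write()); // but any reader excludes writers
            lock.read_unlock();
            lock.read_unlock();

            lock.write();
            assert!(!lock.try_read());  // a writer excludes readers
            lock.write_unlock();

            lock.destroy();
        }
    }
}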