// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

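//! A reader-writer lock built on POSIX `pthread_rwlock_t`.
//!
//! The raw pthread lock is supplemented with `write_locked` and `num_readers`
//! bookkeeping so that acquisitions whose behaviour POSIX leaves undefined or
//! implementation-specific (recursive locking in particular) are turned into
//! panics rather than silent aliasing or platform-dependent deadlocks; see
//! the comments in `read` and `write` below.
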
use libc;
use cell::UnsafeCell;
use sync::atomic::{AtomicUsize, Ordering};

pub struct RWLock {
    inner: UnsafeCell<libc::pthread_rwlock_t>,
    // Bookkeeping kept next to the pthread lock: whether the lock is
    // currently held for writing, and how many read acquisitions are
    // outstanding. `read` and `write` below consult these to reject
    // recursive or aliasing acquisitions.
    write_locked: UnsafeCell<bool>,
    num_readers: AtomicUsize,
}
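
// `UnsafeCell` is `!Sync`, so `Sync` (and, depending on the platform's
// definition of `pthread_rwlock_t`, `Send`) cannot be derived automatically.
// Asserting the impls is sound because access to the cells only happens while
// the pthread lock is held.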
unsafe impl Send for RWLock {}
unsafe impl Sync for RWLock {}

impl RWLock {
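    // `const fn` together with `PTHREAD_RWLOCK_INITIALIZER` means the lock
    // can be created in a `static` without any runtime call to
    // `pthread_rwlock_init`.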
    pub const fn new() -> RWLock {
        RWLock {
            inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
            write_locked: UnsafeCell::new(false),
            num_readers: AtomicUsize::new(0),
        }
    }
    #[inline]
    pub unsafe fn read(&self) {
        let r = libc::pthread_rwlock_rdlock(self.inner.get());

        // According to the pthread_rwlock_rdlock spec, this function **may**
        // fail with EDEADLK if a deadlock is detected. On the other hand
        // pthread mutexes will *never* return EDEADLK if they are initialized
        // as the "fast" kind (which ours always are). As a result, a deadlock
        // situation may actually return from the call to pthread_rwlock_rdlock
        // instead of blocking forever (as mutexes and Windows rwlocks do). Note
        // that not all unix implementations, however, will return EDEADLK for
        // their rwlocks.
        //
        // We roughly maintain the deadlocking behavior by panicking to ensure
        // that this lock acquisition does not succeed.
        //
        // We also check whether this lock is already write locked. This
        // is only possible if it was write locked by the current thread and
        // the implementation allows recursive locking. The POSIX standard
        // doesn't require recursively locking a rwlock to deadlock, but we
        // can't allow that because it could lead to aliasing issues.
        if r == libc::EAGAIN {
            panic!("rwlock maximum reader count exceeded");
        } else if r == libc::EDEADLK || *self.write_locked.get() {
            if r == 0 {
                self.raw_unlock();
            }
            panic!("rwlock read lock would result in deadlock");
        } else {
            debug_assert_eq!(r, 0);
            self.num_readers.fetch_add(1, Ordering::Relaxed);
        }
    }
    #[inline]
    pub unsafe fn try_read(&self) -> bool {
        let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
        if r == 0 {
            // As in `read`, refuse a lock that the OS granted recursively
            // while we already hold it for writing.
            if *self.write_locked.get() {
                self.raw_unlock();
                false
            } else {
                self.num_readers.fetch_add(1, Ordering::Relaxed);
                true
            }
        } else {
            false
        }
    }
    #[inline]
    pub unsafe fn write(&self) {
        let r = libc::pthread_rwlock_wrlock(self.inner.get());
        // See comments above for why we check for EDEADLK and write_locked. We
        // also need to check that num_readers is 0.
        if r == libc::EDEADLK || *self.write_locked.get() ||
           self.num_readers.load(Ordering::Relaxed) != 0 {
            if r == 0 {
                self.raw_unlock();
            }
            panic!("rwlock write lock would result in deadlock");
        } else {
            debug_assert_eq!(r, 0);
        }
        *self.write_locked.get() = true;
    }
    #[inline]
    pub unsafe fn try_write(&self) -> bool {
        let r = libc::pthread_rwlock_trywrlock(self.inner.get());
        if r == 0 {
            // As in `write`, refuse a recursive acquisition: the lock must not
            // already be write locked or held by any readers.
            if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
                self.raw_unlock();
                false
            } else {
                *self.write_locked.get() = true;
                true
            }
        } else {
            false
        }
    }
    #[inline]
    unsafe fn raw_unlock(&self) {
        let r = libc::pthread_rwlock_unlock(self.inner.get());
        debug_assert_eq!(r, 0);
    }
    #[inline]
    pub unsafe fn read_unlock(&self) {
        debug_assert!(!*self.write_locked.get());
        self.num_readers.fetch_sub(1, Ordering::Relaxed);
        self.raw_unlock();
    }
    #[inline]
    pub unsafe fn write_unlock(&self) {
        debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
        debug_assert!(*self.write_locked.get());
        *self.write_locked.get() = false;
        self.raw_unlock();
    }
    #[inline]
    pub unsafe fn destroy(&self) {
        let r = libc::pthread_rwlock_destroy(self.inner.get());
        // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
        // rwlock that was just initialized with
        // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
        // or pthread_rwlock_init() is called, this behaviour no longer occurs.
        if cfg!(target_os = "dragonfly") {
            debug_assert!(r == 0 || r == libc::EINVAL);
        } else {
            debug_assert_eq!(r, 0);
        }
    }
}
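
// ----------------------------------------------------------------------------
// Illustrative sketch (hypothetical caller, not part of the module itself) of
// how the bookkeeping above surfaces to users of this unsafe primitive:
//
//     let lock = RWLock::new();
//     unsafe {
//         lock.write();              // exclusive lock held, write_locked = true
//
//         // A recursive read on the same thread is never allowed to succeed:
//         // it panics when the OS reports EDEADLK or erroneously grants the
//         // lock (see the comments in `read`); on implementations that do
//         // neither, it may simply block, preserving the deadlock behaviour.
//         // lock.read();
//
//         // The non-blocking variant just reports failure:
//         assert!(!lock.try_read());
//
//         lock.write_unlock();
//         lock.destroy();
//     }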