// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc;
use cell::UnsafeCell;
use sync::atomic::{AtomicUsize, Ordering};
16 inner
: UnsafeCell
<libc
::pthread_rwlock_t
>,
17 write_locked
: UnsafeCell
<bool
>,
18 num_readers
: AtomicUsize
,
21 unsafe impl Send
for RWLock {}
22 unsafe impl Sync
for RWLock {}
25 pub const fn new() -> RWLock
{
27 inner
: UnsafeCell
::new(libc
::PTHREAD_RWLOCK_INITIALIZER
),
28 write_locked
: UnsafeCell
::new(false),
29 num_readers
: AtomicUsize
::new(0),
33 pub unsafe fn read(&self) {
34 let r
= libc
::pthread_rwlock_rdlock(self.inner
.get());
36 // According to the pthread_rwlock_rdlock spec, this function **may**
37 // fail with EDEADLK if a deadlock is detected. On the other hand
38 // pthread mutexes will *never* return EDEADLK if they are initialized
39 // as the "fast" kind (which ours always are). As a result, a deadlock
40 // situation may actually return from the call to pthread_rwlock_rdlock
41 // instead of blocking forever (as mutexes and Windows rwlocks do). Note
42 // that not all unix implementations, however, will return EDEADLK for
45 // We roughly maintain the deadlocking behavior by panicking to ensure
46 // that this lock acquisition does not succeed.
48 // We also check whether this lock is already write locked. This
49 // is only possible if it was write locked by the current thread and
50 // the implementation allows recursive locking. The POSIX standard
51 // doesn't require recursively locking a rwlock to deadlock, but we can't
52 // allow that because it could lead to aliasing issues.
53 if r
== libc
::EAGAIN
{
54 panic
!("rwlock maximum reader count exceeded");
55 } else if r
== libc
::EDEADLK
|| *self.write_locked
.get() {
59 panic
!("rwlock read lock would result in deadlock");
61 debug_assert_eq
!(r
, 0);
62 self.num_readers
.fetch_add(1, Ordering
::Relaxed
);
66 pub unsafe fn try_read(&self) -> bool
{
67 let r
= libc
::pthread_rwlock_tryrdlock(self.inner
.get());
69 if *self.write_locked
.get() {
73 self.num_readers
.fetch_add(1, Ordering
::Relaxed
);
81 pub unsafe fn write(&self) {
82 let r
= libc
::pthread_rwlock_wrlock(self.inner
.get());
83 // See comments above for why we check for EDEADLK and write_locked. We
84 // also need to check that num_readers is 0.
85 if r
== libc
::EDEADLK
|| *self.write_locked
.get() ||
86 self.num_readers
.load(Ordering
::Relaxed
) != 0 {
90 panic
!("rwlock write lock would result in deadlock");
92 debug_assert_eq
!(r
, 0);
94 *self.write_locked
.get() = true;
97 pub unsafe fn try_write(&self) -> bool
{
98 let r
= libc
::pthread_rwlock_trywrlock(self.inner
.get());
100 if *self.write_locked
.get() || self.num_readers
.load(Ordering
::Relaxed
) != 0 {
104 *self.write_locked
.get() = true;
112 unsafe fn raw_unlock(&self) {
113 let r
= libc
::pthread_rwlock_unlock(self.inner
.get());
114 debug_assert_eq
!(r
, 0);
117 pub unsafe fn read_unlock(&self) {
118 debug_assert
!(!*self.write_locked
.get());
119 self.num_readers
.fetch_sub(1, Ordering
::Relaxed
);
123 pub unsafe fn write_unlock(&self) {
124 debug_assert_eq
!(self.num_readers
.load(Ordering
::Relaxed
), 0);
125 debug_assert
!(*self.write_locked
.get());
126 *self.write_locked
.get() = false;
130 pub unsafe fn destroy(&self) {
131 let r
= libc
::pthread_rwlock_destroy(self.inner
.get());
132 // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
133 // rwlock that was just initialized with
134 // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
135 // or pthread_rwlock_init() is called, this behaviour no longer occurs.
136 if cfg
!(target_os
= "dragonfly") {
137 debug_assert
!(r
== 0 || r
== libc
::EINVAL
);
139 debug_assert_eq
!(r
, 0);