New upstream version 1.12.0+dfsg1

diff --git a/src/libstd/sys/unix/rwlock.rs b/src/libstd/sys/unix/rwlock.rs
index 788bff6243018f22aff3a404c18e182f95da2032..08aeb5fb8ccdeae96abf30a8569e9824dfc34856 100644
--- a/src/libstd/sys/unix/rwlock.rs
+++ b/src/libstd/sys/unix/rwlock.rs
 
 use libc;
 use cell::UnsafeCell;
-use sys::sync as ffi;
+use sync::atomic::{AtomicUsize, Ordering};
 
-pub struct RWLock { inner: UnsafeCell<ffi::pthread_rwlock_t> }
+pub struct RWLock {
+    inner: UnsafeCell<libc::pthread_rwlock_t>,
+    write_locked: UnsafeCell<bool>,
+    num_readers: AtomicUsize,
+}
 
 unsafe impl Send for RWLock {}
 unsafe impl Sync for RWLock {}
 
 impl RWLock {
     pub const fn new() -> RWLock {
-        RWLock { inner: UnsafeCell::new(ffi::PTHREAD_RWLOCK_INITIALIZER) }
+        RWLock {
+            inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
+            write_locked: UnsafeCell::new(false),
+            num_readers: AtomicUsize::new(0),
+        }
     }
     #[inline]
     pub unsafe fn read(&self) {
-        let r = ffi::pthread_rwlock_rdlock(self.inner.get());
+        let r = libc::pthread_rwlock_rdlock(self.inner.get());
 
         // According to the pthread_rwlock_rdlock spec, this function **may**
         // fail with EDEADLK if a deadlock is detected. On the other hand
@@ -36,43 +44,94 @@ impl RWLock {
         //
         // We roughly maintain the deadlocking behavior by panicking to ensure
         // that this lock acquisition does not succeed.
-        if r == libc::EDEADLK {
+        //
+        // We also check whether this lock is already write locked. This
+        // is only possible if it was write locked by the current thread and
+        // the implementation allows recursive locking. The POSIX standard
+        // doesn't require recursively locking a rwlock to deadlock, but we
+        // can't allow that because it could lead to aliasing issues.
+        if r == libc::EAGAIN {
+            panic!("rwlock maximum reader count exceeded");
+        } else if r == libc::EDEADLK || *self.write_locked.get() {
+            if r == 0 {
+                self.raw_unlock();
+            }
             panic!("rwlock read lock would result in deadlock");
         } else {
             debug_assert_eq!(r, 0);
+            self.num_readers.fetch_add(1, Ordering::Relaxed);
         }
     }
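
The checks added to `read` mean that a thread already holding the write lock
can no longer obtain a read lock on a pthread implementation that permits
recursive acquisition; it panics instead, since a successful recursive
acquisition would let a `&mut T` and a `&T` into the protected data coexist.
A hypothetical illustration (not part of the patch; `RWLock` is the type
defined above):

    // Sketch only: the aliasing hazard the write_locked check closes off.
    // On a recursion-friendly pthread rwlock the second call would
    // otherwise succeed while the write lock is still held.
    let lock = RWLock::new();
    unsafe {
        lock.write(); // current thread owns the lock exclusively
        lock.read();  // panics instead of silently succeeding
    }
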
     #[inline]
     pub unsafe fn try_read(&self) -> bool {
-        ffi::pthread_rwlock_tryrdlock(self.inner.get()) == 0
+        let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
+        if r == 0 {
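+            // The lock was acquired, but if it is already write locked by
+            // this thread (see `read` above), back out and report failure
+            // rather than handing out an aliasing read lock.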
+            if *self.write_locked.get() {
+                self.raw_unlock();
+                false
+            } else {
+                self.num_readers.fetch_add(1, Ordering::Relaxed);
+                true
+            }
+        } else {
+            false
+        }
     }
     #[inline]
     pub unsafe fn write(&self) {
-        let r = ffi::pthread_rwlock_wrlock(self.inner.get());
-        // see comments above for why we check for EDEADLK
-        if r == libc::EDEADLK {
+        let r = libc::pthread_rwlock_wrlock(self.inner.get());
+        // See comments above for why we check for EDEADLK and write_locked. We
+        // also need to check that num_readers is 0.
+        if r == libc::EDEADLK || *self.write_locked.get() ||
+           self.num_readers.load(Ordering::Relaxed) != 0 {
+            if r == 0 {
+                self.raw_unlock();
+            }
             panic!("rwlock write lock would result in deadlock");
         } else {
             debug_assert_eq!(r, 0);
         }
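+        // The panic path above diverges, so reaching this point means the
+        // write lock was acquired successfully.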
+        *self.write_locked.get() = true;
     }
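
The `num_readers` check in `write` covers the mirror-image case: a thread that
already holds a read lock and then asks for the write lock. A hypothetical
illustration (not part of the patch):

    // Sketch only: a same-thread read-then-write "upgrade". Depending on
    // the pthread implementation, wrlock either returns EDEADLK, blocks
    // forever, or recursively succeeds; in the EDEADLK and
    // recursive-success cases the checks above convert the outcome into
    // a panic rather than an aliasing write lock.
    let lock = RWLock::new();
    unsafe {
        lock.read();  // num_readers becomes 1
        lock.write(); // never returns successfully
    }
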
     #[inline]
     pub unsafe fn try_write(&self) -> bool {
-        ffi::pthread_rwlock_trywrlock(self.inner.get()) == 0
+        let r = libc::pthread_rwlock_trywrlock(self.inner.get());
+        if r == 0 {
+            if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+                self.raw_unlock();
+                false
+            } else {
+                *self.write_locked.get() = true;
+                true
+            }
+        } else {
+            false
+        }
     }
     #[inline]
-    pub unsafe fn read_unlock(&self) {
-        let r = ffi::pthread_rwlock_unlock(self.inner.get());
+    unsafe fn raw_unlock(&self) {
+        let r = libc::pthread_rwlock_unlock(self.inner.get());
         debug_assert_eq!(r, 0);
     }
     #[inline]
-    pub unsafe fn write_unlock(&self) { self.read_unlock() }
+    pub unsafe fn read_unlock(&self) {
+        debug_assert!(!*self.write_locked.get());
+        self.num_readers.fetch_sub(1, Ordering::Relaxed);
+        self.raw_unlock();
+    }
+    #[inline]
+    pub unsafe fn write_unlock(&self) {
+        debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
+        debug_assert!(*self.write_locked.get());
+        *self.write_locked.get() = false;
+        self.raw_unlock();
+    }
     #[inline]
     pub unsafe fn destroy(&self) {
-        let r = ffi::pthread_rwlock_destroy(self.inner.get());
+        let r = libc::pthread_rwlock_destroy(self.inner.get());
         // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
         // rwlock that was just initialized with
-        // ffi::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
+        // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
         // or pthread_rwlock_init() is called, this behaviour no longer occurs.
         if cfg!(target_os = "dragonfly") {
            debug_assert!(r == 0 || r == libc::EINVAL);
        } else {
            debug_assert_eq!(r, 0);
        }
    }
}
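
A closing note on the synchronization of the new fields, implied by the code
above rather than stated in the patch: both `write_locked` and `num_readers`
are only modified while the pthread lock itself is held, so the lock's own
acquire/release semantics order all accesses to them. That is why
`write_locked` can be a plain `UnsafeCell<bool>`, and why `num_readers` needs
atomicity only because multiple readers hold the lock concurrently; Relaxed
ordering suffices for a counter that is never used to publish other data.

    // Informal summary of the invariants the bookkeeping maintains:
    //
    //   *write_locked.get() == true  =>  one thread holds the write lock
    //                                    and num_readers == 0
    //   num_readers == n > 0         =>  n threads hold read locks and
    //                                    write_locked is false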