* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
* For details, see <http://zfsonlinux.org/>.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
#include <linux/rwsem.h>
/*
 * Count values that identify a rw_semaphore held by exactly one reader
 * or exactly one writer.  The field encoding differs by kernel
 * configuration:
 *  - CONFIG_PREEMPT_RT_FULL: a single reader is a depth of 1 and a
 *    writer is 0 (NOTE(review): values taken from this table — confirm
 *    against the -rt rwsem implementation).
 *  - CONFIG_RWSEM_GENERIC_SPINLOCK: ->activity is 1 for one reader and
 *    -1 for a writer.
 *  - otherwise: the arch-optimized implementation encodes the count
 *    with the RWSEM_ACTIVE_*_BIAS constants from <linux/rwsem.h>.
 */
#if defined(CONFIG_PREEMPT_RT_FULL)
#define SPL_RWSEM_SINGLE_READER_VALUE (1)
#define SPL_RWSEM_SINGLE_WRITER_VALUE (0)
#elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
#define SPL_RWSEM_SINGLE_READER_VALUE (1)
#define SPL_RWSEM_SINGLE_WRITER_VALUE (-1)
#else
#define SPL_RWSEM_SINGLE_READER_VALUE (RWSEM_ACTIVE_READ_BIAS)
#define SPL_RWSEM_SINGLE_WRITER_VALUE (RWSEM_ACTIVE_WRITE_BIAS)
#endif
/*
 * RWSEM_COUNT(sem) - read the semaphore's active count, whose field
 * name and type vary across kernel versions and configurations.
 */
/* Linux 3.16 changed activity to count for rwsem-spinlock */
#if defined(CONFIG_PREEMPT_RT_FULL)
#define RWSEM_COUNT(sem) ((sem)->read_depth)
#elif defined(HAVE_RWSEM_ACTIVITY)
#define RWSEM_COUNT(sem) ((sem)->activity)
/* Linux 4.8 changed count to an atomic_long_t for !rwsem-spinlock */
#elif defined(HAVE_RWSEM_ATOMIC_LONG_COUNT)
#define RWSEM_COUNT(sem) atomic_long_read(&(sem)->count)
#else
#define RWSEM_COUNT(sem) ((sem)->count)
#endif
/*
 * Attempt to upgrade a held read lock to a write lock.
 * NOTE(review): semantics inferred from the name; defined elsewhere in
 * the SPL — confirm against the implementation.
 */
int rwsem_tryupgrade(struct rw_semaphore *rwsem);

/*
 * Wrappers for locking the rwsem's internal wait_lock.  When
 * RWSEM_SPINLOCK_IS_RAW is defined the wait_lock is a raw_spinlock_t
 * and the raw_spin_* primitives must be used; otherwise it is a plain
 * spinlock_t.
 */
#if defined(RWSEM_SPINLOCK_IS_RAW)
#define spl_rwsem_lock_irqsave(lk, fl) raw_spin_lock_irqsave(lk, fl)
#define spl_rwsem_unlock_irqrestore(lk, fl) raw_spin_unlock_irqrestore(lk, fl)
#define spl_rwsem_trylock_irqsave(lk, fl) raw_spin_trylock_irqsave(lk, fl)
#else
#define spl_rwsem_lock_irqsave(lk, fl) spin_lock_irqsave(lk, fl)
#define spl_rwsem_unlock_irqrestore(lk, fl) spin_unlock_irqrestore(lk, fl)
#define spl_rwsem_trylock_irqsave(lk, fl) spin_trylock_irqsave(lk, fl)
#endif /* RWSEM_SPINLOCK_IS_RAW */
/*
 * spl_rwsem_is_locked(rwsem) - nonzero when the semaphore is held by
 * any reader or writer.  A direct alias for the kernel's
 * rwsem_is_locked(); the old SPL workaround of taking wait_lock around
 * the call (for the pre-2.6.33 rwsem_is_locked() race) is no longer
 * required.
 */
#define spl_rwsem_is_locked(rwsem) rwsem_is_locked(rwsem)
#endif /* _SPL_RWSEM_COMPAT_H */