Architectures other than mips and x86 do not use ticket spinlocks.
Therefore, asking whether such a lock is contended is meaningless:
nobody is known to be waiting on it (arguably /fairly/ unfair locks).

Dummy __raw_spin_is_contended() out to return 0 on other architectures.

Signed-off-by: Kyle McMartin <kyle@redhat.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
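
For illustration, a minimal, self-contained sketch of the
"#define name name" idiom this patch relies on: an architecture header
that really implements the query also defines a macro with the same
name as the function, so the generic header can detect the override
with #ifdef and otherwise fall back to a constant-0 dummy.  The
*_example identifiers below are hypothetical, not from the patch:

  /* arch header: only ticket-lock architectures provide this. */
  static inline int arch_is_contended_example(void)
  {
          return 0;       /* a real version would inspect the ticket counters */
  }
  #define arch_is_contended_example arch_is_contended_example

  /* generic header: pick the arch override if present, else dummy it out. */
  #ifdef arch_is_contended_example
  #define is_contended_example()  arch_is_contended_example()
  #else
  #define is_contended_example()  0
  #endif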
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ ... @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 	return (((counters >> 14) - counters) & 0x1fff) > 1;
 }
+#define __raw_spin_is_contended	__raw_spin_is_contended
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ ... @@ static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
 }
+#define __raw_spin_is_contended	__raw_spin_is_contended
 
 static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
 {
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ ... @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
+#define __raw_spin_is_contended	__raw_spin_is_contended
 
 static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ ... @@
 #ifdef CONFIG_GENERIC_LOCKBREAK
 #define spin_is_contended(lock) ((lock)->break_lock)
 #else
+
+#ifdef __raw_spin_is_contended
 #define spin_is_contended(lock)	__raw_spin_is_contended(&(lock)->raw_lock)
+#else
+#define spin_is_contended(lock)	(((void)(lock), 0))
+#endif /*__raw_spin_is_contended*/
 #endif
 
 /**
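
The dummy expands to (((void)(lock), 0)) rather than plain 0 so the
macro argument is still evaluated: a variable used only in that call
does not trigger an unused-variable warning, and the comma expression
then yields the constant 0.  For illustration, a hedged sketch of the
kind of lock-break loop that consumes spin_is_contended(); the queue
type and the more_work()/do_one_item() helpers are hypothetical:

  spin_lock(&q->lock);
  while (more_work(q)) {
          do_one_item(q);
          if (spin_is_contended(&q->lock)) {
                  /* Someone is spinning on the lock: let them in. */
                  spin_unlock(&q->lock);
                  cpu_relax();
                  spin_lock(&q->lock);
          }
  }
  spin_unlock(&q->lock);

With the constant-0 dummy the branch is compiled away, so callers on
non-ticket architectures simply never break the lock early.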