x86/cmpxchg: add a locked add() helper
author     Jeremy Fitzhardinge <jeremy@goop.org>
Wed, 28 Sep 2011 18:49:28 +0000 (11:49 -0700)
committer  Jeremy Fitzhardinge <jeremy@goop.org>
Fri, 25 Nov 2011 18:42:59 +0000 (10:42 -0800)
Mostly to remove some conditional code in spinlock.h.

Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
arch/x86/include/asm/cmpxchg.h
arch/x86/include/asm/spinlock.h

arch/x86/include/asm/cmpxchg.h
index 5d3acdf5a7a682d96681f78c86548a18203aaf98..49eade13161cd4e4aec0280285d101e542526890 100644
@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
        __compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+       __compiletime_error("Bad argument size for add");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size is set to
@@ -207,4 +209,44 @@ extern void __xadd_wrong_size(void)
 #define xadd_sync(ptr, inc)    __xadd((ptr), (inc), "lock; ")
 #define xadd_local(ptr, inc)   __xadd((ptr), (inc), "")
 
+#define __add(ptr, inc, lock)                                          \
+       ({                                                              \
+               __typeof__ (*(ptr)) __ret = (inc);                      \
+               switch (sizeof(*(ptr))) {                               \
+               case __X86_CASE_B:                                      \
+                       asm volatile (lock "addb %b1, %0\n"             \
+                                     : "+m" (*(ptr)) : "ri" (inc)      \
+                                     : "memory", "cc");                \
+                       break;                                          \
+               case __X86_CASE_W:                                      \
+                       asm volatile (lock "addw %w1, %0\n"             \
+                                     : "+m" (*(ptr)) : "ri" (inc)      \
+                                     : "memory", "cc");                \
+                       break;                                          \
+               case __X86_CASE_L:                                      \
+                       asm volatile (lock "addl %1, %0\n"              \
+                                     : "+m" (*(ptr)) : "ri" (inc)      \
+                                     : "memory", "cc");                \
+                       break;                                          \
+               case __X86_CASE_Q:                                      \
+                       asm volatile (lock "addq %1, %0\n"              \
+                                     : "+m" (*(ptr)) : "ri" (inc)      \
+                                     : "memory", "cc");                \
+                       break;                                          \
+               default:                                                \
+                       __add_wrong_size();                             \
+               }                                                       \
+               __ret;                                                  \
+       })
+
+/*
+ * add_*() adds "inc" to "*ptr"
+ *
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
+ */
+#define add_smp(ptr, inc)      __add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc)     __add((ptr), (inc), "lock; ")
+
 #endif /* ASM_X86_CMPXCHG_H */
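
For context (not part of the commit): LOCK_PREFIX is the kernel's SMP-aware lock prefix, so add_smp() only pays for the locked operation on SMP kernels, while add_sync() always emits "lock; ". A minimal caller sketch, assuming the definitions added above are in scope; the stats_counter name is purely illustrative:

    /* Illustrative only -- stats_counter is a made-up example, not from this commit. */
    static unsigned int stats_counter;

    static inline void stats_inc(void)
    {
            add_smp(&stats_counter, 1);     /* "lock addl" only on SMP kernels */
    }

    static inline void stats_inc_sync(void)
    {
            add_sync(&stats_counter, 1);    /* always emits the lock prefix */
    }

Because the switch in __add() dispatches on sizeof(*(ptr)), the same call sites compile to addb/addw/addl/addq as the operand width requires, and an unsupported size fails at build time via __add_wrong_size().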
arch/x86/include/asm/spinlock.h
index 972c260919a394881523ff2001f9ab19da2cb9b6..a82c2bf504b60be3fac409131d56253db124a906 100644
@@ -79,23 +79,10 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
        return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
-       asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-                    : "+m" (lock->head_tail)
-                    :
-                    : "memory", "cc");
+       __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
 }
-#else
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
-{
-       asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-                    : "+m" (lock->head_tail)
-                    :
-                    : "memory", "cc");
-}
-#endif
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
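
Why a single __add() now covers both of the removed variants (an explanatory sketch, not part of the commit): the unlock increments lock->tickets.head directly, and __add()'s switch on sizeof(*(ptr)) picks addb or addw at compile time from the width of that field. A rough illustration of the ticket layout this relies on, using the same NR_CPUS < 256 condition as the #if removed above (shown for illustration, not verbatim kernel source):

    #if (NR_CPUS < 256)
    typedef u8  __ticket_t;             /* head and tail each fit in a byte */
    typedef u16 __ticketpair_t;
    #else
    typedef u16 __ticket_t;             /* need 16 bits per ticket */
    typedef u32 __ticketpair_t;
    #endif

    typedef struct arch_spinlock {
            union {
                    __ticketpair_t head_tail;       /* used by cmpxchg() in trylock above */
                    struct __raw_tickets {
                            __ticket_t head, tail;  /* head: ticket currently being served */
                    } tickets;
            };
    } arch_spinlock_t;

    /* __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX) therefore assembles to
     * "addb $1, ..." when __ticket_t is u8 and "addw $1, ..." when it is u16,
     * matching the two hand-written asm variants this patch deletes. */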