percpu,x86: relocate this_cpu_add_return() and friends
author Tejun Heo <tj@kernel.org>
Fri, 17 Dec 2010 14:47:04 +0000 (15:47 +0100)
committer Tejun Heo <tj@kernel.org>
Fri, 17 Dec 2010 15:13:22 +0000 (16:13 +0100)
- include/linux/percpu.h: this_cpu_add_return() and friends were
  located next to __this_cpu_add_return().  However, the overall
  organization is to group operations by preemption safety first.
  Relocate this_cpu_add_return() and friends to the preemption-safe
  area (see the usage sketch below).

- arch/x86/include/asm/percpu.h: Relocate percpu_add_return_op() after
  the other, more basic operations.  Relocate [__]this_cpu_add_return_8()
  so that the definitions are grouped by preemption safety first.
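
For illustration, a minimal usage sketch of the two groups (the per-cpu
counter and the helper functions below are hypothetical and not part of
this commit): this_cpu_add_return() is preemption-safe and may be called
with preemption enabled, while __this_cpu_add_return() requires the
caller to have preemption disabled already.

#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, hit_count);    /* hypothetical counter */

/* Preemption-safe variant: the add and the returned new value refer
 * to the same CPU's copy even when called with preemption enabled. */
unsigned long record_hit(void)
{
        return this_cpu_add_return(hit_count, 1);
}

/* Non-preemption-safe variant: the caller must already have preemption
 * off, e.g. between get_cpu() and put_cpu(). */
unsigned long record_hit_preempt_off(void)
{
        return __this_cpu_add_return(hit_count, 1);
}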

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
arch/x86/include/asm/percpu.h
include/linux/percpu.h

diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 38f9e965ff9671b282bbce716e571d9d2859ffd8..dd0cd4b6a76fedeb28b3fcdf8f3f97387d88304b 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -177,39 +177,6 @@ do {                                                                       \
        }                                                               \
 } while (0)
 
-/*
- * Add return operation
- */
-#define percpu_add_return_op(var, val)                                 \
-({                                                                     \
-       typeof(var) paro_ret__ = val;                                   \
-       switch (sizeof(var)) {                                          \
-       case 1:                                                         \
-               asm("xaddb %0, "__percpu_arg(1)                         \
-                           : "+q" (paro_ret__), "+m" (var)             \
-                           : : "memory");                              \
-               break;                                                  \
-       case 2:                                                         \
-               asm("xaddw %0, "__percpu_arg(1)                         \
-                           : "+r" (paro_ret__), "+m" (var)             \
-                           : : "memory");                              \
-               break;                                                  \
-       case 4:                                                         \
-               asm("xaddl %0, "__percpu_arg(1)                         \
-                           : "+r" (paro_ret__), "+m" (var)             \
-                           : : "memory");                              \
-               break;                                                  \
-       case 8:                                                         \
-               asm("xaddq %0, "__percpu_arg(1)                         \
-                           : "+re" (paro_ret__), "+m" (var)            \
-                           : : "memory");                              \
-               break;                                                  \
-       default: __bad_percpu_size();                                   \
-       }                                                               \
-       paro_ret__ += val;                                              \
-       paro_ret__;                                                     \
-})
-
 #define percpu_from_op(op, var, constraint)            \
 ({                                                     \
        typeof(var) pfo_ret__;                          \
@@ -262,6 +229,39 @@ do {                                                                       \
        }                                               \
 })
 
+/*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val)                                 \
+({                                                                     \
+       typeof(var) paro_ret__ = val;                                   \
+       switch (sizeof(var)) {                                          \
+       case 1:                                                         \
+               asm("xaddb %0, "__percpu_arg(1)                         \
+                           : "+q" (paro_ret__), "+m" (var)             \
+                           : : "memory");                              \
+               break;                                                  \
+       case 2:                                                         \
+               asm("xaddw %0, "__percpu_arg(1)                         \
+                           : "+r" (paro_ret__), "+m" (var)             \
+                           : : "memory");                              \
+               break;                                                  \
+       case 4:                                                         \
+               asm("xaddl %0, "__percpu_arg(1)                         \
+                           : "+r" (paro_ret__), "+m" (var)             \
+                           : : "memory");                              \
+               break;                                                  \
+       case 8:                                                         \
+               asm("xaddq %0, "__percpu_arg(1)                         \
+                           : "+re" (paro_ret__), "+m" (var)            \
+                           : : "memory");                              \
+               break;                                                  \
+       default: __bad_percpu_size();                                   \
+       }                                                               \
+       paro_ret__ += val;                                              \
+       paro_ret__;                                                     \
+})
+
 /*
  * percpu_read() makes gcc load the percpu variable every time it is
  * accessed while percpu_read_stable() allows the value to be cached.
@@ -352,6 +352,7 @@ do {                                                                        \
 #define __this_cpu_and_8(pcp, val)     percpu_to_op("and", (pcp), val)
 #define __this_cpu_or_8(pcp, val)      percpu_to_op("or", (pcp), val)
 #define __this_cpu_xor_8(pcp, val)     percpu_to_op("xor", (pcp), val)
+#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
 
 #define this_cpu_read_8(pcp)           percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_write_8(pcp, val)     percpu_to_op("mov", (pcp), val)
@@ -359,14 +360,12 @@ do {                                                                      \
 #define this_cpu_and_8(pcp, val)       percpu_to_op("and", (pcp), val)
 #define this_cpu_or_8(pcp, val)                percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)       percpu_to_op("xor", (pcp), val)
+#define this_cpu_add_return_8(pcp, val)        percpu_add_return_op(pcp, val)
 
 #define irqsafe_cpu_add_8(pcp, val)    percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)    percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)     percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)    percpu_to_op("xor", (pcp), val)
-
-#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
-#define this_cpu_add_return_8(pcp, val)        percpu_add_return_op(pcp, val)
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
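
A side note on the relocated x86 fast path above: xadd leaves the old
value of the per-cpu slot in its register operand, so
percpu_add_return_op() adds val once more at the end to hand back the
new value -- the same result the generic preempt_disable() /
__this_cpu_read() fallback in the second file below produces.  A rough
user-space model of that arithmetic, illustrative only and not kernel
code:

/* Fetch-and-add returns the old value, so old + val is the new value
 * the caller sees -- mirroring the final "paro_ret__ += val". */
static long add_return_model(long *slot, long val)
{
        long old = __sync_fetch_and_add(slot, val);  /* like xadd */
        return old + val;                            /* new value */
}
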
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 4d593defc47da6e69bcc33c96bcd93d8c9df6d99..3484e88d93f88ab089f47c13e78c6204b69fdccf 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -417,6 +417,36 @@ do {                                                                       \
 # define this_cpu_xor(pcp, val)                __pcpu_size_call(this_cpu_or_, (pcp), (val))
 #endif
 
+#define _this_cpu_generic_add_return(pcp, val)                         \
+({                                                                     \
+       typeof(pcp) ret__;                                              \
+       preempt_disable();                                              \
+       __this_cpu_add(pcp, val);                                       \
+       ret__ = __this_cpu_read(pcp);                                   \
+       preempt_enable();                                               \
+       ret__;                                                          \
+})
+
+#ifndef this_cpu_add_return
+# ifndef this_cpu_add_return_1
+#  define this_cpu_add_return_1(pcp, val)      _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_2
+#  define this_cpu_add_return_2(pcp, val)      _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_4
+#  define this_cpu_add_return_4(pcp, val)      _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_8
+#  define this_cpu_add_return_8(pcp, val)      _this_cpu_generic_add_return(pcp, val)
+# endif
+# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define this_cpu_sub_return(pcp, val)  this_cpu_add_return(pcp, -(val))
+#define this_cpu_inc_return(pcp)       this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp)       this_cpu_add_return(pcp, -1)
+
 /*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
@@ -544,36 +574,6 @@ do {                                                                       \
 # define __this_cpu_xor(pcp, val)      __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
 #endif
 
-#define _this_cpu_generic_add_return(pcp, val)                         \
-({                                                                     \
-       typeof(pcp) ret__;                                              \
-       preempt_disable();                                              \
-       __this_cpu_add(pcp, val);                                       \
-       ret__ = __this_cpu_read(pcp);                                   \
-       preempt_enable();                                               \
-       ret__;                                                          \
-})
-
-#ifndef this_cpu_add_return
-# ifndef this_cpu_add_return_1
-#  define this_cpu_add_return_1(pcp, val)      _this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_2
-#  define this_cpu_add_return_2(pcp, val)      _this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_4
-#  define this_cpu_add_return_4(pcp, val)      _this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_8
-#  define this_cpu_add_return_8(pcp, val)      _this_cpu_generic_add_return(pcp, val)
-# endif
-# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
-#endif
-
-#define this_cpu_sub_return(pcp, val)  this_cpu_add_return(pcp, -(val))
-#define this_cpu_inc_return(pcp)       this_cpu_add_return(pcp, 1)
-#define this_cpu_dec_return(pcp)       this_cpu_add_return(pcp, -1)
-
 #define __this_cpu_generic_add_return(pcp, val)                                \
 ({                                                                     \
        __this_cpu_add(pcp, val);                                       \