smp: quit unconditionally enabling irq in on_each_cpu_mask and on_each_cpu_cond
author     David Daney <david.daney@cavium.com>
           Wed, 11 Sep 2013 21:23:24 +0000 (14:23 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 11 Sep 2013 22:58:23 +0000 (15:58 -0700)
As in commit f21afc25f9ed ("smp.h: Use local_irq_{save,restore}() in
!SMP version of on_each_cpu()"), we don't want to enable irqs if they
are not already enabled.  There are currently no known problematic
callers of these functions, but since this is a known failure pattern,
we preemptively fix them.

Since they are not trivial functions, make them non-inline by moving
them to up.c.  This also avoids having to fix up #include
dependencies for preempt_{disable,enable}.

Signed-off-by: David Daney <david.daney@cavium.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/smp.h
kernel/up.c

index c8488763277f0066bb731f019964f038f4107b57..3724a907090734a9f37209634b75f17fbab78d5e 100644 (file)
@@ -29,6 +29,22 @@ extern unsigned int total_cpus;
 int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
                             int wait);
 
+/*
+ * Call a function on processors specified by mask, which might include
+ * the local one.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+               void *info, bool wait);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+               smp_call_func_t func, void *info, bool wait,
+               gfp_t gfp_flags);
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -100,22 +116,6 @@ static inline void call_function_init(void) { }
  */
 int on_each_cpu(smp_call_func_t func, void *info, int wait);
 
-/*
- * Call a function on processors specified by mask, which might include
- * the local one.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
-               void *info, bool wait);
-
-/*
- * Call a function on each processor for which the supplied function
- * cond_func returns a positive value. This may include the local
- * processor.
- */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-               smp_call_func_t func, void *info, bool wait,
-               gfp_t gfp_flags);
-
 /*
  * Mark the boot cpu "online" so that it can call console drivers in
  * printk() and can access its per-cpu storage.
@@ -151,36 +151,6 @@ static inline int on_each_cpu(smp_call_func_t func, void *info, int wait)
        return 0;
 }
 
-/*
- * Note we still need to test the mask even for UP
- * because we actually can get an empty mask from
- * code that on SMP might call us without the local
- * CPU in the mask.
- */
-#define on_each_cpu_mask(mask, func, info, wait) \
-       do {                                            \
-               if (cpumask_test_cpu(0, (mask))) {      \
-                       local_irq_disable();            \
-                       (func)(info);                   \
-                       local_irq_enable();             \
-               }                                       \
-       } while (0)
-/*
- * Preemption is disabled here to make sure the cond_func is called under the
- * same conditions in UP and SMP.
- */
-#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
-       do {                                                    \
-               void *__info = (info);                          \
-               preempt_disable();                              \
-               if ((cond_func)(0, __info)) {                   \
-                       local_irq_disable();                    \
-                       (func)(__info);                         \
-                       local_irq_enable();                     \
-               }                                               \
-               preempt_enable();                               \
-       } while (0)
-
 static inline void smp_send_reschedule(int cpu) { }
 #define smp_prepare_boot_cpu()                 do {} while (0)
 #define smp_call_function_many(mask, func, info, wait) \
index c54c75e9faf7a68446c1a5e80f3713af0cc64117..144e57255234659f7a8f684d0fc373ff5ec2a70e 100644 (file)
@@ -19,3 +19,42 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
        return 0;
 }
 EXPORT_SYMBOL(smp_call_function_single);
+
+/*
+ * Note we still need to test the mask even for UP
+ * because we actually can get an empty mask from
+ * code that on SMP might call us without the local
+ * CPU in the mask.
+ */
+void on_each_cpu_mask(const struct cpumask *mask,
+                     smp_call_func_t func, void *info, bool wait)
+{
+       unsigned long flags;
+
+       if (cpumask_test_cpu(0, mask)) {
+               local_irq_save(flags);
+               func(info);
+               local_irq_restore(flags);
+       }
+}
+EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * Preemption is disabled here to make sure the cond_func is called under the
+ * same conditions in UP and SMP.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+                     smp_call_func_t func, void *info, bool wait,
+                     gfp_t gfp_flags)
+{
+       unsigned long flags;
+
+       preempt_disable();
+       if (cond_func(0, info)) {
+               local_irq_save(flags);
+               func(info);
+               local_irq_restore(flags);
+       }
+       preempt_enable();
+}
+EXPORT_SYMBOL(on_each_cpu_cond);