smp: Remove allocation mask from on_each_cpu_cond.*()
author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
          Fri, 17 Jan 2020 09:01:37 +0000 (10:01 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
          Fri, 24 Jan 2020 19:40:09 +0000 (20:40 +0100)
The allocation mask is no longer used by on_each_cpu_cond() and
on_each_cpu_cond_mask() and can be removed.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20200117090137.1205765-4-bigeasy@linutronix.de
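
For reference, a minimal caller sketch (with a hypothetical cond/func pair, not part of this patch) showing the signatures after this change: the gfp_t argument is gone, since neither function allocates a cpumask internally anymore.

#include <linux/smp.h>

/* Hypothetical predicate: should this CPU receive the IPI? */
static bool example_cond(int cpu, void *info)
{
	return true;
}

/* Hypothetical function executed on each selected CPU. */
static void example_func(void *info)
{
}

static void example_caller(void)
{
	/* Run example_func on every online CPU for which example_cond returns true. */
	on_each_cpu_cond(example_cond, example_func, NULL, 1);

	/* Same, but restricted to an explicit cpumask. */
	on_each_cpu_cond_mask(example_cond, example_func, NULL, 1, cpu_online_mask);
}

The wait argument (1 here, as in the in-tree callers below) makes the call synchronous, waiting until the function has completed on the other CPUs.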
arch/x86/mm/tlb.c
fs/buffer.c
include/linux/smp.h
kernel/smp.c
kernel/up.c
mm/slub.c

index e6a9edc5baaf07b99925519351a895520e2d7949..66f96f21a7b60b2a73832fd291c1f7ae783cb2c5 100644 (file)
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -708,7 +708,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
                               (void *)info, 1);
        else
                on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
-                               (void *)info, 1, GFP_ATOMIC, cpumask);
+                               (void *)info, 1, cpumask);
 }
 
 /*
index 18a87ec8a465bbf1dccee2cd59e29a3c180fcb5c..b8d28370cfd7f25cb5ed6e44fa7a073547ba5319 100644 (file)
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1433,7 +1433,7 @@ static bool has_bh_in_lru(int cpu, void *dummy)
 
 void invalidate_bh_lrus(void)
 {
-       on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
+       on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
index 4734416855aada79049aea6e41241281359d2241..cbc9162689d0f56c3f0fa7b8776df10cf10c4482 100644 (file)
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -51,11 +51,10 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
  * processor.
  */
 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-                     void *info, bool wait, gfp_t gfp_flags);
+                     void *info, bool wait);
 
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-                          void *info, bool wait, gfp_t gfp_flags,
-                          const struct cpumask *mask);
+                          void *info, bool wait, const struct cpumask *mask);
 
 int smp_call_function_single_async(int cpu, call_single_data_t *csd);
 
index e17e6344ab54d59b25f492e1d8cbbf83b4dd546b..3b7bedc97af38d004f74439b4e587d9666e42580 100644 (file)
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -679,11 +679,6 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * @info:      An arbitrary pointer to pass to both functions.
  * @wait:      If true, wait (atomically) until function has
  *             completed on other CPUs.
- * @gfp_flags: GFP flags to use when allocating the cpumask
- *             used internally by the function.
- *
- * The function might sleep if the GFP flags indicates a non
- * atomic allocation is allowed.
  *
  * Preemption is disabled to protect against CPUs going offline but not online.
  * CPUs going online during the call will not be seen or sent an IPI.
@@ -692,8 +687,7 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * from a hardware interrupt handler or from a bottom half handler.
  */
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-                          void *info, bool wait, gfp_t gfp_flags,
-                          const struct cpumask *mask)
+                          void *info, bool wait, const struct cpumask *mask)
 {
        int cpu = get_cpu();
 
@@ -710,10 +704,9 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-                     void *info, bool wait, gfp_t gfp_flags)
+                     void *info, bool wait)
 {
-       on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
-                               cpu_online_mask);
+       on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
 
index 5c0d4f2bece22f1d9105da98beb906b217bc22f6..53144d0562522e6d00824073f65ffa9926ae6261 100644 (file)
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -69,8 +69,7 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * same condtions in UP and SMP.
  */
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-                          void *info, bool wait, gfp_t gfp_flags,
-                          const struct cpumask *mask)
+                          void *info, bool wait, const struct cpumask *mask)
 {
        unsigned long flags;
 
@@ -85,9 +84,9 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-                     void *info, bool wait, gfp_t gfp_flags)
+                     void *info, bool wait)
 {
-       on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
+       on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
 
index 8eafccf759409b78657fae18e80ae12c9fd52789..2e1a57723f8e42ce7c14a60abee9b8af78411078 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2341,7 +2341,7 @@ static bool has_cpu_slab(int cpu, void *info)
 
 static void flush_all(struct kmem_cache *s)
 {
-       on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+       on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
 }
 
 /*