// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)

struct cfd_percpu {
	call_single_data_t	csd;
};

struct call_function_data {
	struct cfd_percpu	__percpu *pcpu;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->pcpu = alloc_percpu(struct cfd_percpu);
	if (!cfd->pcpu) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->pcpu);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);

static int __init csdlock_debug(char *str)
{
	unsigned int val = 0;

	get_option(&str, &val);
	if (val)
		static_branch_enable(&csdlock_debug_enabled);

	return 0;
}
early_param("csdlock_debug", csdlock_debug);

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);

#define CSD_LOCK_TIMEOUT (5ULL * NSEC_PER_SEC)
static atomic_t csd_bug_count = ATOMIC_INIT(0);

/* Record current CSD work for current CPU, NULL to erase. */
static void __csd_lock_record(call_single_data_t *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
		  /* Or before unlock, as the case may be. */
}

static __always_inline void csd_lock_record(call_single_data_t *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled))
		__csd_lock_record(csd);
}

static int csd_lock_wait_getcpu(call_single_data_t *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

/*
 * Complain if too much time spent waiting. Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->node.u_flags);

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		return true;
	}

	ts2 = sched_clock();
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= CSD_LOCK_TIMEOUT))
		return false;

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
		 cpu, csd->func, csd->info);
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (!trigger_single_cpu_backtrace(cpu))
			dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	dump_stack();
	*ts1 = ts2;

	return false;
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls its even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void __csd_lock_wait(call_single_data_t *csd)
{
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = sched_clock();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled)) {
		__csd_lock_wait(csd);
		return;
	}

	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#else
static void csd_lock_record(call_single_data_t *csd)
{
}

static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(call_single_data_t *csd)
{
	csd_lock_wait(csd);
	csd->node.u_flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->node.u_flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->node.llist);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, node.llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			csd_lock_record(csd);
			func(info);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->node.llist;
		}
	}

	if (!entry)
		return;

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				func(info);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->node.llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry)
		sched_ttwu_pending(entry);
}

void flush_smp_call_function_from_idle(void)
{
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	local_irq_save(flags);
	flush_smp_call_function_queue(true);
	if (local_softirq_pending())
		do_softirq();

	local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock(), because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->node.src = smp_processor_id();
	csd->node.dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
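
/*
 * Usage sketch for smp_call_function_single(): sample a CPU-local resource
 * on another CPU and wait for the result. The names read_remote_freq() and
 * struct freq_sample are hypothetical, purely for illustration:
 *
 *	struct freq_sample { u64 val; };
 *
 *	static void read_remote_freq(void *info)
 *	{
 *		struct freq_sample *s = info;
 *
 *		s->val = ...;	// runs on the target CPU, IRQs disabled
 *	}
 *
 *	struct freq_sample s;
 *	int err = smp_call_function_single(cpu, read_remote_freq, &s, 1);
 *
 * With @wait == 1 the csd lives on the caller's stack and the call does
 * not return until @func has completed on the target CPU.
 */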

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the @csd is still being processed by a previous call to
 * smp_call_function_single_async(), this function returns immediately
 * with -EBUSY to indicate that the csd object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->node.u_flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->node.u_flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
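
/*
 * Usage sketch for smp_call_function_single_async(): a csd embedded in a
 * longer-lived object, fired without waiting. struct my_engine and its
 * members are hypothetical:
 *
 *	struct my_engine {
 *		call_single_data_t	csd;
 *		unsigned long		pending;
 *	};
 *
 *	static void my_engine_kick(void *info)
 *	{
 *		struct my_engine *e = info;
 *
 *		clear_bit(0, &e->pending);	// work happens on target CPU
 *	}
 *
 *	// once, at init time:
 *	INIT_CSD(&e->csd, my_engine_kick, e);
 *
 *	// later, possibly with interrupts disabled:
 *	if (!test_and_set_bit(0, &e->pending))
 *		smp_call_function_single_async(cpu, &e->csd);
 *
 * The pending bit stands in for whatever serialization the caller uses to
 * guarantee the csd is not reused before the previous call has been
 * processed, as the comment above demands.
 */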

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
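
/*
 * Usage sketch for smp_call_function_any(): when the work only has to run
 * somewhere in a given set (say, the CPUs of one cache domain), the
 * locality preference above keeps the call cheap:
 *
 *	err = smp_call_function_any(&domain_cpus, update_domain_config,
 *				    &cfg, 1);
 *
 * domain_cpus, update_domain_config and cfg are illustrative names only.
 */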

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					bool wait, smp_cond_func_t cond_func)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock(), because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		if (!cond_func || cond_func(cpu, info))
			smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		call_single_data_t *csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;

		if (cond_func && !cond_func(cpu, info))
			continue;

		csd_lock(csd);
		if (wait)
			csd->node.u_flags |= CSD_TYPE_SYNC;
		csd->func = func;
		csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
		csd->node.src = smp_processor_id();
		csd->node.dst = cpu;
#endif
		if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;
			csd_lock_wait(csd);
		}
	}
}

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
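
/*
 * Usage sketch for smp_call_function_many(): the preemption-disabled
 * requirement above is typically satisfied with get_cpu()/put_cpu().
 * flush_mask and do_local_tlb_flush are hypothetical names:
 *
 *	get_cpu();
 *	smp_call_function_many(&flush_mask, do_local_tlb_flush, NULL, 1);
 *	put_cpu();
 *
 * Note that @func runs only on the *other* online CPUs in @mask; if the
 * calling CPU is itself in @mask, the caller must invoke @func locally
 * (which is exactly what on_each_cpu_mask() below does).
 */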

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	preempt_disable();
	smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu);

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	int cpu = get_cpu();

	smp_call_function_many_cond(mask, func, info, wait, cond_func);
	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
EXPORT_SYMBOL(on_each_cpu_cond);
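
/*
 * Usage sketch for on_each_cpu_cond(): IPI only the CPUs that actually
 * need the work, e.g. skipping CPUs whose per-cpu state is already clean.
 * The names cache_dirty, cache_is_dirty() and flush_local_cache() are
 * hypothetical:
 *
 *	static bool cache_is_dirty(int cpu, void *info)
 *	{
 *		return per_cpu(cache_dirty, cpu);  // preemption disabled here
 *	}
 *
 *	static void flush_local_cache(void *info)
 *	{
 *		...				   // runs on each dirty CPU
 *	}
 *
 *	on_each_cpu_cond(cache_is_dirty, flush_local_cache, NULL, 1);
 */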

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 * wake_up_all_idle_cpus tries to break all cpus which are in idle state,
 * including CPUs that are idle-polling. CPUs that are not idle are left
 * alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
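
/*
 * Usage sketch for smp_call_on_cpu(): unlike smp_call_function_single(),
 * @func runs from workqueue context on the target CPU, so it may sleep
 * and can return a driver-specific status. read_power_limit(), fill_limits()
 * and struct limits are hypothetical:
 *
 *	static int read_power_limit(void *data)
 *	{
 *		struct limits *l = data;
 *
 *		return fill_limits(l);	// may sleep; runs on target CPU
 *	}
 *
 *	ret = smp_call_on_cpu(cpu, read_power_limit, &limits, false);
 *
 * Passing @phys == true additionally pins the vCPU to the physical CPU
 * via hypervisor_pin_vcpu() for the duration of the call.
 */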