// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)  ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
struct cfd_percpu {
        call_single_data_t      csd;
};

struct call_function_data {
        struct cfd_percpu       __percpu *pcpu;
        cpumask_var_t           cpumask;
        cpumask_var_t           cpumask_ipi;
};
static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);
int smpcfd_prepare_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                     cpu_to_node(cpu)))
                return -ENOMEM;
        if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
                                     cpu_to_node(cpu))) {
                free_cpumask_var(cfd->cpumask);
                return -ENOMEM;
        }
        cfd->pcpu = alloc_percpu(struct cfd_percpu);
        if (!cfd->pcpu) {
                free_cpumask_var(cfd->cpumask);
                free_cpumask_var(cfd->cpumask_ipi);
                return -ENOMEM;
        }

        return 0;
}
int smpcfd_dead_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        free_cpumask_var(cfd->cpumask);
        free_cpumask_var(cfd->cpumask_ipi);
        free_percpu(cfd->pcpu);
        return 0;
}
int smpcfd_dying_cpu(unsigned int cpu)
{
        /*
         * The IPIs for the smp-call-function callbacks queued by other
         * CPUs might arrive late, either due to hardware latencies or
         * because this CPU disabled interrupts (inside stop-machine)
         * before the IPIs were sent. So flush out any pending callbacks
         * explicitly (without waiting for the IPIs to arrive), to
         * ensure that the outgoing CPU doesn't go offline with work
         * still pending.
         */
        flush_smp_call_function_queue(false);
        return 0;
}
void __init call_function_init(void)
{
        int i;

        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(call_single_queue, i));

        smpcfd_prepare_cpu(smp_processor_id());
}
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);

static int __init csdlock_debug(char *str)
{
        unsigned int val = 0;

        get_option(&str, &val);
        if (val)
                static_branch_enable(&csdlock_debug_enabled);

        return 0;
}
early_param("csdlock_debug", csdlock_debug);
static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);

#define CSD_LOCK_TIMEOUT (5ULL * NSEC_PER_SEC)
static atomic_t csd_bug_count = ATOMIC_INIT(0);
/* Record current CSD work for current CPU, NULL to erase. */
static void __csd_lock_record(call_single_data_t *csd)
{
        if (!csd) {
                smp_mb(); /* NULL cur_csd after unlock. */
                __this_cpu_write(cur_csd, NULL);
                return;
        }
        __this_cpu_write(cur_csd_func, csd->func);
        __this_cpu_write(cur_csd_info, csd->info);
        smp_wmb(); /* func and info before csd. */
        __this_cpu_write(cur_csd, csd);
        smp_mb(); /* Update cur_csd before function call. */
                  /* Or before unlock, as the case may be. */
}
static __always_inline void csd_lock_record(call_single_data_t *csd)
{
        if (static_branch_unlikely(&csdlock_debug_enabled))
                __csd_lock_record(csd);
}
static int csd_lock_wait_getcpu(call_single_data_t *csd)
{
        unsigned int csd_type;

        csd_type = CSD_TYPE(csd);
        if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
                return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
        return -1;
}
/*
 * Complain if too much time spent waiting.  Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
{
        int cpu = -1;
        int cpux;
        bool firsttime;
        u64 ts2, ts_delta;
        call_single_data_t *cpu_cur_csd;
        unsigned int flags = READ_ONCE(csd->node.u_flags);

        if (!(flags & CSD_FLAG_LOCK)) {
                if (!unlikely(*bug_id))
                        return true;
                cpu = csd_lock_wait_getcpu(csd);
                pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
                         *bug_id, raw_smp_processor_id(), cpu);
                return true;
        }

        ts2 = sched_clock();
        ts_delta = ts2 - *ts1;
        if (likely(ts_delta <= CSD_LOCK_TIMEOUT))
                return false;

        firsttime = !*bug_id;
        if (firsttime)
                *bug_id = atomic_inc_return(&csd_bug_count);
        cpu = csd_lock_wait_getcpu(csd);
        if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
                cpux = 0;
        else
                cpux = cpu;
        cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
        pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
                 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
                 cpu, csd->func, csd->info);
        if (cpu_cur_csd && csd != cpu_cur_csd) {
                pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
                         *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
                         READ_ONCE(per_cpu(cur_csd_info, cpux)));
        } else {
                pr_alert("\tcsd: CSD lock (#%d) %s.\n",
                         *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
        }
        if (cpu >= 0) {
                if (!trigger_single_cpu_backtrace(cpu))
                        dump_cpu_task(cpu);
                if (!cpu_cur_csd) {
                        pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
                        arch_send_call_function_single_ipi(cpu);
                }
        }
        dump_stack();
        *ts1 = ts2;

        return false;
}
/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void __csd_lock_wait(call_single_data_t *csd)
{
        int bug_id = 0;
        u64 ts0, ts1;

        ts1 = ts0 = sched_clock();
        for (;;) {
                if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
                        break;
                cpu_relax();
        }
        smp_acquire__after_ctrl_dep();
}
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
        if (static_branch_unlikely(&csdlock_debug_enabled)) {
                __csd_lock_wait(csd);
                return;
        }

        smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#else
static void csd_lock_record(call_single_data_t *csd)
{
}

static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
        smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#endif
static __always_inline void csd_lock(call_single_data_t *csd)
{
        csd_lock_wait(csd);
        csd->node.u_flags |= CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data_t structure:
         */
        smp_wmb();
}
static __always_inline void csd_unlock(call_single_data_t *csd)
{
        WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_store_release(&csd->node.u_flags, 0);
}
static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
void __smp_call_single_queue(int cpu, struct llist_node *node)
{
        /*
         * The list addition should be visible to the IPI handler that locks
         * the list to pull the entry off it, because of normal cache
         * coherency rules implied by spinlocks.
         *
         * If IPIs can go out of order to the cache coherency protocol
         * in an architecture, sufficient synchronisation should be added
         * to arch code to make it appear to obey cache coherency WRT
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (llist_add(node, &per_cpu(call_single_queue, cpu)))
                send_call_function_single_ipi(cpu);
}
/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd)
{
        if (cpu == smp_processor_id()) {
                smp_call_func_t func = csd->func;
                void *info = csd->info;
                unsigned long flags;

                /*
                 * We can unlock early even for the synchronous on-stack case,
                 * since we're doing this from the same CPU..
                 */
                csd_lock_record(csd);
                csd_unlock(csd);
                local_irq_save(flags);
                func(info);
                csd_lock_record(NULL);
                local_irq_restore(flags);
                return 0;
        }

        if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                csd_unlock(csd);
                return -ENXIO;
        }

        __smp_call_single_queue(cpu, &csd->node.llist);

        return 0;
}
/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        flush_smp_call_function_queue(true);
}
/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *                    offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
        call_single_data_t *csd, *csd_next;
        struct llist_node *entry, *prev;
        struct llist_head *head;
        static bool warned;

        lockdep_assert_irqs_disabled();

        head = this_cpu_ptr(&call_single_queue);
        entry = llist_del_all(head);
        entry = llist_reverse_order(entry);

        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
                     !warned && !llist_empty(head))) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

                /*
                 * We don't have to use the _safe() variant here
                 * because we are not invoking the IPI handlers yet.
                 */
                llist_for_each_entry(csd, entry, node.llist) {
                        switch (CSD_TYPE(csd)) {
                        case CSD_TYPE_ASYNC:
                        case CSD_TYPE_SYNC:
                        case CSD_TYPE_IRQ_WORK:
                                pr_warn("IPI callback %pS sent to offline CPU\n",
                                        csd->func);
                                break;

                        case CSD_TYPE_TTWU:
                                pr_warn("IPI task-wakeup sent to offline CPU\n");
                                break;

                        default:
                                pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
                                        CSD_TYPE(csd));
                                break;
                        }
                }
        }

        /*
         * First; run all SYNC callbacks, people are waiting for us.
         */
        prev = NULL;
        llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
                /* Do we wait until *after* callback? */
                if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
                        smp_call_func_t func = csd->func;
                        void *info = csd->info;

                        if (prev) {
                                prev->next = &csd_next->node.llist;
                        } else {
                                entry = &csd_next->node.llist;
                        }

                        csd_lock_record(csd);
                        func(info);
                        csd_unlock(csd);
                        csd_lock_record(NULL);
                } else {
                        prev = &csd->node.llist;
                }
        }

        if (!entry)
                return;

        /*
         * Second; run all !SYNC callbacks.
         */
        prev = NULL;
        llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
                int type = CSD_TYPE(csd);

                if (type != CSD_TYPE_TTWU) {
                        if (prev) {
                                prev->next = &csd_next->node.llist;
                        } else {
                                entry = &csd_next->node.llist;
                        }

                        if (type == CSD_TYPE_ASYNC) {
                                smp_call_func_t func = csd->func;
                                void *info = csd->info;

                                csd_lock_record(csd);
                                csd_unlock(csd);
                                func(info);
                                csd_lock_record(NULL);
                        } else if (type == CSD_TYPE_IRQ_WORK) {
                                irq_work_single(csd);
                        }

                } else {
                        prev = &csd->node.llist;
                }
        }

        /*
         * Third; only CSD_TYPE_TTWU is left, issue those.
         */
        if (entry)
                sched_ttwu_pending(entry);
}
void flush_smp_call_function_from_idle(void)
{
        unsigned long flags;

        if (llist_empty(this_cpu_ptr(&call_single_queue)))
                return;

        local_irq_save(flags);
        flush_smp_call_function_queue(true);
        if (local_softirq_pending())
                do_softirq();

        local_irq_restore(flags);
}
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        call_single_data_t *csd;
        call_single_data_t csd_stack = {
                .node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
        };
        int this_cpu;
        int err;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        /*
         * When @wait we can deadlock when we interrupt between llist_add() and
         * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
         * csd_lock() because the interrupt context uses the same csd
         * storage.
         */
        WARN_ON_ONCE(!in_task());

        csd = &csd_stack;
        if (!wait) {
                csd = this_cpu_ptr(&csd_data);
                csd_lock(csd);
        }

        csd->func = func;
        csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
        csd->node.src = smp_processor_id();
        csd->node.dst = cpu;
#endif

        err = generic_exec_single(cpu, csd);

        if (wait)
                csd_lock_wait(csd);

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);
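
/*
 * Illustrative sketch, not part of the original file: a typical caller uses
 * smp_call_function_single() to sample per-CPU state on a chosen CPU.  The
 * smp_example_* names are made up for this example.
 */
struct smp_example_sample {
        u64 clock;
};

static void smp_example_read_clock(void *info)
{
        struct smp_example_sample *s = info;

        /* Runs on the target CPU in IPI context with interrupts disabled. */
        s->clock = sched_clock();
}

static int __maybe_unused smp_example_sample_cpu(int cpu)
{
        struct smp_example_sample s = { 0 };
        int err;

        /* wait=1: do not return until the callback has completed on @cpu. */
        err = smp_call_function_single(cpu, smp_example_read_clock, &s, 1);
        if (err)
                return err;     /* e.g. -ENXIO if @cpu is offline */

        pr_info("cpu%d sched_clock: %llu\n", cpu, s.clock);
        return 0;
}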
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *                                   specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by a previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
        int err = 0;

        preempt_disable();

        if (csd->node.u_flags & CSD_FLAG_LOCK) {
                err = -EBUSY;
                goto out;
        }

        csd->node.u_flags = CSD_FLAG_LOCK;
        smp_wmb();

        err = generic_exec_single(cpu, csd);

out:
        preempt_enable();

        return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
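
/*
 * Illustrative sketch, not part of the original file: an object embeds a
 * call_single_data_t and uses smp_call_function_single_async() to poke a
 * remote CPU without waiting.  The struct and function names are made up;
 * the object is assumed to be zero-initialized and never re-posted while a
 * previous IPI on the same csd is still in flight.
 */
struct smp_example_poke {
        call_single_data_t      csd;
        int                     target_cpu;
};

static void smp_example_poke_func(void *info)
{
        /* Runs in IPI context on the target CPU; keep it short. */
}

static int __maybe_unused smp_example_poke(struct smp_example_poke *p)
{
        p->csd.func = smp_example_poke_func;
        p->csd.info = p;

        /* Returns -EBUSY if the previous IPI on this csd has not run yet. */
        return smp_call_function_single_async(p->target_cpu, &p->csd);
}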
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *      1) current cpu if in @mask
 *      2) any cpu of current node if in @mask
 *      3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
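
/*
 * Illustrative sketch, not part of the original file: run a short callback
 * on whichever CPU of a given affinity mask is cheapest to reach, preferring
 * the local CPU, then the local node.  Names are made up.
 */
static void smp_example_touch(void *info)
{
        /* Short, non-blocking work on whichever CPU was selected. */
}

static int __maybe_unused smp_example_touch_near(const struct cpumask *affinity)
{
        /* wait=1: only return once smp_example_touch() has completed. */
        return smp_call_function_any(affinity, smp_example_touch, NULL, 1);
}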
static void smp_call_function_many_cond(const struct cpumask *mask,
                                        smp_call_func_t func, void *info,
                                        bool wait, smp_cond_func_t cond_func)
{
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress && !early_boot_irqs_disabled);

        /*
         * When @wait we can deadlock when we interrupt between llist_add() and
         * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
         * csd_lock() because the interrupt context uses the same csd
         * storage.
         */
        WARN_ON_ONCE(!in_task());

        /* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus?  We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                if (!cond_func || cond_func(cpu, info))
                        smp_call_function_single(cpu, func, info, wait);
                return;
        }

        cfd = this_cpu_ptr(&cfd_data);

        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
        __cpumask_clear_cpu(this_cpu, cfd->cpumask);

        /* Some callers race with other cpus changing the passed mask */
        if (unlikely(!cpumask_weight(cfd->cpumask)))
                return;

        cpumask_clear(cfd->cpumask_ipi);
        for_each_cpu(cpu, cfd->cpumask) {
                call_single_data_t *csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;

                if (cond_func && !cond_func(cpu, info))
                        continue;

                csd_lock(csd);
                if (wait)
                        csd->node.u_flags |= CSD_TYPE_SYNC;
                csd->func = func;
                csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
                csd->node.src = smp_processor_id();
                csd->node.dst = cpu;
#endif
                if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu)))
                        __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
        }

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

        if (wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        call_single_data_t *csd;

                        csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;
                        csd_lock_wait(csd);
                }
        }
}
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        smp_call_function_many_cond(mask, func, info, wait, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
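
/*
 * Illustrative sketch, not part of the original file: broadcast a callback
 * to a mask of other CPUs.  Preemption must be disabled across the call and
 * the local CPU is never called, so it is handled separately if needed.
 * Names are made up.
 */
static void smp_example_sync_state(void *info)
{
        /* Runs on every online CPU in the mask except the calling CPU. */
}

static void __maybe_unused smp_example_broadcast(const struct cpumask *mask)
{
        preempt_disable();
        smp_call_function_many(mask, smp_example_sync_state, NULL, true);
        smp_example_sync_state(NULL);   /* cover the local CPU as well */
        preempt_enable();
}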
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);
/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();

        return 0;
}

early_param("nosmp", nosmp);
/* this is hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                nr_cpu_ids = nr_cpus;

        return 0;
}

early_param("nr_cpus", nrcpus);
static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}

early_param("maxcpus", maxcpus);
/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}
/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
        int num_nodes, num_cpus;

        idle_threads_init();
        cpuhp_threads_init();

        pr_info("Bringing up secondary CPUs ...\n");

        bringup_nonboot_cpus(setup_max_cpus);

        num_nodes = num_online_nodes();
        num_cpus  = num_online_cpus();
        pr_info("Brought up %d node%s, %d CPU%s\n",
                num_nodes, (num_nodes > 1 ? "s" : ""),
                num_cpus,  (num_cpus  > 1 ? "s" : ""));

        /* Any cleanup work */
        smp_cpus_done(setup_max_cpus);
}
/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
        unsigned long flags;

        preempt_disable();
        smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu);
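
/*
 * Illustrative sketch, not part of the original file: on_each_cpu() runs the
 * callback on every online CPU, including this one, which makes it a common
 * way to reset per-CPU state.  The per-CPU counter is made up.
 */
static DEFINE_PER_CPU(unsigned long, smp_example_hits);

static void smp_example_clear_hits(void *unused)
{
        /* Executed on each CPU with interrupts disabled. */
        this_cpu_write(smp_example_hits, 0);
}

static void __maybe_unused smp_example_reset_hits(void)
{
        /* wait=1: every CPU has cleared its counter by the time this returns. */
        on_each_cpu(smp_example_clear_hits, NULL, 1);
}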
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                      void *info, bool wait)
{
        int cpu = get_cpu();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
                unsigned long flags;

                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
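
/*
 * Illustrative sketch, not part of the original file: unlike
 * smp_call_function_many(), on_each_cpu_mask() also invokes the callback
 * locally when the current CPU is in the mask.  The node-mask usage and
 * names are made up.
 */
static void smp_example_tick(void *info)
{
        /* Runs on each online CPU of the mask, possibly including this one. */
}

static void __maybe_unused smp_example_tick_node(int node)
{
        on_each_cpu_mask(cpumask_of_node(node), smp_example_tick, NULL, true);
}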
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:  A callback function that is passed a cpu id and
 *              the info parameter. The function is called
 *              with preemption disabled. The function should
 *              return a boolean value indicating whether to IPI
 *              the specified CPU.
 * @func:       The function to run on all applicable CPUs.
 *              This must be fast and non-blocking.
 * @info:       An arbitrary pointer to pass to both functions.
 * @wait:       If true, wait (atomically) until function has
 *              completed on other CPUs.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
                           void *info, bool wait, const struct cpumask *mask)
{
        int cpu = get_cpu();

        smp_call_function_many_cond(mask, func, info, wait, cond_func);
        if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
                unsigned long flags;

                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
                      void *info, bool wait)
{
        on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
EXPORT_SYMBOL(on_each_cpu_cond);
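
/*
 * Illustrative sketch, not part of the original file: only IPI the CPUs
 * whose per-CPU flag is set, in the style of a conditional flush.  The
 * per-CPU variable and both callbacks are made up.
 */
static DEFINE_PER_CPU(bool, smp_example_dirty);

static bool smp_example_cpu_is_dirty(int cpu, void *info)
{
        /* Called with preemption disabled; decides whether to IPI @cpu. */
        return per_cpu(smp_example_dirty, cpu);
}

static void smp_example_clean_cpu(void *info)
{
        this_cpu_write(smp_example_dirty, false);
}

static void __maybe_unused smp_example_clean_dirty_cpus(void)
{
        on_each_cpu_cond(smp_example_cpu_is_dirty, smp_example_clean_cpu,
                         NULL, true);
}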
static void do_nothing(void *unused)
{
}
/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
        /* Make sure the change is visible before we kick the cpus */
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 * wake_up_all_idle_cpus tries to break all cpus which are in idle state,
 * including idle polling cpus; for non-idle cpus, we will do nothing
 * for them.
 */
void wake_up_all_idle_cpus(void)
{
        int cpu;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;

                wake_up_if_idle(cpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
        struct work_struct      work;
        struct completion       done;
        int                     (*func)(void *);
        void                    *data;
        int                     ret;
        int                     cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
        struct smp_call_on_cpu_struct *sscs;

        sscs = container_of(work, struct smp_call_on_cpu_struct, work);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(sscs->cpu);
        sscs->ret = sscs->func(sscs->data);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(-1);

        complete(&sscs->done);
}
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
        struct smp_call_on_cpu_struct sscs = {
                .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
                .func = func,
                .data = par,
                .cpu  = phys ? cpu : -1,
        };

        INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;

        queue_work_on(cpu, system_wq, &sscs.work);
        wait_for_completion(&sscs.done);

        return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
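
/*
 * Illustrative sketch, not part of the original file: smp_call_on_cpu()
 * runs a function that may sleep in workqueue context on the chosen CPU and
 * returns its integer result.  Names are made up.
 */
static int smp_example_probe(void *data)
{
        /* May sleep: runs from a kworker bound to the requested CPU. */
        return 0;
}

static int __maybe_unused smp_example_probe_on(unsigned int cpu)
{
        /* phys=false: no hypervisor vCPU pinning requested. */
        return smp_call_on_cpu(cpu, smp_example_probe, NULL, false);
}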