kernel/smp.c
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_WAIT		= 0x02,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
					     cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		cfd->csd = alloc_percpu(struct call_single_data);
		if (!cfd->csd) {
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_percpu(cfd->csd);
		break;
#endif
	};

	return NOTIFY_OK;
}

static struct notifier_block hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *csd)
{
	while (csd->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *csd)
{
	WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	csd->flags &= ~CSD_FLAG_LOCK;
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
			       smp_call_func_t func, void *info, int wait)
{
	struct call_single_data csd_stack = { .flags = 0 };
	unsigned long flags;

	if (cpu == smp_processor_id()) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	if (!csd) {
		csd = &csd_stack;
		if (!wait)
			csd = &__get_cpu_var(csd_data);
	}

	csd_lock(csd);

	csd->func = func;
	csd->info = info;

	if (wait)
		csd->flags |= CSD_FLAG_WAIT;

	/*
	 * The list addition should be visible before sending the IPI:
	 * the handler locks the list to pull the entry off it, and relies
	 * on the normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol in an architecture, sufficient synchronisation should be
	 * added to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(csd);

	return 0;
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct llist_node *entry;
	struct call_single_data *csd, *csd_next;

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	entry = llist_del_all(&__get_cpu_var(call_single_queue));
	entry = llist_reverse_order(entry);

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		csd->func(csd->info);
		csd_unlock(csd);
	}
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	err = generic_exec_single(cpu, NULL, func, info, wait);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
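
/*
 * Illustrative sketch (not part of the original source): one way a caller
 * might use smp_call_function_single() to run a short function on a chosen
 * CPU and wait for it to finish. The example_* names are hypothetical.
 */
static void example_read_cpu_id(void *info)
{
	/* Runs on the target CPU with interrupts disabled; must not block. */
	*(int *)info = smp_processor_id();
}

static int example_query_cpu(int cpu)
{
	int id = -1;
	int err;

	/* wait == 1: only return once example_read_cpu_id() has run on @cpu. */
	err = smp_call_function_single(cpu, example_read_cpu_id, &id, 1);

	return err ? err : id;
}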
d7877c03 240/**
c46fff2a
FW
241 * smp_call_function_single_async(): Run an asynchronous function on a
242 * specific CPU.
d7877c03
FW
243 * @cpu: The CPU to run on.
244 * @csd: Pre-allocated and setup data structure
d7877c03 245 *
c46fff2a
FW
246 * Like smp_call_function_single(), but the call is asynchonous and
247 * can thus be done from contexts with disabled interrupts.
248 *
249 * The caller passes his own pre-allocated data structure
250 * (ie: embedded in an object) and is responsible for synchronizing it
251 * such that the IPIs performed on the @csd are strictly serialized.
252 *
253 * NOTE: Be careful, there is unfortunately no current debugging facility to
254 * validate the correctness of this serialization.
d7877c03 255 */
c46fff2a 256int smp_call_function_single_async(int cpu, struct call_single_data *csd)
d7877c03
FW
257{
258 int err = 0;
d7877c03 259
fce8ad15
FW
260 preempt_disable();
261 err = generic_exec_single(cpu, csd, csd->func, csd->info, 0);
262 preempt_enable();
d7877c03
FW
263
264 return err;
265}
c46fff2a 266EXPORT_SYMBOL_GPL(smp_call_function_single_async);
d7877c03 267
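
/*
 * Illustrative sketch (not part of the original source): asynchronous use
 * with a caller-owned call_single_data embedded in an object, as described
 * above. struct example_poke and the example_poke_*() functions are
 * hypothetical; assume the object was zero-initialized (e.g. kzalloc()) so
 * csd.flags starts out unlocked, and that the caller serializes calls so a
 * new IPI is only issued after the previous one has run.
 */
struct example_poke {
	struct call_single_data	csd;
};

static void example_poke_func(void *info)
{
	/* Runs on the target CPU in IPI context; keep it short. */
}

static int example_poke_cpu(int cpu, struct example_poke *p)
{
	p->csd.func = example_poke_func;
	p->csd.info = p;

	/* Returns without waiting; safe to call with interrupts disabled. */
	return smp_call_function_single_async(cpu, &p->csd);
}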

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
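
/*
 * Illustrative sketch (not part of the original source): running a quick
 * probe on whichever CPU of a caller-supplied mask is cheapest to reach,
 * following the selection preference documented above. The example_* names
 * are hypothetical.
 */
static void example_probe(void *info)
{
	/* Runs on exactly one CPU of the mask; must be fast and non-blocking. */
	*(int *)info = smp_processor_id();
}

static int example_probe_any(const struct cpumask *mask)
{
	int where = -1;
	int err;

	/* wait == 1: err reports whether any online CPU in @mask ran it. */
	err = smp_call_function_any(mask, example_probe, &where, 1);

	return err ? err : where;
}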

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = &__get_cpu_var(cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		csd->func = func;
		csd->info = info;
		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
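
/*
 * Illustrative sketch (not part of the original source): flushing per-CPU
 * state on a set of CPUs. smp_call_function_many() never calls @func on the
 * calling CPU and requires preemption to be disabled, so the local CPU is
 * handled by hand here, much like on_each_cpu_mask() below does. The
 * example_* names are hypothetical.
 */
static void example_flush(void *info)
{
	/* Per-CPU flush work; must be fast and non-blocking. */
}

static void example_flush_mask(const struct cpumask *mask)
{
	unsigned long flags;

	preempt_disable();
	smp_call_function_many(mask, example_flush, NULL, true);
	if (cpumask_test_cpu(smp_processor_id(), mask)) {
		local_irq_save(flags);
		example_flush(NULL);
		local_irq_restore(flags);
	}
	preempt_enable();
}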

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is a hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

void __weak smp_announce(void)
{
	printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	idle_threads_init();

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	smp_announce();
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
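
/*
 * Illustrative sketch (not part of the original source): draining a per-CPU
 * pool on every online CPU of one NUMA node, including the local CPU if it
 * belongs to that node. example_drain_pool() and example_drain_node() are
 * hypothetical.
 */
static void example_drain_pool(void *info)
{
	/* Runs on each selected CPU; must be fast and non-blocking. */
}

static void example_drain_node(int node)
{
	/* wait == true: return only after every selected CPU has drained. */
	on_each_cpu_mask(cpumask_of_node(node), example_drain_pool, NULL, true);
}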

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
							       info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
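
/*
 * Illustrative sketch (not part of the original source): only IPI the CPUs
 * whose (hypothetical) per-CPU pool is non-empty instead of interrupting
 * every CPU. The example_pool_* names are hypothetical.
 */
static DEFINE_PER_CPU(unsigned int, example_pool_count);

static bool example_pool_nonempty(int cpu, void *info)
{
	/* Called with preemption disabled; decides whether @cpu gets an IPI. */
	return per_cpu(example_pool_count, cpu) != 0;
}

static void example_pool_drain(void *info)
{
	this_cpu_write(example_pool_count, 0);
}

static void example_pool_drain_busy_cpus(void)
{
	/* GFP_KERNEL: may sleep while allocating the internal cpumask. */
	on_each_cpu_cond(example_pool_nonempty, example_pool_drain, NULL,
			 true, GFP_KERNEL);
}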

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of the pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via the pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);