/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
volatile unsigned long secondary_holding_pen_release = INVALID_HWID;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP
};

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}
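
/*
 * Everything in the boot path goes through cpu_ops[]: each entry is chosen
 * by cpu_read_ops() from the DT "enable-method" property or from ACPI
 * (PSCI or the parking protocol). As a rough sketch, inferred from the
 * call sites in this file (see <asm/cpu_ops.h> for the authoritative
 * definition), the callbacks used here look like:
 *
 *	struct cpu_operations {
 *		const char	*name;
 *		int		(*cpu_init)(unsigned int cpu);
 *		int		(*cpu_prepare)(unsigned int cpu);
 *		int		(*cpu_boot)(unsigned int cpu);
 *		void		(*cpu_postboot)(void);
 *		int		(*cpu_disable)(unsigned int cpu);
 *		void		(*cpu_die)(unsigned int cpu);
 *		int		(*cpu_kill)(unsigned int cpu);
 *	};
 */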

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;

	return ret;
}
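
/*
 * Note the handshake with secondary_start_kernel(): the new CPU reads its
 * stack pointer from secondary_data (hence the cache flush above, since it
 * starts with the MMU and caches off) and completes cpu_running once it
 * has marked itself online.
 */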

static void smp_store_cpu_info(unsigned int cpuid)
{
	store_cpu_topology(cpuid);
	numa_store_cpu_info(cpuid);
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	verify_local_cpu_capabilities();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor [%08x]\n",
		cpu, read_cpuid_id());
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_async_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}
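
/*
 * secondary_start_kernel() above is entered from the low-level secondary
 * boot path in head.S once the enable method has released the CPU; by that
 * point the boot CPU is waiting on the cpu_running completion in
 * __cpu_up().
 */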

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have a cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 0;

	return cpu_ops[cpu]->cpu_kill(cpu);
}

/*
 * called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n",
			cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);

	BUG();
}
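
/*
 * Hot-unplug flow, for reference: __cpu_disable() runs on the dying CPU
 * and clears it from the online mask, __cpu_die() runs on the requesting
 * CPU and waits up to five seconds for the victim to report death,
 * cpu_die() is the dying CPU's final stop in its idle thread, and
 * op_cpu_kill() asks the enable method/firmware to confirm the CPU has
 * really left the kernel before its resources are reused.
 */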
#endif

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	cpuinfo_store_boot_cpu();
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;

	/*
	 * A cpu node with missing "reg" property is
	 * considered invalid to build a cpu_logical_map
	 * entry.
	 */
	cell = of_get_property(dn, "reg", NULL);
	if (!cell) {
		pr_err("%s: missing reg property\n", dn->full_name);
		return INVALID_HWID;
	}

	hwid = of_read_number(cell, of_n_addr_cells(dn));
	/*
	 * Non affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK) {
		pr_err("%s: invalid reg property\n", dn->full_name);
		return INVALID_HWID;
	}
	return hwid;
}
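
/*
 * For reference, a typical arm64 cpu node whose "reg" value ends up in
 * cpu_logical_map looks roughly like this (cell count and enable-method
 * vary by platform, so treat it as an illustrative sketch):
 *
 *	cpu@100 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		reg = <0x0 0x100>;
 *		enable-method = "psci";
 *	};
 *
 * The "reg" value carries the MPIDR_EL1 affinity fields; any non-affinity
 * bits must be zero or the node is rejected above.
 */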

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
	if (cpu_read_ops(cpu))
		return -ENODEV;

	if (cpu_ops[cpu]->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	cpu_logical_map(cpu_count) = hwid;

	/*
	 * Set-up the ACPI parking protocol cpu entries
	 * while initializing the cpu_logical_map to
	 * avoid parsing MADT entries multiple times for
	 * nothing (ie a valid cpu_logical_map entry should
	 * contain a valid parking protocol data set to
	 * initialize the cpu if the parking protocol is
	 * the only available enable method).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}
#else
#define acpi_table_parse_madt(...)	do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn = NULL;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%s: duplicate cpu reg properties in the DT\n",
				dn->full_name);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%s: duplicate boot cpu reg property in DT\n",
					dn->full_name);
				goto next;
			}

			bootcpu_valid = true;

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu_count) = hwid;

		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
		cpu_count++;
	}
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		/*
		 * do a walk of MADT to determine how many CPUs
		 * we have including disabled CPUs, and get information
		 * we need for SMP init
		 */
		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      acpi_parse_gic_cpu_interface, 0);

	if (cpu_count > NR_CPUS)
		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
			cpu_count, NR_CPUS);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < NR_CPUS; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				cpu_logical_map(i) = INVALID_HWID;
		}
	}
}
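
/*
 * Rough boot-time ordering, driven by generic code: smp_init_cpus() runs
 * from setup_arch() to build cpu_logical_map and the possible mask,
 * smp_prepare_cpus() later fills in the present mask, and __cpu_up()
 * finally boots each secondary when the remaining CPUs are brought online.
 */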

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu, ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/* Don't bother if we're effectively UP */
	if (max_cpus <= 1)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 *
	 * Make sure we online at most (max_cpus - 1) additional CPUs.
	 */
	max_cpus--;
	for_each_possible_cpu(cpu) {
		if (max_cpus == 0)
			break;

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		max_cpus--;
	}
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}
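
/*
 * The interrupt controller driver registers its IPI-raising routine here
 * during its own init (for example the GIC drivers pass their SGI raise
 * function); until then __smp_cross_call is NULL and no IPIs can be sent.
 */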

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_WAKEUP, "CPU wake-up interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (__smp_cross_call)
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}