/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;
enum ipi_msg_type {
        IPI_WAKEUP,
        IPI_TIMER,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
        IPI_IRQ_WORK,
        IPI_COMPLETION,
        IPI_CPU_BACKTRACE,
        /*
         * SGI8-15 can be reserved by secure firmware, and thus may
         * not be usable by the kernel. Please keep the above limited
         * to at most 8 entries.
         */
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops __ro_after_init;
void __init smp_set_ops(const struct smp_operations *ops)
{
        if (ops)
                smp_ops = *ops;
}
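/*
 * Illustrative sketch (not part of this file): a platform points the
 * kernel at its SMP methods either by calling smp_set_ops() from early
 * init code or, on DT platforms, via CPU_METHOD_OF_DECLARE(). The
 * "hypo_" names below are hypothetical placeholders:
 *
 *      static const struct smp_operations hypo_smp_ops __initconst = {
 *              .smp_init_cpus          = hypo_smp_init_cpus,
 *              .smp_prepare_cpus       = hypo_smp_prepare_cpus,
 *              .smp_boot_secondary     = hypo_boot_secondary,
 *      };
 *
 *      CPU_METHOD_OF_DECLARE(hypo_smp, "vendor,hypo-smp", &hypo_smp_ops);
 */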
static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
        return __phys_to_pfn(virt_to_phys(pgd));
#else
        return virt_to_phys(pgd);
#endif
}
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
static int secondary_biglittle_prepare(unsigned int cpu)
{
        if (!cpu_vtable[cpu])
                cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);

        return cpu_vtable[cpu] ? 0 : -ENOMEM;
}

static void secondary_biglittle_init(void)
{
        init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
}
#else
static int secondary_biglittle_prepare(unsigned int cpu)
{
        return 0;
}

static void secondary_biglittle_init(void)
{
}
#endif
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;

        if (!smp_ops.smp_boot_secondary)
                return -ENOSYS;

        ret = secondary_biglittle_prepare(cpu);
        if (ret)
                return ret;

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
        secondary_data.mpu_rgn_info = &mpu_rgn_info;
#endif

#ifdef CONFIG_MMU
        secondary_data.pgdir = virt_to_phys(idmap_pgd);
        secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
        sync_cache_w(&secondary_data);

        /*
         * Now bring the CPU into our world.
         */
        ret = smp_ops.smp_boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                wait_for_completion_timeout(&cpu_running,
                                            msecs_to_jiffies(1000));

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }

        memset(&secondary_data, 0, sizeof(secondary_data));
        return ret;
}
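/*
 * Illustrative sketch of the other half of the handshake that
 * __cpu_up() drives: a classic "holding pen" smp_boot_secondary()
 * method, loosely modelled on plat-versatile. The hypo_* function is
 * hypothetical; pen_release is the variable declared above. The boot
 * CPU publishes the target core's ID, kicks it, and waits for the
 * secondary's low-level boot code to acknowledge by resetting
 * pen_release to -1:
 *
 *      static int hypo_boot_secondary(unsigned int cpu, struct task_struct *idle)
 *      {
 *              unsigned long timeout = jiffies + (1 * HZ);
 *
 *              pen_release = cpu_logical_map(cpu);
 *              sync_cache_w(&pen_release);
 *              arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 *
 *              while (time_before(jiffies, timeout)) {
 *                      smp_rmb();
 *                      if (pen_release == -1)
 *                              return 0;
 *                      udelay(10);
 *              }
 *              return -ENOSYS;
 *      }
 */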
/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
        if (smp_ops.smp_init_cpus)
                smp_ops.smp_init_cpus();
}
int platform_can_secondary_boot(void)
{
        return !!smp_ops.smp_boot_secondary;
}
int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
        if (smp_ops.cpu_kill)
                return 1;
#endif

        return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
        if (smp_ops.cpu_kill)
                return smp_ops.cpu_kill(cpu);
        return 1;
}
static int platform_cpu_disable(unsigned int cpu)
{
        if (smp_ops.cpu_disable)
                return smp_ops.cpu_disable(cpu);

        return 0;
}
int platform_can_hotplug_cpu(unsigned int cpu)
{
        /* cpu_die must be specified to support hotplug */
        if (!smp_ops.cpu_die)
                return 0;

        if (smp_ops.cpu_can_disable)
                return smp_ops.cpu_can_disable(cpu);

        /*
         * By default, allow disabling all CPUs except the first one,
         * since this is special on a lot of platforms, e.g. because
         * of clock tick interrupts.
         */
        return cpu != 0;
}
/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = platform_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        irq_migrate_all_off_this_cpu();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         *
         * Caches are flushed to the Level of Unification Inner Shareable
         * to write-back dirty lines to unified caches shared by all CPUs.
         */
        flush_cache_louis();
        local_flush_tlb_all();

        return 0;
}
static DECLARE_COMPLETION(cpu_died);
/*
 * called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
        if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                pr_err("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        pr_debug("CPU%u: shutdown\n", cpu);

        clear_tasks_mm_cpumask(cpu);
        /*
         * platform_cpu_kill() is generally expected to do the powering off
         * and/or cutting of clocks to the dying CPU.  Optionally, this may
         * be done by the CPU which is dying in preference to supporting
         * this call, but that means there is _no_ synchronisation between
         * the requesting CPU and the dying CPU actually losing power.
         */
        if (!platform_cpu_kill(cpu))
                pr_err("CPU%u: unable to kill\n", cpu);
}
/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void arch_cpu_idle_dead(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();

        /*
         * Flush the data out of the L1 cache for this CPU.  This must be
         * before the completion to ensure that data is safely written out
         * before platform_cpu_kill() gets called - which may disable
         * *this* CPU and power down its cache.
         */
        flush_cache_louis();

        /*
         * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
         * this returns, power and/or clocks can be removed at any point
         * from this CPU and its cache by platform_cpu_kill().
         */
        complete(&cpu_died);

        /*
         * Ensure that the cache lines associated with that completion are
         * written out.  This covers the case where _this_ CPU is doing the
         * powering down, to ensure that the completion is visible to the
         * CPU waiting for this one.
         */
        flush_cache_louis();

        /*
         * The actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.  This may remove power, or it may simply spin.
         *
         * Platforms are generally expected *NOT* to return from this call,
         * although there are some which do because they have no way to
         * power down the CPU.  These platforms are the _only_ reason we
         * have a return path which uses the fragment of assembly below.
         *
         * The return path should not be used for platforms which can
         * power off the CPU.
         */
        if (smp_ops.cpu_die)
                smp_ops.cpu_die(cpu);

        pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
                cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
        __asm__("mov    sp, %0\n"
        "       mov     fp, #0\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
        cpu_info->cpuid = read_cpuid_id();

        store_cpu_topology(cpuid);
}
/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu;

        secondary_biglittle_init();

        /*
         * The identity mapping is uncached (strongly ordered), so
         * switch away from it before attempting any exclusive accesses.
         */
        cpu_switch_mm(mm->pgd, mm);
        local_flush_bp_all();
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        cpu = smp_processor_id();
        mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));

        cpu_init();

#ifndef CONFIG_MMU
        setup_vectors_base();
#endif
        pr_debug("CPU%u: Booted secondary processor\n", cpu);

        preempt_disable();
        trace_hardirqs_off();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        if (smp_ops.smp_secondary_init)
                smp_ops.smp_secondary_init(cpu);

        notify_cpu_starting(cpu);

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue - which happens after __cpu_up returns.
         */
        set_cpu_online(cpu, true);

        check_other_bugs();

        complete(&cpu_running);

        local_irq_enable();
        local_fiq_enable();
        local_abt_enable();

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);

        hyp_mode_check();
}
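/*
 * Worked example of the arithmetic above, assuming HZ=100 and a
 * hypothetical bogosum of 9,980,000 summed over two online CPUs:
 * 9980000 / (500000/100) = 1996 and (9980000 / (5000/100)) % 100 = 0,
 * so the boot log would read
 * "SMP: Total of 2 processors activated (1996.00 BogoMIPS)."
 */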
void __init smp_prepare_boot_cpu(void)
{
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int ncores = num_possible_cpus();

        init_cpu_topology();

        smp_store_cpu_info(smp_processor_id());

        /*
         * are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;
        if (ncores > 1 && max_cpus) {
                /*
                 * Initialise the present map, which describes the set of CPUs
                 * actually populated at the present time. A platform should
                 * re-initialize the map in the platform's smp_prepare_cpus()
                 * if present != possible (e.g. physical hotplug).
                 */
                init_cpu_present(cpu_possible_mask);

                /*
                 * Initialise the SCU if there is more than one CPU
                 * and let them know where to start.
                 */
                if (smp_ops.smp_prepare_cpus)
                        smp_ops.smp_prepare_cpus(max_cpus);
        }
}
static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        if (!__smp_cross_call)
                __smp_cross_call = fn;
}
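/*
 * The cross-call trigger is registered once, by the interrupt
 * controller driver, during its own init. The GIC driver
 * (drivers/irqchip/irq-gic.c), for instance, effectively does:
 *
 *      set_smp_cross_call(gic_raise_softirq);
 *
 * after which smp_cross_call() below maps each IPI number onto a
 * software-generated interrupt (SGI) targeting the given CPU mask.
 */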
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)  [x] = s
        S(IPI_WAKEUP, "CPU wakeup interrupts"),
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
        S(IPI_IRQ_WORK, "IRQ work interrupts"),
        S(IPI_COMPLETION, "completion interrupts"),
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
        trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
        __smp_cross_call(target, ipinr);
}
void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));

                seq_printf(p, " %s\n", ipi_types[i]);
        }
}
u64 smp_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);

        return sum;
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_WAKEUP);
}
void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
        if (arch_irq_work_has_interrupt())
                smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#endif
static DEFINE_RAW_SPINLOCK(stop_lock);
/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        if (system_state <= SYSTEM_RUNNING) {
                raw_spin_lock(&stop_lock);
                pr_crit("CPU%u: stopping\n", cpu);
                dump_stack();
                raw_spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        local_fiq_disable();
        local_irq_disable();

        while (1) {
                cpu_relax();
                wfe();
        }
}
static DEFINE_PER_CPU(struct completion *, cpu_completion);
int register_ipi_completion(struct completion *completion, int cpu)
{
        per_cpu(cpu_completion, cpu) = completion;
        return IPI_COMPLETION;
}
static void ipi_complete(unsigned int cpu)
{
        complete(per_cpu(cpu_completion, cpu));
}
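/*
 * Illustrative use of the completion IPI, loosely modelled on the
 * big.LITTLE switcher (kernel/bL_switcher.c): a thread that must sleep
 * until another CPU reaches a known point registers a completion
 * against its own CPU and arranges for the other CPU to raise the
 * returned IPI number when ready. Sketch, with hypothetical glue:
 *
 *      struct completion done;
 *      int ipi_nr;
 *
 *      init_completion(&done);
 *      ipi_nr = register_ipi_completion(&done, smp_processor_id());
 *      ... pass ipi_nr to code running on the other CPU, which
 *          eventually raises that SGI back at this CPU ...
 *      wait_for_completion(&done);
 *
 * handle_IPI() then invokes ipi_complete() here, completing &done and
 * waking the waiter.
 */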
/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
        handle_IPI(ipinr, regs);
}
void handle_IPI(int ipinr, struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);

        if ((unsigned)ipinr < NR_IPI) {
                trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                __inc_irq_stat(cpu, ipi_irqs[ipinr]);
        }

        switch (ipinr) {
        case IPI_WAKEUP:
                break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                irq_enter();
                tick_receive_broadcast();
                irq_exit();
                break;
#endif

        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                irq_enter();
                generic_smp_call_function_interrupt();
                irq_exit();
                break;

        case IPI_CPU_STOP:
                irq_enter();
                ipi_cpu_stop(cpu);
                irq_exit();
                break;

#ifdef CONFIG_IRQ_WORK
        case IPI_IRQ_WORK:
                irq_enter();
                irq_work_run();
                irq_exit();
                break;
#endif

        case IPI_COMPLETION:
                irq_enter();
                ipi_complete(cpu);
                irq_exit();
                break;

        case IPI_CPU_BACKTRACE:
                printk_nmi_enter();
                irq_enter();
                nmi_cpu_backtrace(regs);
                irq_exit();
                printk_nmi_exit();
                break;

        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n",
                        cpu, ipinr);
                break;
        }

        if ((unsigned)ipinr < NR_IPI)
                trace_ipi_exit_rcuidle(ipi_types[ipinr]);
        set_irq_regs(old_regs);
}
void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
        unsigned long timeout;
        struct cpumask mask;

        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);
        if (!cpumask_empty(&mask))
                smp_cross_call(&mask, IPI_CPU_STOP);

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warn("SMP: failed to stop secondary CPUs\n");
}
/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}
#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;
static int cpufreq_callback(struct notifier_block *nb,
                            unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        int cpu = freq->cpu;

        if (freq->flags & CPUFREQ_CONST_LOOPS)
                return NOTIFY_OK;

        if (!per_cpu(l_p_j_ref, cpu)) {
                per_cpu(l_p_j_ref, cpu) =
                        per_cpu(cpu_data, cpu).loops_per_jiffy;
                per_cpu(l_p_j_ref_freq, cpu) = freq->old;
                if (!global_l_p_j_ref) {
                        global_l_p_j_ref = loops_per_jiffy;
                        global_l_p_j_ref_freq = freq->old;
                }
        }

        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
                                                global_l_p_j_ref_freq,
                                                freq->new);
                per_cpu(cpu_data, cpu).loops_per_jiffy =
                        cpufreq_scale(per_cpu(l_p_j_ref, cpu),
                                      per_cpu(l_p_j_ref_freq, cpu),
                                      freq->new);
        }
        return NOTIFY_OK;
}
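/*
 * Worked example of the rescaling above: cpufreq_scale(ref, div, mult)
 * computes ref * mult / div, so with loops_per_jiffy calibrated as
 * 4997120 at a reference frequency of 1000000 kHz, a transition to
 * 500000 kHz yields 4997120 * 500000 / 1000000 = 2498560. The
 * PRECHANGE/POSTCHANGE asymmetry means loops_per_jiffy always tracks
 * the higher of the two frequencies while a transition is in flight,
 * so udelay() can only err on the long side, never the short one.
 */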
static struct notifier_block cpufreq_notifier = {
        .notifier_call  = cpufreq_callback,
};
static int __init register_cpufreq_notifier(void)
{
        return cpufreq_register_notifier(&cpufreq_notifier,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif
static void raise_nmi(cpumask_t *mask)
{
        smp_cross_call(mask, IPI_CPU_BACKTRACE);
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
        nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
}
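/*
 * These two hooks back the generic trigger_all_cpu_backtrace()
 * machinery (e.g. the SysRq-L command): lib/nmi_backtrace.c calls
 * raise_nmi() to send IPI_CPU_BACKTRACE to the requested mask, and
 * handle_IPI() above dumps each target CPU's state via
 * nmi_cpu_backtrace().
 */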