/*
 * Xtensa SMP support functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 - 2013 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com>
 * Pete Delaney <piet@tensilica.com>
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/thread_info.h>

#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/mxregs.h>
#include <asm/platform.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#ifdef CONFIG_SMP
# if XCHAL_HAVE_S32C1I == 0
#  error "The S32C1I option is required for SMP."
# endif
#endif
static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size);
static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size);
/* IPI (Inter-Processor Interrupt) */
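/*
 * IPIs travel through the MX interrupt distributor: the sender sets the
 * target CPUs' bits in MIPISET(msg), and each CPU reads and acknowledges
 * its pending messages in MIPICAUSE(cpu). All message types share one IRQ.
 */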
#define IPI_IRQ	0

static irqreturn_t ipi_interrupt(int irq, void *dev_id);
static struct irqaction ipi_irqaction = {
	.handler =	ipi_interrupt,
	.flags =	IRQF_PERCPU,
	.name =		"ipi",
};

void ipi_init(void)
{
	unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
	setup_irq(irq, &ipi_irqaction);
}
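/*
 * Both the core count and this core's id are read from the SYSCFGID
 * external register provided by the MX interconnect.
 */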
static inline unsigned int get_core_count(void)
{
	/* Bits 18..21 of SYSCFGID contain the core count minus 1. */
	unsigned int syscfgid = get_er(SYSCFGID);
	return ((syscfgid >> 18) & 0xf) + 1;
}
static inline int get_core_id(void)
{
	/* Bits 0..13 of SYSCFGID contain the core id. */
	unsigned int core_id = get_er(SYSCFGID);
	return core_id & 0x3fff;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned i;

	for (i = 0; i < max_cpus; ++i)
		set_cpu_present(i, true);
}
void __init smp_init_cpus(void)
{
	unsigned i;
	unsigned int ncpus = get_core_count();
	unsigned int core_id = get_core_id();

	pr_info("%s: Core Count = %d\n", __func__, ncpus);
	pr_info("%s: Core Id = %d\n", __func__, core_id);

	for (i = 0; i < ncpus; ++i)
		set_cpu_possible(i, true);
}
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	BUG_ON(cpu != 0);
	cpu_asid_cache(cpu) = ASID_USER_FIRST;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}

static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
static DECLARE_COMPLETION(cpu_running);
void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_KERNEL
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			 __func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		 __func__, boot_secondary_processors, cpu);
#endif
	/* Init EXCSAVE1 */
	secondary_trap_init();

	/* All kernel threads share the same mm context. */
	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	preempt_disable();
	trace_hardirqs_off();

	calibrate_delay();

	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);

	set_cpu_online(cpu, true);

	local_irq_enable();

	complete(&cpu_running);

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
static void mx_cpu_start(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
		 __func__, cpu, run_stall_mask, get_er(MPSCORE));
}
static void mx_cpu_stop(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask | (1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
		 __func__, cpu, run_stall_mask, get_er(MPSCORE));
}
#ifdef CONFIG_HOTPLUG_CPU
unsigned long cpu_start_id __cacheline_aligned;
#endif
unsigned long cpu_start_ccount;
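/*
 * Boot handshake: publish a nonzero ccount in cpu_start_ccount and wait
 * for the starting core to clear it, twice in a row. If the secondary
 * never responds before the timeout, stall it again and fail with -EIO.
 */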
static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

#ifdef CONFIG_HOTPLUG_CPU
	cpu_start_id = cpu;
	system_flush_invalidate_dcache_range(
			(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
#endif
	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		do
			ccount = get_ccount();
		while (!ccount);

		cpu_start_ccount = ccount;

		while (time_before(jiffies, timeout)) {
			mb();
			if (!cpu_start_ccount)
				break;
		}

		if (cpu_start_ccount) {
			smp_call_function_single(0, mx_cpu_stop,
					(void *)cpu, 1);
			cpu_start_ccount = 0;
			return -EIO;
		}
	}
	return 0;
}
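/*
 * Bring a secondary online: hand it the idle task's stack through
 * start_info, release it from RunStall, then wait (with a timeout) for
 * it to show up in the online mask.
 */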
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret = 0;

	if (cpu_asid_cache(cpu) == 0)
		cpu_asid_cache(cpu) = ASID_USER_FIRST;

	start_info.stack = (unsigned long)task_pt_regs(idle);
	wmb();

	pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
		 __func__, cpu, idle, start_info.stack);

	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		wait_for_completion_timeout(&cpu_running,
				msecs_to_jiffies(1000));
		if (!cpu_online(cpu))
			ret = -EIO;
	}

	if (ret)
		pr_err("CPU %u failed to boot\n", cpu);

	return ret;
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	local_flush_cache_all();
	local_flush_tlb_all();
	invalidate_page_directory();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
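/* Ask CPU 0 to put the dead core back into RunStall. */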
static void platform_cpu_kill(unsigned int cpu)
{
	smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
}
/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	while (time_before(jiffies, timeout)) {
		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
				sizeof(cpu_start_id));
		if (cpu_start_id == -cpu) {
			platform_cpu_kill(cpu);
			return;
		}
	}
	pr_err("CPU%u: unable to kill\n", cpu);
}
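/* Idle-loop hook on a CPU that has been taken offline; never returns. */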
void arch_cpu_idle_dead(void)
{
	cpu_die();
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	idle_task_exit();
	local_irq_disable();
	__asm__ __volatile__(
			"	movi	a2, cpu_restart\n"
			"	jx	a2\n");
}

#endif /* CONFIG_HOTPLUG_CPU */
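/*
 * IPI message types and the per-CPU counters reported through
 * show_ipi_list().
 */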
enum ipi_msg_type {
	IPI_RESCHEDULE = 0,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

static const struct {
	const char *short_text;
	const char *long_text;
} ipi_text[] = {
	{ .short_text = "RES", .long_text = "Rescheduling interrupts" },
	{ .short_text = "CAL", .long_text = "Function call interrupts" },
	{ .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
};

struct ipi_data {
	unsigned long ipi_count[IPI_MAX];
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);
static void send_ipi_message(const struct cpumask *callmask,
		enum ipi_msg_type msg_id)
{
	int index;
	unsigned long mask = 0;

	for_each_cpu(index, callmask)
		if (index != smp_processor_id())
			mask |= 1 << index;

	set_er(mask, MIPISET(msg_id));
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	send_ipi_message(&targets, IPI_CPU_STOP);
}
static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);
	machine_halt();
}
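/*
 * One MX interrupt multiplexes all IPI types: read the pending bits from
 * MIPICAUSE, acknowledge and count each one, then dispatch the handlers.
 */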
irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	unsigned int msg;
	unsigned i;

	msg = get_er(MIPICAUSE(cpu));
	for (i = 0; i < IPI_MAX; i++)
		if (msg & (1 << i)) {
			set_er(1 << i, MIPICAUSE(cpu));
			++ipi->ipi_count[i];
		}

	if (msg & (1 << IPI_RESCHEDULE))
		scheduler_ipi();
	if (msg & (1 << IPI_CALL_FUNC))
		generic_smp_call_function_interrupt();
	if (msg & (1 << IPI_CPU_STOP))
		ipi_cpu_stop(cpu);

	return IRQ_HANDLED;
}
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu;
	unsigned i;

	for (i = 0; i < IPI_MAX; ++i) {
		seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
		for_each_online_cpu(cpu)
			seq_printf(p, " %10lu",
					per_cpu(ipi_data, cpu).ipi_count[i]);
		seq_printf(p, " %s\n", ipi_text[i].long_text);
	}
}
int setup_profiling_timer(unsigned int multiplier)
{
	pr_debug("setup_profiling_timer %d\n", multiplier);
	return 0;
}
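/*
 * Cross-CPU cache and TLB maintenance: the arguments are packed into a
 * stack-allocated struct flush_data and broadcast with on_each_cpu(), so
 * the corresponding local_* operation runs on every core, caller included.
 */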
/* TLB flush functions */

struct flush_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};
static void ipi_flush_tlb_all(void *arg)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

static void ipi_flush_tlb_mm(void *arg)
{
	local_flush_tlb_mm(arg);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_mm, mm, 1);
}
static void ipi_flush_tlb_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = addr,
	};
	on_each_cpu(ipi_flush_tlb_page, &fd, 1);
}
static void ipi_flush_tlb_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}
static void ipi_flush_tlb_kernel_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
}
/* Cache flush functions */

static void ipi_flush_cache_all(void *arg)
{
	local_flush_cache_all();
}

void flush_cache_all(void)
{
	on_each_cpu(ipi_flush_cache_all, NULL, 1);
}
static void ipi_flush_cache_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_page(struct vm_area_struct *vma,
		      unsigned long address, unsigned long pfn)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = address,
		.addr2 = pfn,
	};
	on_each_cpu(ipi_flush_cache_page, &fd, 1);
}
static void ipi_flush_cache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_cache_range, &fd, 1);
}
static void ipi_flush_icache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_icache_range(fd->addr1, fd->addr2);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_icache_range, &fd, 1);
}
EXPORT_SYMBOL(flush_icache_range);
/* ------------------------------------------------------------------------- */
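/*
 * System-wide dcache maintenance: run the range operation on every core.
 * boot_secondary() and __cpu_die() rely on these so that all cores see a
 * consistent view of cpu_start_id.
 */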
static void ipi_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
}
static void ipi_flush_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__flush_invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
}