/*
 * Xtensa SMP support functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 - 2013 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com>
 * Pete Delaney <piet@tensilica.com>
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/thread_info.h>

#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/mxregs.h>
#include <asm/platform.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

#ifdef CONFIG_SMP
# if XCHAL_HAVE_S32C1I == 0
#  error "The S32C1I option is required for SMP."
# endif
#endif

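/*
 * Note: S32C1I is the Xtensa conditional-store instruction; the SMP
 * atomic and locking primitives are built on it, hence the hard
 * requirement above.
 */
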
static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size);
static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size);

/* IPI (Inter-Processor Interrupt) */

#define IPI_IRQ 0

static irqreturn_t ipi_interrupt(int irq, void *dev_id);
static struct irqaction ipi_irqaction = {
	.handler =	ipi_interrupt,
	.flags =	IRQF_PERCPU,
	.name =		"ipi",
};

void ipi_init(void)
{
	unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
	setup_irq(irq, &ipi_irqaction);
}

static inline unsigned int get_core_count(void)
{
	/* Bits 18..21 of SYSCFGID contain the core count minus 1. */
	unsigned int syscfgid = get_er(SYSCFGID);
	return ((syscfgid >> 18) & 0xf) + 1;
}

static inline int get_core_id(void)
{
	/* Bits 0..13 of SYSCFGID contain the core id. */
	unsigned int core_id = get_er(SYSCFGID);
	return core_id & 0x3fff;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned i;

	for (i = 0; i < max_cpus; ++i)
		set_cpu_present(i, true);
}

void __init smp_init_cpus(void)
{
	unsigned i;
	unsigned int ncpus = get_core_count();
	unsigned int core_id = get_core_id();

	pr_info("%s: Core Count = %d\n", __func__, ncpus);
	pr_info("%s: Core Id = %d\n", __func__, core_id);

	for (i = 0; i < ncpus; ++i)
		set_cpu_possible(i, true);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();
	BUG_ON(cpu != 0);
	cpu_asid_cache(cpu) = ASID_USER_FIRST;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
static DECLARE_COMPLETION(cpu_running);

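/*
 * Boot synchronization: __cpu_up() blocks on cpu_running with a timeout
 * while the secondary comes up; secondary_start_kernel() completes it
 * once the new CPU has been marked online.
 */
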
void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_KERNEL
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			__func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		__func__, boot_secondary_processors, cpu);
#endif
	secondary_trap_init();

	/* All kernel threads share the same mm context. */

	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	preempt_disable();
	trace_hardirqs_off();

	calibrate_delay();

	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);

	local_irq_enable();

	set_cpu_online(cpu, true);
	complete(&cpu_running);

	cpu_startup_entry(CPUHP_ONLINE);
}

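/*
 * Run-stall control: each core has a bit in the MPSCORE register.
 * mx_cpu_start() clears the target core's bit so that it may run;
 * mx_cpu_stop() sets the bit to stall the core. Both are executed on
 * CPU 0 via smp_call_function_single().
 */
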
static void mx_cpu_start(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
			__func__, cpu, run_stall_mask, get_er(MPSCORE));
}

static void mx_cpu_stop(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask | (1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
			__func__, cpu, run_stall_mask, get_er(MPSCORE));
}

#ifdef CONFIG_HOTPLUG_CPU
unsigned long cpu_start_id __cacheline_aligned;
#endif
unsigned long cpu_start_ccount;

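/*
 * Boot handshake with the secondary core: boot_secondary() publishes a
 * non-zero ccount in cpu_start_ccount and waits for the starting core
 * (in its low-level boot code) to clear it as a sign of life. If the
 * value is still set when the timeout expires, the core never started
 * and is stalled again.
 */
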
static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

#ifdef CONFIG_HOTPLUG_CPU
	cpu_start_id = cpu;
	system_flush_invalidate_dcache_range(
			(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
#endif
	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		do
			ccount = get_ccount();
		while (!ccount);

		cpu_start_ccount = ccount;

		while (time_before(jiffies, timeout)) {
			mb();
			if (!cpu_start_ccount)
				break;
		}

		if (cpu_start_ccount) {
			smp_call_function_single(0, mx_cpu_stop,
					(void *)cpu, 1);
			cpu_start_ccount = 0;
			return -EIO;
		}
	}
	return 0;
}

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret = 0;

	if (cpu_asid_cache(cpu) == 0)
		cpu_asid_cache(cpu) = ASID_USER_FIRST;

	start_info.stack = (unsigned long)task_pt_regs(idle);
	wmb();

	pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
			__func__, cpu, idle, start_info.stack);

	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		wait_for_completion_timeout(&cpu_running,
				msecs_to_jiffies(1000));
		if (!cpu_online(cpu))
			ret = -EIO;
	}

	if (ret)
		pr_err("CPU %u failed to boot\n", cpu);

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	local_flush_cache_all();
	local_flush_tlb_all();
	invalidate_page_directory();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static void platform_cpu_kill(unsigned int cpu)
{
	smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
}

/*
 * Called from the thread which is asking for a CPU to be shut down;
 * waits until shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	while (time_before(jiffies, timeout)) {
		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
				sizeof(cpu_start_id));
		if (cpu_start_id == -cpu) {
			platform_cpu_kill(cpu);
			return;
		}
	}
	pr_err("CPU%u: unable to kill\n", cpu);
}

void arch_cpu_idle_dead(void)
{
	cpu_die();
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	idle_task_exit();
	local_irq_disable();
	__asm__ __volatile__(
			"	movi	a2, cpu_restart\n"
			"	jx	a2\n");
}

#endif /* CONFIG_HOTPLUG_CPU */

enum ipi_msg_type {
	IPI_RESCHEDULE = 0,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

static const struct {
	const char *short_text;
	const char *long_text;
} ipi_text[] = {
	{ .short_text = "RES", .long_text = "Rescheduling interrupts" },
	{ .short_text = "CAL", .long_text = "Function call interrupts" },
	{ .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
};

struct ipi_data {
	unsigned long ipi_count[IPI_MAX];
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);

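/*
 * IPIs are delivered through the Xtensa MX interrupt distributor:
 * writing a mask of target cores to MIPISET(msg_id) raises the IPI on
 * those cores, and each receiver acknowledges its pending bits in
 * MIPICAUSE (see ipi_interrupt() below).
 */
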
static void send_ipi_message(const struct cpumask *callmask,
		enum ipi_msg_type msg_id)
{
	int index;
	unsigned long mask = 0;

	for_each_cpu(index, callmask)
		if (index != smp_processor_id())
			mask |= 1 << index;

	set_er(mask, MIPISET(msg_id));
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	send_ipi_message(&targets, IPI_CPU_STOP);
}

static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);
	machine_halt();
}

irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	unsigned int msg;
	unsigned i;

	msg = get_er(MIPICAUSE(cpu));
	for (i = 0; i < IPI_MAX; i++)
		if (msg & (1 << i)) {
			set_er(1 << i, MIPICAUSE(cpu));
			++ipi->ipi_count[i];
		}

	if (msg & (1 << IPI_RESCHEDULE))
		scheduler_ipi();
	if (msg & (1 << IPI_CALL_FUNC))
		generic_smp_call_function_interrupt();
	if (msg & (1 << IPI_CPU_STOP))
		ipi_cpu_stop(cpu);

	return IRQ_HANDLED;
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu;
	unsigned i;

	for (i = 0; i < IPI_MAX; ++i) {
		seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
		for_each_online_cpu(cpu)
			seq_printf(p, " %10lu",
					per_cpu(ipi_data, cpu).ipi_count[i]);
		seq_printf(p, " %s\n", ipi_text[i].long_text);
	}
}

int setup_profiling_timer(unsigned int multiplier)
{
	pr_debug("setup_profiling_timer %d\n", multiplier);
	return 0;
}

/* TLB flush functions */

struct flush_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

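/*
 * Common pattern for the SMP flush operations below: pack the arguments
 * into a flush_data on the caller's stack, then run the ipi_* helper on
 * every core via on_each_cpu(). The final argument 1 makes on_each_cpu()
 * wait for completion, so the on-stack data stays valid throughout.
 */
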
static void ipi_flush_tlb_all(void *arg)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

static void ipi_flush_tlb_mm(void *arg)
{
	local_flush_tlb_mm(arg);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_mm, mm, 1);
}

static void ipi_flush_tlb_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = addr,
	};
	on_each_cpu(ipi_flush_tlb_page, &fd, 1);
}

static void ipi_flush_tlb_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}

/* Cache flush functions */

static void ipi_flush_cache_all(void *arg)
{
	local_flush_cache_all();
}

void flush_cache_all(void)
{
	on_each_cpu(ipi_flush_cache_all, NULL, 1);
}

static void ipi_flush_cache_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_page(struct vm_area_struct *vma,
		unsigned long address, unsigned long pfn)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = address,
		.addr2 = pfn,
	};
	on_each_cpu(ipi_flush_cache_page, &fd, 1);
}

static void ipi_flush_cache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_cache_range, &fd, 1);
}

static void ipi_flush_icache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_icache_range(fd->addr1, fd->addr2);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_icache_range, &fd, 1);
}

/* ------------------------------------------------------------------------- */

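/*
 * System-wide dcache maintenance: broadcast the range operation to all
 * cores. CPU hotplug uses these to keep the cpu_start_id handshake word
 * coherent between the booting CPU and the CPU controlling the boot.
 */
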
static void ipi_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
}

static void ipi_flush_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__flush_invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
}