/*
 * linux/arch/arm/kernel/smp.c
 *
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * structures for inter-processor calls
 * - A collection of single-bit IPI messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock = SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};
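
/*
 * Bring-up handshake: __cpu_up() runs on an already-booted CPU.  It
 * publishes the new core's stack and temporary page tables through
 * secondary_data, asks the platform to start the core, then polls
 * cpu_online_map for up to HZ jiffies (one second) until the
 * secondary marks itself online from secondary_start_kernel().
 */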
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	pmd_t *pmd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
	flush_pmd_entry(pmd);

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	wmb();
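
	/*
	 * The wmb() above orders the secondary_data stores before the
	 * wakeup below, so the new core cannot observe a stale stack
	 * pointer or page-directory address.
	 */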

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	*pmd = __pmd(0);
	clean_pmd_entry(pmd);
	pgd_free(&init_mm, pgd);

	if (ret) {
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mach_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	cpu_clear(cpu, cpu_online_map);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpu_clear(cpu, p->mm->cpu_vm_mask);
	}
	read_unlock(&tasklist_lock);

	return 0;
}

/*
 * called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it times out.
 */
void __cpuexit __cpu_die(unsigned int cpu)
{
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __cpuexit cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpu_set(cpu, mm->cpu_vm_mask);
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
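	/*
	 * The temporary 1:1 boot mapping may have left stale TLB
	 * entries behind; flush them now that we are running on the
	 * real init_mm page tables.
	 */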
	local_flush_tlb_all();

	cpu_init();
	preempt_disable();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Enable local interrupts.
	 */
	notify_cpu_starting(cpu);
	local_irq_enable();
	local_fiq_enable();

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	cpu_set(cpu, cpu_online_map);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}
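
/*
 * BogoMIPS arithmetic: loops_per_jiffy counts delay-loop iterations
 * per timer tick, so lpj * HZ is loops per second and one BogoMIPS
 * is 500000 of those.  Dividing bogosum by (500000/HZ) gives the
 * integer part; the (5000/HZ) divisor recovers two decimal places.
 */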
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}
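
/*
 * IPI delivery: set the message bit in each destination CPU's
 * ipi_data under its lock, then trigger the platform's hardware
 * cross-call via smp_cross_call(); do_IPI() on each receiving CPU
 * drains and dispatches the accumulated bits.
 */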
static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu(cpu, mask) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(mask);

	local_irq_restore(flags);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_present_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}

void show_local_irqs(struct seq_file *p)
{
	unsigned int cpu;

	seq_printf(p, "LOC: ");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

	seq_putc(p, '\n');
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	irq_enter();
	evt->event_handler(evt);
	irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
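/*
 * do_local_timer() is an exception-path entry (note the __exception
 * annotation and pt_regs argument).  local_timer_ack() is expected to
 * return non-zero, after acknowledging the interrupt, only when this
 * CPU's local timer really was the source; we then account the tick
 * and run the clockevent handler via ipi_timer().
 */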
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		irq_stat[cpu].local_timer_irqs++;
		ipi_timer();
	}

	set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_TIMER);
}

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}
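
/*
 * With no usable per-CPU hardware timer, each CPU gets a "dummy"
 * clockevent: CLOCK_EVT_FEAT_DUMMY plus the smp_timer_broadcast hook
 * lets the generic broadcast code tick this CPU with IPI_TIMER from
 * the global timer.
 */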
static void local_timer_setup(struct clock_event_device *evt)
{
	evt->name = "dummy_timer";
	evt->features = CLOCK_EVT_FEAT_ONESHOT |
			CLOCK_EVT_FEAT_PERIODIC |
			CLOCK_EVT_FEAT_DUMMY;
	evt->rating = 400;
	evt->mult = 1;
	evt->set_mode = broadcast_timer_set_mode;
	evt->broadcast = smp_timer_broadcast;

	clockevents_register_device(evt);
}
#endif

void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);

	local_timer_setup(evt);
}

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	struct pt_regs *old_regs = set_irq_regs(regs);

	ipi->ipi_count++;

	for (;;) {
		unsigned long msgs;

		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;
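
		/*
		 * Dispatch every pending message: msgs & -msgs isolates
		 * the lowest set bit, which is then cleared from msgs,
		 * and ffz(~nextmsg) converts the single-bit mask into
		 * its bit number.
		 */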
		do {
			unsigned nextmsg;

			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			nextmsg = ffz(~nextmsg);

			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer();
				break;

			case IPI_RESCHEDULE:
				/*
				 * nothing more to do - everything is
				 * done on the interrupt return path
				 */
				break;

			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}

	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(&mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
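
/*
 * Run func on every CPU in mask.  smp_call_function_many() only
 * targets remote CPUs, so if the calling CPU is itself in the mask
 * we must invoke func locally as well; preemption stays disabled so
 * the current CPU cannot change underneath us.
 */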
static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
		 const struct cpumask *mask)
{
	preempt_disable();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(smp_processor_id(), mask))
		func(info);

	preempt_enable();
}

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};
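
/*
 * Each ipi_flush_* helper below runs on every CPU targeted by an
 * on_each_cpu*() call and applies the corresponding local_flush_*
 * primitive; struct tlb_args just marshals the arguments across the
 * cross-call.
 */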
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}
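
/*
 * User-space flushes only need to reach the CPUs that have actually
 * run the mm (tracked in mm->cpu_vm_mask); kernel-space flushes must
 * hit every online CPU.
 */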
void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = uaddr;

	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	struct tlb_args ta;

	ta.ta_start = kaddr;

	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta;

	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}