arch/powerpc/kernel/smp.c
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}


#ifdef CONFIG_PPC64
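/*
 * Kick CPU "nr" out of its spin loop: set its paca cpu_start flag so it
 * falls through to secondary start, or, if it was soft-unplugged, mark it
 * up again and send it a reschedule IPI.
 */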
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

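/*
 * Per-message IPI handlers, wired up via smp_ipi_action[] when the
 * platform provides a separate interrupt for each IPI message
 * (see smp_request_message_ipi() below).
 */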
static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_NMI_IPI] = "nmi ipi",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}

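/*
 * Muxed IPIs: platforms with a single inter-processor interrupt multiplex
 * all message types onto it. Each message type owns one byte of a per-cpu
 * "messages" word; senders set their byte and fire one hardware IPI, and
 * the receiver clears the whole word with xchg() and handles every message
 * found in it (see smp_ipi_demux() below).
 */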
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}

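/*
 * Convert a message number into a mask for the byte that
 * smp_muxed_ipi_set_message() writes for it, on either endianness.
 */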
#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif

irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

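/* Send a single IPI message, via the platform hook or the muxed-IPI path. */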
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * NMI IPIs are globally single threaded. No more than one in progress at
 * any time.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then the call returns.
 *
 * No new NMI can be initiated until targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static int nmi_ipi_busy_count = 0;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

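/*
 * The NMI IPI state above is protected by a simple atomic spinlock;
 * the _start/_end variants also hard-disable interrupts around the
 * critical section.
 */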
static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		cpu_relax();
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static void nmi_ipi_lock(void)
{
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		cpu_relax();
}

static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
	atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *);
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (!nmi_ipi_busy_count)
		goto out;
	if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
		goto out;

	fn = nmi_ipi_function;
	if (!fn)
		goto out;

	cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	ret = 1;

	fn(regs);

	nmi_ipi_lock();
	nmi_ipi_busy_count--;
out:
	nmi_ipi_unlock_end(&flags);

	return ret;
}

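/*
 * Deliver the NMI IPI: prefer the platform's cause_nmi_ipi() hook and
 * fall back to an ordinary PPC_MSG_NMI_IPI message, sent either to one
 * CPU or to every online CPU except this one.
 */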
static void do_smp_send_nmi_ipi(int cpu)
{
	if (smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}

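/*
 * Wait for any NMI IPI currently in flight to finish, giving up after
 * delay_us microseconds (delay_us == 0 means wait indefinitely).
 */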
void smp_flush_nmi_ipi(u64 delay_us)
{
	unsigned long flags;

	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy_count) {
		nmi_ipi_unlock_end(&flags);
		udelay(1);
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				return;
		}
		nmi_ipi_lock_start(&flags);
	}
	nmi_ipi_unlock_end(&flags);
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   enter the handler, == 0 specifies indefinite delay.
 */
int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	/* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy_count) {
		nmi_ipi_unlock_end(&flags);
		cpu_relax();
		nmi_ipi_lock_start(&flags);
	}

	nmi_ipi_function = fn;

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		/* cpumask starts clear */
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	do_smp_send_nmi_ipi(cpu);

	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		udelay(1);
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	nmi_ipi_lock();
	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Could not gather all CPUs */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count--;
	nmi_ipi_unlock_end(&flags);

	return ret;
}
#endif /* CONFIG_NMI_IPI */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
}
#endif

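/* IPI callback for smp_send_stop(): park the CPU with interrupts off. */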
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

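/* Record per-cpu hardware details (PVR, and the next TLBCAM index on FSL Book3E). */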
static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt
	 * to be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

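/* Point the secondary at its idle task's thread_info (and, on ppc64, its kernel stack). */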
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited.
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU.
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure callin-map entry is 0 (can be leftover from a CPU
	 * hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * Wait to see if the cpu made a callin (is actually up).
	 * Use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

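/*
 * Add or remove "cpu" in the core mask of every CPU whose device-tree node
 * carries the same ibm,chip-id value.
 */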
static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

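/*
 * Update cpu_core_mask for "cpu" against its siblings, using the
 * ibm,chip-id property when present and otherwise falling back to
 * CPUs that share the same L2 cache.
 */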
static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static __init long smp_setup_cpu_workfn(void *data __always_unused)
{
	smp_ops->setup_cpu(boot_cpuid);
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We want the setup_cpu() here to be called on the boot CPU, but
	 * init might run on any CPU, so make sure it's invoked on the boot
	 * CPU.
	 */
	if (smp_ops && smp_ops->setup_cpu)
		work_on_cpu_safe(boot_cpuid, smp_setup_cpu_workfn, NULL);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
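/* Let the platform disable this CPU, then drop it from the sibling/core maps. */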
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif