 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/vdso_datapage.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>
#define DBG(fmt...) udbg_printf(fmt)
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif
struct thread_info *secondary_ti;
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];
int smt_enabled_at_boot = 1;
static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
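
/*
 * Worked example (illustrative, not from the code above): booting with
 * "smt-enabled=2" on the command line makes smt_enabled_at_boot == 2, so
 * threads 0 and 1 of each core pass the checks above during boot while
 * threads with cpu_thread_in_core() >= 2 are held back until they are
 * onlined explicitly later.
 */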
#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */
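
/*
 * Illustrative note (not a guarantee for every platform's smp_ops): the
 * secondary is expected to be spinning in low-level boot code polling its
 * paca[nr].cpu_start flag, so setting that flag above is the entire "kick".
 * For a soft-unplugged thread, the hotplug branch instead marks it
 * CPU_UP_PREPARE and sends a reschedule IPI to pull it out of its idle loop.
 */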
static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}
static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();

	return IRQ_HANDLED;
}
static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}
static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};
/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
		return -EINVAL;
	}
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC_CORE)
	if (msg == PPC_MSG_DEBUGGER_BREAK) {
		return 1;
	}
#endif
	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}
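
/*
 * Usage sketch (hypothetical interrupt-controller code, not part of this
 * file): a controller with at least four hardware IPIs maps one virq per
 * message and hands each one to the generic actions above, e.g.
 *
 *	for (msg = PPC_MSG_CALL_FUNCTION; msg <= PPC_MSG_DEBUGGER_BREAK; msg++) {
 *		virq = irq_create_mapping(ipi_domain, first_ipi_hwirq + msg);
 *		smp_request_message_ipi(virq, msg);
 *	}
 *
 * ipi_domain and first_ipi_hwirq are illustrative names only.
 */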
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}
void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}
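
/*
 * Informal ordering sketch (a summary of the comments above, not a formal
 * memory-model argument): smp_muxed_ipi_set_message() issues a full barrier
 * before setting the message byte, cause_ipi() is required to order that
 * byte before the interrupt it raises, and smp_ipi_demux() below does mb()
 * before xchg'ing the word, so the handler observes both the message and
 * the data it refers to.
 */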
#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
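
/*
 * Layout example (assuming 64-bit longs and PPC_MSG_RESCHEDULE == 1, its
 * value in the contemporary asm/smp.h): each message type owns one byte of
 * the per-cpu "messages" word, written with a plain byte store in
 * smp_muxed_ipi_set_message(). IPI_MESSAGE() rebuilds the matching mask
 * over the whole word:
 *
 *	little-endian: 1uL << (8 * 1)                   == 0x0000000000000100
 *	big-endian:    1uL << ((BITS_PER_LONG - 8) - 8) == 0x0001000000000000
 *
 * i.e. the byte that message index 1 stores to in either case.
 */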
irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}
/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
			debug_ipi_action(0, NULL);
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}
void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);
void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif
#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
struct thread_info *current_set[NR_CPUS];
static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}
void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}
#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	return 0;
}
void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}
void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}
/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}
int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}
int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}
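
/*
 * Lifecycle sketch of cpu_state as used by the helpers above (assuming a
 * platform that relies on the generic_* hooks): kick_cpu() marks a thread
 * CPU_UP_PREPARE via generic_set_cpu_up(), the dying CPU marks itself
 * CPU_DEAD via generic_set_cpu_dead(), generic_cpu_die() on the surviving
 * CPU polls is_cpu_dead(), and generic_check_cpu_restart() lets low-level
 * idle/sleep code notice that an offlined thread is being woken again.
 */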
static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE
		- STACK_FRAME_OVERHEAD;
#endif
	secondary_ti = current_set[cpu] = ti;
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited.
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU.
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure callin-map entry is 0 (can be leftover from a
	 * previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}
/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);
/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
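
/*
 * Worked example with illustrative values: if threads_per_core == 8 then
 * threads_shift == 3, so cpu_core_index_of_thread(21) == 21 >> 3 == 2 and
 * cpu_first_thread_of_core(2) == 2 << 3 == 16.
 */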
static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}
/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}
static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}
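
/*
 * Summary of the two paths above: core-sibling information is taken from
 * the firmware "ibm,chip-id" property when the CPU node carries one; only
 * when that property is absent do we fall back to grouping CPUs that share
 * an L2 cache node.
 */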
/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;
#endif

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}

	return flags;
}
#endif
static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime,
	 * so we pin us down to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);
}
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}
void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}
void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif /* CONFIG_HOTPLUG_CPU */