/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
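
/*
 * Illustrative sketch (annotation, not part of the original file): a
 * platform fills in its own smp_ops_t during board setup and points
 * smp_ops at it. The field names below follow the callbacks this file
 * actually invokes (cause_ipi, probe, kick_cpu, setup_cpu,
 * cpu_bootable, ...); the example_* function names are hypothetical.
 *
 *	static struct smp_ops_t example_smp_ops = {
 *		.cause_ipi	= example_cause_ipi,
 *		.probe		= example_smp_probe,
 *		.kick_cpu	= smp_generic_kick_cpu,
 *		.setup_cpu	= example_setup_cpu,
 *		.cpu_bootable	= smp_generic_cpu_bootable,
 *	};
 *	smp_ops = &example_smp_ops;
 */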

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
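
/*
 * Worked example (annotation, derived from the logic above): on an
 * SMT machine with 8 threads per core, smt_enabled_at_boot == 2 lets
 * threads 0 and 1 of each core boot while cpu_thread_in_core() values
 * 2..7 make the function return 0; smt_enabled_at_boot == 0 boots
 * only thread 0 of each core. The limit applies only while
 * system_state == SYSTEM_BOOTING.
 */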

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * OK, it's not there; it may have been soft-unplugged, so
	 * let's try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
		return -EINVAL;
	}
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC_CORE)
	if (msg == PPC_MSG_DEBUGGER_BREAK) {
		return 1;
	}
#endif
	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}
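
/*
 * Usage sketch (annotation, not in the original file): an interrupt
 * controller driver with one virq per IPI would call this once per
 * message type after mapping its hardware IPIs; the virq0..virq3
 * names are hypothetical.
 *
 *	smp_request_message_ipi(virq0, PPC_MSG_CALL_FUNCTION);
 *	smp_request_message_ipi(virq1, PPC_MSG_RESCHEDULE);
 *	smp_request_message_ipi(virq2, PPC_MSG_TICK_BROADCAST);
 *	smp_request_message_ipi(virq3, PPC_MSG_DEBUGGER_BREAK);
 */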

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	smp_muxed_ipi_set_message(cpu, msg);
	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
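
/*
 * Worked example (annotation): smp_muxed_ipi_set_message() stores a 1
 * into byte 'msg' of info->messages, so IPI_MESSAGE(msg) must select
 * the same byte lane as an unsigned long mask. On 64-bit
 * little-endian, byte 1 covers bits 8-15, so IPI_MESSAGE(1) ==
 * 1uL << 8 == 0x100. On 64-bit big-endian, byte 1 covers bits 48-55,
 * so IPI_MESSAGE(1) == 1uL << ((64 - 8) - 8 * 1) == 1uL << 48.
 * Either way, (all & IPI_MESSAGE(msg)) in smp_ipi_demux() tests
 * exactly the byte that the sender set.
 */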

irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = this_cpu_ptr(&ipi_message);
	unsigned long all;

	mb();	/* order any irq clear */

	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
			debug_ipi_action(0, NULL);
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
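
/*
 * Protocol note (annotation): a sender sets one message byte and fires
 * a single hardware IPI; the receiving CPU's handler calls
 * smp_ipi_demux(), which atomically swaps the whole message word to
 * zero with xchg() and dispatches every byte found set. The loop then
 * re-reads info->messages because new bytes may have been set between
 * the xchg() and the handlers returning.
 */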

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up yet, but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}
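
/*
 * Note (annotation): smp_prepare_cpus() runs once on the boot CPU
 * before any secondary is kicked. It allocates the per-cpu sibling
 * and core cpumasks on each CPU's home NUMA node, seeds the boot
 * CPU's own masks, and gives the platform a chance to probe its IPI
 * mechanism via smp_ops->probe().
 */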

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state must be set back to CPU_UP_PREPARE in kick_cpu();
 * otherwise it stays CPU_DEAD from the earlier generic_set_cpu_dead()
 * call, and the wait loop in generic_cpu_die() returns immediately.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif
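
/*
 * Summary (annotation): the per-cpu cpu_state drives soft
 * offline/online. generic_set_cpu_dead() marks a CPU CPU_DEAD once it
 * has stopped; generic_cpu_die() polls is_cpu_dead() for roughly ten
 * seconds; a later kick_cpu() calls generic_set_cpu_up() so that code
 * polling generic_check_cpu_restart() (platform cede/idle loops,
 * outside this file) can observe CPU_UP_PREPARE and resume the CPU.
 */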

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/* Make sure callin-map entry is 0 (can be left over from a
	 * previous CPU hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}
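
/*
 * Bringup flow (annotation): __cpu_up() stages the idle thread via
 * cpu_idle_thread_init(), clears cpu_callin_map[cpu], kicks the CPU
 * through smp_ops->kick_cpu(), then waits for the secondary to set
 * its callin flag in start_secondary() (about five seconds in either
 * the boot or the hotplug case), and finally spins until the CPU
 * appears in the online map.
 */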

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);
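
/*
 * Device-tree sketch (annotation; the node below is hypothetical):
 * given a cpu node such as
 *
 *	cpu@20 {
 *		device_type = "cpu";
 *		reg = <0x20>;
 *	};
 *
 * cpu_to_core_id() returns 0x20 for the logical cpu that maps to this
 * node, and -1 if the node or its "reg" property is missing.
 */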

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}
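
/*
 * Policy note (annotation): cpu_core_mask is built from the first
 * grouping the device tree supports. CPUs sharing an "ibm,chip-id"
 * value form one core-sibling group; when that property is absent,
 * CPUs sharing an L2 cache node are grouped instead. The same walk
 * runs with add=true from start_secondary() and add=false from
 * __cpu_disable(), so the masks stay symmetric.
 */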

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
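
/*
 * Scheduling note (annotation): the SMT level spans the threads of a
 * core (cpu_smt_mask) and the DIE level spans the CPUs of a node
 * (cpu_cpu_mask). On CPU_FTR_ASYM_SMT machines (e.g. POWER7),
 * SD_ASYM_PACKING asks the scheduler to pack work onto lower-numbered
 * threads, which run faster when the higher-numbered threads of the
 * core are idle.
 */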

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime,
	 * so we pin ourselves down to CPU 0 for a short while
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, &current->cpus_allowed);
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);

}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif