/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>
#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;
/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * OK, it's not there, so it might be soft-unplugged; let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */
static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_NMI_IPI] = "nmi ipi",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}
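/*
 * smp_muxed_ipi_set_message() above flags each pending message type by
 * writing a 1 into its own byte of the per-cpu "messages" word.
 * IPI_MESSAGE(A) is the mask covering message A's byte, so the demux
 * loop below can collect every pending type with a single xchg() and
 * test them individually. The shift differs by endianness because
 * byte 0 sits at opposite ends of the word.
 */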
#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif

irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
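/*
 * Deliver one IPI message, either through the platform's dedicated
 * message_pass() hook or, failing that, via the muxed-IPI path above.
 */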
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);
void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * NMI IPIs are globally single threaded. No more than one in progress at
 * any time.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then the call returns.
 *
 * No new NMI can be initiated until targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static int nmi_ipi_busy_count = 0;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
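/*
 * __nmi_ipi_lock is a bare atomic test-and-set lock serializing all of
 * the NMI IPI state above (pending mask, busy count, callback pointer).
 */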
static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static void nmi_ipi_lock(void)
{
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
}

static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
	atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}
/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *);
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (!nmi_ipi_busy_count)
		goto out;
	if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
		goto out;

	fn = nmi_ipi_function;
	if (!fn)
		goto out;

	cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	ret = 1;

	fn(regs);

	nmi_ipi_lock();
	nmi_ipi_busy_count--;
out:
	nmi_ipi_unlock_end(&flags);

	return ret;
}
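/*
 * Raise the NMI on the target(s), preferring the platform's direct
 * cause_nmi_ipi() hook and falling back to a regular PPC_MSG_NMI_IPI
 * message. cpu < 0 means all online CPUs except this one.
 */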
static void do_smp_send_nmi_ipi(int cpu)
{
	if (smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}
void smp_flush_nmi_ipi(u64 delay_us)
{
	unsigned long flags;

	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy_count) {
		nmi_ipi_unlock_end(&flags);
		udelay(1);
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				return;
		}
		nmi_ipi_lock_start(&flags);
	}
	nmi_ipi_unlock_end(&flags);
}
/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   enter the handler, == 0 specifies indefinite delay.
 */
int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	/* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy_count) {
		nmi_ipi_unlock_end(&flags);
		spin_until_cond(nmi_ipi_busy_count == 0);
		nmi_ipi_lock_start(&flags);
	}

	nmi_ipi_function = fn;

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		/* cpumask starts clear */
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	do_smp_send_nmi_ipi(cpu);

	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		udelay(1);
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	nmi_ipi_lock();
	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Could not gather all CPUs */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count--;
	nmi_ipi_unlock_end(&flags);

	return ret;
}
#endif /* CONFIG_NMI_IPI */
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif
#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
}
#endif
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}
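/* Park every other online CPU in stop_this_cpu() with interrupts off. */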
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
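/* thread_info of each CPU's idle task, set before the CPU is kicked. */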
struct thread_info *current_set[NR_CPUS];
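/* Record this CPU's PVR (and, on FSL Book3E, its next_tlbcam_idx). */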
static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up yet, but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}
void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}
#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt
	 * to be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif
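/*
 * Point the paca (on ppc64) and secondary_ti at the new idle task so the
 * secondary picks up the right stack and thread_info when it starts in
 * start_secondary().
 */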
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure callin-map entry is 0 (can be leftover from a CPU
	 * hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * Wait to see if the cpu made a callin (is actually up).
	 * Use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}
/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
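/*
 * Add (or remove) cpu as a core sibling of every CPU whose device-tree
 * node carries the same ibm,chip-id value.
 */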
static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}
/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}
/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif
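/*
 * Scheduler topology: an SMT level spanning the threads of a core
 * (when CONFIG_SCHED_SMT), then a DIE level spanning all CPUs.
 */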
static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We are running pinned to the boot CPU, see rest_init().
	 */
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);
}
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif