/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* Store all idle threads; these can be reused instead of creating
 * new threads. This also avoids complicated thread-destroy
 * functionality for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)	(per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)	(per_cpu(idle_thread_array, x) = (p))
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_PPC64
int __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca[nr].cpu_start = 1;
	smp_mb();

	return 0;
}
#endif

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	/* we just need the return path side effect of checking need_resched */
	return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* Optional function to request IPIs, for controllers with >= 4 IPIs */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK)
		return -EINVAL;
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK)
		return 1;
#endif
	err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}
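
/*
 * Typical usage (a sketch, not code from this file): a controller
 * with one hardware IPI per message would map each message to a virq
 * and register it here, e.g.
 *
 *	for (msg = 0; msg <= PPC_MSG_DEBUGGER_BREAK; msg++)
 *		smp_request_message_ipi(my_ipi_virq(msg), msg);
 *
 * where my_ipi_virq() is a hypothetical platform helper returning the
 * virq for that hardware IPI. Controllers with fewer IPIs instead
 * multiplex all messages over a single IPI using the scheme below.
 */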

struct cpu_messages {
	unsigned long messages;	/* current messages bits */
	unsigned long data;	/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
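
/*
 * Muxed IPI scheme: a controller with a single hardware IPI
 * multiplexes all PPC_MSG_* messages over it. The sender sets the
 * message bit in the target's ipi_message and raises the one hardware
 * IPI; the receiver acknowledges the interrupt and calls
 * smp_ipi_demux() to act on every bit that is set. The mb() after
 * set_bit() in smp_muxed_ipi_message_pass() pairs with the mb() at
 * the top of smp_ipi_demux(), so a message written before the IPI is
 * raised is visible by the time the interrupt is handled.
 */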

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	unsigned long *tgt = &info->messages;

	set_bit(msg, tgt);
	mb();
	smp_ops->cause_ipi(cpu, info->data);
}

void smp_muxed_ipi_resend(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
	unsigned long *tgt = &info->messages;

	if (*tgt)
		smp_ops->cause_ipi(smp_processor_id(), info->data);
}

irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
	unsigned long *tgt = &info->messages;

	mb();	/* order any irq clear */
	while (*tgt) {
		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt))
			generic_smp_call_function_interrupt();
		if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt))
			reschedule_action(0, NULL); /* upcoming sched hook */
		if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt))
			generic_smp_call_function_single_interrupt();
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt))
			debug_ipi_action(0, NULL);
#endif
	}
	return IRQ_HANDLED;
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up yet, but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops) {
		if (smp_ops->probe)
			max_cpus = smp_ops->probe();
		else
			max_cpus = NR_CPUS;
	} else {
		max_cpus = 1;
	}
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}
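
/*
 * The dying CPU's half of the handshake polled by generic_cpu_die()
 * above: mark our cpu_state CPU_DEAD, then spin until whatever brings
 * this CPU back up flips the state to CPU_UP_PREPARE.
 */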
void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}
#endif

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

static int __cpuinit create_idle(unsigned int cpu)
{
	struct thread_info *ti;
	struct create_idle c_idle = {
		.cpu	= cpu,
		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);

	c_idle.idle = get_idle_for_cpu(cpu);

	/* We can't use kernel_thread since we must avoid rescheduling
	 * the child. We use a workqueue because we want to fork from
	 * a kernel thread, not whatever userspace process happens to
	 * be trying to online us.
	 */
	if (!c_idle.idle) {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	} else
		init_idle(c_idle.idle, cpu);
	if (IS_ERR(c_idle.idle)) {
		pr_err("Failed fork for CPU %u: %li\n", cpu, PTR_ERR(c_idle.idle));
		return PTR_ERR(c_idle.idle);
	}
	ti = task_thread_info(c_idle.idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = c_idle.idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	current_set[cpu] = ti;

	return 0;
}
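
/*
 * Bring-up sequence implemented by __cpu_up() below: install the
 * secondary's idle thread, clear its slot in cpu_callin_map, barrier
 * so the bring-up state is globally visible, kick the CPU through
 * smp_ops, poll cpu_callin_map until the secondary checks in, sync
 * timebases if the platform requires it, and finally wait for the
 * secondary to mark itself online in start_secondary().
 */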
int __cpuinit __cpu_up(unsigned int cpu)
{
	int rc, c;

	secondary_ti = current_set[cpu];

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	/* Make sure we have an idle thread */
	rc = create_idle(cpu);
	if (rc)
		return rc;

	/* Make sure the callin-map entry is 0 (it can be left over
	 * from a CPU hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * Wait to see if the cpu made a callin (is actually up).
	 * Use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const int *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = *reg;
out:
	of_node_put(np);
	return id;
}

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
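
/*
 * Worked example: with 4 hardware threads per core, threads_shift is
 * 2, so logical cpu 5 belongs to core 1 (5 >> 2 == 1) and the first
 * thread of core 1 is cpu 4 (1 << 2 == 4).
 */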

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}
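
/*
 * cpu_to_l2cache() above resolves a cpu to its L2 cache device-tree
 * node: of_find_next_cache_node() follows the cpu node's cache
 * phandle (the "l2-cache" or "next-level-cache" property). Two cpus
 * that resolve to the same node share an L2, which is what
 * start_secondary() and __cpu_disable() use to keep cpu_core_map in
 * sync with shared caches.
 */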

/* Activate a secondary processor. */
void __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;
#endif
	ipi_call_lock();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_set_cpu(cpu, cpu_core_mask(i));
			cpumask_set_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
	ipi_call_unlock();

	local_irq_enable();

	cpu_idle();

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();
}

int arch_sd_sibling_asym_packing(void)
{
	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		return SD_ASYM_PACKING;
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	struct device_node *l2_cache;
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}

	l2_cache = cpu_to_l2cache(cpu);
	for_each_present_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_clear_cpu(cpu, cpu_core_mask(i));
			cpumask_clear_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_lock(void)
{
	mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_hotplug_driver_unlock(void)
{
	mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif