/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/ftrace.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
        IPI_TIMER = 2,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};

int __cpuinit __cpu_up(unsigned int cpu)
{
        struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
        struct task_struct *idle = ci->idle;
        pgd_t *pgd;
        int ret;

        /*
         * Spawn a new process manually, if not already done.
         * Grab a pointer to its task struct so we can mess with it
         */
        if (!idle) {
                idle = fork_idle(cpu);
                if (IS_ERR(idle)) {
                        printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
                        return PTR_ERR(idle);
                }
                ci->idle = idle;
        } else {
                /*
                 * Since this idle thread is being re-used, call
                 * init_idle() to reinitialize the thread structure.
                 */
                init_idle(idle, cpu);
        }

        /*
         * Allocate initial page tables to allow the new CPU to
         * enable the MMU safely.  This essentially means a set
         * of our "standard" page tables, with the addition of
         * a 1:1 mapping for the physical address of the kernel.
         */
        pgd = pgd_alloc(&init_mm);
        if (!pgd)
                return -ENOMEM;

        if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
                identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
#endif
                identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
                identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
        }

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        secondary_data.pgdir = virt_to_phys(pgd);
        secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
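        /*
         * The secondary core starts with its MMU and caches disabled, so
         * secondary_data must be visible in main memory: clean it out of
         * the L1 data cache and any outer (e.g. L2) cache.
         */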
        __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
        outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                unsigned long timeout;

                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                timeout = jiffies + HZ;
                while (time_before(jiffies, timeout)) {
                        if (cpu_online(cpu))
                                break;

                        udelay(10);
                        barrier();
                }

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }

        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;

        if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
                identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
#endif
                identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
                identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
        }

        pgd_free(&init_mm, pgd);

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

/*
 * __cpu_disable runs on the processor that is being shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        struct task_struct *p;
        int ret;

        ret = platform_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Stop the local timer for this CPU.
         */
        percpu_timer_stop();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
        local_flush_tlb_all();

        read_lock(&tasklist_lock);
        for_each_process(p) {
                if (p->mm)
                        cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
        }
        read_unlock(&tasklist_lock);

        return 0;
}

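/*
 * cpu_died synchronises the CPU requesting the shutdown (__cpu_die())
 * with the CPU actually going down (cpu_die()).
 */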
static DECLARE_COMPLETION(cpu_died);

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or the wait times out.
 */
void __cpu_die(unsigned int cpu)
{
        if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                pr_err("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

        if (!platform_cpu_kill(cpu))
                printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();
        mb();

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        complete(&cpu_died);

        /*
         * actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.
         */
        platform_cpu_die(cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
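        /*
         * The stack pointer is reset to the top of the idle thread's stack
         * (THREAD_SIZE - 8, i.e. THREAD_START_SP) and the frame pointer is
         * cleared so that backtraces terminate here.
         */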
        __asm__("mov    sp, %0\n"
        "       mov     fp, #0\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

int __cpu_logical_map[NR_CPUS];

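/*
 * Map logical CPU numbers to physical CPU IDs: the booting CPU (whatever
 * its MPIDR says) always becomes logical CPU 0, and the remaining IDs are
 * shuffled so that every physical ID still appears exactly once in the map.
 */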
void __init smp_setup_processor_id(void)
{
        int i;
        u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;

        cpu_logical_map(0) = cpu;
        for (i = 1; i < NR_CPUS; ++i)
                cpu_logical_map(i) = i == cpu ? 0 : i;

        printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;

        store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        printk("CPU%u: Booted secondary processor\n", cpu);

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        cpu_switch_mm(mm->pgd, mm);
        enter_lazy_tlb(mm, current);
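        /*
         * Throw away any stale TLB entries picked up while running on the
         * temporary boot-time page tables.
         */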
        local_flush_tlb_all();

        cpu_init();
        preempt_disable();
        trace_hardirqs_off();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        platform_secondary_init(cpu);

        /*
         * Enable local interrupts.
         */
        notify_cpu_starting(cpu);
        local_irq_enable();
        local_fiq_enable();

        /*
         * Setup the percpu timer for this CPU.
         */
        percpu_timer_setup();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue.
         */
        set_cpu_online(cpu, true);
        while (!cpu_active(cpu))
                cpu_relax();

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

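        /*
         * BogoMIPS = loops_per_jiffy * HZ / 500000, so dividing the summed
         * loops_per_jiffy by 500000/HZ gives the integer part, and dividing
         * by 5000/HZ (modulo 100) gives the two decimal places.
         */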
        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        per_cpu(cpu_data, cpu).idle = current;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int ncores = num_possible_cpus();

        init_cpu_topology();

        smp_store_cpu_info(smp_processor_id());

        /*
         * are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;
        if (ncores > 1 && max_cpus) {
                /*
                 * Enable the local timer or broadcast device for the
                 * boot CPU, but only if we have more than one CPU.
                 */
                percpu_timer_setup();

                /*
                 * Initialise the present map, which describes the set of CPUs
                 * actually populated at the present time. A platform should
                 * re-initialize the map in platform_smp_prepare_cpus() if
                 * present != possible (e.g. physical hotplug).
                 */
                init_cpu_present(&cpu_possible_map);

                /*
                 * Initialise the SCU if there is more than one CPU
                 * and let the secondaries know where to start.
                 */
                platform_smp_prepare_cpus(max_cpus);
        }
}

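/*
 * smp_cross_call is installed by the platform code (typically pointing at
 * the interrupt controller's software-interrupt routine) and is used to
 * raise an IPI on the CPUs in the given mask.
 */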
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

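/*
 * IPI numbering starts at IPI_TIMER (2), so both this table and the
 * ipi_irqs[] statistics are indexed by (ipi number - IPI_TIMER).
 */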
static const char *ipi_types[NR_IPI] = {
#define S(x,s)  [x - IPI_TIMER] = s
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

                for_each_present_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));

                seq_printf(p, " %s\n", ipi_types[i]);
        }
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);

#ifdef CONFIG_LOCAL_TIMERS
        sum += __get_irq_stat(cpu, local_timer_irqs);
#endif

        return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
        struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
        irq_enter();
        evt->event_handler(evt);
        irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
{
        handle_local_timer(regs);
}

void handle_local_timer(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        int cpu = smp_processor_id();

        if (local_timer_ack()) {
                __inc_irq_stat(cpu, local_timer_irqs);
                ipi_timer();
        }

        set_irq_regs(old_regs);
}

void show_local_irqs(struct seq_file *p, int prec)
{
        unsigned int cpu;

        seq_printf(p, "%*s: ", prec, "LOC");

        for_each_present_cpu(cpu)
                seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));

        seq_printf(p, " Local timer interrupts\n");
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast     NULL
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
        struct clock_event_device *evt)
{
}

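/*
 * Register a dummy clock event device for a CPU without a usable local
 * timer; it does nothing by itself and is driven entirely by IPI_TIMER
 * broadcasts from the CPU that owns the global timer.
 */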
static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
        evt->name       = "dummy_timer";
        evt->features   = CLOCK_EVT_FEAT_ONESHOT |
                          CLOCK_EVT_FEAT_PERIODIC |
                          CLOCK_EVT_FEAT_DUMMY;
        evt->rating     = 400;
        evt->mult       = 1;
        evt->set_mode   = broadcast_timer_set_mode;

        clockevents_register_device(evt);
}

void __cpuinit percpu_timer_setup(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        evt->cpumask = cpumask_of(cpu);
        evt->broadcast = smp_timer_broadcast;

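        /*
         * Fall back to the broadcast-driven dummy clockevent if the
         * platform has no per-CPU timer (local_timer_setup() fails).
         */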
        if (local_timer_setup(evt))
                broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
}
#endif

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        if (system_state == SYSTEM_BOOTING ||
            system_state == SYSTEM_RUNNING) {
                spin_lock(&stop_lock);
                printk(KERN_CRIT "CPU%u: stopping\n", cpu);
                dump_stack();
                spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
        handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);

        if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
                __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);

        switch (ipinr) {
        case IPI_TIMER:
                ipi_timer();
                break;

        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                generic_smp_call_function_interrupt();
                break;

        case IPI_CALL_FUNC_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;

        case IPI_CPU_STOP:
                ipi_cpu_stop(cpu);
                break;

        default:
                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                       cpu, ipinr);
                break;
        }
        set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
        unsigned long timeout;

        if (num_online_cpus() > 1) {
                cpumask_t mask = cpu_online_map;
                cpu_clear(smp_processor_id(), mask);

                smp_cross_call(&mask, IPI_CPU_STOP);
        }

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}