/* SMP support routines.
 *
 * Copyright (C) 2006-2008 Panasonic Corporation
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/exceptions.h>
#include <asm/hardirq.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>
#include "internal.h"

#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpu.h>
#include <asm/cacheflush.h>

static unsigned long sleep_mode[NR_CPUS];

static void run_sleep_cpu(unsigned int cpu);
static void run_wakeup_cpu(unsigned int cpu);
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Debug message macros
 */

#undef DEBUG_SMP
#ifdef DEBUG_SMP
#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif

/* Timeout value in msec for smp_nmi_call_function(); zero means no timeout */
#define	CALL_FUNCTION_NMI_IPI_TIMEOUT	0

/*
 * Structure and data for smp_nmi_call_function()
 */
struct nmi_call_data_struct {
	smp_call_func_t	func;
	void		*info;
	cpumask_t	started;
	cpumask_t	finished;
	int		wait;
	char		size_alignment[0]
	__attribute__ ((__aligned__(SMP_CACHE_BYTES)));
} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
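
/*
 * Note: the structure is padded out and aligned to SMP_CACHE_BYTES so that
 * the hotplug path can flush and invalidate exactly the cache lines the data
 * occupies (see hotplug_cpu_nmi_call_function() below).
 */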

static DEFINE_SPINLOCK(smp_nmi_call_lock);
static struct nmi_call_data_struct *nmi_call_data;

/*
 * Data structures and variables
 */
static cpumask_t cpu_callin_map;	/* Bitmask of callin CPUs */
static cpumask_t cpu_callout_map;	/* Bitmask of callout CPUs */
cpumask_t cpu_boot_map;			/* Bitmask of boot APs */
unsigned long start_stack[NR_CPUS - 1];

/*
 * Per CPU parameters
 */
struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;

static int cpucount;			/* The count of boot CPUs */
static cpumask_t smp_commenced_mask;
cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;

/*
 * Function Prototypes
 */
static int do_boot_cpu(int);
static void smp_show_cpu_info(int cpu_id);
static void smp_callin(void);
static void smp_online(void);
static void smp_store_cpu_info(int);
static void smp_cpu_init(void);
static void smp_tune_scheduling(void);
static void send_IPI_mask(const cpumask_t *cpumask, int irq);
static void init_ipi(void);

/*
 * IPI interrupt definitions
 */
static void mn10300_ipi_disable(unsigned int irq);
static void mn10300_ipi_enable(unsigned int irq);
static void mn10300_ipi_chip_disable(struct irq_data *d);
static void mn10300_ipi_chip_enable(struct irq_data *d);
static void mn10300_ipi_ack(struct irq_data *d);
static void mn10300_ipi_nop(struct irq_data *d);

static struct irq_chip mn10300_ipi_type = {
	.name		= "cpu_ipi",
	.irq_disable	= mn10300_ipi_chip_disable,
	.irq_enable	= mn10300_ipi_chip_enable,
	.irq_ack	= mn10300_ipi_ack,
	.irq_eoi	= mn10300_ipi_nop
};

static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);

static struct irqaction reschedule_ipi = {
	.handler	= smp_reschedule_interrupt,
	.name		= "smp reschedule IPI"
};
static struct irqaction call_function_ipi = {
	.handler	= smp_call_function_interrupt,
	.name		= "smp call function IPI"
};

#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
static struct irqaction local_timer_ipi = {
	.handler	= smp_ipi_timer_interrupt,
	.flags		= IRQF_DISABLED,
	.name		= "smp local timer IPI"
};
#endif

/**
 * init_ipi - Initialise the IPI mechanism
 */
static void init_ipi(void)
{
	unsigned long flags;
	u16 tmp16;

	/* set up the reschedule IPI */
	irq_set_chip_and_handler(RESCHEDULE_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
	set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
	mn10300_ipi_enable(RESCHEDULE_IPI);

	/* set up the call function IPI */
	irq_set_chip_and_handler(CALL_FUNC_SINGLE_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
	set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

	/* set up the local timer IPI */
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
	irq_set_chip_and_handler(LOCAL_TIMER_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
	set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
	mn10300_ipi_enable(LOCAL_TIMER_IPI);
#endif

#ifdef CONFIG_MN10300_CACHE_ENABLED
	/* set up the cache flush IPI */
	flags = arch_local_cli_save();
	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
			mn10300_low_ipi_handler);
	GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(FLUSH_CACHE_IPI);
	arch_local_irq_restore(flags);
#endif

	/* set up the NMI call function IPI */
	flags = arch_local_cli_save();
	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
	arch_local_irq_restore(flags);

	/* set up the SMP boot IPI */
	flags = arch_local_cli_save();
	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
			mn10300_low_ipi_handler);
	arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_shutdown - Shut down handling of an IPI
 * @irq: The IPI to be shut down.
 */
static void mn10300_ipi_shutdown(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
	tmp = GxICR(irq);

	arch_local_irq_restore(flags);
}
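
/*
 * Note: in mn10300_ipi_shutdown() above and the enable/disable/ack helpers
 * below, the ICR is read back immediately after being written; as with the
 * explicit read-back in send_IPI_mask(), this is presumably to flush the
 * write buffer so that the change has reached the interrupt controller
 * before interrupts are re-enabled.
 */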

/**
 * mn10300_ipi_enable - Enable an IPI
 * @irq: The IPI to be enabled.
 */
static void mn10300_ipi_enable(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
	tmp = GxICR(irq);

	arch_local_irq_restore(flags);
}

static void mn10300_ipi_chip_enable(struct irq_data *d)
{
	mn10300_ipi_enable(d->irq);
}

/**
 * mn10300_ipi_disable - Disable an IPI
 * @irq: The IPI to be disabled.
 */
static void mn10300_ipi_disable(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = tmp & GxICR_LEVEL;
	tmp = GxICR(irq);

	arch_local_irq_restore(flags);
}

static void mn10300_ipi_chip_disable(struct irq_data *d)
{
	mn10300_ipi_disable(d->irq);
}


/**
 * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
 * @d: The IRQ data of the IPI to be acknowledged.
 *
 * Clear the interrupt detection flag for the IPI on the appropriate interrupt
 * channel in the PIC.
 */
static void mn10300_ipi_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_nop - Dummy IPI action
 * @d: The IRQ data of the IPI to be acted upon.
 */
static void mn10300_ipi_nop(struct irq_data *d)
{
}

/**
 * send_IPI_mask - Send IPIs to all CPUs in list
 * @cpumask: The list of CPUs to target.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to all the CPUs in the list, not waiting for them to
 * finish before returning.  The caller is responsible for synchronisation if
 * that is needed.
 */
static void send_IPI_mask(const cpumask_t *cpumask, int irq)
{
	int i;
	u16 tmp;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpumask_test_cpu(i, cpumask)) {
			/* send IPI */
			tmp = CROSS_GxICR(irq, i);
			CROSS_GxICR(irq, i) =
				tmp | GxICR_REQUEST | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, i); /* flush write buffer */
		}
	}
}

/**
 * send_IPI_self - Send an IPI to this CPU.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to the current CPU.
 */
void send_IPI_self(int irq)
{
	send_IPI_mask(cpumask_of(smp_processor_id()), irq);
}

/**
 * send_IPI_allbutself - Send IPIs to all the other CPUs.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to all CPUs in the system barring the current one,
 * not waiting for them to finish before returning.  The caller is responsible
 * for synchronisation if that is needed.
 */
void send_IPI_allbutself(int irq)
{
	cpumask_t cpumask;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);
	send_IPI_mask(&cpumask, irq);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	BUG();
	/*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
}

/**
 * smp_send_reschedule - Send reschedule IPI to a CPU
 * @cpu: The CPU to target.
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
}

/**
 * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
 * @func: The function to ask to be run.
 * @info: The context data to pass to that function.
 * @wait: If true, wait (atomically) until the function is run on all CPUs.
 *
 * Send a non-maskable request to all CPUs in the system, requesting them to
 * run the specified function with the given context data, and, potentially,
 * to wait for completion of that function on all CPUs.
 *
 * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the
 * timeout.
 */
int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
{
	struct nmi_call_data_struct data;
	unsigned long flags;
	unsigned int cnt;
	int cpus, ret = 0;

	cpus = num_online_cpus() - 1;
	if (cpus < 1)
		return 0;

	data.func = func;
	data.info = info;
	cpumask_copy(&data.started, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &data.started);
	data.wait = wait;
	if (wait)
		data.finished = data.started;

	spin_lock_irqsave(&smp_nmi_call_lock, flags);
	nmi_call_data = &data;
	smp_mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);

	/* Wait for response */
	if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
		for (cnt = 0;
		     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
			     !cpumask_empty(&data.started);
		     cnt++)
			mdelay(1);

		if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
			for (cnt = 0;
			     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
				     !cpumask_empty(&data.finished);
			     cnt++)
				mdelay(1);
		}

		if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
			ret = -ETIMEDOUT;

	} else {
		/* If the timeout value is zero, wait until the cpumask has
		 * been cleared */
		while (!cpumask_empty(&data.started))
			barrier();
		if (wait)
			while (!cpumask_empty(&data.finished))
				barrier();
	}

	spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
	return ret;
}
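
/*
 * Example (cf. smp_send_stop() below): make every other online CPU run a
 * routine, without waiting for them to finish:
 *
 *	smp_nmi_call_function(stop_this_cpu, NULL, 0);
 */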

/**
 * smp_jump_to_debugger - Make other CPUs enter the debugger by sending an IPI
 *
 * Send a non-maskable request to all other CPUs in the system, instructing
 * them to jump into the debugger.  The caller is responsible for checking
 * that the other CPUs responded to the instruction.
 *
 * The caller should make sure that this CPU's debugger IPI is disabled.
 */
void smp_jump_to_debugger(void)
{
	if (num_online_cpus() > 1)
		/* Send a message to all other CPUs */
		send_IPI_allbutself(DEBUGGER_NMI_IPI);
}

/**
 * stop_this_cpu - Callback to stop a CPU.
 * @unused: Callback context (ignored).
 */
void stop_this_cpu(void *unused)
{
	static volatile int stopflag;
	unsigned long flags;

#ifdef CONFIG_GDBSTUB
	/* In case another CPU is single-stepping through smp_send_stop(),
	 * clear procindebug to avoid a deadlock.
	 */
	atomic_set(&procindebug[smp_processor_id()], 0);
#endif /* CONFIG_GDBSTUB */

	flags = arch_local_cli_save();
	set_cpu_online(smp_processor_id(), false);

	while (!stopflag)
		cpu_relax();

	set_cpu_online(smp_processor_id(), true);
	arch_local_irq_restore(flags);
}

/**
 * smp_send_stop - Send a stop request to all CPUs.
 */
void smp_send_stop(void)
{
	smp_nmi_call_function(stop_this_cpu, NULL, 0);
}

/**
 * smp_reschedule_interrupt - Reschedule IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

/**
 * smp_call_function_interrupt - Call function IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
{
	/* generic_smp_call_function_interrupt(); */
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

/**
 * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
 */
void smp_nmi_call_function_interrupt(void)
{
	smp_call_func_t func = nmi_call_data->func;
	void *info = nmi_call_data->info;
	int wait = nmi_call_data->wait;

	/* Notify the initiating CPU that I've grabbed the data and am about
	 * to execute the function
	 */
	smp_mb();
	cpumask_clear_cpu(smp_processor_id(), &nmi_call_data->started);
	(*func)(info);

	if (wait) {
		smp_mb();
		cpumask_clear_cpu(smp_processor_id(),
				  &nmi_call_data->finished);
	}
}

#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
/**
 * smp_ipi_timer_interrupt - Local timer IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
{
	return local_timer_interrupt();
}
#endif

void __init smp_init_cpus(void)
{
	int i;
	for (i = 0; i < NR_CPUS; i++) {
		set_cpu_possible(i, true);
		set_cpu_present(i, true);
	}
}

/**
 * smp_cpu_init - Initialise an AP in start_secondary().
 *
 * For this Application Processor, set up init_mm, initialise the FPU and set
 * up the interrupt levels 0-6.
 */
static void __init smp_cpu_init(void)
{
	unsigned long flags;
	int cpu_id = smp_processor_id();
	u16 tmp16;

	if (cpumask_test_and_set_cpu(cpu_id, &cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
		for (;;)
			local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);

	enter_lazy_tlb(&init_mm, current);

	/* Force FPU initialization */
	clear_using_fpu(current);

	GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

	GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(LOCAL_TIMER_IPI);

	GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(RESCHEDULE_IPI);

#ifdef CONFIG_MN10300_CACHE_ENABLED
	GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(FLUSH_CACHE_IPI);
#endif

	mn10300_ipi_shutdown(SMP_BOOT_IRQ);

	/* Set up the non-maskable call function IPI */
	flags = arch_local_cli_save();
	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
	arch_local_irq_restore(flags);
}

/**
 * smp_prepare_cpu_init - Initialise a CPU in startup_secondary
 *
 * Set up the interrupt level 0-6 settings and initialise the kernel
 * debugger's ICR.
 */
void smp_prepare_cpu_init(void)
{
	int loop;

	/* Set the interrupt vector registers */
	IVAR0 = EXCEP_IRQ_LEVEL0;
	IVAR1 = EXCEP_IRQ_LEVEL1;
	IVAR2 = EXCEP_IRQ_LEVEL2;
	IVAR3 = EXCEP_IRQ_LEVEL3;
	IVAR4 = EXCEP_IRQ_LEVEL4;
	IVAR5 = EXCEP_IRQ_LEVEL5;
	IVAR6 = EXCEP_IRQ_LEVEL6;

	/* Disable all interrupts and set to priority 6 (lowest) */
	for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
		GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;

#ifdef CONFIG_KERNEL_DEBUGGER
	/* initialise the kernel debugger interrupt */
	do {
		unsigned long flags;
		u16 tmp16;

		flags = arch_local_cli_save();
		GxICR(DEBUGGER_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
		tmp16 = GxICR(DEBUGGER_NMI_IPI);
		arch_local_irq_restore(flags);
	} while (0);
#endif
}

/**
 * start_secondary - Activate a secondary CPU (AP)
 * @unused: Thread parameter (ignored).
 */
int __init start_secondary(void *unused)
{
	smp_cpu_init();
	smp_callin();
	while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
		cpu_relax();

	local_flush_tlb();
	preempt_disable();
	smp_online();

#ifdef CONFIG_GENERIC_CLOCKEVENTS
	init_clockevents();
#endif
	cpu_idle();
	return 0;
}

/**
 * smp_prepare_cpus - Boot up secondary CPUs (APs)
 * @max_cpus: Maximum number of CPUs to boot.
 *
 * Call do_boot_cpu, and boot up APs.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int phy_id;

	/* Setup boot CPU information */
	smp_store_cpu_info(0);
	smp_tune_scheduling();

	init_ipi();

	/* If SMP should be disabled, then finish */
	if (max_cpus == 0) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		goto smp_done;
	}

	/* Boot secondary CPUs (for which phy_id > 0) */
	for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
		/* Don't boot the primary CPU */
		if (max_cpus <= cpucount + 1)
			continue;
		if (phy_id != 0)
			do_boot_cpu(phy_id);
		set_cpu_possible(phy_id, true);
		smp_show_cpu_info(phy_id);
	}

smp_done:
	Dprintk("Boot done.\n");
}

/**
 * smp_store_cpu_info - Save a CPU's information
 * @cpu: The CPU to save for.
 *
 * Save boot_cpu_data and jiffy for the specified CPU.
 */
static void __init smp_store_cpu_info(int cpu)
{
	struct mn10300_cpuinfo *ci = &cpu_data[cpu];

	*ci = boot_cpu_data;
	ci->loops_per_jiffy = loops_per_jiffy;
	ci->type = CPUREV;
}

/**
 * smp_tune_scheduling - Set time slice value
 *
 * Nothing to do here.
 */
static void __init smp_tune_scheduling(void)
{
}

/**
 * do_boot_cpu - Boot up one CPU
 * @phy_id: Physical ID of the CPU to boot.
 *
 * Send an IPI to a secondary CPU to boot it.  Returns 0 on success, 1
 * otherwise.
 */
static int __init do_boot_cpu(int phy_id)
{
	struct task_struct *idle;
	unsigned long send_status, callin_status;
	int timeout, cpu_id;

	send_status = GxICR_REQUEST;
	callin_status = 0;
	timeout = 0;
	cpu_id = phy_id;

	cpucount++;

	/* Create an idle thread for this CPU */
	idle = fork_idle(cpu_id);
	if (IS_ERR(idle))
		panic("Failed fork for CPU#%d.", cpu_id);

	idle->thread.pc = (unsigned long)start_secondary;

	printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
	start_stack[cpu_id - 1] = idle->thread.sp;

	task_thread_info(idle)->cpu = cpu_id;

	/* Send boot IPI to AP */
	send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);

	Dprintk("Waiting for send to finish...\n");

	/* Wait up to 100ms for the AP to receive the IPI */
	do {
		udelay(1000);
		send_status =
			CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
	} while (send_status == GxICR_REQUEST && timeout++ < 100);

	Dprintk("Waiting for cpu_callin_map.\n");

	if (send_status == 0) {
		/* Allow the AP to start initializing */
		cpumask_set_cpu(cpu_id, &cpu_callout_map);

		/* Wait up to 5s for the AP to set itself in cpu_callin_map */
		timeout = 0;
		do {
			udelay(1000);
			callin_status = cpumask_test_cpu(cpu_id,
							 &cpu_callin_map);
		} while (callin_status == 0 && timeout++ < 5000);

		if (callin_status == 0)
			Dprintk("Not responding.\n");
	} else {
		printk(KERN_WARNING "IPI not delivered.\n");
	}

	if (send_status == GxICR_REQUEST || callin_status == 0) {
		cpumask_clear_cpu(cpu_id, &cpu_callout_map);
		cpumask_clear_cpu(cpu_id, &cpu_callin_map);
		cpumask_clear_cpu(cpu_id, &cpu_initialized);
		cpucount--;
		return 1;
	}
	return 0;
}
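
/*
 * The boot handshake above pairs with smp_callin() below: the BP sends the
 * boot IPI and polls for the request bit to clear, then sets the AP's bit in
 * cpu_callout_map; the AP spins on that bit in smp_callin() and answers by
 * setting its own bit in cpu_callin_map.
 */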

/**
 * smp_show_cpu_info - Show SMP CPU information
 * @cpu: The CPU of interest.
 */
static void __init smp_show_cpu_info(int cpu)
{
	struct mn10300_cpuinfo *ci = &cpu_data[cpu];

	printk(KERN_INFO
	       "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
	       cpu,
	       MN10300_IOCLK / 1000000,
	       (MN10300_IOCLK / 10000) % 100,
	       ci->loops_per_jiffy / (500000 / HZ),
	       (ci->loops_per_jiffy / (5000 / HZ)) % 100);
}

/**
 * smp_callin - Set cpu_callin_map of the current CPU ID
 */
static void __init smp_callin(void)
{
	unsigned long timeout;
	int cpu;

	cpu = smp_processor_id();
	timeout = jiffies + (2 * HZ);

	if (cpumask_test_cpu(cpu, &cpu_callin_map)) {
		printk(KERN_ERR "CPU#%d already present.\n", cpu);
		BUG();
	}
	Dprintk("CPU#%d waiting for CALLOUT\n", cpu);

	/* Wait up to 2s total for AP startup */
	while (time_before(jiffies, timeout)) {
		if (cpumask_test_cpu(cpu, &cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		printk(KERN_ERR
		       "BUG: CPU#%d started up but did not get a callout!\n",
		       cpu);
		BUG();
	}

#ifdef CONFIG_CALIBRATE_DELAY
	calibrate_delay();		/* Get our bogomips */
#endif

	/* Save our processor parameters */
	smp_store_cpu_info(cpu);

	/* Allow the boot processor to continue */
	cpumask_set_cpu(cpu, &cpu_callin_map);
}

/**
 * smp_online - Set cpu_online_mask
 */
static void __init smp_online(void)
{
	int cpu;

	cpu = smp_processor_id();

	local_irq_enable();

	set_cpu_online(cpu, true);
	smp_wmb();
}

/**
 * smp_cpus_done - Finish the SMP boot sequence
 * @max_cpus: Maximum CPU count.
 *
 * Do nothing.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * smp_prepare_boot_cpu - Set up stuff for the boot processor.
 *
 * Set up the cpu_online_mask, cpu_callout_map and cpu_callin_map of the boot
 * processor (CPU 0).
 */
void __devinit smp_prepare_boot_cpu(void)
{
	cpumask_set_cpu(0, &cpu_callout_map);
	cpumask_set_cpu(0, &cpu_callin_map);
	current_thread_info()->cpu = 0;
}

/*
 * initialize_secondary - Initialise a secondary CPU (Application Processor).
 *
 * Set the SP register and jump to the thread's PC address.
 */
void initialize_secondary(void)
{
	asm volatile (
		"mov	%0,sp	\n"
		"jmp	(%1)	\n"
		:
		: "a"(current->thread.sp), "a"(current->thread.pc));
}

/**
 * __cpu_up - Set smp_commenced_mask for the nominated CPU
 * @cpu: The target CPU.
 */
int __devinit __cpu_up(unsigned int cpu)
{
	int timeout;

#ifdef CONFIG_HOTPLUG_CPU
	if (num_online_cpus() == 1)
		disable_hlt();
	if (sleep_mode[cpu])
		run_wakeup_cpu(cpu);
#endif /* CONFIG_HOTPLUG_CPU */

	cpumask_set_cpu(cpu, &smp_commenced_mask);

	/* Wait 5s total for a response */
	for (timeout = 0; timeout < 5000; timeout++) {
		if (cpu_online(cpu))
			break;
		udelay(1000);
	}

	BUG_ON(!cpu_online(cpu));
	return 0;
}

/**
 * setup_profiling_timer - Set up the profiling timer
 * @multiplier: The frequency multiplier to use
 *
 * The frequency of the profiling timer can be changed by writing a multiplier
 * value into /proc/profile.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/*
 * CPU hotplug routines
 */
#ifdef CONFIG_HOTPLUG_CPU

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu, ret;

	for_each_present_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
		if (ret)
			printk(KERN_WARNING
			       "topology_init: register_cpu %d failed (%d)\n",
			       cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	migrate_irqs();
	cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	run_sleep_cpu(cpu);

	if (num_online_cpus() == 1)
		enable_hlt();
}

#ifdef CONFIG_MN10300_CACHE_ENABLED
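/*
 * Cache control helpers for the hotplug path.  These poke the CHCTR register
 * directly: disable clears the I/D-cache enable bits and spins until the busy
 * bits drop, enable sets the enable bits again, and invalidate sets the
 * invalidate bits.
 */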
static inline void hotplug_cpu_disable_cache(void)
{
	int tmp;
	asm volatile(
		"	movhu	(%1),%0	\n"
		"	and	%2,%0	\n"
		"	movhu	%0,(%1)	\n"
		"1:	movhu	(%1),%0	\n"
		"	btst	%3,%0	\n"
		"	bne	1b	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
		  "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
		: "memory", "cc");
}

static inline void hotplug_cpu_enable_cache(void)
{
	int tmp;
	asm volatile(
		"movhu	(%1),%0	\n"
		"or	%2,%0	\n"
		"movhu	%0,(%1)	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(CHCTR_ICEN | CHCTR_DCEN)
		: "memory", "cc");
}

static inline void hotplug_cpu_invalidate_cache(void)
{
	int tmp;
	asm volatile (
		"movhu	(%1),%0	\n"
		"or	%2,%0	\n"
		"movhu	%0,(%1)	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(CHCTR_ICINV | CHCTR_DCINV)
		: "cc");
}

#else /* CONFIG_MN10300_CACHE_ENABLED */
#define hotplug_cpu_disable_cache()	do {} while (0)
#define hotplug_cpu_enable_cache()	do {} while (0)
#define hotplug_cpu_invalidate_cache()	do {} while (0)
#endif /* CONFIG_MN10300_CACHE_ENABLED */

/**
 * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
 * @cpumask: List of target CPUs.
 * @func: The function to call on those CPUs.
 * @info: The context data for the function to be called.
 * @wait: Whether to wait for the calls to complete.
 *
 * Non-maskably call a function on another CPU for hotplug purposes.
 *
 * This function must be called with maskable interrupts disabled.
 */
static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
					 smp_call_func_t func, void *info,
					 int wait)
{
	/*
	 * The address and the size of nmi_call_func_mask_data
	 * need to be aligned on L1_CACHE_BYTES.
	 */
	static struct nmi_call_data_struct nmi_call_func_mask_data
		__cacheline_aligned;
	unsigned long start, end;

	start = (unsigned long)&nmi_call_func_mask_data;
	end = start + sizeof(struct nmi_call_data_struct);

	nmi_call_func_mask_data.func = func;
	nmi_call_func_mask_data.info = info;
	nmi_call_func_mask_data.started = cpumask;
	nmi_call_func_mask_data.wait = wait;
	if (wait)
		nmi_call_func_mask_data.finished = cpumask;

	spin_lock(&smp_nmi_call_lock);
	nmi_call_data = &nmi_call_func_mask_data;
	mn10300_local_dcache_flush_range(start, end);
	smp_wmb();

	send_IPI_mask(&cpumask, CALL_FUNCTION_NMI_IPI);

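	/*
	 * Poll for the other CPUs to clear their bits in ->started (and, if
	 * waiting, ->finished).  A CPU heading for sleep may already be
	 * running with its caches disabled (see prepare_sleep_cpu()), so the
	 * local D-cache is invalidated over the data before each re-read to
	 * pick up its update.
	 */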
	do {
		mn10300_local_dcache_inv_range(start, end);
		barrier();
	} while (!cpumask_empty(&nmi_call_func_mask_data.started));

	if (wait) {
		do {
			mn10300_local_dcache_inv_range(start, end);
			barrier();
		} while (!cpumask_empty(&nmi_call_func_mask_data.finished));
	}

	spin_unlock(&smp_nmi_call_lock);
	return 0;
}

static void restart_wakeup_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	cpumask_set_cpu(cpu, &cpu_callin_map);
	local_flush_tlb();
	set_cpu_online(cpu, true);
	smp_wmb();
}

static void prepare_sleep_cpu(void *unused)
{
	sleep_mode[smp_processor_id()] = 1;
	smp_mb();
	mn10300_local_dcache_flush_inv();
	hotplug_cpu_disable_cache();
	hotplug_cpu_invalidate_cache();
}

/* When this function is called, IE=0 and NMID=0. */
static void sleep_cpu(void *unused)
{
	unsigned int cpu_id = smp_processor_id();
	/*
	 * CALL_FUNCTION_NMI_IPI for wakeup_cpu() must not be requested
	 * before this CPU goes into SLEEP mode.
	 */
	do {
		smp_mb();
		__sleep_cpu();
	} while (sleep_mode[cpu_id]);
	restart_wakeup_cpu();
}
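
/*
 * run_sleep_cpu() below makes two NMI calls on the target CPU: the first is
 * waited for, letting the CPU flush, disable and invalidate its caches in
 * prepare_sleep_cpu(); the second, which actually puts the CPU to sleep, is
 * not waited for, as the target goes to sleep rather than signalling
 * completion.
 */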

static void run_sleep_cpu(unsigned int cpu)
{
	unsigned long flags;
	cpumask_t cpumask;

	cpumask_copy(&cpumask, cpumask_of(cpu));
	flags = arch_local_cli_save();
	hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
	hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
	udelay(1);		/* delay for the CPU to sleep */
	arch_local_irq_restore(flags);
}

static void wakeup_cpu(void *unused)
{
	hotplug_cpu_invalidate_cache();
	hotplug_cpu_enable_cache();
	smp_mb();
	sleep_mode[smp_processor_id()] = 0;
}

static void run_wakeup_cpu(unsigned int cpu)
{
	unsigned long flags;

	flags = arch_local_cli_save();
#if NR_CPUS == 2
	mn10300_local_dcache_flush_inv();
#else
	/*
	 * Before waking up the CPU, all online CPUs should stop and flush
	 * the D-cache for global data.
	 */
#error NR_CPUS > 2 is not supported when CONFIG_HOTPLUG_CPU=y.
#endif
	hotplug_cpu_nmi_call_function(*cpumask_of(cpu), wakeup_cpu, NULL, 1);
	arch_local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG_CPU */