/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
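/*
 * tick_broadcast_mask tracks the cpus whose local clock-event device
 * is currently replaced by the broadcast device; tmpmask is scratch
 * space, only used with tick_broadcast_lock held.
 */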
/* FIXME: Use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
static DECLARE_BITMAP(tmpmask, NR_CPUS);
static DEFINE_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}
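
/*
 * Exposed for debugging: see timer_list.c
 */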
struct cpumask *tick_get_broadcast_mask(void)
{
	return to_cpumask(tick_broadcast_mask);
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as a broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	if ((tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(NULL, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_get_broadcast_mask()))
		tick_broadcast_start_periodic(dev);
	return 1;
}

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

/*
 * Check if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpumask_set_cpu(cpu, tick_get_broadcast_mask());
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();

			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
			tick_broadcast_clear_oneshot(cpu);
		}
	}
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

/*
 * Broadcast the event to the cpus which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	spin_lock(&tick_broadcast_lock);

	cpumask_and(to_cpumask(tmpmask),
		    cpu_online_mask, tick_get_broadcast_mask());
	tick_do_broadcast(to_cpumask(tmpmask));

	spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_get_broadcast_mask());

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
			cpumask_set_cpu(cpu, tick_get_broadcast_mask());
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_get_broadcast_mask())) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_get_broadcast_mask());

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_get_broadcast_mask()))
			clockevents_shutdown(bc);
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
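
/*
 * Shut down the broadcast device on system suspend; it is brought
 * back by tick_resume_broadcast().
 */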
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
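
/*
 * Resume the broadcast device on system resume. Returns nonzero when
 * the current cpu gets its events from the broadcast device, i.e. the
 * caller must not resume the cpu local device.
 */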
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_get_broadcast_mask()))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_get_broadcast_mask());
			break;
		case TICKDEV_MODE_ONESHOT:
			broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}

#ifdef CONFIG_TICK_ONESHOT

/* FIXME: use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return to_cpumask(tick_broadcast_oneshot_mask);
}
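
/*
 * Set the next event of the broadcast device; @expires and @force are
 * passed on to tick_dev_program_event().
 */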
static int tick_broadcast_set_event(ktime_t expires, int force)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return tick_dev_program_event(bc, expires, force);
}
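
/*
 * Switch the broadcast device back to oneshot mode after a resume.
 */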
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu;

	spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(to_cpumask(tmpmask));
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64)
			cpumask_set_cpu(cpu, to_cpumask(tmpmask));
		else if (td->evtdev->next_event.tv64 < next_event.tv64)
			next_event.tv64 = td->evtdev->next_event.tv64;
	}

	/*
	 * Wake up the cpus which have an expired event.
	 */
	tick_do_broadcast(to_cpumask(tmpmask));

	/*
	 * Two reasons to reprogram:
	 *
	 * - The global event did not expire any CPU local
	 *   events. This happens in dyntick mode, as the maximum PIT
	 *   delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 *   in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(next_event, 0))
			goto again;
	}
	spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		goto out;

	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
			cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			if (dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(dev->next_event, 1);
		}
	} else {
		if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
			cpumask_clear_cpu(cpu,
					  tick_get_broadcast_oneshot_mask());
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 != KTIME_MAX)
				tick_program_event(dev->next_event, 1);
		}
	}

out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the oneshot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}
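
/*
 * Set the expiry time of the cpu local devices of all cpus in @mask
 * to @expires.
 */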
static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	/* Set it up only once! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
		int cpu = smp_processor_id();

		bc->event_handler = tick_handle_oneshot_broadcast;
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

		/* Take the do_timer update */
		tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
		cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
		cpumask_or(tick_get_broadcast_oneshot_mask(),
			   tick_get_broadcast_oneshot_mask(),
			   to_cpumask(tmpmask));

		if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
			tick_broadcast_init_next_event(to_cpumask(tmpmask),
						       tick_next_period);
			tick_broadcast_set_event(tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

#endif