/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;

/*
 * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
 * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
 * variable has two functions:
 *
 * 1) Prevent a thundering herd issue of a gazillion CPUs trying to grab the
 *    timekeeping lock all at once. Only the CPU which is assigned to do the
 *    update is handling it.
 *
 * 2) Hand off the duty in the NOHZ idle case by setting the value to
 *    TICK_DO_TIMER_NONE, i.e. a non-existing CPU. So the next CPU which looks
 *    at it will take over and keep timekeeping alive. The handover
 *    procedure also covers CPU hotplug.
 */
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
        return &per_cpu(tick_cpu_device, cpu);
}

/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
        struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

        if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                return 0;
        if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 1;
        /*
         * The device stops in deep idle states (C3STOP), so oneshot
         * mode is only usable if the broadcast device can take over:
         */
        return tick_broadcast_oneshot_available();
}

/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
        if (tick_do_timer_cpu == cpu) {
                write_seqlock(&jiffies_lock);

                /* Keep track of the next tick event */
                tick_next_period = ktime_add(tick_next_period, tick_period);

                do_timer(1);
                write_sequnlock(&jiffies_lock);
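                /* The wall time update runs after jiffies_lock is dropped */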
                update_wall_time();
        }

        update_process_times(user_mode(get_irq_regs()));
        profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
        int cpu = smp_processor_id();
        ktime_t next = dev->next_event;

        tick_periodic(cpu);

        if (dev->state != CLOCK_EVT_STATE_ONESHOT)
                return;
        for (;;) {
                /*
                 * Set up the next period for devices which do not have
                 * periodic mode:
                 */
                next = ktime_add(next, tick_period);

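                /*
                 * clockevents_program_event() returns 0 on success; a
                 * nonzero result means the requested expiry time is
                 * already in the past, so advance by another period
                 * and retry.
                 */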
                if (!clockevents_program_event(dev, next, false))
                        return;
                /*
                 * Have to be careful here. If we're in oneshot mode,
                 * before we call tick_periodic() in a loop, we need
                 * to be sure we're using a real hardware clocksource.
                 * Otherwise we could get trapped in an infinite
                 * loop, as tick_periodic() increments jiffies,
                 * which then will increment time, possibly causing
                 * the loop to trigger again and again.
                 */
                if (timekeeping_valid_for_hres())
                        tick_periodic(cpu);
        }
}

/*
 * Set up the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
        tick_set_periodic_handler(dev, broadcast);

        /* Broadcast setup? */
        if (!tick_device_is_functional(dev))
                return;

        if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
            !tick_broadcast_oneshot_active()) {
                clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);
        } else {
                unsigned long seq;
                ktime_t next;

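                /* Read the next tick time consistently under the jiffies seqlock */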
                do {
                        seq = read_seqbegin(&jiffies_lock);
                        next = tick_next_period;
                } while (read_seqretry(&jiffies_lock, seq));

                clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);

                for (;;) {
                        if (!clockevents_program_event(dev, next, false))
                                return;
                        next = ktime_add(next, tick_period);
                }
        }
}

/*
 * Set up the tick device
 */
static void tick_setup_device(struct tick_device *td,
                              struct clock_event_device *newdev, int cpu,
                              const struct cpumask *cpumask)
{
        ktime_t next_event;
        void (*handler)(struct clock_event_device *) = NULL;

        /*
         * First device setup?
         */
        if (!td->evtdev) {
                /*
                 * If no CPU took the do_timer update, assign it to
                 * this CPU:
                 */
                if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
                        if (!tick_nohz_full_cpu(cpu))
                                tick_do_timer_cpu = cpu;
                        else
                                tick_do_timer_cpu = TICK_DO_TIMER_NONE;
                        tick_next_period = ktime_get();
                        tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
                }

                /*
                 * Start up in periodic mode first.
                 */
                td->mode = TICKDEV_MODE_PERIODIC;
        } else {
                handler = td->evtdev->event_handler;
                next_event = td->evtdev->next_event;
                td->evtdev->event_handler = clockevents_handle_noop;
        }

        td->evtdev = newdev;

        /*
         * When the device is not per-CPU, pin the interrupt to the
         * current CPU:
         */
        if (!cpumask_equal(newdev->cpumask, cpumask))
                irq_set_affinity(newdev->irq, cpumask);

        /*
         * When global broadcasting is active, check if the current
         * device is registered as a placeholder for broadcast mode.
         * This allows us to handle this x86 misfeature in a generic
         * way. This function also returns != 0 when we keep the
         * current active broadcast state for this CPU.
         */
        if (tick_device_uses_broadcast(newdev, cpu))
                return;

        if (td->mode == TICKDEV_MODE_PERIODIC)
                tick_setup_periodic(newdev, 0);
        else
                tick_setup_oneshot(newdev, handler, next_event);
}

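/*
 * Swap the tick device on the current CPU over to @newdev: hand the
 * old device back to the clockevents layer and set the new one up in
 * the mode the tick device is currently in.
 */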
void tick_install_replacement(struct clock_event_device *newdev)
{
        struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
        int cpu = smp_processor_id();

        clockevents_exchange_device(td->evtdev, newdev);
        tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
        if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
                tick_oneshot_notify();
}

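/*
 * Check whether @newdev is usable as the per-CPU tick device for @cpu:
 * it must cover this CPU and either be strictly CPU-local or have a
 * steerable interrupt, and it must not displace an already CPU-local
 * current device.
 */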
static bool tick_check_percpu(struct clock_event_device *curdev,
                              struct clock_event_device *newdev, int cpu)
{
        if (!cpumask_test_cpu(cpu, newdev->cpumask))
                return false;
        if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
                return true;
        /* Check if irq affinity can be set */
        if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
                return false;
        /* Prefer an existing CPU-local device */
        if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
                return false;
        return true;
}

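/*
 * Decide whether @newdev is preferable to @curdev: oneshot-capable
 * devices win over periodic-only ones; otherwise the rating and the
 * CPU locality of the devices decide.
 */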
static bool tick_check_preferred(struct clock_event_device *curdev,
                                 struct clock_event_device *newdev)
{
        /* Prefer oneshot capable device */
        if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
                if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
                        return false;
                if (tick_oneshot_mode_active())
                        return false;
        }

        /*
         * Use the higher rated one, but prefer a CPU-local device with a
         * lower rating than a non-CPU-local device.
         */
        return !curdev ||
               newdev->rating > curdev->rating ||
               !cpumask_equal(curdev->cpumask, newdev->cpumask);
}

/*
 * Check whether the new device is a better fit than curdev. curdev
 * can be NULL!
 */
bool tick_check_replacement(struct clock_event_device *curdev,
                            struct clock_event_device *newdev)
{
        if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
                return false;

        return tick_check_preferred(curdev, newdev);
}

/*
 * Check whether the newly registered device should be used. Called with
 * clockevents_lock held and interrupts disabled.
 */
void tick_check_new_device(struct clock_event_device *newdev)
{
        struct clock_event_device *curdev;
        struct tick_device *td;
        int cpu;

        cpu = smp_processor_id();
        if (!cpumask_test_cpu(cpu, newdev->cpumask))
                goto out_bc;

        td = &per_cpu(tick_cpu_device, cpu);
        curdev = td->evtdev;

        /* CPU-local device? */
        if (!tick_check_percpu(curdev, newdev, cpu))
                goto out_bc;

        /* Preference decision */
        if (!tick_check_preferred(curdev, newdev))
                goto out_bc;

        if (!try_module_get(newdev->owner))
                return;

        /*
         * Replace the existing device, if any, by the new device. If
         * the current device is the broadcast device, do not give it
         * back to the clockevents layer!
         */
        if (tick_is_broadcast_device(curdev)) {
                clockevents_shutdown(curdev);
                curdev = NULL;
        }
        clockevents_exchange_device(curdev, newdev);
        tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
        if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
                tick_oneshot_notify();
        return;

out_bc:
        /*
         * Can the new device be used as a broadcast device?
         */
        tick_install_broadcast_device(newdev);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Transfer the do_timer job away from a dying CPU.
 *
 * Called with interrupts disabled. No locking required. If
 * tick_do_timer_cpu is owned by this CPU, nothing can change it.
 */
void tick_handover_do_timer(void)
{
        if (tick_do_timer_cpu == smp_processor_id()) {
                int cpu = cpumask_first(cpu_online_mask);

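                /* Hand the duty to the first online CPU, or to nobody if none is left */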
                tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
                        TICK_DO_TIMER_NONE;
        }
}

/*
 * Shut down an event device on a given CPU:
 *
 * This is called on a live CPU when a CPU is dead, so we cannot
 * access the hardware device itself. We just set the mode and
 * remove it from the lists.
 */
void tick_shutdown(unsigned int cpu)
{
        struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
        struct clock_event_device *dev = td->evtdev;

        td->mode = TICKDEV_MODE_PERIODIC;
        if (dev) {
                /*
                 * Prevent the clock events layer from trying to call
                 * the set mode function!
                 */
                dev->state = CLOCK_EVT_STATE_DETACHED;
                dev->mode = CLOCK_EVT_MODE_UNUSED;
                clockevents_exchange_device(dev, NULL);
                dev->event_handler = clockevents_handle_noop;
                td->evtdev = NULL;
        }
}
#endif

/**
 * tick_suspend_local - Suspend the local tick device
 *
 * Called from the local CPU for freeze with interrupts disabled.
 *
 * No locks required. Nothing can change the per-CPU device.
 */
void tick_suspend_local(void)
{
        struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

        clockevents_shutdown(td->evtdev);
}

/**
 * tick_resume_local - Resume the local tick device
 *
 * Called from the local CPU for unfreeze or XEN resume magic.
 *
 * No locks required. Nothing can change the per-CPU device.
 */
void tick_resume_local(void)
{
        struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
        bool broadcast = tick_resume_check_broadcast();

        clockevents_tick_resume(td->evtdev);
        if (!broadcast) {
                if (td->mode == TICKDEV_MODE_PERIODIC)
                        tick_setup_periodic(td->evtdev, 0);
                else
                        tick_resume_oneshot();
        }
}

/**
 * tick_suspend - Suspend the tick and the broadcast device
 *
 * Called from syscore_suspend() via timekeeping_suspend() with only one
 * CPU online and interrupts disabled, or from tick_unfreeze() under
 * tick_freeze_lock.
 *
 * No locks required. Nothing can change the per-CPU device.
 */
void tick_suspend(void)
{
        tick_suspend_local();
        tick_suspend_broadcast();
}

/**
 * tick_resume - Resume the tick and the broadcast device
 *
 * Called from syscore_resume() via timekeeping_resume() with only one
 * CPU online and interrupts disabled.
 *
 * No locks required. Nothing can change the per-CPU device.
 */
void tick_resume(void)
{
        tick_resume_broadcast();
        tick_resume_local();
}

static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;

/**
 * tick_freeze - Suspend the local tick and (possibly) timekeeping.
 *
 * Check if this is the last online CPU executing the function and if so,
 * suspend timekeeping. Otherwise suspend the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_unfreeze().
 * Interrupts must not be enabled before the subsequent %tick_unfreeze().
 */
void tick_freeze(void)
{
        raw_spin_lock(&tick_freeze_lock);

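        /*
         * The last online CPU to freeze suspends timekeeping as well;
         * all other CPUs only stop their local tick.
         */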
        tick_freeze_depth++;
        if (tick_freeze_depth == num_online_cpus())
                timekeeping_suspend();
        else
                tick_suspend_local();

        raw_spin_unlock(&tick_freeze_lock);
}

/**
 * tick_unfreeze - Resume the local tick and (possibly) timekeeping.
 *
 * Check if this is the first CPU executing the function and if so, resume
 * timekeeping. Otherwise resume the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_freeze().
 * Interrupts must not be enabled after the preceding %tick_freeze().
 */
void tick_unfreeze(void)
{
        raw_spin_lock(&tick_freeze_lock);

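        /*
         * The first CPU to get here (freeze depth still equal to the
         * number of online CPUs) resumes timekeeping; all others only
         * restart their local tick.
         */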
        if (tick_freeze_depth == num_online_cpus())
                timekeeping_resume();
        else
                tick_resume_local();

        tick_freeze_depth--;

        raw_spin_unlock(&tick_freeze_lock);
}

/**
 * tick_init - initialize the tick control
 */
void __init tick_init(void)
{
        tick_broadcast_init();
        tick_nohz_init();
}